#include "dr_types.h"
/* With a small CONFIG_FRAME_WARN the on-stack optimized STE buffer in
 * dr_rule_create_rule_nic() would trip the frame-size warning, so the
 * optimization is disabled (0 match STEs on stack) and the heap path is
 * always taken instead.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
#define DR_RULE_MAX_STES_OPTIMIZED 5
#endif
/* Max STEs (match + action) held in the optimized on-stack buffer */
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
/* Append @new_last_ste to the tail of @miss_list: redirect the current
 * tail's miss address to the new entry and queue a CTRL-sized write of
 * the old tail onto @send_list for a later flush to HW.
 * Returns 0 on success, -ENOMEM if the send-info allocation fails.
 */
static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
				       enum mlx5dr_domain_nic_type nic_type,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be chained after the current tail */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
	if (!ste_info_last)
		return -ENOMEM;

	/* Cached copy is updated here; the HW write is deferred via send_list */
	mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(last_ste),
						  ste_info_last, send_list, true);

	return 0;
}
/* Point @hw_ste's miss address at the matcher's end-anchor table, unless
 * the STE context reports a miss address is already set.
 */
static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
					   struct mlx5dr_matcher_rx_tx *nic_matcher,
					   u8 *hw_ste)
{
	struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
	u64 icm_addr;

	if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
		return;

	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
}
/* Allocate a single-entry "don't care" hash table used to resolve hash
 * collisions, and return its one STE. @hw_ste gets its miss address set
 * to the matcher's end-anchor (if not already set). A reference is taken
 * on the new table. Returns NULL on allocation failure.
 */
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}
/* Create a collision STE that takes @orig_ste's place in the chain: it
 * inherits the original's chain location, pointing STE and miss list,
 * and gets a next-hop table of its own. Returns NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}
/* Pop @ste_info off its send list, refresh the STE's cached copy and post
 * the write to HW. The send-info is always freed, on success and failure
 * alike. Returns the postsend result.
 *
 * Fix: the original had "if (ret) goto out;" immediately followed by the
 * "out:" label - a no-op branch; the dead goto/label pair is removed.
 */
static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy data to the cached STE: CTRL-only or the reduced entry,
	 * depending on how much of the STE is being updated.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);

	mlx5dr_send_info_free(ste_info);

	return ret;
}
/* Flush every pending STE write on @send_ste_list to HW, stopping at the
 * first failure. When @is_reverse the list is walked tail-to-head, so the
 * most recently queued entries are written first. Each processed entry's
 * send-info is consumed. Returns 0 or the first write's error.
 */
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/* Look up an STE on @miss_list whose tag equals @hw_ste's tag.
 * Returns the matching STE, or NULL if none is found.
 *
 * Fix: dropped the redundant list_empty() pre-check;
 * list_for_each_entry() is already a no-op on an empty list.
 */
static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
			return ste;
	}

	return NULL;
}
/* Resolve a collision hit while copying entries into a rehashed table:
 * build a collision STE for @hw_ste, share @col_ste's pointing STE and
 * miss list, and append the new STE to that miss list (HW update queued
 * on @update_list). Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous from the list */
	ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					  new_ste, mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed update dup entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
/* Transfer control state from @cur_ste to its relocated copy @new_ste:
 * next-table link (re-pointing the child table back at the copy), chain
 * location and refcount. The owning rule's last-member is updated to the
 * copy only when the copy has no next_htbl (non-forced update).
 */
static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this STE
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}
/* Copy one used STE from the old hash table into @new_htbl during a
 * resize. Rebuilds the full HW STE (mask + reduced copy), re-hashes it
 * into the new table, resolving any collision, and transfers the old
 * STE's control state onto the copy. Collision copies get an explicit
 * full-size HW write queued on @update_list; non-collision copies are
 * written later as part of the whole-table postsend.
 * Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = mlx5dr_send_info_alloc(dmn,
						  nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
/* Relocate every STE on @cur_miss_list into @new_htbl, unlinking each
 * entry from the old list and dropping the old table's reference as it is
 * copied. A mid-copy failure is fatal for the resize (WARN + -EINVAL):
 * already-moved entries are not rolled back.
 */
static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}

	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}
/* Copy every used entry (and its miss list) from @cur_htbl into
 * @new_htbl, flushing the accumulated HW updates after each bucket.
 * Returns 0 on success or a negative errno on the first failure.
 */
static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->chunk->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			goto clean_copy;

		/* In order to decrease the number of allocated ste_send_info
		 * structs, send the current table row now.
		 */
		err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
		if (err) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
			goto clean_copy;
		}
	}

clean_copy:
	return err;
}
/* Grow @cur_htbl to @new_size: allocate the new table, copy all entries
 * into it, write the new table to HW, and finally queue (on @update_list)
 * a CTRL update of the STE that points at the table so HW switches over.
 * For chain location 1 the matcher's start table pointer is also swapped.
 * Returns the new table or NULL on failure (old table left in place).
 */
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	/* Allocate the send-info for the pointing-STE update up front so
	 * the switch-over cannot fail after the copy succeeded.
	 */
	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to HW can fail here; entries already copied to the new
	 * table keep their send-infos on rehash_table_send_list and must
	 * be released on the error path.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
	 * (stes in the new table are not yet pointed at by any rule),
	 * while for the first htbl the chain is connected via its anchor.
	 */
	if (ste_location == 1) {
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* Anchor's hit address now targets the new start table */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->hw_ste_arr,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(ste_to_update),
						  ste_info, update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		mlx5dr_send_info_free(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	mlx5dr_send_info_free(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}
/* Try to grow @cur_htbl one chunk size up (capped by the domain's SW ICM
 * limit). Returns the rehashed table, or NULL when already at the cap or
 * the rehash failed.
 */
static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size next_size;

	next_size = min_t(u32, mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size),
			  dmn->info.max_log_sw_icm_sz);

	/* Already at the maximum allowed size - nothing to grow */
	if (next_size == cur_htbl->chunk->size)
		return NULL;

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, next_size);
}
/* Handle a hash collision during rule insertion: create a collision STE
 * chained onto @ste's miss list and queue both the old tail's CTRL update
 * and the new STE's full write on @send_list. Updates the bucket table's
 * collision/valid counters. Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					new_ste, miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	mlx5dr_send_info_free(ste_info);
	return NULL;
}
/* Release every action member recorded on the rule, dropping the
 * reference taken on each action at add time.
 */
static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *member, *next;

	list_for_each_entry_safe(member, next, &rule->rule_actions_list, list) {
		list_del(&member->list);
		refcount_dec(&member->action->refcount);
		kvfree(member);
	}
}
/* Record @num_actions actions on rule->rule_actions_list, taking a
 * reference on each action. On allocation failure all members added so
 * far are released. Returns 0 or -ENOMEM.
 */
static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}
/* Mark @ste as the rule's last member. Unless @force is set, an STE that
 * already chains to a next table is left untouched.
 */
void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	if (force || !ste->next_htbl) {
		ste->rule_rx_tx = nic_rule;
		nic_rule->last_rule_ste = ste;
	}
}
/* Return the STE that points at @curr_ste's hash table. The pointing STE
 * is stored on the table of the miss list's head entry.
 */
static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
	struct mlx5dr_ste *head;

	head = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
				struct mlx5dr_ste, miss_list_node);

	return head->htbl->pointing_ste;
}
/* Collect the rule's STEs into @ste_arr in reverse order, walking from
 * @curr_ste (the rule's last STE) back to chain location 1. The number of
 * collected STEs is written to @num_of_stes.
 * Returns 0, or -ENOENT when @curr_ste is NULL.
 */
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes)
{
	*num_of_stes = 0;

	if (!curr_ste)
		return -ENOENT;

	for (;;) {
		bool reached_head = curr_ste->ste_chain_location == 1;

		ste_arr[(*num_of_stes)++] = curr_ste;
		curr_ste = dr_rule_get_pointed_ste(curr_ste);

		if (reached_head)
			break;
	}

	return 0;
}
/* Drop the reference of every STE belonging to @nic_rule. STEs are first
 * gathered in reverse chain order, then released from the rule's head
 * towards its tail.
 */
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}
/* Population count of @byte_mask (Kernighan's clear-lowest-set-bit loop) */
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 count = 0;

	for (; byte_mask; count++)
		byte_mask &= byte_mask - 1;

	return count;
}
/* Decide whether @htbl should be resized to the next chunk size: the
 * domain's SW ICM limit must not be reached, the table must be allowed to
 * grow, the byte mask must carry more bits than the current log size can
 * distinguish, and both collisions and non-colliding valid entries must
 * have crossed the increase threshold.
 */
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}
/* Append the rule's action STEs (entries [num_of_builders,
 * new_hw_ste_arr_sz) of @hw_ste_arr) after @last_ste. Each action STE
 * gets its own single-entry table, is linked to its predecessor's hit
 * address, becomes the rule's last member, and has its HW write queued on
 * @send_ste_list. Returns 0 or -ENOMEM.
 *
 * NOTE(review): on failure only the current action_ste is put here;
 * earlier iterations appear to be cleaned by the caller via
 * dr_rule_clean_rule_members()/send-list teardown - confirm.
 */
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
	 * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
	 *    to support the action.
	 */
	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* While free ste we go over the miss list, so add this ste to the list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
							 nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point current STE to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}
/* Take an unused hash bucket entry for the rule: add @ste to the bucket's
 * miss list, set its chain location, create its next-hop table and queue
 * the full STE write on @send_list. Holds a reference on @cur_htbl and
 * bumps its valid-entry count. Returns 0 or -ENOMEM (with all partial
 * state rolled back).
 */
static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	ste->ste_chain_location = ste_location;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	mlx5dr_send_info_free(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}
/* Insert one match STE (@hw_ste) of the rule into @cur_htbl. Hashes into
 * the table and either takes an empty bucket, reuses an existing matching
 * entry (unless it is the rule's last STE, which would be a duplicate
 * rule), grows the table and retries once ("again" after a rehash), or
 * falls back to a collision entry. On a successful rehash the replaced
 * table is returned to the caller through @put_htbl with an extra ref
 * held. Returns the STE used for this chain location or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->chunk->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is last STE in the chain, and has the same tag
			 * it means that all the previous stes are the same,
			 * if so, this rule is duplicated.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize of the hash */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk->size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}

	return ste;
}
/* Verify that, over bytes [s_idx, e_idx), every set bit in @value is also
 * set in @mask. Returns false (with a log line) on the first violation.
 */
static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 idx = s_idx;

	while (idx < e_idx) {
		if (value[idx] & ~mask[idx]) {
			pr_info("Rule parameters contains a value not specified by mask\n");
			return false;
		}
		idx++;
	}

	return true;
}
/* Validate the user-supplied match @value against the matcher's mask:
 * size must be non-zero, u32-aligned and within DR_SZ_MATCH_PARAM, and
 * for every criteria group the matcher declares, the value bytes in that
 * group's range must be covered by the mask. Also fills @param from
 * @value as a side effect. Returns true when the rule is valid.
 */
static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		s_idx = offsetof(struct mlx5dr_match_param, misc5);
		e_idx = min(s_idx + sizeof(param->misc5), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	return true;
}
/* Tear down one NIC (RX or TX) side of a rule under the per-NIC domain
 * lock: release the rule's STEs and, when this was the matcher's last
 * rule, detach the NIC matcher from its table. A rule side that was
 * never populated (no last_rule_ste) is a no-op. Always returns 0.
 */
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	/* Check if this nic rule was actually created, or was it skipped
	 * and only the other type of the RX/TX nic rule was created.
	 */
	if (!nic_rule->last_rule_ste)
		return 0;

	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);

	nic_rule->nic_matcher->rules--;
	if (!nic_rule->nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
						   nic_rule->nic_matcher);

	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}
/* An FDB rule has both an RX and a TX side - tear down each in turn */
static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_rx_tx *sides[] = { &rule->rx, &rule->tx };
	int i;

	for (i = 0; i < ARRAY_SIZE(sides); i++)
		dr_rule_destroy_rule_nic(rule, sides[i]);

	return 0;
}
/* Destroy a rule: remove it from debugfs, tear down the NIC side(s)
 * matching the domain type, release its action references and free it.
 * Returns 0, or -EINVAL for an unknown domain type (rule left intact).
 */
static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	mlx5dr_dbg_rule_del(rule);

	if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
		dr_rule_destroy_rule_nic(rule, &rule->rx);
	} else if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
		dr_rule_destroy_rule_nic(rule, &rule->tx);
	} else if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
		dr_rule_destroy_rule_fdb(rule);
	} else {
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);

	return 0;
}
static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
return DR_RULE_IPV6;
return DR_RULE_IPV4;
}
/* Decide whether this RX/TX side of an FDB rule can be skipped entirely.
 * In FDB, RX only sees uplink traffic and TX only sees non-uplink
 * traffic, so a rule whose source port or flow source pins it to the
 * other side will never match here. Non-FDB domains never skip.
 */
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_domain_nic_type nic_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		bool from_uplink = value->misc.source_port == MLX5_VPORT_UPLINK;

		/* RX matches uplink-sourced traffic only; TX the opposite */
		if (rx != from_uplink)
			return true;
	}

	if (rx)
		return flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	return flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
/* Create one NIC (RX or TX) side of a rule. Builds the match+action STE
 * array (on the stack when it fits DR_RULE_MAX_STE_CHAIN_OPTIMIZED,
 * otherwise on the heap), inserts each match STE into the matcher's hash
 * chain, appends the action STEs, and flushes all queued writes to HW in
 * reverse order so HW never observes a half-connected chain.
 * Returns 0 (including when the side is skipped for FDB) or an errno.
 */
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	bool hw_ste_arr_is_opt;
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto err_unlock;

	/* Use the optimized on-stack buffer when the STE chain fits */
	hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
	if (likely(hw_ste_arr_is_opt)) {
		hw_ste_arr = hw_ste_arr_optimized;
	} else {
		hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
				     DR_STE_SIZE, GFP_KERNEL);
		if (!hw_ste_arr) {
			ret = -ENOMEM;
			goto err_unlock;
		}
	}

	ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto remove_from_nic_tbl;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs, and build dr_ste accordingly.
	 * The loop is over only the builders which are equal or less to the
	 * number of stes, in case we have actions that lives in other stes.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed apply actions\n");
		goto free_rule;
	}
	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	nic_matcher->rules++;

	mlx5dr_domain_nic_unlock(nic_dmn);

	if (unlikely(!hw_ste_arr_is_opt))
		kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		mlx5dr_send_info_free(ste_info);
	}

remove_from_nic_tbl:
	if (!nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);

free_hw_ste:
	if (!hw_ste_arr_is_opt)
		kfree(hw_ste_arr);

err_unlock:
	mlx5dr_domain_nic_unlock(nic_dmn);

	return ret;
}
/* Create the RX and TX sides of an FDB rule. The match param is copied
 * first because the RX creation receives (and may consume) the original;
 * the TX side gets the untouched copy. On TX failure the already-created
 * RX side is torn down.
 */
static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (!ret)
		return 0;

	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}
/* Allocate and build a rule: verify the match value against the matcher
 * mask, record the action references, then create the NIC side(s) per the
 * domain type and register the rule in debugfs. Returns the rule or NULL
 * on any failure (everything rolled back).
 */
static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	INIT_LIST_HEAD(&rule->dbg_node);
	mlx5dr_dbg_rule_add(rule);
	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}
/* Public entry point for rule creation. A reference on the matcher is
 * held for the rule's lifetime and released again if creation fails.
 */
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}
/* Public entry point for rule destruction. Drops the matcher reference
 * taken at creation only when the teardown succeeded.
 */
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret = dr_rule_destroy_rule(rule);

	if (!ret)
		refcount_dec(&matcher->refcount);

	return ret;
}