flow_offload: support get multi-subsystem block

Provide a callback list so that the blocks of both the tc and nft subsystems
can be found: each subsystem registers a flow_indr_block_ing_entry on the list,
and the core walks that list when an indirect block needs to be bound or unbound.

Signed-off-by: wenxu <wenxu@ucloud.cn>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
wenxu authored on 2019-08-07 09:13:53 +08:00, committed by David S. Miller
parent 4e481908c5
commit 1150ab0f1b
3 changed files with 55 additions and 15 deletions
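
For context, the sketch below shows how a subsystem would plug into the new callback list. Only struct flow_indr_block_ing_entry, flow_indr_add_block_ing_cb() and flow_indr_del_block_ing_cb() come from this patch; the my_subsys_* identifiers are hypothetical placeholders (the real tc-side registration appears in the net/sched/cls_api.c hunk below).

/* Minimal sketch, not part of this patch: a hypothetical subsystem registering
 * its ingress-block command callback on the shared block_ing_cb_list.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/flow_offload.h>

/* Called by flow_block_ing_cmd() for every registered subsystem whenever an
 * indirect block callback is registered or unregistered for @dev.
 */
static void my_subsys_indr_block_ing_cmd(struct net_device *dev,
					 flow_indr_block_bind_cb_t *cb,
					 void *cb_priv,
					 enum flow_block_command command)
{
	/* Look up this subsystem's block for @dev and bind or unbind @cb,
	 * depending on FLOW_BLOCK_BIND / FLOW_BLOCK_UNBIND in @command.
	 */
}

static struct flow_indr_block_ing_entry my_subsys_block_ing_entry = {
	.cb	= my_subsys_indr_block_ing_cmd,
	.list	= LIST_HEAD_INIT(my_subsys_block_ing_entry.list),
};

static int __init my_subsys_init(void)
{
	flow_indr_add_block_ing_cb(&my_subsys_block_ing_entry);
	return 0;
}

static void __exit my_subsys_exit(void)
{
	flow_indr_del_block_ing_cb(&my_subsys_block_ing_entry);
}

module_init(my_subsys_init);
module_exit(my_subsys_exit);
MODULE_LICENSE("GPL");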

include/net/flow_offload.h

@@ -379,6 +379,15 @@ typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
 					void *cb_priv,
 					enum flow_block_command command);
 
+struct flow_indr_block_ing_entry {
+	flow_indr_block_ing_cmd_t *cb;
+	struct list_head	list;
+};
+
+void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+
+void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 				  flow_indr_block_bind_cb_t *cb,
 				  void *cb_ident);
@@ -395,7 +404,6 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
 				   void *cb_ident);
 
 void flow_indr_block_call(struct net_device *dev,
-			  flow_indr_block_ing_cmd_t *cb,
 			  struct flow_block_offload *bo,
 			  enum flow_block_command command);

net/core/flow_offload.c

@@ -3,6 +3,7 @@
 #include <linux/slab.h>
 #include <net/flow_offload.h>
 #include <linux/rtnetlink.h>
+#include <linux/mutex.h>
 
 struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 {
@@ -282,6 +283,8 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 }
 EXPORT_SYMBOL(flow_block_cb_setup_simple);
 
+static LIST_HEAD(block_ing_cb_list);
+
 static struct rhashtable indr_setup_block_ht;
 
 struct flow_indr_block_cb {
@@ -295,7 +298,6 @@ struct flow_indr_block_dev {
 	struct rhash_head ht_node;
 	struct net_device *dev;
 	unsigned int refcnt;
-	flow_indr_block_ing_cmd_t  *block_ing_cmd_cb;
 	struct list_head cb_list;
 };
@@ -389,6 +391,20 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
 	kfree(indr_block_cb);
 }
 
+static void flow_block_ing_cmd(struct net_device *dev,
+			       flow_indr_block_bind_cb_t *cb,
+			       void *cb_priv,
+			       enum flow_block_command command)
+{
+	struct flow_indr_block_ing_entry *entry;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(entry, &block_ing_cb_list, list) {
+		entry->cb(dev, cb, cb_priv, command);
+	}
+	rcu_read_unlock();
+}
+
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 				  flow_indr_block_bind_cb_t *cb,
 				  void *cb_ident)
@@ -406,9 +422,7 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 	if (err)
 		goto err_dev_put;
 
-	if (indr_dev->block_ing_cmd_cb)
-		indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
-					   indr_block_cb->cb_priv,
-					   FLOW_BLOCK_BIND);
+	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+			   FLOW_BLOCK_BIND);
 
 	return 0;
@@ -448,9 +462,7 @@ void __flow_indr_block_cb_unregister(struct net_device *dev,
 	if (!indr_block_cb)
 		return;
 
-	if (indr_dev->block_ing_cmd_cb)
-		indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
-					   indr_block_cb->cb_priv,
-					   FLOW_BLOCK_UNBIND);
+	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+			   FLOW_BLOCK_UNBIND);
 
 	flow_indr_block_cb_del(indr_block_cb);
@@ -469,7 +481,6 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
 EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
 
 void flow_indr_block_call(struct net_device *dev,
-			  flow_indr_block_ing_cmd_t cb,
 			  struct flow_block_offload *bo,
 			  enum flow_block_command command)
 {
@@ -480,15 +491,29 @@ void flow_indr_block_call(struct net_device *dev,
 	if (!indr_dev)
 		return;
 
-	indr_dev->block_ing_cmd_cb = command == FLOW_BLOCK_BIND
-				     ? cb : NULL;
-
 	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
 		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
 				  bo);
 }
 EXPORT_SYMBOL_GPL(flow_indr_block_call);
 
+static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);
+void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+{
+	mutex_lock(&flow_indr_block_ing_cb_lock);
+	list_add_tail_rcu(&entry->list, &block_ing_cb_list);
+	mutex_unlock(&flow_indr_block_ing_cb_lock);
+}
+EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);
+
+void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+{
+	mutex_lock(&flow_indr_block_ing_cb_lock);
+	list_del_rcu(&entry->list);
+	mutex_unlock(&flow_indr_block_ing_cb_lock);
+}
+EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
+
 static int __init init_flow_indr_rhashtable(void)
 {
 	return rhashtable_init(&indr_setup_block_ht,

net/sched/cls_api.c

@@ -621,7 +621,7 @@ static void tc_indr_block_call(struct tcf_block *block,
 	};
 	INIT_LIST_HEAD(&bo.cb_list);
 
-	flow_indr_block_call(dev, tc_indr_block_get_and_ing_cmd, &bo, command);
+	flow_indr_block_call(dev, &bo, command);
 	tcf_block_setup(block, &bo);
 }
@@ -3183,6 +3183,11 @@ static struct pernet_operations tcf_net_ops = {
 	.size = sizeof(struct tcf_net),
 };
 
+static struct flow_indr_block_ing_entry block_ing_entry = {
+	.cb = tc_indr_block_get_and_ing_cmd,
+	.list = LIST_HEAD_INIT(block_ing_entry.list),
+};
+
 static int __init tc_filter_init(void)
 {
 	int err;
@@ -3195,6 +3200,8 @@ static int __init tc_filter_init(void)
 	if (err)
 		goto err_register_pernet_subsys;
 
+	flow_indr_add_block_ing_cb(&block_ing_entry);
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
 		      RTNL_FLAG_DOIT_UNLOCKED);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,