openvswitch: Restructure datapath.c and flow.c
Over time, datapath.c and flow.c have become pretty large files. The following patch restructures the functionality into three components:

    flow.c: contains flow extract.
    flow_netlink.c: netlink flow API.
    flow_table.c: flow table API.

This patch restructures the code without changing the logic.

Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
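For orientation, the table half of the split ends up with roughly the entry points listed below. These prototypes are read directly off the flow_table.c added in this commit; where they are declared (e.g. a flow_table.h header) is not shown in this excerpt.

/* Flow and flow-table lifecycle (from net/openvswitch/flow_table.c below). */
struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_free(struct sw_flow *flow, bool deferred);

struct flow_table *ovs_flow_tbl_alloc(int new_size);
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);

/* Lookup, insertion, removal and dump iteration. */
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key);
void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
                                       u32 *bucket, u32 *last);

/* Mask (mega-flow) handling. */
struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask);
void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred);
struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask);
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask);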
commit: e64457191a
parent: f0627cfa24
committed by: Jesse Gross
net/openvswitch/flow_table.c | 517 (new file)
@@ -0,0 +1,517 @@
/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
{
        const long *m = (long *)((u8 *)&mask->key + mask->range.start);
        const long *s = (long *)((u8 *)src + mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;

        /* The memory outside of the 'mask->range' are not set since
         * further operations on 'dst' only uses contents within
         * 'mask->range'.
         */
        for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
                *d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&flow->lock);
        flow->sf_acts = NULL;
        flow->mask = NULL;

        return flow;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void flow_free(struct sw_flow *flow)
{
        kfree((struct sf_flow_acts __force *)flow->sf_acts);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        ovs_sw_flow_mask_del_ref(flow->mask, deferred);

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __flow_tbl_destroy(struct flow_table *table)
{
        int i;

        if (table->keep_flows)
                goto skip_flows;

        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(table->buckets, i);
                struct hlist_node *n;
                int ver = table->node_ver;

                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del(&flow->hash_node[ver]);
                        ovs_flow_free(flow, false);
                }
        }

        BUG_ON(!list_empty(table->mask_list));
        kfree(table->mask_list);

skip_flows:
        free_buckets(table->buckets);
        kfree(table);
}

static struct flow_table *__flow_tbl_alloc(int new_size)
{
        struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->buckets = alloc_buckets(new_size);

        if (!table->buckets) {
                kfree(table);
                return NULL;
        }
        table->n_buckets = new_size;
        table->count = 0;
        table->node_ver = 0;
        table->keep_flows = false;
        get_random_bytes(&table->hash_seed, sizeof(u32));
        table->mask_list = NULL;

        return table;
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
        struct flow_table *table = __flow_tbl_alloc(new_size);

        if (!table)
                return NULL;

        table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
        if (!table->mask_list) {
                table->keep_flows = true;
                __flow_tbl_destroy(table);
                return NULL;
        }
        INIT_LIST_HEAD(table->mask_list);

        return table;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct flow_table *table = container_of(rcu, struct flow_table, rcu);

        __flow_tbl_destroy(table);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
        if (!table)
                return;

        if (deferred)
                call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
        else
                __flow_tbl_destroy(table);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = table->node_ver;
        while (*bucket < table->n_buckets) {
                i = 0;
                head = flex_array_get(table->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
        hash = jhash_1word(hash, table->hash_seed);
        return flex_array_get(table->buckets,
                              (hash & (table->n_buckets - 1)));
}

static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(table, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);

        table->count++;
}

static void flow_table_copy_flows(struct flow_table *old,
                                  struct flow_table *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, head, hash_node[old_ver])
                        __tbl_insert(new, flow);
        }

        new->mask_list = old->mask_list;
        old->keep_flows = true;
}

static struct flow_table *__flow_tbl_rehash(struct flow_table *table,
                                            int n_buckets)
{
        struct flow_table *new_table;

        new_table = __flow_tbl_alloc(n_buckets);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        flow_table_copy_flows(table, new_table);

        return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets * 2);
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
{
        u32 *hash_key = (u32 *)((u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure number of hash bytes are multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (long *)((u8 *)key1 + key_start);
        const long *cp2 = (long *)((u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                int key_start, int key_end)
{
        return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct flow_table *table,
                                          const struct sw_flow_key *unmasked,
                                          struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int key_start = mask->range.start;
        int key_end = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, mask);
        hash = flow_hash(&masked_key, key_start, key_end);
        head = find_bucket(table, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
                if (flow->mask == mask &&
                    flow_cmp_masked_key(flow, &masked_key,
                                        key_start, key_end))
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        struct sw_flow *flow = NULL;
        struct sw_flow_mask *mask;

        list_for_each_entry_rcu(mask, tbl->mask_list, list) {
                flow = masked_flow_lookup(tbl, key, mask);
                if (flow)  /* Found */
                        break;
        }

        return flow;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
        flow->hash = flow_hash(&flow->key, flow->mask->range.start,
                               flow->mask->range.end);
        __tbl_insert(table, flow);
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[table->node_ver]);
        table->count--;
}

struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 0;

        return mask;
}

void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
        mask->ref_count++;
}

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
        struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

        kfree(mask);
}

void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
        if (!mask)
                return;

        BUG_ON(!mask->ref_count);
        mask->ref_count--;

        if (!mask->ref_count) {
                list_del_rcu(&mask->list);
                if (deferred)
                        call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
                else
                        kfree(mask);
        }
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        u8 *a_ = (u8 *)&a->key + a->range.start;
        u8 *b_ = (u8 *)&b->key + b->range.start;

        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, tbl->mask_list) {
                struct sw_flow_mask *m;
                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/**
 * add a new mask into the mask list.
 * The caller needs to make sure that 'mask' is not the same
 * as any masks that are already on the list.
 */
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        list_add_rcu(&mask->list, tbl->mask_list);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                       0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}
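As a usage note, the sequence below sketches how a caller such as datapath.c might combine these helpers to install and later look up a flow. It is illustrative only and not part of this commit: the function name example_install_flow is made up, error handling is abbreviated, and the key/mask construction (which this series moves into flow.c and flow_netlink.c) is assumed to have happened already.

/* Hypothetical sketch: drive the flow_table.c API added above.
 * Assumes 'key' and 'new_mask' were already filled in by the flow
 * extraction / netlink code.
 */
static int example_install_flow(struct flow_table *table,
                                const struct sw_flow_key *key,
                                struct sw_flow_mask *new_mask)
{
        struct sw_flow *flow;
        struct sw_flow_mask *mask;

        flow = ovs_flow_alloc();
        if (IS_ERR(flow))
                return PTR_ERR(flow);

        /* Reuse an identical mask if one is already on the table's list,
         * otherwise publish the new one; either way take a reference.
         */
        mask = ovs_sw_flow_mask_find(table, new_mask);
        if (!mask) {
                mask = new_mask;
                ovs_sw_flow_mask_insert(table, mask);
        }
        ovs_sw_flow_mask_add_ref(mask);
        flow->mask = mask;

        /* Keep the unmasked key for exact-match checks, store the masked
         * copy that the hash/compare helpers operate on, then hash the
         * flow into its bucket.
         */
        flow->unmasked_key = *key;
        ovs_flow_mask_key(&flow->key, key, mask);
        ovs_flow_tbl_insert(table, flow);

        /* A later packet with a matching key would be found with
         *   ovs_flow_tbl_lookup(table, key)
         * and torn down with ovs_flow_tbl_remove() + ovs_flow_free().
         */
        return 0;
}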