flow: better memory management
Allocate hash tables for every online cpu, not every possible one.

NUMA-aware allocations.

Don't use a full page on arches where PAGE_SIZE > 1024*sizeof(void *).

misc: __percpu, __read_mostly and __cpuinit annotations.

flow_compare_t is just an "unsigned long".

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 83b6b1f5d1
parent efbc2d7cfa
committed by David S. Miller
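To see the space saving concretely, here is a minimal userspace sketch of the sizing arithmetic (the 64KB page size is an assumption for illustration; the struct and page-size stand-ins are local to the sketch, not kernel code). With hash_shift = 10 the per-cpu table needs 1024 * sizeof(void *) bytes, so on arches with pages larger than that, the old page-order allocation wasted most of a page per cpu:

/* sizing_sketch.c - illustrative only */
#include <stdio.h>

struct hlist_head { void *first; };	/* same layout as the kernel type */

int main(void)
{
	unsigned int hash_shift = 10;		/* as in flow_cache_init() */
	size_t hash_size = 1UL << hash_shift;	/* flow_cache_hash_size(fc) */
	size_t sz = hash_size * sizeof(struct hlist_head);
	size_t page_size = 65536;		/* e.g. a 64KB-page arch */
	unsigned long order = 0;

	/* old scheme: smallest power-of-two number of pages >= sz */
	while ((page_size << order) < sz)
		order++;

	printf("kzalloc_node size: %zu bytes\n", sz);
	printf("__get_free_pages size: %zu bytes (%zu wasted)\n",
	       page_size << order, (page_size << order) - sz);
	return 0;
}

On a 64-bit machine this prints 8192 bytes exact versus 65536 from the page allocator, and kzalloc_node() additionally places the table on the memory node of the cpu that will use it.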
@@ -53,8 +53,7 @@ struct flow_flush_info {
 
 struct flow_cache {
 	u32				hash_shift;
-	unsigned long			order;
-	struct flow_cache_percpu	*percpu;
+	struct flow_cache_percpu __percpu *percpu;
 	struct notifier_block		hotcpu_notifier;
 	int				low_watermark;
 	int				high_watermark;
@@ -64,7 +63,7 @@ struct flow_cache {
 atomic_t flow_cache_genid = ATOMIC_INIT(0);
 EXPORT_SYMBOL(flow_cache_genid);
 static struct flow_cache flow_cache_global;
-static struct kmem_cache *flow_cachep;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(flow_cache_gc_lock);
 static LIST_HEAD(flow_cache_gc_list);
@@ -181,11 +180,7 @@ static u32 flow_hash_code(struct flow_cache *fc,
 		& (flow_cache_hash_size(fc) - 1));
 }
 
-#if (BITS_PER_LONG == 64)
-typedef u64 flow_compare_t;
-#else
-typedef u32 flow_compare_t;
-#endif
+typedef unsigned long flow_compare_t;
 
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
  * important assumptions that we can here, such as alignment and
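The single typedef works because on every Linux target BITS_PER_LONG matches the word size, so "unsigned long" is 32 bits exactly where u32 was chosen and 64 bits where u64 was. A trivial userspace check (flow_compare_t redeclared locally for illustration):

/* compare_width.c - illustrative only */
#include <limits.h>
#include <stdio.h>

typedef unsigned long flow_compare_t;	/* as in the patched flow.c */

int main(void)
{
	/* prints 32 on ILP32 targets and 64 on LP64 targets */
	printf("flow_compare_t is %zu bits\n",
	       sizeof(flow_compare_t) * CHAR_BIT);
	return 0;
}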
@@ -357,62 +352,73 @@ void flow_cache_flush(void)
 	put_online_cpus();
 }
 
-static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
-					  struct flow_cache_percpu *fcp)
+static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
-	fcp->hash_table = (struct hlist_head *)
-		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
-	if (!fcp->hash_table)
-		panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
 
-	fcp->hash_rnd_recalc = 1;
-	fcp->hash_count = 0;
-	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+	if (!fcp->hash_table) {
+		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+		if (!fcp->hash_table) {
+			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
+			return -ENOMEM;
+		}
+		fcp->hash_rnd_recalc = 1;
+		fcp->hash_count = 0;
+		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+	}
+	return 0;
 }
 
-static int flow_cache_cpu(struct notifier_block *nfb,
+static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
 {
 	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
-	int cpu = (unsigned long) hcpu;
+	int res, cpu = (unsigned long) hcpu;
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
 
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		res = flow_cache_cpu_prepare(fc, cpu);
+		if (res)
+			return notifier_from_errno(res);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		__flow_cache_shrink(fc, fcp, 0);
+		break;
+	}
 	return NOTIFY_OK;
 }
 
-static int flow_cache_init(struct flow_cache *fc)
+static int __init flow_cache_init(struct flow_cache *fc)
 {
-	unsigned long order;
 	int i;
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
 	fc->high_watermark = 4 * flow_cache_hash_size(fc);
 
-	for (order = 0;
-	     (PAGE_SIZE << order) <
-		     (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
-	     order++)
-		/* NOTHING */;
-	fc->order = order;
 	fc->percpu = alloc_percpu(struct flow_cache_percpu);
+	if (!fc->percpu)
+		return -ENOMEM;
+
+	for_each_online_cpu(i) {
+		if (flow_cache_cpu_prepare(fc, i))
+			return -ENOMEM;
+	}
+	fc->hotcpu_notifier = (struct notifier_block){
+		.notifier_call = flow_cache_cpu,
+	};
+	register_hotcpu_notifier(&fc->hotcpu_notifier);
 
 	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
 		    (unsigned long) fc);
 	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&fc->rnd_timer);
 
-	for_each_possible_cpu(i)
-		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
-
-	fc->hotcpu_notifier = (struct notifier_block){
-		.notifier_call = flow_cache_cpu,
-	};
-	register_hotcpu_notifier(&fc->hotcpu_notifier);
-
 	return 0;
 }
 
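Note how flow_cache_cpu_prepare() is idempotent: the "if (!fcp->hash_table)" guard lets the same helper serve both the boot-time for_each_online_cpu() loop and later CPU_UP_PREPARE events, and a cpu that goes down and comes back keeps its existing table. A minimal userspace sketch of that control flow (NR_CPUS, prepare() and the arrays are stand-ins local to the sketch):

/* hotplug_sketch.c - illustrative only */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4
#define HASH_SIZE	1024

static void *hash_table[NR_CPUS];	/* stand-in for the per-cpu pointers */

/* idempotent, like flow_cache_cpu_prepare(): allocates at most once per cpu */
static int prepare(int cpu)
{
	if (!hash_table[cpu]) {
		hash_table[cpu] = calloc(HASH_SIZE, sizeof(void *));
		if (!hash_table[cpu])
			return -1;
		printf("cpu %d: table allocated\n", cpu);
	}
	return 0;
}

int main(void)
{
	int cpu;

	/* init path: pretend only cpus 0 and 1 are online */
	for (cpu = 0; cpu < 2; cpu++)
		if (prepare(cpu))
			return 1;

	/* later CPU_UP_PREPARE for cpu 2: same helper, no special case */
	if (prepare(2))
		return 1;

	/* cpu 1 went down and came back: reuses its existing table */
	if (prepare(1))
		return 1;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		free(hash_table[cpu]);
	return 0;
}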