tcp: replace ehash_size by ehash_mask
Storing the mask (size - 1) instead of the size allows the fast path to be a bit faster.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
c3faca053d
commit
f373b53b5f
@@ -125,7 +125,7 @@ struct inet_hashinfo {
|
|||||||
*/
|
*/
|
||||||
struct inet_ehash_bucket *ehash;
|
struct inet_ehash_bucket *ehash;
|
||||||
spinlock_t *ehash_locks;
|
spinlock_t *ehash_locks;
|
||||||
unsigned int ehash_size;
|
unsigned int ehash_mask;
|
||||||
unsigned int ehash_locks_mask;
|
unsigned int ehash_locks_mask;
|
||||||
|
|
||||||
/* Ok, let's try this, I give up, we do need a local binding
|
/* Ok, let's try this, I give up, we do need a local binding
|
||||||
@@ -158,7 +158,7 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
|
|||||||
struct inet_hashinfo *hashinfo,
|
struct inet_hashinfo *hashinfo,
|
||||||
unsigned int hash)
|
unsigned int hash)
|
||||||
{
|
{
|
||||||
return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
|
return &hashinfo->ehash[hash & hashinfo->ehash_mask];
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline spinlock_t *inet_ehash_lockp(
|
static inline spinlock_t *inet_ehash_lockp(
|
||||||
|
@@ -1060,11 +1060,12 @@ static int __init dccp_init(void)
|
|||||||
for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
|
for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
|
||||||
;
|
;
|
||||||
do {
|
do {
|
||||||
dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
|
unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
|
||||||
sizeof(struct inet_ehash_bucket);
|
sizeof(struct inet_ehash_bucket);
|
||||||
while (dccp_hashinfo.ehash_size &
|
|
||||||
(dccp_hashinfo.ehash_size - 1))
|
while (hash_size & (hash_size - 1))
|
||||||
dccp_hashinfo.ehash_size--;
|
hash_size--;
|
||||||
|
dccp_hashinfo.ehash_mask = hash_size - 1;
|
||||||
dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
|
dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
|
||||||
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
|
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
|
||||||
} while (!dccp_hashinfo.ehash && --ehash_order > 0);
|
} while (!dccp_hashinfo.ehash && --ehash_order > 0);
|
||||||
@@ -1074,7 +1075,7 @@ static int __init dccp_init(void)
|
|||||||
goto out_free_bind_bucket_cachep;
|
goto out_free_bind_bucket_cachep;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
|
for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
|
||||||
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
|
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
|
||||||
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
|
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
|
||||||
}
|
}
|
||||||
@@ -1153,7 +1154,7 @@ static void __exit dccp_fini(void)
|
|||||||
get_order(dccp_hashinfo.bhash_size *
|
get_order(dccp_hashinfo.bhash_size *
|
||||||
sizeof(struct inet_bind_hashbucket)));
|
sizeof(struct inet_bind_hashbucket)));
|
||||||
free_pages((unsigned long)dccp_hashinfo.ehash,
|
free_pages((unsigned long)dccp_hashinfo.ehash,
|
||||||
get_order(dccp_hashinfo.ehash_size *
|
get_order((dccp_hashinfo.ehash_mask + 1) *
|
||||||
sizeof(struct inet_ehash_bucket)));
|
sizeof(struct inet_ehash_bucket)));
|
||||||
inet_ehash_locks_free(&dccp_hashinfo);
|
inet_ehash_locks_free(&dccp_hashinfo);
|
||||||
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
|
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
|
||||||
|
@@ -774,7 +774,7 @@ skip_listen_ht:
|
|||||||
if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
|
if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
for (i = s_i; i < hashinfo->ehash_size; i++) {
|
for (i = s_i; i <= hashinfo->ehash_mask; i++) {
|
||||||
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
|
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
|
||||||
spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
|
spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
|
||||||
struct sock *sk;
|
struct sock *sk;
|
||||||
|
@@ -209,7 +209,7 @@ struct sock * __inet_lookup_established(struct net *net,
|
|||||||
* have wildcards anyways.
|
* have wildcards anyways.
|
||||||
*/
|
*/
|
||||||
unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
|
unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
|
||||||
unsigned int slot = hash & (hashinfo->ehash_size - 1);
|
unsigned int slot = hash & hashinfo->ehash_mask;
|
||||||
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
|
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
@@ -430,7 +430,7 @@ void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
|
|||||||
int h;
|
int h;
|
||||||
|
|
||||||
local_bh_disable();
|
local_bh_disable();
|
||||||
for (h = 0; h < (hashinfo->ehash_size); h++) {
|
for (h = 0; h <= hashinfo->ehash_mask; h++) {
|
||||||
struct inet_ehash_bucket *head =
|
struct inet_ehash_bucket *head =
|
||||||
inet_ehash_bucket(hashinfo, h);
|
inet_ehash_bucket(hashinfo, h);
|
||||||
spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
|
spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
|
||||||
|
@@ -2865,11 +2865,10 @@ void __init tcp_init(void)
|
|||||||
(totalram_pages >= 128 * 1024) ?
|
(totalram_pages >= 128 * 1024) ?
|
||||||
13 : 15,
|
13 : 15,
|
||||||
0,
|
0,
|
||||||
&tcp_hashinfo.ehash_size,
|
|
||||||
NULL,
|
NULL,
|
||||||
|
&tcp_hashinfo.ehash_mask,
|
||||||
thash_entries ? 0 : 512 * 1024);
|
thash_entries ? 0 : 512 * 1024);
|
||||||
tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
|
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
|
||||||
for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
|
|
||||||
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
|
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
|
||||||
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
|
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
|
||||||
}
|
}
|
||||||
@@ -2878,7 +2877,7 @@ void __init tcp_init(void)
|
|||||||
tcp_hashinfo.bhash =
|
tcp_hashinfo.bhash =
|
||||||
alloc_large_system_hash("TCP bind",
|
alloc_large_system_hash("TCP bind",
|
||||||
sizeof(struct inet_bind_hashbucket),
|
sizeof(struct inet_bind_hashbucket),
|
||||||
tcp_hashinfo.ehash_size,
|
tcp_hashinfo.ehash_mask + 1,
|
||||||
(totalram_pages >= 128 * 1024) ?
|
(totalram_pages >= 128 * 1024) ?
|
||||||
13 : 15,
|
13 : 15,
|
||||||
0,
|
0,
|
||||||
@@ -2933,8 +2932,8 @@ void __init tcp_init(void)
|
|||||||
sysctl_tcp_rmem[2] = max(87380, max_share);
|
sysctl_tcp_rmem[2] = max(87380, max_share);
|
||||||
|
|
||||||
printk(KERN_INFO "TCP: Hash tables configured "
|
printk(KERN_INFO "TCP: Hash tables configured "
|
||||||
"(established %d bind %d)\n",
|
"(established %u bind %u)\n",
|
||||||
tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
|
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
|
||||||
|
|
||||||
tcp_register_congestion_control(&tcp_reno);
|
tcp_register_congestion_control(&tcp_reno);
|
||||||
}
|
}
|
||||||
|
@@ -2000,7 +2000,7 @@ static void *established_get_first(struct seq_file *seq)
|
|||||||
struct net *net = seq_file_net(seq);
|
struct net *net = seq_file_net(seq);
|
||||||
void *rc = NULL;
|
void *rc = NULL;
|
||||||
|
|
||||||
for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
|
for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
|
||||||
struct sock *sk;
|
struct sock *sk;
|
||||||
struct hlist_nulls_node *node;
|
struct hlist_nulls_node *node;
|
||||||
struct inet_timewait_sock *tw;
|
struct inet_timewait_sock *tw;
|
||||||
@@ -2061,10 +2061,10 @@ get_tw:
|
|||||||
st->state = TCP_SEQ_STATE_ESTABLISHED;
|
st->state = TCP_SEQ_STATE_ESTABLISHED;
|
||||||
|
|
||||||
/* Look for next non empty bucket */
|
/* Look for next non empty bucket */
|
||||||
while (++st->bucket < tcp_hashinfo.ehash_size &&
|
while (++st->bucket <= tcp_hashinfo.ehash_mask &&
|
||||||
empty_bucket(st))
|
empty_bucket(st))
|
||||||
;
|
;
|
||||||
if (st->bucket >= tcp_hashinfo.ehash_size)
|
if (st->bucket > tcp_hashinfo.ehash_mask)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
|
spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
|
||||||
|
@@ -73,7 +73,7 @@ struct sock *__inet6_lookup_established(struct net *net,
|
|||||||
* have wildcards anyways.
|
* have wildcards anyways.
|
||||||
*/
|
*/
|
||||||
unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
|
unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
|
||||||
unsigned int slot = hash & (hashinfo->ehash_size - 1);
|
unsigned int slot = hash & hashinfo->ehash_mask;
|
||||||
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
|
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
|
||||||
|
|
||||||
|
|
||||||
|
Reference in New Issue
Block a user