[INET]: Move tcp_port_rover to inet_hashinfo
Also expose all of the tcp_hashinfo members, i.e. kill the tcp_ehash, etc. macros. This more clearly exposes the already generic functions, and some that need just a bit of work to become generic, as we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6e04e02165
parent 2d8c4ce519
committed by David S. Miller
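For context, a minimal sketch (not verbatim kernel code) of what the change means at a typical port-allocation call site: the rover and its lock move from the tcp_port_rover global and the tcp_portalloc_lock macro into tcp_hashinfo itself. The helper name pick_local_port() and the low/high bounds are illustrative assumptions; only tcp_hashinfo, port_rover and portalloc_lock come from this patch.

	/* Hedged sketch: wrap a rover within [low, high] under the portalloc lock. */
	static inline int pick_local_port(int low, int high)
	{
		int rover;

		spin_lock(&tcp_hashinfo.portalloc_lock);	/* was: tcp_portalloc_lock */
		rover = tcp_hashinfo.port_rover;		/* was: global tcp_port_rover */
		if (++rover < low || rover > high)
			rover = low;
		tcp_hashinfo.port_rover = rover;
		spin_unlock(&tcp_hashinfo.portalloc_lock);

		return rover;
	}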
@@ -117,6 +117,7 @@ struct inet_hashinfo {
 	wait_queue_head_t	lhash_wait;
 	spinlock_t		portalloc_lock;
 	kmem_cache_t		*bind_bucket_cachep;
+	int			port_rover;
 };
 
 static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
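Putting this hunk together with the macros removed from tcp.h below, struct inet_hashinfo now carries all of the state the tcp_* wrappers used to name. A rough sketch of the resulting layout; the member types not visible in the hunk above, and INET_LHTABLE_SIZE, are assumptions, not taken from this patch:

	struct inet_hashinfo {
		struct inet_ehash_bucket	*ehash;		/* was tcp_ehash */
		struct inet_bind_hashbucket	*bhash;		/* was tcp_bhash */
		int				ehash_size;
		int				bhash_size;
		struct hlist_head		listening_hash[INET_LHTABLE_SIZE];
		rwlock_t			lhash_lock;
		atomic_t			lhash_users;
		wait_queue_head_t		lhash_wait;
		spinlock_t			portalloc_lock;
		kmem_cache_t			*bind_bucket_cachep;
		int				port_rover;	/* added by this patch */
	};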
@@ -136,7 +136,7 @@ struct sock_common {
  *	@sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_lingertime: %SO_LINGER l_linger setting
- *	@sk_hashent: hash entry in several tables (e.g. tcp_ehash)
+ *	@sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
@@ -41,19 +41,7 @@
 #endif
 #include <linux/seq_file.h>
 
-extern struct inet_hashinfo tcp_hashinfo;
-#define tcp_ehash (tcp_hashinfo.ehash)
-#define tcp_bhash (tcp_hashinfo.bhash)
-#define tcp_ehash_size (tcp_hashinfo.ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
-#define tcp_bucket_cachep (tcp_hashinfo.bind_bucket_cachep)
-
-extern int tcp_port_rover;
+extern struct inet_hashinfo tcp_hashinfo;
 
 #if (BITS_PER_LONG == 64)
 #define TCP_ADDRCMP_ALIGN_BYTES 8
@@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void);
 
 /* - We may sleep inside this lock.
  * - If sleeping is not required (or called from BH),
- *   use plain read_(un)lock(&tcp_lhash_lock).
+ *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
  */
 
 static inline void tcp_listen_lock(void)
 {
 	/* read_lock synchronizes to candidates to writers */
-	read_lock(&tcp_lhash_lock);
-	atomic_inc(&tcp_lhash_users);
-	read_unlock(&tcp_lhash_lock);
+	read_lock(&tcp_hashinfo.lhash_lock);
+	atomic_inc(&tcp_hashinfo.lhash_users);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 }
 
 static inline void tcp_listen_unlock(void)
 {
-	if (atomic_dec_and_test(&tcp_lhash_users))
-		wake_up(&tcp_lhash_wait);
+	if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
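To illustrate how the lock in this hunk is meant to be used, a hedged sketch of a reader walking the listening hash: tcp_listen_lock() pins lhash_users (the caller may then sleep, as the comment above says) and tcp_listen_unlock() wakes lhash_wait when the last user drops. INET_LHTABLE_SIZE, sk_for_each() and the do_something() callback are assumptions, not part of this patch.

	/* Hedged sketch: iterate every listening socket under the lhash reader scheme. */
	static void walk_listening_sockets(void (*do_something)(struct sock *sk))
	{
		struct sock *sk;
		struct hlist_node *node;
		int i;

		tcp_listen_lock();		/* bump lhash_users; may sleep from here on */
		for (i = 0; i < INET_LHTABLE_SIZE; i++)
			sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i])
				do_something(sk);
		tcp_listen_unlock();		/* wake lhash_wait on last user */
	}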