Merge branch 'master' of /repos/git/net-next-2.6

Conflicts:
	include/net/netfilter/xt_rateest.h
	net/bridge/br_netfilter.c
	net/netfilter/nf_conntrack_core.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
Author: Patrick McHardy <kaber@trash.net>
Date:   2010-06-15 17:31:06 +02:00

531 changed files with 14834 additions and 8681 deletions

net/netfilter/ipvs/ip_vs_xmit.c
View File

@@ -90,10 +90,10 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
 					     &dest->addr.ip);
 				return NULL;
 			}
-			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
+			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst));
 			IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
 				  &dest->addr.ip,
-				  atomic_read(&rt->u.dst.__refcnt), rtos);
+				  atomic_read(&rt->dst.__refcnt), rtos);
 		}
 		spin_unlock(&dest->dst_lock);
 	} else {
@@ -148,10 +148,10 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
 					     &dest->addr.in6);
 				return NULL;
 			}
-			__ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
+			__ip_vs_dst_set(dest, 0, dst_clone(&rt->dst));
 			IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n",
 				  &dest->addr.in6,
-				  atomic_read(&rt->u.dst.__refcnt));
+				  atomic_read(&rt->dst.__refcnt));
 		}
 		spin_unlock(&dest->dst_lock);
 	} else {
@@ -198,7 +198,7 @@ do { \
 	(skb)->ipvs_property = 1;			\
 	skb_forward_csum(skb);				\
 	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
-		(rt)->u.dst.dev, dst_output);		\
+		(rt)->dst.dev, dst_output);		\
 } while (0)
@@ -245,7 +245,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -265,7 +265,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -309,9 +309,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
@@ -323,13 +323,13 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(skb == NULL)) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		return NF_STOLEN;
 	}

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -376,7 +376,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_icmp;

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -388,12 +388,12 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 		goto tx_error_put;

-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -452,9 +452,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_icmp;

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL_PKT(0, pp, skb, 0,
 				 "ip_vs_nat_xmit_v6(): frag needed for");
@@ -465,12 +465,12 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
 		goto tx_error_put;

-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -498,7 +498,7 @@ tx_error:
 	kfree_skb(skb);
 	return NF_STOLEN;
 tx_error_put:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	goto tx_error;
 }
 #endif
@@ -549,9 +549,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
 		goto tx_error_icmp;

-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;

-	mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 	if (mtu < 68) {
 		ip_rt_put(rt);
 		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
@@ -601,7 +601,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/*
 	 *	Push down and install the IPIP header.
@@ -615,7 +615,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	iph->daddr = rt->rt_dst;
 	iph->saddr = rt->rt_src;
 	iph->ttl = old_iph->ttl;
-	ip_select_ident(iph, &rt->u.dst, NULL);
+	ip_select_ident(iph, &rt->dst, NULL);

 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -660,12 +660,12 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!rt)
 		goto tx_error_icmp;

-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;

-	mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
+	mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
 	/* TODO IPv6: do we need this check in IPv6? */
 	if (mtu < 1280) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
 		goto tx_error;
 	}
@@ -674,7 +674,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,

 	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
 	}
@@ -689,7 +689,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		struct sk_buff *new_skb =
 			skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 			kfree_skb(skb);
 			IP_VS_ERR_RL("%s(): no memory\n", __func__);
 			return NF_STOLEN;
@@ -707,7 +707,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/*
 	 *	Push down and install the IPIP header.
@@ -760,7 +760,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_icmp;

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		ip_rt_put(rt);
@@ -780,7 +780,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -813,10 +813,10 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_icmp;

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
 	}
@@ -827,13 +827,13 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(skb == NULL)) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		return NF_STOLEN;
 	}

 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -888,7 +888,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_icmp;

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
@@ -900,12 +900,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!skb_make_writable(skb, offset))
 		goto tx_error_put;

-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;

 	/* drop the old route when skb is not shared */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	ip_vs_nat_icmp(skb, pp, cp, 0);

@@ -963,9 +963,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error_icmp;

 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
@@ -975,12 +975,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!skb_make_writable(skb, offset))
 		goto tx_error_put;

-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;

 	/* drop the old route when skb is not shared */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);

 	ip_vs_nat_icmp_v6(skb, pp, cp, 0);

@@ -1001,7 +1001,7 @@ out:
 	LeaveFunction(10);
 	return rc;
 tx_error_put:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	goto tx_error;
 }
 #endif
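
Every hunk in this file is the same mechanical substitution: struct rtable's dst_entry used to sit inside a one-member union named u, and net-next removed that wrapper, so each rt->u.dst becomes rt->dst. A minimal sketch of the layout change (illustrative type names, neighbouring fields elided; see include/net/route.h for the real definition):

/* before: dst wrapped in a union, a leftover from the days when it
 * shared storage with a struct rtable *rt_next chain pointer */
struct rtable_old {
	union {
		struct dst_entry dst;	/* from include/net/dst.h */
	} u;
	/* ... rt_flags, rt_dst, rt_src, rt_gateway, ... */
};

/* after: dst embedded directly as the first member, so casting
 * between struct rtable * and struct dst_entry * keeps working */
struct rtable_new {
	struct dst_entry dst;
	/* ... rt_flags, rt_dst, rt_src, rt_gateway, ... */
};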

net/netfilter/nf_conntrack_core.c
View File

@@ -619,9 +619,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
 	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
-#ifdef CONFIG_NET_NS
-	ct->ct_net = net;
-#endif
+	write_pnet(&ct->ct_net, net);
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	if (zone) {
 		struct nf_conntrack_zone *nf_ct_zone;
@@ -1385,7 +1383,6 @@ static int nf_conntrack_init_init_net(void)

 	/* Set up fake conntrack: to never be deleted, not in any hashes */
 	for_each_possible_cpu(cpu) {
 		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
-		write_pnet(&ct->ct_net, &init_net);
 		atomic_set(&ct->ct_general.use, 1);
 	}
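
The first hunk replaces the open-coded #ifdef CONFIG_NET_NS assignment with write_pnet(), which hides the conditional inside a helper and compiles to nothing when network namespaces are disabled. Roughly what the helper looked like at the time (a sketch from memory of include/net/net_namespace.h, not quoted from this commit):

#ifdef CONFIG_NET_NS
static inline void write_pnet(struct net **pnet, struct net *net)
{
	*pnet = net;	/* record the owning namespace */
}
#else
/* the macro discards both arguments unevaluated, so callers may name
 * a field (like nf_conn.ct_net) that only exists when CONFIG_NET_NS=y */
#define write_pnet(pnet, net)	do { (void)(net); } while (0)
#endif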

net/netfilter/nf_conntrack_h323_main.c
View File

@@ -734,11 +734,11 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 		if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
 			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
 				if (rt1->rt_gateway == rt2->rt_gateway &&
-				    rt1->u.dst.dev == rt2->u.dst.dev)
+				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
-				dst_release(&rt2->u.dst);
+				dst_release(&rt2->dst);
 			}
-			dst_release(&rt1->u.dst);
+			dst_release(&rt1->dst);
 		}
 		break;
 	}
@@ -753,11 +753,11 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
 				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
 					    sizeof(rt1->rt6i_gateway)) &&
-				    rt1->u.dst.dev == rt2->u.dst.dev)
+				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
-				dst_release(&rt2->u.dst);
+				dst_release(&rt2->dst);
 			}
-			dst_release(&rt1->u.dst);
+			dst_release(&rt1->dst);
 		}
 		break;
 	}

net/netfilter/nf_conntrack_netbios_ns.c
View File

@@ -61,7 +61,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 		goto out;

 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	in_dev = __in_dev_get_rcu(rt->dst.dev);
 	if (in_dev != NULL) {
 		for_primary_ifa(in_dev) {
 			if (ifa->ifa_broadcast == iph->daddr) {

net/netfilter/x_tables.c
View File

@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
 		vfree(info->jumpstack);
 	else
 		kfree(info->jumpstack);

-	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-		vfree(info->stackptr);
-	else
-		kfree(info->stackptr);
+	free_percpu(info->stackptr);

 	kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
 	unsigned int size;
 	int cpu;

-	size = sizeof(unsigned int) * nr_cpu_ids;
-	if (size > PAGE_SIZE)
-		i->stackptr = vmalloc(size);
-	else
-		i->stackptr = kmalloc(size, GFP_KERNEL);
+	i->stackptr = alloc_percpu(unsigned int);
 	if (i->stackptr == NULL)
 		return -ENOMEM;
-	memset(i->stackptr, 0, size);

 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
 	struct xt_table_info *private;
 	struct xt_table *t, *table;

-	ret = xt_jumpstack_alloc(newinfo);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	/* Don't add one object to multiple lists. */
 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 	if (!table) {
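
These hunks swap a hand-rolled per-CPU array (kmalloc'd or vmalloc'd to sizeof(unsigned int) * nr_cpu_ids, then memset to zero) for the dedicated per-cpu allocator: alloc_percpu() hands back zeroed, node-local storage and free_percpu() releases it, so the PAGE_SIZE special-casing disappears. A self-contained sketch of the pattern, with illustrative names rather than the x_tables code itself:

#include <linux/percpu.h>
#include <linux/smp.h>

static unsigned int __percpu *stack_index;	/* hypothetical counter */

static int stack_index_init(void)
{
	stack_index = alloc_percpu(unsigned int);	/* zero-initialised */
	if (stack_index == NULL)
		return -ENOMEM;
	return 0;
}

static void stack_index_bump(void)
{
	/* get_cpu() disables preemption, so the pointer stays valid;
	 * each CPU only ever touches its own copy (no false sharing) */
	unsigned int *idx = per_cpu_ptr(stack_index, get_cpu());

	(*idx)++;
	put_cpu();
}

static void stack_index_exit(void)
{
	free_percpu(stack_index);
}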

net/netfilter/xt_RATEEST.c
View File

@@ -60,13 +60,22 @@ struct xt_rateest *xt_rateest_lookup(const char *name)
 }
 EXPORT_SYMBOL_GPL(xt_rateest_lookup);

+static void xt_rateest_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct xt_rateest, rcu));
+}
+
 void xt_rateest_put(struct xt_rateest *est)
 {
 	mutex_lock(&xt_rateest_mutex);
 	if (--est->refcnt == 0) {
 		hlist_del(&est->list);
 		gen_kill_estimator(&est->bstats, &est->rstats);
-		kfree(est);
+		/*
+		 * gen_estimator est_timer() might access est->lock or bstats,
+		 * wait a RCU grace period before freeing 'est'
+		 */
+		call_rcu(&est->rcu, xt_rateest_free_rcu);
 	}
 	mutex_unlock(&xt_rateest_mutex);
 }
@@ -179,6 +188,7 @@ static int __init xt_rateest_tg_init(void)

 static void __exit xt_rateest_tg_fini(void)
 {
 	xt_unregister_target(&xt_rateest_tg_reg);
+	rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */
 }
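
This is the canonical RCU deferred-free recipe: unlink the object while holding the writer lock, let call_rcu() postpone the kfree() until every CPU has passed through a grace period (so a still-running est_timer() cannot touch freed memory), and place rcu_barrier() in the module exit path so no queued callback can fire after the module text is gone. The general shape, sketched with a hypothetical struct rather than the xt_rateest code itself:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct hlist_node list;
	struct rcu_head rcu;	/* embedded callback handle */
	/* ... payload read by RCU readers ... */
};

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

static void item_put(struct item *it)
{
	hlist_del(&it->list);			/* 1. unpublish under the lock */
	call_rcu(&it->rcu, item_free_rcu);	/* 2. free after a grace period */
}

static void item_module_exit(void)
{
	rcu_barrier();	/* 3. drain all pending item_free_rcu() callbacks */
}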

net/netfilter/xt_TCPMSS.c
View File

@@ -165,8 +165,8 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
 	rcu_read_unlock();

 	if (rt != NULL) {
-		mtu = dst_mtu(&rt->u.dst);
-		dst_release(&rt->u.dst);
+		mtu = dst_mtu(&rt->dst);
+		dst_release(&rt->dst);
 	}
 	return mtu;
 }

net/netfilter/xt_TEE.c
View File

@@ -77,8 +77,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 		return false;

 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
-	skb->dev = rt->u.dst.dev;
+	skb_dst_set(skb, &rt->dst);
+	skb->dev = rt->dst.dev;
 	skb->protocol = htons(ETH_P_IP);
 	return true;
 }