Merge branch 'master' into for-next
Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes.  As alpha in percpu tree uses 'weak' attribute instead of
inline assembly, there's no need for __used attribute.

Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
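Note: the weak-versus-__used point in the message can be seen outside the kernel. Below is a minimal userspace sketch, not kernel code (asm_only_sym and overridable are illustrative names): a symbol referenced only from inline assembly looks dead to the compiler and needs __attribute__((used)) to survive optimization, while a weak C-level definition is an ordinary global symbol and needs no extra annotation. Build with gcc -O2 and inspect the object with nm.

/* Sketch only: illustrates __attribute__((used)) vs __attribute__((weak)). */
static int asm_only_sym __attribute__((used)) = 42; /* kept despite no C reference */

int overridable(void) __attribute__((weak));
int overridable(void) /* weak definition: emitted as a normal symbol, no __used needed */
{
	return 0;
}

int main(void)
{
	return overridable();
}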
net/ipv4/arp.c:
@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
 	 * cache.
 	 */
 
-	/*
-	 * Special case: IPv4 duplicate address detection packet (RFC2131)
-	 * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
-	 */
-	if (sip == 0 || tip == sip) {
+	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
+	if (sip == 0) {
 		if (arp->ar_op == htons(ARPOP_REQUEST) &&
 		    inet_addr_type(net, tip) == RTN_LOCAL &&
 		    !arp_ignore(in_dev, sip, tip))
net/ipv4/fib_trie.c:
@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 				      (struct node *)tn, wasfull);
 
 		tp = node_parent((struct node *) tn);
+		if (!tp)
+			rcu_assign_pointer(t->trie, (struct node *)tn);
+
 		tnode_free_flush();
 		if (!tp)
 			break;
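Note on the fib_trie hunk above: the new root must be published with rcu_assign_pointer() before tnode_free_flush() runs, so a concurrent reader never follows a pointer into freed memory. Below is a standalone sketch of that publish-before-reclaim ordering, using C11 atomics as a stand-in for rcu_assign_pointer/call_rcu; the struct and function names are illustrative, and free() here stands in for the kernel's deferred reclamation (only safe in this single-threaded demo).

#include <stdatomic.h>
#include <stdlib.h>

struct node { int key; };

static _Atomic(struct node *) root;

static void replace_root(struct node *new_root, struct node *old_root)
{
	/* Publish first: release ordering pairs with a reader's acquire load. */
	atomic_store_explicit(&root, new_root, memory_order_release);
	/* Only then reclaim what the old root pointed at. */
	free(old_root);
}

int main(void)
{
	struct node *a = malloc(sizeof(*a));
	struct node *b = malloc(sizeof(*b));
	atomic_store(&root, a);
	replace_root(b, a);
	free(atomic_load(&root));
	return 0;
}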
net/ipv4/ip_input.c:
@@ -440,6 +440,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	/* Remove any debris in the socket control block */
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 
+	/* Must drop socket now because of tproxy. */
+	skb_orphan(skb);
+
 	return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
 		       ip_rcv_finish);
 
net/ipv4/netfilter/nf_nat_helper.c:
@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 					ct, ctinfo);
 		/* Tell TCP window tracking about seq change */
 		nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
-					ct, CTINFO2DIR(ctinfo));
+					ct, CTINFO2DIR(ctinfo),
+					(int)rep_len - (int)match_len);
 
 		nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
 	}
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 	struct tcphdr *tcph;
 	int dir;
 	__be32 newseq, newack;
+	s16 seqoff, ackoff;
 	struct nf_conn_nat *nat = nfct_nat(ct);
 	struct nf_nat_seq *this_way, *other_way;
 
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 
 	tcph = (void *)skb->data + ip_hdrlen(skb);
 	if (after(ntohl(tcph->seq), this_way->correction_pos))
-		newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+		seqoff = this_way->offset_after;
 	else
-		newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+		seqoff = this_way->offset_before;
 
 	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
 		  other_way->correction_pos))
-		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+		ackoff = other_way->offset_after;
 	else
-		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+		ackoff = other_way->offset_before;
+
+	newseq = htonl(ntohl(tcph->seq) + seqoff);
+	newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
 	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
 	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 	if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
 		return 0;
 
-	nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);
+	nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
 
 	return 1;
 }
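Note on the nf_nat_seq_adjust hunks: the patch factors the duplicated htonl(ntohl(...) +/- offset) expressions into seqoff/ackoff, each picked once against correction_pos, which also lets seqoff be passed on to nf_conntrack_tcp_update(). Below is a standalone sketch of the selection arithmetic; the struct and helper names (nf_nat_seq_demo, pick_offset, after32) are illustrative, with after32() as a simplified serial-number comparison of the kind TCP uses.

#include <stdint.h>
#include <stdio.h>

struct nf_nat_seq_demo {
	uint32_t correction_pos;
	int16_t offset_before, offset_after;
};

/* Wrap-safe "a comes after b" comparison on 32-bit sequence numbers. */
static int after32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

static int16_t pick_offset(const struct nf_nat_seq_demo *w, uint32_t seq)
{
	return after32(seq, w->correction_pos) ? w->offset_after
					       : w->offset_before;
}

int main(void)
{
	struct nf_nat_seq_demo w = { .correction_pos = 1000,
				     .offset_before = 0, .offset_after = 8 };
	uint32_t seq = 1500;
	int16_t seqoff = pick_offset(&w, seq);

	printf("seq %u -> %u (seqoff %d)\n",
	       (unsigned)seq, (unsigned)(seq + seqoff), seqoff);
	return 0;
}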
net/ipv4/route.c:
@@ -1085,8 +1085,35 @@ restart:
 	now = jiffies;
 
 	if (!rt_caching(dev_net(rt->u.dst.dev))) {
-		rt_drop(rt);
-		return 0;
+		/*
+		 * If we're not caching, just tell the caller we
+		 * were successful and don't touch the route.  The
+		 * caller hold the sole reference to the cache entry, and
+		 * it will be released when the caller is done with it.
+		 * If we drop it here, the callers have no way to resolve routes
+		 * when we're not caching.  Instead, just point *rp at rt, so
+		 * the caller gets a single use out of the route
+		 * Note that we do rt_free on this new route entry, so that
+		 * once its refcount hits zero, we are still able to reap it
+		 * (Thanks Alexey)
+		 * Note also the rt_free uses call_rcu.  We don't actually
+		 * need rcu protection here, this is just our path to get
+		 * on the route gc list.
+		 */
+
+		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+			int err = arp_bind_neighbour(&rt->u.dst);
+			if (err) {
+				if (net_ratelimit())
+					printk(KERN_WARNING
+					    "Neighbour table failure & not caching routes.\n");
+				rt_drop(rt);
+				return err;
+			}
+		}
+
+		rt_free(rt);
+		goto skip_hashing;
 	}
 
 	rthp = &rt_hash_table[hash].chain;
@@ -1203,7 +1230,8 @@ restart:
 #if RT_CACHE_DEBUG >= 2
 	if (rt->u.dst.rt_next) {
 		struct rtable *trt;
-		printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
+		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
+		       hash, &rt->rt_dst);
 		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");
@@ -1217,6 +1245,8 @@ restart:
 	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
+
+skip_hashing:
 	if (rp)
 		*rp = rt;
 	else
net/ipv4/tcp.c:
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		iov++;
 
 		while (seglen > 0) {
-			int copy;
+			int copy = 0;
+			int max = size_goal;
 
 			skb = tcp_write_queue_tail(sk);
+			if (tcp_send_head(sk)) {
+				if (skb->ip_summed == CHECKSUM_NONE)
+					max = mss_now;
+				copy = max - skb->len;
+			}
 
-			if (!tcp_send_head(sk) ||
-			    (copy = size_goal - skb->len) <= 0) {
-
+			if (copy <= 0) {
 new_segment:
 				/* Allocate new segment. If the interface is SG,
 				 * allocate skb fitting to single page.
@@ -930,6 +934,7 @@ new_segment:
 
 				skb_entail(sk, skb);
 				copy = size_goal;
+				max = size_goal;
 			}
 
 			/* Try to append data to the end of skb. */
@@ -1028,7 +1033,7 @@ new_segment:
 			if ((seglen -= copy) == 0 && iovlen == 0)
 				goto out;
 
-			if (skb->len < size_goal || (flags & MSG_OOB))
+			if (skb->len < max || (flags & MSG_OOB))
 				continue;
 
 			if (forced_push(tp)) {
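Note on the tcp_sendmsg hunks: the new max variable caps how far the tail skb may be filled. When the skb carries no checksum offload (CHECKSUM_NONE) the cap drops from size_goal to one MSS, so non-offloaded data is never grown into a TSO-sized frame. Below is a standalone sketch of that copy-limit decision; the function and enum names are illustrative, not kernel identifiers.

#include <stdio.h>

enum { CHECKSUM_NONE_DEMO = 0, CHECKSUM_PARTIAL_DEMO = 1 };

static int fill_target(int skb_len, int skb_ip_summed,
		       int mss_now, int size_goal)
{
	int max = size_goal;

	if (skb_ip_summed == CHECKSUM_NONE_DEMO)
		max = mss_now;		/* no offload: stay at one MSS */
	return max - skb_len;		/* <= 0 means open a new segment */
}

int main(void)
{
	/* 1000 bytes queued, MSS 1460, size_goal 65536. */
	printf("%d\n", fill_target(1000, CHECKSUM_NONE_DEMO, 1460, 65536));
	printf("%d\n", fill_target(1000, CHECKSUM_PARTIAL_DEMO, 1460, 65536));
	return 0;
}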
net/ipv4/tcp_minisocks.c:
@@ -128,7 +128,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 			goto kill_with_rst;
 
 		/* Dup ACK? */
-		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+		if (!th->ack ||
+		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
 		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
 			inet_twsk_put(tw);
 			return TCP_TW_SUCCESS;
net/ipv4/tcp_output.c:
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	if (skb->len <= mss_now || !sk_can_gso(sk)) {
+	if (skb->len <= mss_now || !sk_can_gso(sk) ||
+	    skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
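Note on the tcp_set_skb_tso_segs hunk: adding the CHECKSUM_NONE test routes non-offloaded skbs through the cheap single-segment path, matching the tcp_sendmsg change above. Below is a standalone sketch of just the segment-count decision; the names are illustrative, and the kernel additionally sets gso_size, which this demo omits.

#include <stdio.h>

static unsigned int tso_segs_demo(unsigned int skb_len, int can_gso,
				  int csum_none, unsigned int mss_now)
{
	if (skb_len <= mss_now || !can_gso || csum_none)
		return 1;				/* cheap path: no divide */
	return (skb_len + mss_now - 1) / mss_now;	/* DIV_ROUND_UP(len, mss) */
}

int main(void)
{
	printf("%u\n", tso_segs_demo(4000, 1, 1, 1460));	/* CHECKSUM_NONE -> 1 */
	printf("%u\n", tso_segs_demo(4000, 1, 0, 1460));	/* offloaded -> 3 */
	return 0;
}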