Merge branch 'perf/urgent' into perf/core

Conflicts:
	tools/perf/builtin-record.c
	tools/perf/builtin-top.c
	tools/perf/perf.h
	tools/perf/util/top.h

Merge reason: resolve these cherry-picking conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	__be16 dport = 0;		/* destination port to forward */
 	unsigned int flags;
 	struct ip_vs_conn_param param;
+	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	{
 		int protocol = iph.protocol;
 		const union nf_inet_addr *vaddr = &iph.daddr;
-		const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 		__be16 vport = 0;
 
 		if (dst_port == svc->port) {
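Note: the two hunks above appear to move the fwmark declaration from the inner scheduling block up to function scope, so the persistence-template code can see it on every path. The ".ip = htonl(svc->fwmark)" initializer stores the 32-bit mark in network byte order, which lets the template machinery compare it as if it were an IPv4 address. A minimal userspace sketch of that byte-order trick (simplified types and illustrative names, not kernel code):

/* Userspace sketch: models how a 32-bit fwmark is stored in network
 * byte order inside an address union so it can be compared like an
 * IPv4 address.  The union layout here is a simplified assumption. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

union inet_addr_sketch {
	uint32_t ip;		/* IPv4-style slot, network byte order */
	uint32_t ip6[4];	/* IPv6 slot, unused for fwmark */
};

int main(void)
{
	uint32_t fwmark = 0x2a;		/* firewall mark 42 */
	union inet_addr_sketch a = { .ip = htonl(fwmark) };

	/* Comparison works the same way for real addresses and marks. */
	printf("match: %d\n", a.ip == htonl(0x2a));	/* prints 1 */
	return 0;
}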
@@ -404,19 +404,49 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 		   &net->ct.hash[repl_hash]);
 }
 
-void nf_conntrack_hash_insert(struct nf_conn *ct)
+int
+nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, repl_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
 	u16 zone;
 
 	zone = nf_ct_zone(ct);
-	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, zone,
+			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, zone,
+				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
+	spin_lock_bh(&nf_conntrack_lock);
+
+	/* See if there's one in the list already, including reverse */
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
+		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				      &h->tuple) &&
+		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+			goto out;
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				      &h->tuple) &&
+		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+			goto out;
+
+	add_timer(&ct->timeout);
+	nf_conntrack_get(&ct->ct_general);
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+	NF_CT_STAT_INC(net, insert);
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	return 0;
+
+out:
+	NF_CT_STAT_INC(net, insert_failed);
+	spin_unlock_bh(&nf_conntrack_lock);
+	return -EEXIST;
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
 /* Confirm a connection given skb; places it in hash table */
 int
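Note: nf_conntrack_hash_check_insert() replaces the unconditional nf_conntrack_hash_insert(). It scans both the original and reply buckets under nf_conntrack_lock and only then bumps the refcount, arms the timeout and links the entry, returning -EEXIST on a duplicate. The essential pattern is check-then-insert under a single lock, so no identical tuple can race in between the scan and the insertion. A self-contained userspace model of that pattern (illustrative names; a pthread mutex stands in for the spinlock):

/* Userspace model of check-then-insert: the duplicate scan and the
 * insertion happen under one lock, so no identical entry can slip in
 * between.  All names here are illustrative, not kernel APIs. */
#include <pthread.h>
#include <string.h>
#include <errno.h>

#define NBUCKET 64

struct entry {
	char key[16];
	struct entry *next;
};

static struct entry *table[NBUCKET];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int hash_key(const char *key)
{
	unsigned int h = 0;

	while (*key)
		h = h * 31 + (unsigned char)*key++;
	return h % NBUCKET;
}

/* Returns 0 on success, -EEXIST if the key is already present. */
static int hash_check_insert(struct entry *e)
{
	unsigned int b = hash_key(e->key);
	struct entry *cur;

	pthread_mutex_lock(&table_lock);
	for (cur = table[b]; cur; cur = cur->next) {
		if (strcmp(cur->key, e->key) == 0) {
			pthread_mutex_unlock(&table_lock);
			return -EEXIST;
		}
	}
	e->next = table[b];
	table[b] = e;
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	static struct entry a = { .key = "10.0.0.1:80" };
	static struct entry b = { .key = "10.0.0.1:80" };

	hash_check_insert(&a);				/* 0: inserted */
	return hash_check_insert(&b) == -EEXIST ? 0 : 1;	/* duplicate */
}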
@@ -1367,15 +1367,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
 			rcu_read_unlock();
-			spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
-				spin_lock_bh(&nf_conntrack_lock);
 				err = -EOPNOTSUPP;
 				goto err1;
 			}
 
-			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname,
 							    nf_ct_l3num(ct),
@@ -1468,8 +1465,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 	if (tstamp)
 		tstamp->start = ktime_to_ns(ktime_get_real());
 
-	add_timer(&ct->timeout);
-	nf_conntrack_hash_insert(ct);
+	err = nf_conntrack_hash_check_insert(ct);
+	if (err < 0)
+		goto err2;
+
 	rcu_read_unlock();
 
 	return ct;
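Note: ctnetlink_create_conntrack() reports failure through its pointer return value, which is why the callers in the next hunks test IS_ERR(ct) and decode with PTR_ERR(ct); the new insertion path above feeds -EEXIST from nf_conntrack_hash_check_insert() into that same scheme through the err2 unwind label. A userspace model of the kernel's ERR_PTR convention (the three helpers mirror the real ones; create_object() is an illustrative stand-in):

/* Userspace model of ERR_PTR/IS_ERR: one pointer return carries either
 * a valid object or a small negative errno encoded in the pointer. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* The last 4095 pointer values are reserved for errnos. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *create_object(int fail)
{
	static int obj;

	if (fail)
		return ERR_PTR(-EEXIST);	/* as when insertion races */
	return &obj;
}

int main(void)
{
	void *p = create_object(1);

	if (IS_ERR(p))
		printf("create failed: %ld\n", PTR_ERR(p));	/* -17 */
	return 0;
}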
@@ -1490,6 +1489,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	struct nf_conntrack_tuple otuple, rtuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
 	u16 zone;
 	int err;
@@ -1510,27 +1510,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 			return err;
 	}
 
-	spin_lock_bh(&nf_conntrack_lock);
 	if (cda[CTA_TUPLE_ORIG])
-		h = __nf_conntrack_find(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = __nf_conntrack_find(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, zone, &rtuple);
 
 	if (h == NULL) {
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			struct nf_conn *ct;
 			enum ip_conntrack_events events;
 
 			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 							&rtuple, u3);
-			if (IS_ERR(ct)) {
-				err = PTR_ERR(ct);
-				goto out_unlock;
-			}
+			if (IS_ERR(ct))
+				return PTR_ERR(ct);
+
 			err = 0;
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
 				events = IPCT_RELATED;
 			else
@@ -1545,23 +1540,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
 			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
+		}
 
 		return err;
 	}
 	/* implicit 'else' */
 
-	/* We manipulate the conntrack inside the global conntrack table lock,
-	 * so there's no need to increase the refcount */
 	err = -EEXIST;
+	ct = nf_ct_tuplehash_to_ctrack(h);
 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
-
+		spin_lock_bh(&nf_conntrack_lock);
 		err = ctnetlink_change_conntrack(ct, cda);
+		spin_unlock_bh(&nf_conntrack_lock);
 		if (err == 0) {
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
 						      (1 << IPCT_ASSURED) |
 						      (1 << IPCT_HELPER) |
@@ -1570,15 +1561,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      (1 << IPCT_MARK),
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
-			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
-
-		return err;
+		}
 	}
 
-out_unlock:
-	spin_unlock_bh(&nf_conntrack_lock);
+	nf_ct_put(ct);
 	return err;
 }
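Note: the rewrite above swaps __nf_conntrack_find() under a long-held nf_conntrack_lock for nf_conntrack_find_get(), which returns the entry with its refcount already raised. The lock is then taken only around the actual update, and every exit path drops the reference with nf_ct_put(). A compact userspace model of that lookup-with-reference pattern (C11 atomics, illustrative names, not kernel code):

/* Userspace model of "find_get + put" replacing "hold the global lock
 * for the whole operation": the lookup bumps a refcount, the caller
 * works on the object without the table lock, and every exit path must
 * drop its reference. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static struct obj *find_get(struct obj *o)
{
	/* In real code the lookup runs under a lock or RCU. */
	atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static void put_obj(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);	/* last reference: destroy */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	struct obj *h;

	atomic_init(&o->refcnt, 1);

	h = find_get(o);	/* +1: safe to use without the table lock */
	/* ... update h here, locking only around the modification ... */
	put_obj(h);		/* drop the lookup reference */
	put_obj(o);		/* drop the initial reference; frees o */
	return 0;
}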
@@ -203,6 +203,27 @@ err:
 	return status;
 }
 
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to MAC header
+ * before calling skb_gso_segment(). Else, original MAC header is lost
+ * and segmented skbs will be sent to wrong destination.
+ */
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_push(skb, skb->network_header - skb->mac_header);
+}
+
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_pull(skb, skb->network_header - skb->mac_header);
+}
+#else
+#define nf_bridge_adjust_skb_data(s) do {} while (0)
+#define nf_bridge_adjust_segmented_data(s) do {} while (0)
+#endif
+
 int nf_queue(struct sk_buff *skb,
 	     struct list_head *elem,
 	     u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
-	int err;
+	int err = -EINVAL;
 	unsigned int queued;
 
 	if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
 		break;
 	}
 
+	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
 	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
 	 * 'ignore this hook'.
 	 */
 	if (IS_ERR(segs))
-		return -EINVAL;
-
+		goto out_err;
 	queued = 0;
 	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (err == 0)
+		if (err == 0) {
+			nf_bridge_adjust_segmented_data(segs);
 			err = __nf_queue(segs, elem, pf, hook, indev,
 					 outdev, okfn, queuenum);
+		}
 		if (err == 0)
 			queued++;
 		else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
 		segs = nskb;
 	} while (segs);
 
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
+	if (queued) {
 		kfree_skb(skb);
+		return 0;
+	}
+out_err:
+	nf_bridge_adjust_segmented_data(skb);
 	return err;
 }
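Note: the comment inside the hunk carries the rationale: for bridged traffic, skb->data must point at the MAC header before skb_gso_segment(), or the segments lose their Ethernet header and go to the wrong destination. nf_bridge_adjust_skb_data() pushes the headroom back before segmenting, and nf_bridge_adjust_segmented_data() pulls it off each segment (and off the original skb on the out_err path). The pointer arithmetic, modeled in plain userspace C (a struct of raw pointers stands in for the skb; illustrative names):

/* Userspace model of the MAC-header adjustment around GSO segmentation.
 * Plain pointers stand in for skb offsets; not kernel code. */
#include <stdio.h>

struct fake_skb {
	char buf[64];
	char *data;		/* current packet start */
	char *mac_header;	/* start of the ethernet header */
	char *network_header;	/* start of the IP header */
};

static void adjust_skb_data(struct fake_skb *skb)
{
	/* models __skb_push(skb, network_header - mac_header) */
	skb->data -= skb->network_header - skb->mac_header;
}

static void adjust_segmented_data(struct fake_skb *skb)
{
	/* models __skb_pull(skb, network_header - mac_header) */
	skb->data += skb->network_header - skb->mac_header;
}

int main(void)
{
	struct fake_skb skb;

	skb.mac_header = skb.buf;		/* 14-byte ethernet header */
	skb.network_header = skb.buf + 14;
	skb.data = skb.network_header;		/* as nf_queue sees it */

	adjust_skb_data(&skb);			/* expose the MAC header */
	printf("offset after push: %td\n", skb.data - skb.buf);	/* 0 */
	adjust_segmented_data(&skb);		/* back to the IP header */
	printf("offset after pull: %td\n", skb.data - skb.buf);	/* 14 */
	return 0;
}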
@@ -152,9 +152,10 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL)
+	if (dst->error) {
+		dst_release(dst);
 		return false;
-
+	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev      = dst->dev;
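Note: ip6_route_output() never returns NULL; it always hands back a dst entry, possibly a stub whose ->error field is set. The old NULL test could therefore never fire, and on lookup failure the dst reference leaked. The corrected pattern checks the embedded error and releases the reference before bailing out, modeled here in userspace (illustrative names, not kernel code):

/* Userspace model of "the lookup never returns NULL": the result always
 * carries an error field that must be checked, and the reference must
 * be dropped on the error path. */
#include <stdio.h>
#include <errno.h>

struct fake_dst {
	int error;	/* 0 = usable route, else -errno */
	int refcnt;
};

static struct fake_dst route_stub = { .error = -EHOSTUNREACH };

static struct fake_dst *route_output(void)
{
	route_stub.refcnt++;	/* caller owns a reference, even on error */
	return &route_stub;	/* never NULL */
}

static void dst_release(struct fake_dst *dst)
{
	dst->refcnt--;
}

int main(void)
{
	struct fake_dst *dst = route_output();

	if (dst->error) {		/* a NULL check would never fire */
		dst_release(dst);	/* avoid leaking the reference */
		printf("no route: %d\n", dst->error);
		return 1;
	}
	return 0;
}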