net_sched: Add qdisc __NET_XMIT_STOLEN flag
Patrick McHardy <kaber@trash.net> noticed:
"The other problem that affects all qdiscs supporting actions is
TC_ACT_QUEUED/TC_ACT_STOLEN getting mapped to NET_XMIT_SUCCESS even
though the packet is not queued, corrupting upper qdiscs' qlen
counters."

and later explained:
"The reason why it translates it at all seems to be to not increase
the drops counter. Within a single qdisc this could be avoided by
other means easily, upper qdiscs would still increase the counter
when we return anything besides NET_XMIT_SUCCESS though. This means
we need a new NET_XMIT return value to indicate this to the upper
qdiscs. So I'd suggest to introduce NET_XMIT_STOLEN, return that to
upper qdiscs and translate it to NET_XMIT_SUCCESS in dev_queue_xmit,
similar to NET_XMIT_BYPASS."

David Miller <davem@davemloft.net> noticed:
"Maybe these NET_XMIT_* values being passed around should be a set of
bits. They could be composed of base meanings, combined with specific
attributes.

So you could say "NET_XMIT_DROP | __NET_XMIT_NO_DROP_COUNT"

The attributes get masked out by the top-level ->enqueue() caller,
such that the base meanings are the only thing that make their way
up into the stack. If it's only about communication within the
qdisc tree, let's simply code it that way."

This patch is trying to realize these ideas.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
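The hunks below only touch the per-qdisc call sites; the flag itself and the
drop-count test they rely on live in the headers, whose hunks are not shown
on this page. A minimal sketch of that plumbing, along the lines of what the
patch adds to include/net/sch_generic.h and include/linux/netdevice.h (exact
values and placement may differ):

/* Qdisc-internal attribute bits sit above the low 16 bits that carry the
 * NET_XMIT_* base codes, so a child qdisc can OR them in and the top-level
 * caller can mask them back out before the value leaves the qdisc tree. */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
};

#define NET_XMIT_MASK           0xffff  /* keeps only the base meaning */

/* Upper qdiscs bump their drops counters only when the packet was really
 * dropped, not when an action queued or stole it. */
#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif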
commit 378a2f090f
parent 6e583ce524
committed by David S. Miller
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
 		kfree_skb(skb);
-		return NET_XMIT_SUCCESS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		kfree_skb(skb);
 		goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		case TC_ACT_RECLASSIFY:
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 			cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
 
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1166,7 +1166,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
 
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -221,7 +221,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -572,9 +572,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -615,10 +617,12 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
 		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -45,7 +45,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	switch (err) {
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
 
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -178,7 +178,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	switch (result) {
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return 0;
 	}
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
 
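As the commit message suggests, the attribute bits are stripped at the
boundary of the qdisc tree, so dev_queue_xmit() and the rest of the stack
only ever see the base NET_XMIT_* codes. A sketch of that top-level step
(the corresponding sch_generic.h hunk is not shown on this page, so take the
helper name and qdisc_skb_cb() usage as assumptions in the style of that
kernel era):

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_skb_cb(skb)->pkt_len = skb->len;

        /* Mask out __NET_XMIT_STOLEN (and any future attribute bits)
         * before the return value leaves the qdisc tree. */
        return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}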