net_sched: reduce fifo qdisc size
Because of various alignments [SLUB / qdisc], we use 512 bytes of memory for one {p|b}fifo qdisc, instead of 256 bytes on 64bit arches and 192 bytes on 32bit ones. Move the "u32 limit" inside "struct Qdisc" (no impact on other qdiscs) Change qdisc_alloc(), first trying a regular allocation before an oversized one. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
c53fa1ed92
commit
d276055c4e
@@ -83,6 +83,7 @@ struct Qdisc {
|
|||||||
struct gnet_stats_queue qstats;
|
struct gnet_stats_queue qstats;
|
||||||
struct rcu_head rcu_head;
|
struct rcu_head rcu_head;
|
||||||
spinlock_t busylock;
|
spinlock_t busylock;
|
||||||
|
u32 limit;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
|
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
|
||||||
|
@@ -19,15 +19,9 @@
|
|||||||
|
|
||||||
/* 1 band FIFO pseudo-"scheduler" */
|
/* 1 band FIFO pseudo-"scheduler" */
|
||||||
|
|
||||||
struct fifo_sched_data {
|
|
||||||
u32 limit;
|
|
||||||
};
|
|
||||||
|
|
||||||
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||||
{
|
{
|
||||||
struct fifo_sched_data *q = qdisc_priv(sch);
|
if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
|
||||||
|
|
||||||
if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
|
|
||||||
return qdisc_enqueue_tail(skb, sch);
|
return qdisc_enqueue_tail(skb, sch);
|
||||||
|
|
||||||
return qdisc_reshape_fail(skb, sch);
|
return qdisc_reshape_fail(skb, sch);
|
||||||
@@ -35,9 +29,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
|||||||
|
|
||||||
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||||
{
|
{
|
||||||
struct fifo_sched_data *q = qdisc_priv(sch);
|
if (likely(skb_queue_len(&sch->q) < sch->limit))
|
||||||
|
|
||||||
if (likely(skb_queue_len(&sch->q) < q->limit))
|
|
||||||
return qdisc_enqueue_tail(skb, sch);
|
return qdisc_enqueue_tail(skb, sch);
|
||||||
|
|
||||||
return qdisc_reshape_fail(skb, sch);
|
return qdisc_reshape_fail(skb, sch);
|
||||||
@@ -45,9 +37,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
|||||||
|
|
||||||
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||||
{
|
{
|
||||||
struct fifo_sched_data *q = qdisc_priv(sch);
|
if (likely(skb_queue_len(&sch->q) < sch->limit))
|
||||||
|
|
||||||
if (likely(skb_queue_len(&sch->q) < q->limit))
|
|
||||||
return qdisc_enqueue_tail(skb, sch);
|
return qdisc_enqueue_tail(skb, sch);
|
||||||
|
|
||||||
/* queue full, remove one skb to fulfill the limit */
|
/* queue full, remove one skb to fulfill the limit */
|
||||||
@@ -60,7 +50,6 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
|||||||
|
|
||||||
static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
|
static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
|
||||||
{
|
{
|
||||||
struct fifo_sched_data *q = qdisc_priv(sch);
|
|
||||||
bool bypass;
|
bool bypass;
|
||||||
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
|
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
|
||||||
|
|
||||||
@@ -70,20 +59,20 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
|
|||||||
if (is_bfifo)
|
if (is_bfifo)
|
||||||
limit *= psched_mtu(qdisc_dev(sch));
|
limit *= psched_mtu(qdisc_dev(sch));
|
||||||
|
|
||||||
q->limit = limit;
|
sch->limit = limit;
|
||||||
} else {
|
} else {
|
||||||
struct tc_fifo_qopt *ctl = nla_data(opt);
|
struct tc_fifo_qopt *ctl = nla_data(opt);
|
||||||
|
|
||||||
if (nla_len(opt) < sizeof(*ctl))
|
if (nla_len(opt) < sizeof(*ctl))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
q->limit = ctl->limit;
|
sch->limit = ctl->limit;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is_bfifo)
|
if (is_bfifo)
|
||||||
bypass = q->limit >= psched_mtu(qdisc_dev(sch));
|
bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
|
||||||
else
|
else
|
||||||
bypass = q->limit >= 1;
|
bypass = sch->limit >= 1;
|
||||||
|
|
||||||
if (bypass)
|
if (bypass)
|
||||||
sch->flags |= TCQ_F_CAN_BYPASS;
|
sch->flags |= TCQ_F_CAN_BYPASS;
|
||||||
@@ -94,8 +83,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
|
|||||||
|
|
||||||
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
|
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
struct fifo_sched_data *q = qdisc_priv(sch);
|
struct tc_fifo_qopt opt = { .limit = sch->limit };
|
||||||
struct tc_fifo_qopt opt = { .limit = q->limit };
|
|
||||||
|
|
||||||
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
|
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
|
||||||
return skb->len;
|
return skb->len;
|
||||||
@@ -106,7 +94,7 @@ nla_put_failure:
|
|||||||
|
|
||||||
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
|
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
|
||||||
.id = "pfifo",
|
.id = "pfifo",
|
||||||
.priv_size = sizeof(struct fifo_sched_data),
|
.priv_size = 0,
|
||||||
.enqueue = pfifo_enqueue,
|
.enqueue = pfifo_enqueue,
|
||||||
.dequeue = qdisc_dequeue_head,
|
.dequeue = qdisc_dequeue_head,
|
||||||
.peek = qdisc_peek_head,
|
.peek = qdisc_peek_head,
|
||||||
@@ -121,7 +109,7 @@ EXPORT_SYMBOL(pfifo_qdisc_ops);
|
|||||||
|
|
||||||
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
|
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
|
||||||
.id = "bfifo",
|
.id = "bfifo",
|
||||||
.priv_size = sizeof(struct fifo_sched_data),
|
.priv_size = 0,
|
||||||
.enqueue = bfifo_enqueue,
|
.enqueue = bfifo_enqueue,
|
||||||
.dequeue = qdisc_dequeue_head,
|
.dequeue = qdisc_dequeue_head,
|
||||||
.peek = qdisc_peek_head,
|
.peek = qdisc_peek_head,
|
||||||
@@ -136,7 +124,7 @@ EXPORT_SYMBOL(bfifo_qdisc_ops);
|
|||||||
|
|
||||||
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
|
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
|
||||||
.id = "pfifo_head_drop",
|
.id = "pfifo_head_drop",
|
||||||
.priv_size = sizeof(struct fifo_sched_data),
|
.priv_size = 0,
|
||||||
.enqueue = pfifo_tail_enqueue,
|
.enqueue = pfifo_tail_enqueue,
|
||||||
.dequeue = qdisc_dequeue_head,
|
.dequeue = qdisc_dequeue_head,
|
||||||
.peek = qdisc_peek_head,
|
.peek = qdisc_peek_head,
|
||||||
|
@@ -550,21 +550,25 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
|
|||||||
{
|
{
|
||||||
void *p;
|
void *p;
|
||||||
struct Qdisc *sch;
|
struct Qdisc *sch;
|
||||||
unsigned int size;
|
unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
|
||||||
int err = -ENOBUFS;
|
int err = -ENOBUFS;
|
||||||
|
|
||||||
/* ensure that the Qdisc and the private data are 64-byte aligned */
|
|
||||||
size = QDISC_ALIGN(sizeof(*sch));
|
|
||||||
size += ops->priv_size + (QDISC_ALIGNTO - 1);
|
|
||||||
|
|
||||||
p = kzalloc_node(size, GFP_KERNEL,
|
p = kzalloc_node(size, GFP_KERNEL,
|
||||||
netdev_queue_numa_node_read(dev_queue));
|
netdev_queue_numa_node_read(dev_queue));
|
||||||
|
|
||||||
if (!p)
|
if (!p)
|
||||||
goto errout;
|
goto errout;
|
||||||
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
|
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
|
||||||
sch->padded = (char *) sch - (char *) p;
|
/* if we got non aligned memory, ask more and do alignment ourself */
|
||||||
|
if (sch != p) {
|
||||||
|
kfree(p);
|
||||||
|
p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
|
||||||
|
netdev_queue_numa_node_read(dev_queue));
|
||||||
|
if (!p)
|
||||||
|
goto errout;
|
||||||
|
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
|
||||||
|
sch->padded = (char *) sch - (char *) p;
|
||||||
|
}
|
||||||
INIT_LIST_HEAD(&sch->list);
|
INIT_LIST_HEAD(&sch->list);
|
||||||
skb_queue_head_init(&sch->q);
|
skb_queue_head_init(&sch->q);
|
||||||
spin_lock_init(&sch->busylock);
|
spin_lock_init(&sch->busylock);
|
||||||
|
Reference in New Issue
Block a user