struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
- spinlock_t *root_lock,
struct sk_buff **to_free);
struct sk_buff * (*dequeue)(struct Qdisc *sch);
unsigned int flags;

struct Qdisc_ops {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
- spinlock_t *root_lock,
struct sk_buff **to_free);
struct sk_buff * (*dequeue)(struct Qdisc *);
struct sk_buff * (*peek)(struct Qdisc *);

#endif
}
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
qdisc_calculate_pkt_len(skb, sch);
- return sch->enqueue(skb, sch, root_lock, to_free);
+ return sch->enqueue(skb, sch, to_free);
}
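
The wrapper above shows the new calling convention: qdisc_enqueue() and Qdisc_ops.enqueue now take only the skb, the qdisc, and the to_free list. For any scheduler the conversion is mechanical: drop the spinlock_t *root_lock argument from the callback and from every qdisc_enqueue() call, exactly as the per-qdisc hunks below do. A minimal sketch of a callback pair against the reverted API (example_enqueue, example_dequeue and example_qdisc_ops are illustrative names, not part of this patch):

#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/* Tail-drop FIFO using the three-argument enqueue signature. */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* Over limit: hand the skb to the caller's to_free list. */
	return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
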
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
- rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK;
+ rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
qdisc_run(q);
if (unlikely(to_free))
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
- rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK;
+ rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
/* --------------------------- Qdisc operations ---------------------------- */
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
#endif
}
- ret = qdisc_enqueue(skb, flow->q, root_lock, to_free);
+ ret = qdisc_enqueue(skb, flow->q, to_free);
if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
if (net_xmit_drop_count(ret)) {
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
-static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
qdisc_drop(skb, sch, to_free);
static void cake_reconfigure(struct Qdisc *sch);
-static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct cake_sched_data *q = qdisc_priv(sch);
}
static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct cbq_sched_data *q = qdisc_priv(sch);
return ret;
}
- ret = qdisc_enqueue(skb, cl->q, root_lock, to_free);
+ ret = qdisc_enqueue(skb, cl->q, to_free);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
cbq_mark_toplevel(q, cl);
s64 sendslope; /* in bytes/s */
s64 idleslope; /* in bytes/s */
struct qdisc_watchdog watchdog;
- int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+ int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free);
struct sk_buff *(*dequeue)(struct Qdisc *sch);
struct Qdisc *qdisc;
};
static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct Qdisc *child, spinlock_t *root_lock,
+ struct Qdisc *child,
struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
int err;
- err = child->ops->enqueue(skb, child, root_lock, to_free);
+ err = child->ops->enqueue(skb, child, to_free);
if (err != NET_XMIT_SUCCESS)
return err;
return NET_XMIT_SUCCESS;
}
-static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct cbs_sched_data *q = qdisc_priv(sch);
struct Qdisc *qdisc = q->qdisc;
- return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free);
+ return cbs_child_enqueue(skb, sch, qdisc, to_free);
}
-static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct cbs_sched_data *q = qdisc_priv(sch);
q->last = ktime_get_ns();
}
- return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free);
+ return cbs_child_enqueue(skb, sch, qdisc, to_free);
}
-static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct cbs_sched_data *q = qdisc_priv(sch);
- return q->enqueue(skb, sch, root_lock, to_free);
+ return q->enqueue(skb, sch, to_free);
}
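
cbs_enqueue() above only forwards to a private function pointer, which sch_cbs.c repoints between the soft and offload paths when offload is enabled or disabled, so both variants had to move to the new signature as well. A simplified sketch of that dispatch, not the literal code (cbs_set_handlers is a hypothetical helper name):

/* Pick the handlers that cbs_enqueue()/cbs_dequeue() delegate to;
 * sch_cbs.c flips these when the offload flag changes.
 */
static void cbs_set_handlers(struct cbs_sched_data *q, bool offload)
{
	if (offload) {
		q->enqueue = cbs_enqueue_offload;
		q->dequeue = cbs_dequeue_offload;
	} else {
		q->enqueue = cbs_enqueue_soft;
		q->dequeue = cbs_dequeue_soft;
	}
}
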
/* timediff is in ns, slope is in bytes/s */
return choke_match_flow(oskb, nskb);
}
-static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct choke_sched_data *q = qdisc_priv(sch);
return skb;
}
-static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct codel_sched_data *q;
return NULL;
}
-static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
}
first = !cl->qdisc->q.qlen;
- err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
/* --------------------------- Qdisc operations ---------------------------- */
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
}
}
- err = qdisc_enqueue(skb, p->q, root_lock, to_free);
+ err = qdisc_enqueue(skb, p->q, to_free);
if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
}
static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
- spinlock_t *root_lock, struct sk_buff **to_free)
+ struct sk_buff **to_free)
{
struct etf_sched_data *q = qdisc_priv(sch);
struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
return &q->classes[band];
}
-static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
}
first = !cl->qdisc->q.qlen;
- err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
/* 1 band FIFO pseudo-"scheduler" */
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
return qdisc_enqueue_tail(skb, sch);
return qdisc_drop(skb, sch, to_free);
}
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
if (likely(sch->q.qlen < sch->limit))
return qdisc_enqueue_tail(skb, sch);
return qdisc_drop(skb, sch, to_free);
}
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
unsigned int prev_backlog;
return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
}
-static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct fq_sched_data *q = qdisc_priv(sch);
return idx;
}
-static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
skb->next = NULL;
}
-static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct fq_pie_sched_data *q = qdisc_priv(sch);
cheaper.
*/
-static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock,
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
__qdisc_drop(skb, to_free);
return &priv->q[band];
}
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock,
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
int band = prio2band[skb->priority & TC_PRIO_MAX];
return false;
}
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct gred_sched_data *q = NULL;
return -1;
}
-static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
- struct sk_buff **to_free)
+static int
+hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
struct hfsc_class *cl;
}
first = !cl->qdisc->q.qlen;
- err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
return bucket - q->buckets;
}
-static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct hhf_sched_data *q = qdisc_priv(sch);
cl->prio_activity = 0;
}
-static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
int uninitialized_var(ret);
__qdisc_drop(skb, to_free);
return ret;
#endif
- } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, root_lock,
+ } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
to_free)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
qdisc_qstats_drop(sch);
}
static int
-multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct Qdisc *qdisc;
}
#endif
- ret = qdisc_enqueue(skb, qdisc, root_lock, to_free);
+ ret = qdisc_enqueue(skb, qdisc, to_free);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
return NET_XMIT_SUCCESS;
* NET_XMIT_DROP: queue length didn't change.
* NET_XMIT_SUCCESS: one skb was queued.
*/
-static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct netem_sched_data *q = qdisc_priv(sch);
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0;
- rootq->enqueue(skb2, rootq, root_lock, to_free);
+ rootq->enqueue(skb2, rootq, to_free);
q->duplicate = dupsave;
rc_drop = NET_XMIT_SUCCESS;
}
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
last_len = segs->len;
- rc = qdisc_enqueue(segs, sch, root_lock, to_free);
+ rc = qdisc_enqueue(segs, sch, to_free);
if (rc != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(rc))
qdisc_qstats_drop(sch);
struct sk_buff *to_free = NULL;
int err;
- err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free);
+ err = qdisc_enqueue(skb, q->qdisc, &to_free);
kfree_skb_list(to_free);
if (err != NET_XMIT_SUCCESS &&
net_xmit_drop_count(err)) {
}
EXPORT_SYMBOL_GPL(pie_drop_early);
-static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct pie_sched_data *q = qdisc_priv(sch);
u32 pkts_to_release;
};
-static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct plug_sched_data *q = qdisc_priv(sch);
return q->queues[band];
}
-static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
- struct sk_buff **to_free)
+static int
+prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *qdisc;
}
#endif
- ret = qdisc_enqueue(skb, qdisc, root_lock, to_free);
+ ret = qdisc_enqueue(skb, qdisc, to_free);
if (ret == NET_XMIT_SUCCESS) {
sch->qstats.backlog += len;
sch->q.qlen++;
return agg;
}
-static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb), gso_segs;
gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
first = !cl->qdisc->q.qlen;
- err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
if (net_xmit_drop_count(err)) {
return q->flags & TC_RED_NODROP;
}
-static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct red_sched_data *q = qdisc_priv(sch);
break;
}
- ret = qdisc_enqueue(skb, child, root_lock, to_free);
+ ret = qdisc_enqueue(skb, child, to_free);
if (likely(ret == NET_XMIT_SUCCESS)) {
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return false;
}
-static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
}
enqueue:
- ret = qdisc_enqueue(skb, child, root_lock, to_free);
+ ret = qdisc_enqueue(skb, child, to_free);
if (likely(ret == NET_XMIT_SUCCESS)) {
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
}
static int
-sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free)
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
struct sfq_sched_data *q = qdisc_priv(sch);
unsigned int hash, dropped;
return SKBPRIO_MAX_PRIORITY - 1;
}
-static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
return txtime;
}
-static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct taprio_sched *q = qdisc_priv(sch);
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
- return qdisc_enqueue(skb, child, root_lock, to_free);
+ return qdisc_enqueue(skb, child, to_free);
}
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
/* GSO packet is too big, segment it so that tbf can transmit
* each segment in time
*/
-static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct tbf_sched_data *q = qdisc_priv(sch);
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
len += segs->len;
- ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free);
+ ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct tbf_sched_data *q = qdisc_priv(sch);
if (qdisc_pkt_len(skb) > q->max_size) {
if (skb_is_gso(skb) &&
skb_gso_validate_mac_len(skb, q->max_size))
- return tbf_segment(skb, sch, root_lock, to_free);
+ return tbf_segment(skb, sch, to_free);
return qdisc_drop(skb, sch, to_free);
}
- ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free);
+ ret = qdisc_enqueue(skb, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
/* "teql*" qdisc routines */
-static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
- struct sk_buff **to_free)
+static int
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch);