--- zzzz-none-000/linux-3.10.107/include/net/sch_generic.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/net/sch_generic.h	2021-02-04 17:41:59.000000000 +0000
@@ -6,6 +6,8 @@
 #include <linux/rcupdate.h>
 #include <linux/pkt_sched.h>
 #include <linux/pkt_cls.h>
+#include <linux/percpu.h>
+#include <linux/dynamic_queue_limits.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
 
@@ -58,14 +60,17 @@
 				      * multiqueue device.
 				      */
 #define TCQ_F_WARN_NONWC	(1 << 16)
-	int			padded;
+#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
+#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
+				      * qdisc_tree_decrease_qlen() should stop.
+				      */
+#define TCQ_F_NSS		0x1000 /* NSS qdisc flag. */
+	u32			limit;
 	const struct Qdisc_ops	*ops;
 	struct qdisc_size_table	__rcu *stab;
 	struct list_head	list;
 	u32			handle;
 	u32			parent;
-	atomic_t		refcnt;
-	struct gnet_stats_rate_est	rate_est;
 	int			(*reshape_fail)(struct sk_buff *skb,
 					struct Qdisc *q);
 
@@ -76,8 +81,12 @@
 	 */
 	struct Qdisc		*__parent;
 	struct netdev_queue	*dev_queue;
-	struct Qdisc		*next_sched;
+	struct gnet_stats_rate_est64	rate_est;
+	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+	struct gnet_stats_queue __percpu *cpu_qstats;
+
+	struct Qdisc		*next_sched;
 
 	struct sk_buff		*gso_skb;
 	/*
 	 * For performance sake on SMP, we put highly modified fields at the end
@@ -88,8 +97,10 @@
 	unsigned int		__state;
 	struct gnet_stats_queue	qstats;
 	struct rcu_head		rcu_head;
-	spinlock_t		busylock;
-	u32			limit;
+	int			padded;
+	atomic_t		refcnt;
+
+	spinlock_t		busylock ____cacheline_aligned_in_smp;
 };
 
 static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
@@ -110,6 +121,21 @@
 	qdisc->__state &= ~__QDISC___STATE_RUNNING;
 }
 
+static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+{
+	return qdisc->flags & TCQ_F_ONETXQUEUE;
+}
+
+static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+	/* Non-BQL migrated drivers will return 0, too. */
+	return dql_avail(&txq->dql);
+#else
+	return 0;
+#endif
+}
+
 static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
 {
 	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
@@ -142,7 +168,7 @@
 	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);
 
 	/* Filter manipulation */
-	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
+	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -184,26 +210,25 @@
 };
 
 struct tcf_proto_ops {
-	struct tcf_proto_ops	*next;
+	struct list_head	head;
 	char			kind[IFNAMSIZ];
 
 	int			(*classify)(struct sk_buff *,
 					    const struct tcf_proto *,
 					    struct tcf_result *);
 	int			(*init)(struct tcf_proto*);
-	void			(*destroy)(struct tcf_proto*);
+	bool			(*destroy)(struct tcf_proto*, bool);
 
 	unsigned long		(*get)(struct tcf_proto*, u32 handle);
-	void			(*put)(struct tcf_proto*, unsigned long);
 	int			(*change)(struct net *net, struct sk_buff *,
 					struct tcf_proto*, unsigned long,
 					u32 handle, struct nlattr **,
-					unsigned long *);
+					unsigned long *, bool);
 	int			(*delete)(struct tcf_proto*, unsigned long);
 	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
 
 	/* rtnetlink specific */
-	int			(*dump)(struct tcf_proto*, unsigned long,
+	int			(*dump)(struct net*, struct tcf_proto*, unsigned long,
 					struct sk_buff *skb, struct tcmsg*);
 
 	struct module		*owner;
@@ -211,8 +236,8 @@
 
 struct tcf_proto {
 	/* Fast access part */
-	struct tcf_proto	*next;
-	void			*root;
+	struct tcf_proto __rcu	*next;
+	void __rcu		*root;
 	int			(*classify)(struct sk_buff *,
 					    const struct tcf_proto *,
 					    struct tcf_result *);
@@ -224,13 +249,15 @@
 	struct Qdisc		*q;
 	void			*data;
 	const struct tcf_proto_ops	*ops;
+	struct rcu_head		rcu;
 };
 
 struct qdisc_skb_cb {
 	unsigned int		pkt_len;
 	u16			slave_dev_queue_mapping;
-	u16			_pad;
-	unsigned char		data[20];
+	u16			tc_classid;
+#define QDISC_CB_PRIV_LEN	20
+	unsigned char		data[QDISC_CB_PRIV_LEN];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
@@ -258,7 +285,9 @@
 
 static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
 {
-	return qdisc->dev_queue->qdisc;
+	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+	return q;
 }
 
 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
@@ -315,6 +344,9 @@
 extern struct Qdisc_ops noop_qdisc_ops;
 extern struct Qdisc_ops pfifo_fast_ops;
 extern struct Qdisc_ops mq_qdisc_ops;
+extern struct Qdisc_ops noqueue_qdisc_ops;
+extern struct Qdisc_ops fq_codel_qdisc_ops;
+extern const struct Qdisc_ops *default_qdisc_ops;
 
 struct Qdisc_class_common {
 	u32			classid;
@@ -349,30 +381,43 @@
 	return NULL;
 }
 
-extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
-extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
-extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
-
-extern void dev_init_scheduler(struct net_device *dev);
-extern void dev_shutdown(struct net_device *dev);
-extern void dev_activate(struct net_device *dev);
-extern void dev_deactivate(struct net_device *dev);
-extern void dev_deactivate_many(struct list_head *head);
-extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
-				     struct Qdisc *qdisc);
-extern void qdisc_reset(struct Qdisc *qdisc);
-extern void qdisc_destroy(struct Qdisc *qdisc);
-extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-				 struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-				       struct Qdisc_ops *ops, u32 parentid);
-extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
-				      const struct qdisc_size_table *stab);
-extern void tcf_destroy(struct tcf_proto *tp);
-extern void tcf_destroy_chain(struct tcf_proto **fl);
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+			     struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+			     struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+			      struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+			       unsigned int len);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+			  const struct Qdisc_ops *ops);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+				const struct Qdisc_ops *ops, u32 parentid);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+			       const struct qdisc_size_table *stab);
+bool tcf_destroy(struct tcf_proto *tp, bool force);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+int skb_do_redirect(struct sk_buff *);
+
+static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+	return false;
+#endif
+}
 
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -380,7 +425,7 @@
 	struct Qdisc *qdisc;
 
 	for (; i < dev->num_tx_queues; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
 		if (qdisc) {
 			spin_lock_bh(qdisc_lock(qdisc));
 			qdisc_reset(qdisc);
@@ -398,13 +443,18 @@
 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 {
 	unsigned int i;
+
+	rcu_read_lock();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		const struct Qdisc *q = txq->qdisc;
+		const struct Qdisc *q = rcu_dereference(txq->qdisc);
 
-		if (q->q.qlen)
+		if (q->q.qlen) {
+			rcu_read_unlock();
 			return false;
+		}
 	}
+	rcu_read_unlock();
 	return true;
 }
 
@@ -412,9 +462,10 @@
static inline bool qdisc_tx_changing(const struct net_device *dev)
 {
 	unsigned int i;
+
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		if (txq->qdisc != txq->qdisc_sleeping)
+		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
 			return true;
 	}
 	return false;
@@ -424,9 +475,10 @@
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 {
 	unsigned int i;
+
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		if (txq->qdisc != &noop_qdisc)
+		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
 			return false;
 	}
 	return true;
@@ -466,13 +518,11 @@
 	return sch->enqueue(skb, sch);
 }
 
-static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 {
-	qdisc_skb_cb(skb)->pkt_len = skb->len;
-	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
+	return q->flags & TCQ_F_CPUSTATS;
 }
 
-
 static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
 				 const struct sk_buff *skb)
@@ -480,17 +530,73 @@
 	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 }
 
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+				     const struct sk_buff *skb)
+{
+	u64_stats_update_begin(&bstats->syncp);
+	bstats_update(&bstats->bstats, skb);
+	u64_stats_update_end(&bstats->syncp);
+}
+
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+					   const struct sk_buff *skb)
+{
+	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
 static inline void qdisc_bstats_update(struct Qdisc *sch,
 					const struct sk_buff *skb)
 {
 	bstats_update(&sch->bstats, skb);
 }
 
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+					    const struct sk_buff *skb)
+{
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+					    const struct sk_buff *skb)
+{
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+	sch->qstats.drops += count;
+}
+
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
+{
+	qstats->drops++;
+}
+
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
+{
+	qstats->overlimits++;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+	qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+	qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
+}
+
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+	sch->qstats.overlimits++;
+}
+
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 				       struct sk_buff_head *list)
 {
 	__skb_queue_tail(list, skb);
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
@@ -506,7 +612,7 @@
 	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_bstats_update(sch, skb);
 	}
 
@@ -525,7 +631,7 @@
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
-		sch->qstats.backlog -= len;
+		qdisc_qstats_backlog_dec(sch, skb);
 		kfree_skb(skb);
 		return len;
 	}
@@ -544,7 +650,7 @@
 	struct sk_buff *skb = __skb_dequeue_tail(list);
 
 	if (likely(skb != NULL))
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 
 	return skb;
 }
@@ -604,6 +710,23 @@
 	sch->qstats.backlog = 0;
 }
 
+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+					  struct Qdisc **pold)
+{
+	struct Qdisc *old;
+
+	sch_tree_lock(sch);
+	old = *pold;
+	*pold = new;
+	if (old != NULL) {
+		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+		qdisc_reset(old);
+	}
+	sch_tree_unlock(sch);
+
+	return old;
+}
+
 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
 					      struct sk_buff_head *list)
@@ -626,14 +749,14 @@
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 {
 	kfree_skb(skb);
-	sch->qstats.drops++;
+	qdisc_qstats_drop(sch);
 
 	return NET_XMIT_DROP;
 }
 
 static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
 {
-	sch->qstats.drops++;
+	qdisc_qstats_drop(sch);
 
 #ifdef CONFIG_NET_CLS_ACT
 	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
@@ -661,25 +784,8 @@
 	return rtab->data[slot];
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
-					    int action)
-{
-	struct sk_buff *n;
-
-	n = skb_clone(skb, gfp_mask);
-
-	if (n) {
-		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
-		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-	}
-	return n;
-}
-#endif
-
 struct psched_ratecfg {
-	u64	rate_bps;
+	u64	rate_bytes_ps; /* bytes per second */
 	u32	mult;
 	u16	overhead;
 	u8	linklayer;
@@ -697,13 +803,21 @@
 	return ((u64)len * r->mult) >> r->shift;
 }
 
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+			       const struct tc_ratespec *conf,
+			       u64 rate64);
 
 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
 					  const struct psched_ratecfg *r)
 {
 	memset(res, 0, sizeof(*res));
-	res->rate = r->rate_bps >> 3;
+
+	/* legacy struct tc_ratespec has a 32bit @rate field
+	 * Qdisc using 64bit rate should add new attributes
+	 * in order to maintain compatibility.
+	 */
+	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
 	res->overhead = r->overhead;
 	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
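
Usage notes (sketches, not part of the patch):

qdisc_replace() swaps a child qdisc under sch_tree_lock() and propagates the
displaced child's qlen and backlog up the tree via qdisc_tree_reduce_backlog()
before resetting it. A minimal sketch of a single-child qdisc's ->graft()
built on it follows; the qdisc and its private struct are hypothetical, only
qdisc_replace(), qdisc_priv() and noop_qdisc come from the kernel:

/* Hypothetical single-child qdisc: ->graft() swaps the child in one step
 * and hands the old child back to the caller, which is expected to
 * qdisc_destroy() it.
 */
struct example_sched_data {
	struct Qdisc	*child;		/* hypothetical private state */
};

static int example_graft(struct Qdisc *sch, unsigned long arg,
			 struct Qdisc *new, struct Qdisc **old)
{
	struct example_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;	/* ungraft: park packets in noop */

	*old = qdisc_replace(sch, new, &q->child);
	return 0;
}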
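
The TCQ_F_CPUSTATS flag and the cpu_bstats/cpu_qstats pointers let lockless
qdiscs account packets per CPU instead of under the root lock. A sketch of an
accounting path that picks the right variant; example_account() is
illustrative, the helpers it calls are the ones added above:

static inline void example_account(struct Qdisc *sch, struct sk_buff *skb,
				   bool dropped)
{
	if (qdisc_is_percpu_stats(sch)) {
		/* per-CPU counters: safe without the qdisc root lock */
		if (dropped)
			qdisc_qstats_cpu_drop(sch);
		else
			qdisc_bstats_cpu_update(sch, skb);
	} else {
		/* shared counters: caller must hold the root lock */
		if (dropped)
			qdisc_qstats_drop(sch);
		else
			qdisc_bstats_update(sch, skb);
	}
}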
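
qdisc_may_bulk() and qdisc_avail_bulklimit() exist so the dequeue path can
pull several packets while the TX queue's BQL budget lasts; TCQ_F_ONETXQUEUE
guarantees they all target the same txq. A simplified sketch of the consumer
side; upstream's try_bulk_dequeue_skb() in net/sched/sch_generic.c follows
this shape, but example_bulk_dequeue() itself is illustrative:

static void example_bulk_dequeue(struct Qdisc *q, struct netdev_queue *txq,
				 struct sk_buff *head)
{
	int budget = qdisc_avail_bulklimit(txq);	/* BQL headroom, bytes */
	struct sk_buff *skb = head;

	if (!qdisc_may_bulk(q))		/* single-txq qdiscs only */
		return;

	while (budget > 0) {
		struct sk_buff *next = q->dequeue(q);

		if (!next)
			break;
		budget -= qdisc_pkt_len(next);	/* charge the packet */
		skb->next = next;		/* chain for one xmit call */
		skb = next;
	}
	skb->next = NULL;
}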
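
The rate_bps -> rate_bytes_ps rename goes with psched_ratecfg_precompute()
taking the rate in bytes per second (plus a 64bit rate64 argument): it derives
the mult/shift pair so that psched_l2t_ns() can turn a length into a transmit
time in nanoseconds without a division on the fast path. A worked example
with numbers chosen here, not taken from the patch: at 100 Mbit/s,
rate_bytes_ps = 12500000, and a 1500 byte frame takes
1500 * 1000000000 / 12500000 = 120000 ns; (len * mult) >> shift approximates
exactly this quotient. The exact computation, as a sketch (div64_u64() is
from linux/math64.h, NSEC_PER_SEC from linux/time.h):

#include <linux/math64.h>
#include <linux/time.h>

/* What psched_l2t_ns() approximates with (len * r->mult) >> r->shift:
 * the exact time to transmit len bytes at rate_bytes_ps, in nanoseconds.
 */
static inline u64 example_l2t_ns_exact(u64 rate_bytes_ps, unsigned int len)
{
	return div64_u64((u64)len * NSEC_PER_SEC, rate_bytes_ps);
}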