--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,6 +17,7 @@ struct timer_list {
 	unsigned long		expires;
 	void			(*function)(struct timer_list *);
 	u32			flags;
+	unsigned long		cust_data;
 
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -151,6 +151,31 @@ resched:
 
 }
 
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats)
+{
+	struct ifb_dev_private *dp;
+	struct ifb_q_private *txp;
+
+	if (!dev || !offload_stats) {
+		return;
+	}
+
+	if (!(dev->priv_flags_ext & IFF_EXT_IFB)) {
+		return;
+	}
+
+	dp = netdev_priv(dev);
+	txp = dp->tx_private;
+
+	u64_stats_update_begin(&txp->rx_stats.sync);
+	txp->rx_stats.packets += u64_stats_read(&offload_stats->rx_packets);
+	txp->rx_stats.bytes += u64_stats_read(&offload_stats->rx_bytes);
+	txp->tx_stats.packets += u64_stats_read(&offload_stats->tx_packets);
+	txp->tx_stats.bytes += u64_stats_read(&offload_stats->tx_bytes);
+	u64_stats_update_end(&txp->rx_stats.sync);
+}
+EXPORT_SYMBOL(ifb_update_offload_stats);
+
 static void ifb_stats64(struct net_device *dev,
 			struct rtnl_link_stats64 *stats)
 {
@@ -326,6 +351,7 @@ static void ifb_setup(struct net_device
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags_ext |= IFF_EXT_IFB;	/* Mark the device as an IFB device. */
 	netif_keep_dst(dev);
 	eth_hw_addr_random(dev);
 	dev->needs_free_netdev = true;
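The exported ifb_update_offload_stats() is intended to be called from an accelerator/offload datapath when flows that were redirected through an IFB device are counted in hardware instead of by the IFB tasklet. A minimal caller sketch follows; the function name, the counter parameters and the call site are assumptions for illustration, and only ifb_update_offload_stats() and netif_is_ifb_dev() (added further down in this patch) come from the patch itself.

/*
 * Hypothetical glue in an offload driver: fold hardware flow counter
 * deltas back into the IFB device that tc originally redirected the
 * flow through. Everything except ifb_update_offload_stats() and
 * netif_is_ifb_dev() is illustrative.
 */
static void example_ifb_sync_hw_counters(struct net_device *ifb_dev,
					 u64 rx_pkts, u64 rx_bytes,
					 u64 tx_pkts, u64 tx_bytes)
{
	struct pcpu_sw_netstats delta;

	if (!ifb_dev || !netif_is_ifb_dev(ifb_dev))
		return;

	u64_stats_init(&delta.syncp);
	u64_stats_set(&delta.rx_packets, rx_pkts);
	u64_stats_set(&delta.rx_bytes, rx_bytes);
	u64_stats_set(&delta.tx_packets, tx_pkts);
	u64_stats_set(&delta.tx_bytes, tx_bytes);

	/* The IFB device adds these deltas to its own per-queue counters. */
	ifb_update_offload_stats(ifb_dev, &delta);
}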
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4696,6 +4696,15 @@ void dev_uc_flush(struct net_device
 void dev_uc_init(struct net_device *dev);
 
 /**
+ * ifb_update_offload_stats - Update the IFB interface stats
+ * @dev: IFB device to update the stats
+ * @offload_stats: per CPU stats structure
+ *
+ * Allows update of IFB stats when flows are offloaded to an accelerator.
+ */
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats);
+
+/**
  * __dev_uc_sync - Synchonize device's unicast list
  * @dev: device to sync
  * @sync: function to call if address should be added
@@ -5222,6 +5231,11 @@ static inline bool netif_is_failover_sla
 	return dev->priv_flags & IFF_FAILOVER_SLAVE;
 }
 
+static inline bool netif_is_ifb_dev(const struct net_device *dev)
+{
+	return dev->priv_flags_ext & IFF_EXT_IFB;
+}
+
 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 static inline void netif_keep_dst(struct net_device *dev)
 {
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1306,4 +1306,248 @@ enum {
 
 #define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
 
+/* QCA NSS Clients Support - Start */
+enum {
+	TCA_NSS_ACCEL_MODE_NSS_FW,
+	TCA_NSS_ACCEL_MODE_PPE,
+	TCA_NSS_ACCEL_MODE_MAX
+};
+
+/* NSSFIFO section */
+
+enum {
+	TCA_NSSFIFO_UNSPEC,
+	TCA_NSSFIFO_PARMS,
+	__TCA_NSSFIFO_MAX
+};
+
+#define TCA_NSSFIFO_MAX	(__TCA_NSSFIFO_MAX - 1)
+
+struct tc_nssfifo_qopt {
+	__u32	limit;		/* Queue length: bytes for bfifo, packets for pfifo */
+	__u8	set_default;	/* Sets qdisc to be the default qdisc for enqueue */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRED section */
+
+enum {
+	TCA_NSSWRED_UNSPEC,
+	TCA_NSSWRED_PARMS,
+	__TCA_NSSWRED_MAX
+};
+
+#define TCA_NSSWRED_MAX	(__TCA_NSSWRED_MAX - 1)
+#define NSSWRED_CLASS_MAX	6
+struct tc_red_alg_parameter {
+	__u32	min;		/* qlen_avg < min: pkts are all enqueued */
+	__u32	max;		/* qlen_avg > max: pkts are all dropped */
+	__u32	probability;	/* Drop probability at qlen_avg = max */
+	__u32	exp_weight_factor;	/* exp_weight_factor for calculating qlen_avg */
+};
+
+struct tc_nsswred_traffic_class {
+	__u32	limit;			/* Queue length */
+	__u32	weight_mode_value;	/* Weight mode value */
+	struct tc_red_alg_parameter rap;	/* Parameters for RED alg */
+};
+
+/*
+ * Weight modes for WRED
+ */
+enum tc_nsswred_weight_modes {
+	TC_NSSWRED_WEIGHT_MODE_DSCP = 0,	/* Weight mode is DSCP */
+	TC_NSSWRED_WEIGHT_MODES,		/* Must be last */
+};
+
+struct tc_nsswred_qopt {
+	__u32	limit;			/* Queue length */
+	enum tc_nsswred_weight_modes weight_mode;
+					/* Weight mode */
+	__u32	traffic_classes;	/* How many traffic classes: DPs */
+	__u32	def_traffic_class;	/* Default traffic if no match: def_DP */
+	__u32	traffic_id;		/* The traffic id to be configured: DP */
+	__u32	weight_mode_value;	/* Weight mode value */
+	struct tc_red_alg_parameter rap;	/* RED algorithm parameters */
+	struct tc_nsswred_traffic_class tntc[NSSWRED_CLASS_MAX];
+					/* Traffic settings for dumping */
+	__u8	ecn;			/* Setting ECN bit or dropping */
+	__u8	set_default;		/* Sets qdisc to be the default for enqueue */
+	__u8	accel_mode;		/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSCODEL section */
+
+enum {
+	TCA_NSSCODEL_UNSPEC,
+	TCA_NSSCODEL_PARMS,
+	__TCA_NSSCODEL_MAX
+};
+
+#define TCA_NSSCODEL_MAX	(__TCA_NSSCODEL_MAX - 1)
+
+struct tc_nsscodel_qopt {
+	__u32	target;		/* Acceptable queueing delay */
+	__u32	limit;		/* Max number of packets that can be held in the queue */
+	__u32	interval;	/* Monitoring interval */
+	__u32	flows;		/* Number of flow buckets */
+	__u32	quantum;	/* Weight (in bytes) used for DRR of flow buckets */
+	__u8	ecn;		/* 0 - disable ECN, 1 - enable ECN */
+	__u8	set_default;	/* Sets qdisc to be the default qdisc for enqueue */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+struct tc_nsscodel_xstats {
+	__u32	peak_queue_delay;	/* Peak delay experienced by a dequeued packet */
+	__u32	peak_drop_delay;	/* Peak delay experienced by a dropped packet */
+};
+
+/* NSSFQ_CODEL section */
+
+struct tc_nssfq_codel_xstats {
+	__u32	new_flow_count;	/* Total number of new flows seen */
+	__u32	new_flows_len;	/* Current number of new flows */
+	__u32	old_flows_len;	/* Current number of old flows */
+	__u32	ecn_mark;	/* Number of packets marked with ECN */
+	__u32	drop_overlimit;	/* Number of packets dropped due to overlimit */
+	__u32	maxpacket;	/* The largest packet seen so far in the queue */
+};
+
+/* NSSTBL section */
+
+enum {
+	TCA_NSSTBL_UNSPEC,
+	TCA_NSSTBL_PARMS,
+	__TCA_NSSTBL_MAX
+};
+
+#define TCA_NSSTBL_MAX	(__TCA_NSSTBL_MAX - 1)
+
+struct tc_nsstbl_qopt {
+	__u32	burst;		/* Maximum burst size */
+	__u32	rate;		/* Limiting rate of TBF */
+	__u32	peakrate;	/* Maximum rate at which TBF is allowed to send */
+	__u32	mtu;		/* Max size of packet, or minimum burst size */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSPRIO section */
+
+#define TCA_NSSPRIO_MAX_BANDS	256
+
+enum {
+	TCA_NSSPRIO_UNSPEC,
+	TCA_NSSPRIO_PARMS,
+	__TCA_NSSPRIO_MAX
+};
+
+#define TCA_NSSPRIO_MAX	(__TCA_NSSPRIO_MAX - 1)
+
+struct tc_nssprio_qopt {
+	__u32	bands;		/* Number of bands */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBF section */
+
+enum {
+	TCA_NSSBF_UNSPEC,
+	TCA_NSSBF_CLASS_PARMS,
+	TCA_NSSBF_QDISC_PARMS,
+	__TCA_NSSBF_MAX
+};
+
+#define TCA_NSSBF_MAX	(__TCA_NSSBF_MAX - 1)
+
+struct tc_nssbf_class_qopt {
+	__u32	burst;		/* Maximum burst size */
+	__u32	rate;		/* Allowed bandwidth for this class */
+	__u32	mtu;		/* MTU of the associated interface */
+	__u32	quantum;	/* Quantum allocation for DRR */
+};
+
+struct tc_nssbf_qopt {
+	__u16	defcls;		/* Default class value */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRR section */
+
+enum {
+	TCA_NSSWRR_UNSPEC,
+	TCA_NSSWRR_CLASS_PARMS,
+	TCA_NSSWRR_QDISC_PARMS,
+	__TCA_NSSWRR_MAX
+};
+
+#define TCA_NSSWRR_MAX	(__TCA_NSSWRR_MAX - 1)
+
+struct tc_nsswrr_class_qopt {
+	__u32	quantum;	/* Weight associated to this class */
+};
+
+struct tc_nsswrr_qopt {
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWFQ section */
+
+enum {
+	TCA_NSSWFQ_UNSPEC,
+	TCA_NSSWFQ_CLASS_PARMS,
+	TCA_NSSWFQ_QDISC_PARMS,
+	__TCA_NSSWFQ_MAX
+};
+
+#define TCA_NSSWFQ_MAX	(__TCA_NSSWFQ_MAX - 1)
+
+struct tc_nsswfq_class_qopt {
+	__u32	quantum;	/* Weight associated to this class */
+};
+
+struct tc_nsswfq_qopt {
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSHTB section */
+
+enum {
+	TCA_NSSHTB_UNSPEC,
+	TCA_NSSHTB_CLASS_PARMS,
+	TCA_NSSHTB_QDISC_PARMS,
+	__TCA_NSSHTB_MAX
+};
+
+#define TCA_NSSHTB_MAX	(__TCA_NSSHTB_MAX - 1)
+
+struct tc_nsshtb_class_qopt {
+	__u32	burst;		/* Allowed burst size */
+	__u32	rate;		/* Allowed bandwidth for this class */
+	__u32	cburst;		/* Maximum burst size */
+	__u32	crate;		/* Maximum bandwidth for this class */
+	__u32	quantum;	/* Quantum allocation for DRR */
+	__u32	priority;	/* Priority value associated with this class */
+	__u32	overhead;	/* Overhead in bytes per packet */
+};
+
+struct tc_nsshtb_qopt {
+	__u32	r2q;		/* Rate to quantum ratio */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBLACKHOLE section */
+
+enum {
+	TCA_NSSBLACKHOLE_UNSPEC,
+	TCA_NSSBLACKHOLE_PARMS,
+	__TCA_NSSBLACKHOLE_MAX
+};
+
+#define TCA_NSSBLACKHOLE_MAX	(__TCA_NSSBLACKHOLE_MAX - 1)
+
+struct tc_nssblackhole_qopt {
+	__u8	set_default;	/* Sets qdisc to be the default qdisc for enqueue */
+	__u8	accel_mode;	/* Dictates which data plane offloads the qdisc */
+};
+/* QCA NSS Clients Support - End */
 #endif
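Each NSS qdisc carries its configuration as a single TCA_*_PARMS netlink attribute wrapping the matching tc_*_qopt structure above. The sketch below shows how a hypothetical nssfifo implementation might validate and read that attribute in its ->init()/->change() handler; the policy and function names are illustrative, and only the TCA_NSSFIFO_*/tc_nssfifo_qopt/TCA_NSS_ACCEL_MODE_* definitions come from this patch.

/*
 * Illustrative parse of TCA_NSSFIFO_PARMS. Only the UAPI definitions are
 * provided by the patch; the surrounding function is a sketch.
 */
static const struct nla_policy example_nssfifo_policy[TCA_NSSFIFO_MAX + 1] = {
	[TCA_NSSFIFO_PARMS] = { .len = sizeof(struct tc_nssfifo_qopt) },
};

static int example_nssfifo_parse(struct nlattr *opt,
				 struct tc_nssfifo_qopt *qopt,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_NSSFIFO_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_NSSFIFO_MAX, opt,
					  example_nssfifo_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_NSSFIFO_PARMS])
		return -EINVAL;

	*qopt = *(struct tc_nssfifo_qopt *)nla_data(tb[TCA_NSSFIFO_PARMS]);

	/* accel_mode selects the offloading data plane (NSS FW or PPE). */
	if (qopt->accel_mode >= TCA_NSS_ACCEL_MODE_MAX)
		return -EINVAL;

	return 0;
}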
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -314,6 +314,7 @@ struct Qdisc *qdisc_lookup(struct net_de
 out:
 	return q;
 }
+EXPORT_SYMBOL(qdisc_lookup);
 
 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
 {
@@ -2389,4 +2390,26 @@ static int __init pktsched_init(void)
 	return 0;
 }
 
+/* QCA NSS Qdisc Support - Start */
+bool tcf_destroy(struct tcf_proto *tp, bool force)
+{
+	tp->ops->destroy(tp, force, NULL);
+	module_put(tp->ops->owner);
+	kfree_rcu(tp, rcu);
+
+	return true;
+}
+
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+	struct tcf_proto *tp;
+
+	while ((tp = rtnl_dereference(*fl)) != NULL) {
+		RCU_INIT_POINTER(*fl, tp->next);
+		tcf_destroy(tp, true);
+	}
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
+/* QCA NSS Qdisc Support - End */
+
 subsys_initcall(pktsched_init);
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1069,6 +1069,7 @@ static void __qdisc_destroy(struct Qdisc
 
 	call_rcu(&qdisc->rcu, qdisc_free_cb);
 }
+EXPORT_SYMBOL(qdisc_destroy);
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -94,6 +94,7 @@ struct Qdisc {
 #define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
 #define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
 #define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_NSS		0x1000 /* NSS qdisc flag. */
 	u32			limit;
 	const struct Qdisc_ops	*ops;
 	struct qdisc_size_table	__rcu *stab;
@@ -751,6 +752,42 @@ static inline bool skb_skip_tc_classify(
 	return false;
 }
 
+/* QCA NSS Qdisc Support - Start */
+/*
+ * Set skb classify bit field.
+ */
+static inline void skb_set_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	skb->tc_skip_classify_offload = 1;
+#endif
+}
+
+/*
+ * Clear skb classify bit field.
+ */
+static inline void skb_clear_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	skb->tc_skip_classify_offload = 0;
+#endif
+}
+
+/*
+ * Skip skb processing if sent from ifb dev.
+ */
+static inline bool skb_skip_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	if (skb->tc_skip_classify_offload) {
+		skb_clear_tc_classify_offload(skb);
+		return true;
+	}
+#endif
+	return false;
+}
+/* QCA NSS Qdisc Support - End */
+
 /* Reset all TX qdiscs greater than index of a device. */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 {
@@ -1323,4 +1360,9 @@ static inline void qdisc_synchronize(con
 	msleep(1);
 }
 
+/* QCA NSS Qdisc Support - Start */
+void qdisc_destroy(struct Qdisc *qdisc);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+/* QCA NSS Qdisc Support - End */
+
 #endif
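The tc_skip_classify_offload bit mirrors the existing tc_skip_classify mechanism: a packet that the accelerator has already classified and re-injected through the IFB device should be bypassed by software classification exactly once. A sketch of both sides follows; the function names and the re-injection point are assumptions, and only skb_set_tc_classify_offload()/skb_skip_tc_classify_offload() come from this patch.

/*
 * Illustrative use of the new skb helpers. The accelerator-facing IFB
 * transmit path marks packets it re-injects, and the classification
 * entry point skips them for one pass only (the helper clears the bit
 * as it reads it).
 */
static void example_ifb_reinject(struct sk_buff *skb, struct net_device *ifb_dev)
{
	/* Packet was already classified by the offload engine. */
	skb_set_tc_classify_offload(skb);
	skb->dev = ifb_dev;
	dev_queue_xmit(skb);
}

static struct sk_buff *example_classify_entry(struct sk_buff *skb)
{
	/* Reads and clears the bit, so the skip applies to one pass only. */
	if (skb_skip_tc_classify_offload(skb))
		return skb;	/* bypass software tc classification */

	/* ... normal tc classification would run here ... */
	return skb;
}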
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -764,6 +764,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@offload_fwd_mark: Packet was L2-forwarded in hardware
  *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
  *	@tc_skip_classify: do not classify packet. set by IFB device
+ *	@tc_skip_classify_offload: do not classify packet. set by offload IFB device
  *	@tc_at_ingress: used within tc_classify to distinguish in/egress
  *	@redirected: packet was redirected by packet classifier
  *	@from_ingress: packet was redirected from the ingress path
@@ -945,6 +946,9 @@ struct sk_buff {
 	__u8			tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
 	__u8			tc_skip_classify:1;
 #endif
+#ifdef CONFIG_NET_CLS_ACT
+	__u8			tc_skip_classify_offload:1;	/* QCA NSS Qdisc Support */
+#endif
 	__u8			remcsum_offload:1;
 	__u8			csum_complete_sw:1;
 	__u8			csum_level:2;
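The qdisc_lookup(), qdisc_destroy() and tcf_destroy_chain() exports exist so that an out-of-tree NSS qdisc module can tear down its attached filters and its internal child qdiscs on destroy. The sketch below shows the teardown pattern these exports enable; the private structure, its members and the function name are assumptions for illustration, and only the two exported teardown functions come from this patch.

/*
 * Sketch of a destroy path for a hypothetical NSS qdisc that keeps an
 * old-style tcf_proto filter chain and an internal default child qdisc.
 */
struct example_nss_qdisc_priv {
	struct tcf_proto __rcu	*filter_list;	/* attached tc filters */
	struct Qdisc		*def_qdisc;	/* internal default child */
};

static void example_nss_qdisc_destroy(struct Qdisc *sch)
{
	struct example_nss_qdisc_priv *priv = qdisc_priv(sch);

	/* Drop every filter attached to this qdisc. */
	tcf_destroy_chain(&priv->filter_list);

	/* Release the internal child qdisc unconditionally. */
	if (priv->def_qdisc)
		qdisc_destroy(priv->def_qdisc);
}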