From 6ec4d0cf0b0e5e41abc91012db4ebff7d415a92b Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Tue, 30 Oct 2018 18:26:13 +0800
Subject: [PATCH 08/40] dpaa2-ethernet: support layerscape

This is an integrated patch of dpaa2-ethernet for layerscape

Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
 drivers/staging/fsl-dpaa2/Kconfig             |    7 +
 drivers/staging/fsl-dpaa2/ethernet/Makefile   |    2 +
 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c      | 1240 +++++++++
 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h      |  183 ++
 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c    |  357 +++
 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h    |   60 +
 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c    | 2335 +++++++++++++----
 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h    |  388 ++-
 .../fsl-dpaa2/ethernet/dpaa2-ethtool.c        |  625 ++++-
 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h |  192 +-
 drivers/staging/fsl-dpaa2/ethernet/dpni.c     |  604 ++++-
 drivers/staging/fsl-dpaa2/ethernet/dpni.h     |  344 ++-
 12 files changed, 5723 insertions(+), 614 deletions(-)
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h

--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
          Ethernet driver for Freescale DPAA2 SoCs, using the
          Freescale MC bus driver
 
+config FSL_DPAA2_ETH_CEETM
+       depends on NET_SCHED
+       bool "DPAA2 Ethernet CEETM QoS"
+       default n
+       ---help---
+         Enable QoS offloading support through the CEETM hardware block.
+
 if FSL_DPAA2_ETH
 config FSL_DPAA2_ETH_USE_ERR_QUEUE
        bool "Enable Rx error queue"
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
@@ -5,6 +5,8 @@
 obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
 
 fsl-dpaa2-eth-objs    := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
 
 # Needed by the tracing framework
 CFLAGS_dpaa2-eth.o := -I$(src)
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
@@ -0,0 +1,1240 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "dpaa2-eth-ceetm.h"
+#include "dpaa2-eth.h"
+
+#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
+/* Convert rate from Bps (as passed by userspace) to Mbit/s: rate * 8 / 10^6, approximated here as rate >> 17 */
+#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
+
+static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
+       [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
+       [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
+};
+
+struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
+
+static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
+                                          struct dpni_tx_shaping_cfg *scfg,
+                                          struct dpni_tx_shaping_cfg *ecfg,
+                                          int coupled, int ch_id)
+{
+       int err = 0;
+
+       netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
+                  ch_id, scfg->rate_limit);
+       err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
+                                 ecfg, coupled);
+       if (err)
+               netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
+
+       return err;
+}
+
+static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
+                                            int ch_id)
+{
+       struct dpni_tx_shaping_cfg cfg = { 0 };
+
+       return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
+}
+
+static inline int
+dpaa2_eth_update_shaping_cfg(struct net_device *dev,
+                            struct dpaa2_ceetm_shaping_cfg cfg,
+                            struct dpni_tx_shaping_cfg *scfg,
+                            struct dpni_tx_shaping_cfg *ecfg)
+{
+       scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
+       ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
+
+       if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
+               netdev_err(dev, "Committed burst size must be under %d\n",
+                          DPAA2_ETH_MAX_BURST_SIZE);
+               return -EINVAL;
+       }
+
+       scfg->max_burst_size = cfg.cbs;
+
+       if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
+               netdev_err(dev, "Excess burst size must be under %d\n",
+                          DPAA2_ETH_MAX_BURST_SIZE);
+               return -EINVAL;
+       }
+
+       ecfg->max_burst_size = cfg.ebs;
+
+       if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
+               netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+enum update_tx_prio {
+       DPAA2_ETH_ADD_CQ,
+       DPAA2_ETH_DEL_CQ,
+};
+
+/* Normalize weights based on max passed value */
+static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
+{
+       struct dpni_tx_schedule_cfg *sched_cfg;
+       struct dpaa2_ceetm_class *cl;
+       u32 qpri;
+       u16 weight_max = 0, increment;
+       int i;
+
+       /* Check the boundaries of the provided values */
+       for (i = 0; i < priv->clhash.hashsize; i++)
+               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
+                       weight_max = (weight_max == 0 ? cl->prio.weight :
+                                    (weight_max < cl->prio.weight ?
+                                     cl->prio.weight : weight_max));
+
+       /* If there are no elements, there's nothing to do */
+       if (weight_max == 0)
+               return 0;
+
+       increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
+                   weight_max;
+
+       for (i = 0; i < priv->clhash.hashsize; i++) {
+               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
+                       if (cl->prio.mode == STRICT_PRIORITY)
+                               continue;
+
+                       qpri = cl->prio.qpri;
+                       sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
+
+                       sched_cfg->delta_bandwidth =
+                               DPAA2_CEETM_MIN_WEIGHT +
+                               (cl->prio.weight * increment);
+
+                       pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
+                                __func__, qpri, sched_cfg->delta_bandwidth);
+               }
+       }
+
+       return 0;
+}
+
+static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
+                                          struct dpaa2_ceetm_class *cl,
+                                          enum update_tx_prio type)
+{
+       struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
+       struct dpni_congestion_notification_cfg notif_cfg = {0};
+       struct dpni_tx_schedule_cfg *sched_cfg;
+       struct dpni_taildrop td = {0};
+       u8 ch_id = 0, tc_id = 0;
+       u32 qpri = 0;
+       int err = 0;
+
+       qpri = cl->prio.qpri;
+       tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
+
+       switch (type) {
+       case DPAA2_ETH_ADD_CQ:
+               /* Disable congestion notifications */
+               notif_cfg.threshold_entry = 0;
+               notif_cfg.threshold_exit = 0;
+               err = dpni_set_congestion_notification(priv->mc_io, 0,
+                                                      priv->mc_token,
+                                                      DPNI_QUEUE_TX, tc_id,
+                                                      &notif_cfg);
+               if (err) {
+                       netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
+                                  err);
+                       return err;
+               }
+               /* Enable taildrop */
+               td.enable = 1;
+               td.units = DPNI_CONGESTION_UNIT_FRAMES;
+               td.threshold = DPAA2_CEETM_TD_THRESHOLD;
+               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+                                       DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
+                                       0, &td);
+               if (err) {
+                       netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
+                                  err);
+                       return err;
+               }
+               break;
+       case DPAA2_ETH_DEL_CQ:
+               /* Disable taildrop */
+               td.enable = 0;
+               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+                                       DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
+                                       0, &td);
+               if (err) {
+                       netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
+                                  err);
+                       return err;
+               }
+               /* Enable congestion notifications */
+               notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
+               notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
+               notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
+               notif_cfg.message_ctx = (u64)priv;
+               notif_cfg.message_iova = priv->cscn_dma;
+               notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+                                             DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+                                             DPNI_CONG_OPT_COHERENT_WRITE;
+               err = dpni_set_congestion_notification(priv->mc_io, 0,
+                                                      priv->mc_token,
+                                                      DPNI_QUEUE_TX, tc_id,
+                                                      &notif_cfg);
+               if (err) {
+                       netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
+                                  err);
+                       return err;
+               }
+               break;
+       }
+
+       /* We can zero out the structure in the tx_prio_conf array */
+       if (type == DPAA2_ETH_DEL_CQ) {
+               sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
+               memset(sched_cfg, 0, sizeof(*sched_cfg));
+       }
+
+       /* Normalize priorities */
+       err = dpaa2_eth_normalize_tx_prio(sch);
+
+       /* Debug print goes here */
+       print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
+                            &sch->prio.tx_prio_cfg,
+                            sizeof(sch->prio.tx_prio_cfg), 0);
+
+       /* Call dpni_set_tx_priorities for the entire prio qdisc */
+       err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
+                                    &sch->prio.tx_prio_cfg);
+       if (err)
+               netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
+                          err);
+
+       return err;
+}
+
+static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
+{
+       priv->ceetm_en = true;
+}
+
+static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
+{
+       priv->ceetm_en = false;
+}
+
+/* Find class in qdisc hash table using given handle */
+static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
+                                                        struct Qdisc *sch)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct Qdisc_class_common *clc;
+
+       pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
+                __func__, handle, sch->handle);
+
+       clc = qdisc_class_find(&priv->clhash, handle);
+       return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
+}
+
+/* Insert a class in the qdisc's class hash */
+static void dpaa2_ceetm_link_class(struct Qdisc *sch,
+                                  struct Qdisc_class_hash *clhash,
+                                  struct Qdisc_class_common *common)
+{
+       sch_tree_lock(sch);
+       qdisc_class_hash_insert(clhash, common);
+       sch_tree_unlock(sch);
+       qdisc_class_hash_grow(sch, clhash);
+}
+
+/* Destroy a ceetm class */
+static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
+                                   struct dpaa2_ceetm_class *cl)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+       if (!cl)
+               return;
+
+       pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
+                __func__, cl->common.classid, sch->handle);
+
+       /* Recurse into child first */
+       if (cl->child) {
+               qdisc_destroy(cl->child);
+               cl->child = NULL;
+       }
+
+       switch (cl->type) {
+       case CEETM_ROOT:
+               if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
+                       netdev_err(dev, "Error resetting channel shaping\n");
+
+               break;
+
+       case CEETM_PRIO:
+               if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
+                       netdev_err(dev, "Error resetting tx_priorities\n");
+
+               if (cl->prio.cstats)
+                       free_percpu(cl->prio.cstats);
+
+               break;
+       }
+
+       tcf_block_put(cl->block);
+       kfree(cl);
+}
+
+/* Destroy a ceetm qdisc */
+static void dpaa2_ceetm_destroy(struct Qdisc *sch)
+{
+       unsigned int i;
+       struct hlist_node *next;
+       struct dpaa2_ceetm_class *cl;
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+
+       pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
+                __func__, sch->handle);
+
+       /* All filters need to be removed before destroying the classes */
+       tcf_block_put(priv->block);
+
+       for (i = 0; i < priv->clhash.hashsize; i++) {
+               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
+                       tcf_block_put(cl->block);
+       }
+
+       for (i = 0; i < priv->clhash.hashsize; i++) {
+               hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
+                                         common.hnode)
+                       dpaa2_ceetm_cls_destroy(sch, cl);
+       }
+
+       qdisc_class_hash_destroy(&priv->clhash);
+
+       switch (priv->type) {
+       case CEETM_ROOT:
+               dpaa2_eth_ceetm_disable(priv_eth);
+
+               if (priv->root.qstats)
+                       free_percpu(priv->root.qstats);
+
+               if (!priv->root.qdiscs)
+                       break;
+
+               /* Destroy the pfifo qdiscs in case they haven't been attached
+                * to the netdev queues yet.
+                */
+               for (i = 0; i < dev->num_tx_queues; i++)
+                       if (priv->root.qdiscs[i])
+                               qdisc_destroy(priv->root.qdiscs[i]);
+
+               kfree(priv->root.qdiscs);
+               break;
+
+       case CEETM_PRIO:
+               if (priv->prio.parent)
+                       priv->prio.parent->child = NULL;
+               break;
+       }
+}
+
+static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct Qdisc *qdisc;
+       unsigned int ntx, i;
+       struct nlattr *nest;
+       struct dpaa2_ceetm_tc_qopt qopt;
+       struct dpaa2_ceetm_qdisc_stats *qstats;
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       sch_tree_lock(sch);
+       memset(&qopt, 0, sizeof(qopt));
+       qopt.type = priv->type;
+       qopt.shaped = priv->shaped;
+
+       switch (priv->type) {
+       case CEETM_ROOT:
+               /* Gather statistics from the underlying pfifo qdiscs */
+               sch->q.qlen = 0;
+               memset(&sch->bstats, 0, sizeof(sch->bstats));
+               memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+               for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+                       qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+                       sch->q.qlen             += qdisc->q.qlen;
+                       sch->bstats.bytes       += qdisc->bstats.bytes;
+                       sch->bstats.packets     += qdisc->bstats.packets;
+                       sch->qstats.qlen        += qdisc->qstats.qlen;
+                       sch->qstats.backlog     += qdisc->qstats.backlog;
+                       sch->qstats.drops       += qdisc->qstats.drops;
+                       sch->qstats.requeues    += qdisc->qstats.requeues;
+                       sch->qstats.overlimits  += qdisc->qstats.overlimits;
+               }
+
+               for_each_online_cpu(i) {
+                       qstats = per_cpu_ptr(priv->root.qstats, i);
+                       sch->qstats.drops += qstats->drops;
+               }
+
+               break;
+
+       case CEETM_PRIO:
+               qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
+               qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
+               qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
+               break;
+
+       default:
+               pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+               sch_tree_unlock(sch);
+               return -EINVAL;
+       }
+
+       nest = nla_nest_start(skb, TCA_OPTIONS);
+       if (!nest)
+               goto nla_put_failure;
+       if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
+               goto nla_put_failure;
+       nla_nest_end(skb, nest);
+
+       sch_tree_unlock(sch);
+       return skb->len;
+
+nla_put_failure:
+       sch_tree_unlock(sch);
+       nla_nest_cancel(skb, nest);
+       return -EMSGSIZE;
+}
+
+static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
+                                  struct dpaa2_ceetm_qdisc *priv,
+                                  struct dpaa2_ceetm_tc_qopt *qopt)
+{
+       /* TODO: Once LX2 support is added */
+       /* priv->shaped = parent_cl->shaped; */
+       priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
+       priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
+       priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
+
+       return 0;
+}
+
+/* Edit a ceetm qdisc */
+static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
+       struct dpaa2_ceetm_tc_qopt *qopt;
+       int err;
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
+                              dpaa2_ceetm_policy, NULL);
+       if (err < 0) {
+               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+                      "nla_parse_nested");
+               return err;
+       }
+
+       if (!tb[DPAA2_CEETM_TCA_QOPS]) {
+               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+                      "tb");
+               return -EINVAL;
+       }
+
+       if (TC_H_MIN(sch->handle)) {
+               pr_err("CEETM: a qdisc should not have a minor\n");
+               return -EINVAL;
+       }
+
+       qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
+       if (priv->type != qopt->type) {
+               pr_err("CEETM: qdisc %X is not of the provided type\n",
+                      sch->handle);
+               return -EINVAL;
+       }
+
+       switch (priv->type) {
+       case CEETM_PRIO:
+               err = dpaa2_ceetm_change_prio(sch, priv, qopt);
+               break;
+       default:
+               pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+/* Configure a root ceetm qdisc */
+static int dpaa2_ceetm_init_root(struct Qdisc *sch,
+                                struct dpaa2_ceetm_qdisc *priv,
+                                struct dpaa2_ceetm_tc_qopt *qopt)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+       struct netdev_queue *dev_queue;
+       unsigned int i, parent_id;
+       struct Qdisc *qdisc;
+       int err;
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       /* Validate inputs */
+       if (sch->parent != TC_H_ROOT) {
+               pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
+               tcf_block_put(priv->block);
+               qdisc_class_hash_destroy(&priv->clhash);
+               return -EINVAL;
+       }
+
+       /* Pre-allocate underlying pfifo qdiscs.
+        *
+        * We want to offload shaping and scheduling decisions to the hardware.
+        * The pfifo qdiscs will be attached to the netdev queues and will
+        * guide the traffic from the IP stack down to the driver with minimum
+        * interference.
+        *
+        * The CEETM qdiscs and classes will be crossed when the traffic
+        * reaches the driver.
+        */
+       priv->root.qdiscs = kcalloc(dev->num_tx_queues,
+                                   sizeof(priv->root.qdiscs[0]),
+                                   GFP_KERNEL);
+       if (!priv->root.qdiscs) {
+               err = -ENOMEM;
+               goto err_init_root;
+       }
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               dev_queue = netdev_get_tx_queue(dev, i);
+               parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                     TC_H_MIN(i + PFIFO_MIN_OFFSET));
+
+               qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+                                         parent_id);
+               if (!qdisc) {
+                       err = -ENOMEM;
+                       goto err_init_root;
+               }
+
+               priv->root.qdiscs[i] = qdisc;
+               qdisc->flags |= TCQ_F_ONETXQUEUE;
+       }
+
+       sch->flags |= TCQ_F_MQROOT;
+
+       priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
+       if (!priv->root.qstats) {
+               pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+                      __func__);
+               err = -ENOMEM;
+               goto err_init_root;
+       }
+
+       dpaa2_eth_ceetm_enable(priv_eth);
+       return 0;
+
+err_init_root:
+       dpaa2_ceetm_destroy(sch);
+       return err;
+}
+
+/* Configure a prio ceetm qdisc */
+static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
+                                struct dpaa2_ceetm_qdisc *priv,
+                                struct dpaa2_ceetm_tc_qopt *qopt)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_ceetm_class *parent_cl;
+       struct Qdisc *parent_qdisc;
+       int err;
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       if (sch->parent == TC_H_ROOT) {
+               pr_err("CEETM: a prio ceetm qdisc can not be root\n");
+               err = -EINVAL;
+               goto err_init_prio;
+       }
+
+       parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+       if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+               pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
+               err = -EINVAL;
+               goto err_init_prio;
+       }
+
+       /* Obtain the parent root ceetm_class */
+       parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
+
+       if (!parent_cl || parent_cl->type != CEETM_ROOT) {
+               pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
+               err = -EINVAL;
+               goto err_init_prio;
+       }
+
+       priv->prio.parent = parent_cl;
+       parent_cl->child = sch;
+
+       err = dpaa2_ceetm_change_prio(sch, priv, qopt);
+
+       return 0;
+
+err_init_prio:
+       dpaa2_ceetm_destroy(sch);
+       return err;
+}
+
+/* Configure a generic ceetm qdisc */
+static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
+       struct dpaa2_ceetm_tc_qopt *qopt;
+       int err;
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       if (!netif_is_multiqueue(dev))
+               return -EOPNOTSUPP;
+
+       err = tcf_block_get(&priv->block, &priv->filter_list);
+       if (err) {
+               pr_err("CEETM: unable to get tcf_block\n");
+               return err;
+       }
+
+       if (!opt) {
+               pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
+                      __func__);
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
+                              dpaa2_ceetm_policy, NULL);
+       if (err < 0) {
+               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+                      "nla_parse_nested");
+               return err;
+       }
+
+       if (!tb[DPAA2_CEETM_TCA_QOPS]) {
+               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+                      "tb");
+               return -EINVAL;
+       }
+
+       if (TC_H_MIN(sch->handle)) {
+               pr_err("CEETM: a qdisc should not have a minor\n");
+               return -EINVAL;
+       }
+
+       qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
+       /* Initialize the class hash list. Each qdisc has its own class hash */
+       err = qdisc_class_hash_init(&priv->clhash);
+       if (err < 0) {
+               pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
+                      __func__);
+               return err;
+       }
+
+       priv->type = qopt->type;
+       priv->shaped = qopt->shaped;
+
+       switch (priv->type) {
+       case CEETM_ROOT:
+               err = dpaa2_ceetm_init_root(sch, priv, qopt);
+               break;
+       case CEETM_PRIO:
+               err = dpaa2_ceetm_init_prio(sch, priv, qopt);
+               break;
+       default:
+               pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+               dpaa2_ceetm_destroy(sch);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+/* Attach the underlying pfifo qdiscs */
+static void dpaa2_ceetm_attach(struct Qdisc *sch)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct Qdisc *qdisc, *old_qdisc;
+       unsigned int i;
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               qdisc = priv->root.qdiscs[i];
+               old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+               if (old_qdisc)
+                       qdisc_destroy(old_qdisc);
+       }
+
+       /* Remove the references to the pfifo qdiscs since the kernel will
+        * destroy them when needed. No cleanup from our part is required from
+        * this point on.
+        */
+       kfree(priv->root.qdiscs);
+       priv->root.qdiscs = NULL;
+}
+
+static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
+{
+       struct dpaa2_ceetm_class *cl;
+
+       pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
+                __func__, classid, sch->handle);
+       cl = dpaa2_ceetm_find(classid, sch);
+
+       return (unsigned long)cl;
+}
+
+static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
+                                      struct dpaa2_ceetm_tc_copt *copt,
+                                      struct net_device *dev)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(dev);
+       struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
+       int err = 0;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
+                cl->common.classid);
+
+       if (!cl->shaped)
+               return 0;
+
+       if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
+                                        &scfg, &ecfg))
+               return -EINVAL;
+
+       err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
+                                      copt->shaping_cfg.coupled,
+                                      cl->root.ch_id);
+       if (err)
+               return err;
+
+       memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
+              sizeof(struct dpaa2_ceetm_shaping_cfg));
+
+       return err;
+}
+
+static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
+                                      struct dpaa2_ceetm_tc_copt *copt,
+                                      struct net_device *dev)
+{
+       struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
+       struct dpni_tx_schedule_cfg *sched_cfg;
+       struct dpaa2_eth_priv *priv = netdev_priv(dev);
+       int err;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
+                __func__, cl->common.classid, copt->mode, copt->weight);
+
+       if (!cl->prio.cstats) {
+               cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
+               if (!cl->prio.cstats) {
+                       pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+                              __func__);
+                       return -ENOMEM;
+               }
+       }
+
+       cl->prio.mode = copt->mode;
+       cl->prio.weight = copt->weight;
+
+       sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
+
+       switch (copt->mode) {
+       case STRICT_PRIORITY:
+               sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
+               break;
+       case WEIGHTED_A:
+               sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
+               break;
+       case WEIGHTED_B:
+               sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
+               break;
+       }
+
+       err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
+
+       return err;
+}
+
+/* Add a new ceetm class */
+static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
+                              struct dpaa2_ceetm_tc_copt *copt,
+                              unsigned long *arg)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+       struct dpaa2_ceetm_class *cl;
+       int err;
+
+       if (copt->type == CEETM_ROOT &&
+           priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
+               pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
+                      dpaa2_eth_ch_count(priv_eth),
+                      dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
+               return -EINVAL;
+       }
+
+       if (copt->type == CEETM_PRIO &&
+           priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
+               pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
+                      dpaa2_eth_tc_count(priv_eth),
+                      dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
+               return -EINVAL;
+       }
+
+       cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+       if (!cl)
+               return -ENOMEM;
+
+       err = tcf_block_get(&cl->block, &cl->filter_list);
+       if (err) {
+               pr_err("%s: Unable to set new root class\n", __func__);
+               goto out_free;
+       }
+
+       cl->common.classid = classid;
+       cl->parent = sch;
+       cl->child = NULL;
+
+       /* Add class handle in Qdisc */
+       dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
+
+       cl->shaped = copt->shaped;
+       cl->type = copt->type;
+
+       /* Claim a CEETM channel / tc - DPAA2. will assume transition from
+        * classid to qdid/qpri, starting from qdid / qpri 0
+        */
+       switch (copt->type) {
+       case CEETM_ROOT:
+               cl->root.ch_id = classid - sch->handle - 1;
+               err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
+               break;
+       case CEETM_PRIO:
+               cl->prio.qpri = classid - sch->handle - 1;
+               err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
+               break;
+       }
+
+       if (err) {
+               pr_err("%s: Unable to set new %s class\n", __func__,
+                      (copt->type == CEETM_ROOT ? "root" : "prio"));
+               goto out_free;
+       }
+
+       switch (copt->type) {
+       case CEETM_ROOT:
+               pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
+                        __func__, classid, cl->root.ch_id);
+               break;
+       case CEETM_PRIO:
+               pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
+                        __func__, classid, cl->prio.qpri);
+               break;
+       }
+
+       *arg = (unsigned long)cl;
+       return 0;
+
+out_free:
+       kfree(cl);
+       return err;
+}
+
+/* Add or configure a ceetm class */
+static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
+                                 struct nlattr **tca, unsigned long *arg)
+{
+       struct dpaa2_ceetm_qdisc *priv;
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
+       struct nlattr *opt = tca[TCA_OPTIONS];
+       struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
+       struct dpaa2_ceetm_tc_copt *copt;
+       struct net_device *dev = qdisc_dev(sch);
+       int err;
+
+       pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
+                __func__, classid, sch->handle);
+
+       if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+               pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
+               return -EINVAL;
+       }
+
+       priv = qdisc_priv(sch);
+
+       if (!opt) {
+               pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
+                              dpaa2_ceetm_policy, NULL);
+       if (err < 0) {
+               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+                      "nla_parse_nested");
+               return -EINVAL;
+       }
+
+       if (!tb[DPAA2_CEETM_TCA_COPT]) {
+               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+                      "tb");
+               return -EINVAL;
+       }
+
+       copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
+
+       /* Configure an existing ceetm class */
+       if (cl) {
+               if (copt->type != cl->type) {
+                       pr_err("CEETM: class %X is not of the provided type\n",
+                              cl->common.classid);
+                       return -EINVAL;
+               }
+
+               switch (copt->type) {
+               case CEETM_ROOT:
+                       return dpaa2_ceetm_cls_change_root(cl, copt, dev);
+               case CEETM_PRIO:
+                       return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
+
+               default:
+                       pr_err(KBUILD_BASENAME " : %s : invalid class\n",
+                              __func__);
+                       return -EINVAL;
+               }
+       }
+
+       return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
+}
+
+static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct dpaa2_ceetm_class *cl;
+       unsigned int i;
+
+       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+       if (arg->stop)
+               return;
+
+       for (i = 0; i < priv->clhash.hashsize; i++) {
+               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
+                       if (arg->count < arg->skip) {
+                               arg->count++;
+                               continue;
+                       }
+                       if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
+                               arg->stop = 1;
+                               return;
+                       }
+                       arg->count++;
+               }
+       }
+}
+
+static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
+                               struct sk_buff *skb, struct tcmsg *tcm)
+{
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+       struct nlattr *nest;
+       struct dpaa2_ceetm_tc_copt copt;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+                __func__, cl->common.classid, sch->handle);
+
+       sch_tree_lock(sch);
+
+       tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
+       tcm->tcm_handle = cl->common.classid;
+
+       memset(&copt, 0, sizeof(copt));
+
+       copt.shaped = cl->shaped;
+       copt.type = cl->type;
+
+       switch (cl->type) {
+       case CEETM_ROOT:
+               if (cl->child)
+                       tcm->tcm_info = cl->child->handle;
+
+               memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
+                      sizeof(struct dpaa2_ceetm_shaping_cfg));
+
+               break;
+
+       case CEETM_PRIO:
+               if (cl->child)
+                       tcm->tcm_info = cl->child->handle;
+
+               copt.mode = cl->prio.mode;
+               copt.weight = cl->prio.weight;
+
+               break;
+       }
+
+       nest = nla_nest_start(skb, TCA_OPTIONS);
+       if (!nest)
+               goto nla_put_failure;
+       if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
+               goto nla_put_failure;
+       nla_nest_end(skb, nest);
+       sch_tree_unlock(sch);
+       return skb->len;
+
+nla_put_failure:
+       sch_tree_unlock(sch);
+       nla_nest_cancel(skb, nest);
+       return -EMSGSIZE;
+}
+
+static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+                __func__, cl->common.classid, sch->handle);
+
+       sch_tree_lock(sch);
+       qdisc_class_hash_remove(&priv->clhash, &cl->common);
+       sch_tree_unlock(sch);
+       return 0;
+}
+
+/* Get the class' child qdisc, if any */
+static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+                __func__, cl->common.classid, sch->handle);
+
+       switch (cl->type) {
+       case CEETM_ROOT:
+       case CEETM_PRIO:
+               return cl->child;
+       }
+
+       return NULL;
+}
+
+static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
+                                struct Qdisc *new, struct Qdisc **old)
+{
+       if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+               pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
+                                     struct gnet_dump *d)
+{
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+       struct gnet_stats_basic_packed tmp_bstats;
+       struct dpaa2_ceetm_tc_xstats xstats;
+       union dpni_statistics dpni_stats;
+       struct net_device *dev = qdisc_dev(sch);
+       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+       u8 ch_id = 0;
+       int err;
+
+       memset(&xstats, 0, sizeof(xstats));
+       memset(&tmp_bstats, 0, sizeof(tmp_bstats));
+
+       if (cl->type == CEETM_ROOT)
+               return 0;
+
+       err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
+                                 DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
+                                 &dpni_stats);
+       if (err)
+               netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
+
+       xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
+       xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
+       xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
+       xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
+
+       return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
+                                              unsigned long arg)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+                cl ? cl->common.classid : 0, sch->handle);
+       return cl ? cl->block : priv->block;
+}
+
+static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
+                                         unsigned long parent,
+                                         u32 classid)
+{
+       struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+                cl ? cl->common.classid : 0, sch->handle);
+       return (unsigned long)cl;
+}
+
+static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
+{
+       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+                cl ? cl->common.classid : 0, sch->handle);
+}
+
+const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
+       .graft          =       dpaa2_ceetm_cls_graft,
+       .leaf           =       dpaa2_ceetm_cls_leaf,
+       .find           =       dpaa2_ceetm_cls_find,
+       .change         =       dpaa2_ceetm_cls_change,
+       .delete         =       dpaa2_ceetm_cls_delete,
+       .walk           =       dpaa2_ceetm_cls_walk,
+       .tcf_block      =       dpaa2_ceetm_tcf_block,
+       .bind_tcf       =       dpaa2_ceetm_tcf_bind,
+       .unbind_tcf     =       dpaa2_ceetm_tcf_unbind,
+       .dump           =       dpaa2_ceetm_cls_dump,
+       .dump_stats     =       dpaa2_ceetm_cls_dump_stats,
+};
+
+struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
+       .id             =       "ceetm",
+       .priv_size      =       sizeof(struct dpaa2_ceetm_qdisc),
+       .cl_ops         =       &dpaa2_ceetm_cls_ops,
+       .init           =       dpaa2_ceetm_init,
+       .destroy        =       dpaa2_ceetm_destroy,
+       .change         =       dpaa2_ceetm_change,
+       .dump           =       dpaa2_ceetm_dump,
+       .attach         =       dpaa2_ceetm_attach,
+       .owner          =       THIS_MODULE,
+};
+
+/* Run the filters and classifiers attached to the qdisc on the provided skb */
+int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+                        int *qdid, u8 *qpri)
+{
+       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+       struct dpaa2_ceetm_class *cl = NULL;
+       struct tcf_result res;
+       struct tcf_proto *tcf;
+       int result;
+
+       tcf = rcu_dereference_bh(priv->filter_list);
+       while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+               switch (result) {
+               case TC_ACT_QUEUED:
+               case TC_ACT_STOLEN:
+               case TC_ACT_SHOT:
+                       /* No valid class found due to action */
+                       return -1;
+               }
+#endif
+               cl = (void *)res.class;
+               if (!cl) {
+                       /* The filter leads to the qdisc */
+                       if (res.classid == sch->handle)
+                               return 0;
+
+                       cl = dpaa2_ceetm_find(res.classid, sch);
+                       /* The filter leads to an invalid class */
+                       if (!cl)
+                               break;
+               }
+
+               /* The class might have its own filters attached */
+               tcf = rcu_dereference_bh(cl->filter_list);
+       }
+
+       /* No valid class found */
+       if (!cl)
+               return 0;
+
+       switch (cl->type) {
+       case CEETM_ROOT:
+               *qdid = cl->root.ch_id;
+
+               /* The root class does not have a child prio qdisc */
+               if (!cl->child)
+                       return 0;
+
+               /* Run the prio qdisc classifiers */
+               return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
+
+       case CEETM_PRIO:
+               *qpri = cl->prio.qpri;
+               break;
+       }
+
+       return 0;
+}
+
+int __init dpaa2_ceetm_register(void)
+{
+       int err = 0;
+
+       pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
+
+       err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
+       if (unlikely(err))
+               pr_err(KBUILD_MODNAME
+                      ": %s:%hu:%s(): register_qdisc() = %d\n",
+                      KBUILD_BASENAME ".c", __LINE__, __func__, err);
+
+       return err;
+}
+
+void __exit dpaa2_ceetm_unregister(void)
+{
+       pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+                KBUILD_BASENAME ".c", __func__);
+
+       unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
+}
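
dpaa2_ceetm_classify() and dpaa2_eth_ceetm_is_enabled() are the two hooks the rest of the driver uses to steer an outgoing frame onto a CEETM channel (qdid) and priority queue (qpri). The real call site is expected to be in the dpaa2-eth.c changes further down in this patch; the snippet below is only a rough sketch of the calling convention, and the helper name, the defaults and the use of net_dev->qdisc are assumptions, not code from the patch:

#include "dpaa2-eth.h"
#include "dpaa2-eth-ceetm.h"

/* Illustrative sketch only, not part of the patch */
static int example_select_hw_queue(struct dpaa2_eth_priv *priv,
                                   struct sk_buff *skb,
                                   int *qdid, u8 *qpri)
{
        *qdid = 0;      /* assumed default: first CEETM channel */
        *qpri = 0;      /* assumed default: highest-priority queue */

        if (!dpaa2_eth_ceetm_is_enabled(priv))
                return 0;

        /* Walk the qdisc and class filter chains: a matching root class
         * selects the channel, a matching prio class selects the queue.
         * A negative return means a filter action dropped the frame.
         */
        return dpaa2_ceetm_classify(skb, priv->net_dev->qdisc, qdid, qpri);
}
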
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA2_ETH_CEETM_H
+#define __DPAA2_ETH_CEETM_H
+
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+#include <net/netlink.h>
+
+#include "dpaa2-eth.h"
+
+/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
+ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
+ * are reserved for the maximum 32 CEETM channels (majors and minors are in
+ * hex).
+ */
+#define PFIFO_MIN_OFFSET               0x21
+
+#define DPAA2_CEETM_MIN_WEIGHT         100
+#define DPAA2_CEETM_MAX_WEIGHT         24800
+
+#define DPAA2_CEETM_TD_THRESHOLD       1000
+
+enum wbfs_group_type {
+       WBFS_GRP_A,
+       WBFS_GRP_B,
+       WBFS_GRP_LARGE
+};
+
+enum {
+       DPAA2_CEETM_TCA_UNSPEC,
+       DPAA2_CEETM_TCA_COPT,
+       DPAA2_CEETM_TCA_QOPS,
+       DPAA2_CEETM_TCA_MAX,
+};
+
+/* CEETM configuration types */
+enum dpaa2_ceetm_type {
+       CEETM_ROOT = 1,
+       CEETM_PRIO,
+};
+
+enum {
+       STRICT_PRIORITY = 0,
+       WEIGHTED_A,
+       WEIGHTED_B,
+};
+
+struct dpaa2_ceetm_shaping_cfg {
+       __u64 cir; /* committed information rate */
+       __u64 eir; /* excess information rate */
+       __u16 cbs; /* committed burst size */
+       __u16 ebs; /* excess burst size */
+       __u8 coupled; /* shaper coupling */
+};
+
+extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
+
+struct dpaa2_ceetm_class;
+struct dpaa2_ceetm_qdisc_stats;
+struct dpaa2_ceetm_class_stats;
+
+/* corresponds to CEETM shaping at LNI level */
+struct dpaa2_root_q {
+       struct Qdisc **qdiscs;
+       struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
+};
+
+/* corresponds to the number of priorities a channel serves */
+struct dpaa2_prio_q {
+       struct dpaa2_ceetm_class *parent;
+       struct dpni_tx_priorities_cfg tx_prio_cfg;
+};
+
+struct dpaa2_ceetm_qdisc {
+       struct Qdisc_class_hash clhash;
+       struct tcf_proto *filter_list; /* qdisc attached filters */
+       struct tcf_block *block;
+
+       enum dpaa2_ceetm_type type; /* ROOT/PRIO */
+       bool shaped;
+       union {
+               struct dpaa2_root_q root;
+               struct dpaa2_prio_q prio;
+       };
+};
+
+/* CEETM Qdisc configuration parameters */
+struct dpaa2_ceetm_tc_qopt {
+       enum dpaa2_ceetm_type type;
+       __u16 shaped;
+       __u8 prio_group_A;
+       __u8 prio_group_B;
+       __u8 separate_groups;
+};
+
+/* root class - corresponds to a channel */
+struct dpaa2_root_c {
+       struct dpaa2_ceetm_shaping_cfg shaping_cfg;
+       u32 ch_id;
+};
+
+/* prio class - corresponds to a strict priority queue (group) */
+struct dpaa2_prio_c {
+       struct dpaa2_ceetm_class_stats __percpu *cstats;
+       u32 qpri;
+       u8 mode;
+       u16 weight;
+};
+
+struct dpaa2_ceetm_class {
+       struct Qdisc_class_common common;
+       struct tcf_proto *filter_list; /* class attached filters */
+       struct tcf_block *block;
+       struct Qdisc *parent;
+       struct Qdisc *child;
+
+       enum dpaa2_ceetm_type type; /* ROOT/PRIO */
+       bool shaped;
+       union {
+               struct dpaa2_root_c root;
+               struct dpaa2_prio_c prio;
+       };
+};
+
+/* CEETM Class configuration parameters */
+struct dpaa2_ceetm_tc_copt {
+       enum dpaa2_ceetm_type type;
+       struct dpaa2_ceetm_shaping_cfg shaping_cfg;
+       __u16 shaped;
+       __u8 mode;
+       __u16 weight;
+};
+
+/* CEETM stats */
+struct dpaa2_ceetm_qdisc_stats {
+       __u32 drops;
+};
+
+struct dpaa2_ceetm_class_stats {
+       /* Software counters */
+       struct gnet_stats_basic_packed bstats;
+       __u32 ern_drop_count;
+       __u32 congested_count;
+};
+
+struct dpaa2_ceetm_tc_xstats {
+       __u64 ceetm_dequeue_bytes;
+       __u64 ceetm_dequeue_frames;
+       __u64 ceetm_reject_bytes;
+       __u64 ceetm_reject_frames;
+};
+
+#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
+int __init dpaa2_ceetm_register(void);
+void __exit dpaa2_ceetm_unregister(void);
+int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+                        int *qdid, u8 *qpri);
+#else
+static inline int dpaa2_ceetm_register(void)
+{
+       return 0;
+}
+
+static inline void dpaa2_ceetm_unregister(void) {}
+
+static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+                                      int *qdid, u8 *qpri)
+{
+       return 0;
+}
+#endif
+
+static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
+{
+       return priv->ceetm_en;
+}
+
+#endif
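
To get a feel for the weight bounds defined above (DPAA2_CEETM_MIN_WEIGHT / DPAA2_CEETM_MAX_WEIGHT): dpaa2_eth_normalize_tx_prio() rescales the user-supplied class weights into that range before programming them as delta_bandwidth via dpni_set_tx_priorities(). The following is a small standalone sketch of the same arithmetic, in plain userspace C; the three weights are made-up example values:

#include <stdio.h>

#define DPAA2_CEETM_MIN_WEIGHT  100
#define DPAA2_CEETM_MAX_WEIGHT  24800

int main(void)
{
        /* Example weights for three weighted classes, the largest being 5 */
        unsigned int weights[] = { 1, 2, 5 };
        unsigned int weight_max = 5;
        /* Same scaling factor the driver computes */
        unsigned int increment =
                (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) / weight_max;
        int i;

        /* Prints 5040, 9980 and 24800: the largest weight maps to the
         * maximum, the others keep roughly their relative proportions.
         */
        for (i = 0; i < 3; i++)
                printf("weight %u -> delta_bandwidth %u\n", weights[i],
                       DPAA2_CEETM_MIN_WEIGHT + weights[i] * increment);

        return 0;
}
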
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
@@ -0,0 +1,357 @@
+
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+       struct rtnl_link_stats64 *stats;
+       struct dpaa2_eth_drv_stats *extras;
+       int i;
+
+       seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+       seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+                  "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+                  "Tx SG", "Tx realloc", "Enq busy");
+
+       for_each_online_cpu(i) {
+               stats = per_cpu_ptr(priv->percpu_stats, i);
+               extras = per_cpu_ptr(priv->percpu_extras, i);
+               seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+                          i,
+                          stats->rx_packets,
+                          stats->rx_errors,
+                          extras->rx_sg_frames,
+                          stats->tx_packets,
+                          stats->tx_errors,
+                          extras->tx_conf_frames,
+                          extras->tx_sg_frames,
+                          extras->tx_reallocs,
+                          extras->tx_portal_busy);
+       }
+
+       return 0;
+}
+
+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+       int err;
+       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+       err = single_open(file, dpaa2_dbg_cpu_show, priv);
+       if (err < 0)
+               netdev_err(priv->net_dev, "single_open() failed\n");
+
+       return err;
+}
+
+static const struct file_operations dpaa2_dbg_cpu_ops = {
+       .open = dpaa2_dbg_cpu_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+       switch (fq->type) {
+       case DPAA2_RX_FQ:
+               return "Rx";
+       case DPAA2_TX_CONF_FQ:
+               return "Tx conf";
+       case DPAA2_RX_ERR_FQ:
+               return "Rx err";
+       default:
+               return "N/A";
+       }
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+       struct dpaa2_eth_fq *fq;
+       u32 fcnt, bcnt;
+       int i, err;
+
+       seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
+       seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+                  "VFQID", "CPU", "Traffic Class", "Type", "Frames",
+                  "Pending frames", "Congestion");
+
+       for (i = 0; i <  priv->num_fqs; i++) {
+               fq = &priv->fq[i];
+               err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+               if (err)
+                       fcnt = 0;
+
+               /* A lot of queues, no use displaying zero traffic ones */
+               if (!fq->stats.frames && !fcnt)
+                       continue;
+
+               seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
+                          fq->fqid,
+                          fq->target_cpu,
+                          fq->tc,
+                          fq_type_to_str(fq),
+                          fq->stats.frames,
+                          fcnt,
+                          fq->stats.congestion_entry);
+       }
+
+       return 0;
+}
+
+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+       int err;
+       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+       err = single_open(file, dpaa2_dbg_fqs_show, priv);
+       if (err < 0)
+               netdev_err(priv->net_dev, "single_open() failed\n");
+
+       return err;
+}
+
+static const struct file_operations dpaa2_dbg_fq_ops = {
+       .open = dpaa2_dbg_fqs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+       struct dpaa2_eth_channel *ch;
+       int i;
+
+       seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+       seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+                  "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+                  "Avg frm/CDAN", "Buf count");
+
+       for (i = 0; i < priv->num_channels; i++) {
+               ch = priv->channel[i];
+               seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
+                          ch->ch_id,
+                          ch->nctx.desired_cpu,
+                          ch->stats.dequeue_portal_busy,
+                          ch->stats.frames,
+                          ch->stats.cdan,
+                          ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
+                          ch->buf_count);
+       }
+
+       return 0;
+}
+
+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
+{
+       int err;
+       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+       err = single_open(file, dpaa2_dbg_ch_show, priv);
+       if (err < 0)
+               netdev_err(priv->net_dev, "single_open() failed\n");
+
+       return err;
+}
+
+static const struct file_operations dpaa2_dbg_ch_ops = {
+       .open = dpaa2_dbg_ch_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
+                                    size_t count, loff_t *offset)
+{
+       struct dpaa2_eth_priv *priv = file->private_data;
+       struct rtnl_link_stats64 *percpu_stats;
+       struct dpaa2_eth_drv_stats *percpu_extras;
+       struct dpaa2_eth_fq *fq;
+       struct dpaa2_eth_channel *ch;
+       int i;
+
+       for_each_online_cpu(i) {
+               percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+               memset(percpu_stats, 0, sizeof(*percpu_stats));
+
+               percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
+               memset(percpu_extras, 0, sizeof(*percpu_extras));
+       }
+
+       for (i = 0; i < priv->num_fqs; i++) {
+               fq = &priv->fq[i];
+               memset(&fq->stats, 0, sizeof(fq->stats));
+       }
+
+       for (i = 0; i < priv->num_channels; i++) {
+               ch = priv->channel[i];
+               memset(&ch->stats, 0, sizeof(ch->stats));
+       }
+
+       return count;
+}
+
+static const struct file_operations dpaa2_dbg_reset_ops = {
+       .open = simple_open,
+       .write = dpaa2_dbg_reset_write,
+};
+
+static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
+                                       const char __user *buf,
+                                       size_t count, loff_t *offset)
+{
+       struct dpaa2_eth_priv *priv = file->private_data;
+       int err;
+
+       err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
+       if (err)
+               netdev_err(priv->net_dev,
+                          "dpni_reset_statistics() failed %d\n", err);
+
+       return count;
+}
+
+static const struct file_operations dpaa2_dbg_reset_mc_ops = {
+       .open = simple_open,
+       .write = dpaa2_dbg_reset_mc_write,
+};
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+       if (!dpaa2_dbg_root)
+               return;
+
+       /* Create a directory for the interface */
+       priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
+                                          dpaa2_dbg_root);
+       if (!priv->dbg.dir) {
+               netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
+               return;
+       }
+
+       /* per-cpu stats file */
+       priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
+                                                 priv->dbg.dir, priv,
+                                                 &dpaa2_dbg_cpu_ops);
+       if (!priv->dbg.cpu_stats) {
+               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+               goto err_cpu_stats;
+       }
+
+       /* per-fq stats file */
+       priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
+                                                priv->dbg.dir, priv,
+                                                &dpaa2_dbg_fq_ops);
+       if (!priv->dbg.fq_stats) {
+               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+               goto err_fq_stats;
+       }
+
+       /* per-channel stats file */
+       priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
+                                                priv->dbg.dir, priv,
+                                                &dpaa2_dbg_ch_ops);
+       if (!priv->dbg.ch_stats) {
+               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+               goto err_ch_stats;
+       }
+
+       /* reset stats */
+       priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
+                                                   priv->dbg.dir, priv,
+                                                   &dpaa2_dbg_reset_ops);
+       if (!priv->dbg.reset_stats) {
+               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+               goto err_reset_stats;
+       }
+
+       /* reset MC stats */
+       priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
+                                               0222, priv->dbg.dir, priv,
+                                               &dpaa2_dbg_reset_mc_ops);
+       if (!priv->dbg.reset_mc_stats) {
+               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+               goto err_reset_mc_stats;
+       }
+
+       return;
+
+err_reset_mc_stats:
+       debugfs_remove(priv->dbg.reset_stats);
+err_reset_stats:
+       debugfs_remove(priv->dbg.ch_stats);
+err_ch_stats:
+       debugfs_remove(priv->dbg.fq_stats);
+err_fq_stats:
+       debugfs_remove(priv->dbg.cpu_stats);
+err_cpu_stats:
+       debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+       debugfs_remove(priv->dbg.reset_mc_stats);
+       debugfs_remove(priv->dbg.reset_stats);
+       debugfs_remove(priv->dbg.fq_stats);
+       debugfs_remove(priv->dbg.ch_stats);
+       debugfs_remove(priv->dbg.cpu_stats);
+       debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+       dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+       if (!dpaa2_dbg_root) {
+               pr_err("DPAA2-ETH: debugfs create failed\n");
+               return;
+       }
+
+       pr_info("DPAA2-ETH: debugfs created\n");
+}
+
+void __exit dpaa2_eth_dbg_exit(void)
+{
+       debugfs_remove(dpaa2_dbg_root);
+}
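
The files created by dpaa2_dbg_add() above are plain seq_file text, so they can be read with ordinary file I/O once debugfs is available. A minimal reader, as a sketch assuming the conventional /sys/kernel/debug mount point and an interface named eth0 (both are assumptions, not something the patch mandates):

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/kernel/debug/dpaa2-eth/eth0/cpu_stats", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}

The fq_stats and ch_stats files work the same way; reset_stats and reset_mc_stats are write-only, and writing any byte to them clears the software counters or asks the MC firmware to reset its own statistics, respectively.
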
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
@@ -0,0 +1,60 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+       struct dentry *dir;
+       struct dentry *fq_stats;
+       struct dentry *ch_stats;
+       struct dentry *cpu_stats;
+       struct dentry *reset_stats;
+       struct dentry *reset_mc_stats;
+};
+
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
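
The stub block above lets callers invoke the debugfs hooks unconditionally, with no #ifdef at the call sites, since the empty inlines compile away when CONFIG_FSL_DPAA2_ETH_DEBUGFS is off. A sketch of the expected pairing of the four hooks, assuming the usual module init/exit and probe/remove call sites; the example_* function names are illustrative, not taken from the patch:

#include <linux/init.h>
#include "dpaa2-eth-debugfs.h"

static int __init example_module_init(void)
{
        dpaa2_eth_dbg_init();   /* create the "dpaa2-eth" debugfs root once */
        return 0;
}

static int example_probe(struct dpaa2_eth_priv *priv)
{
        dpaa2_dbg_add(priv);    /* per-interface directory and stat files */
        return 0;
}

static void example_remove(struct dpaa2_eth_priv *priv)
{
        dpaa2_dbg_remove(priv); /* tear down what dpaa2_dbg_add() created */
}

static void __exit example_module_exit(void)
{
        dpaa2_eth_dbg_exit();   /* remove the debugfs root */
}
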
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -38,9 +38,14 @@
 #include <linux/msi.h>
 #include <linux/kthread.h>
 #include <linux/iommu.h>
-
+#include <linux/net_tstamp.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/atomic.h>
+#include <net/sock.h>
 #include "../../fsl-mc/include/mc.h"
 #include "dpaa2-eth.h"
+#include "dpaa2-eth-ceetm.h"
 
 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
  * using trace events only need to #include <trace/events/sched.h>
@@ -104,13 +109,15 @@ static void free_rx_fd(struct dpaa2_eth_
                /* We don't support any other format */
                return;
 
-       /* For S/G frames, we first need to free all SG entries */
+       /* For S/G frames, we first need to free all SG entries
+        * except the first one, which was taken care of already
+        */
        sgt = vaddr + dpaa2_fd_get_offset(fd);
-       for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+       for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
                addr = dpaa2_sg_get_addr(&sgt[i]);
                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
                dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-                                DMA_FROM_DEVICE);
+                                DMA_BIDIRECTIONAL);
 
                skb_free_frag(sg_vaddr);
                if (dpaa2_sg_is_final(&sgt[i]))
@@ -133,8 +140,7 @@ static struct sk_buff *build_linear_skb(
 
        ch->buf_count--;
 
-       skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
-                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+       skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
        if (unlikely(!skb))
                return NULL;
 
@@ -170,15 +176,19 @@ static struct sk_buff *build_frag_skb(st
                sg_addr = dpaa2_sg_get_addr(sge);
                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
                dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-                                DMA_FROM_DEVICE);
+                                DMA_BIDIRECTIONAL);
 
                sg_length = dpaa2_sg_get_len(sge);
 
                if (i == 0) {
                        /* We build the skb around the first data buffer */
-                       skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
-                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+                       skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
                        if (unlikely(!skb)) {
+                               /* Free the first SG entry now, since we already
+                                * unmapped it and obtained the virtual address
+                                */
+                               skb_free_frag(sg_vaddr);
+
                                /* We still need to subtract the buffers used
                                 * by this FD from our software counter
                                 */
@@ -213,17 +223,173 @@ static struct sk_buff *build_frag_skb(st
                        break;
        }
 
+       WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
        /* Count all data buffers + SG table buffer */
        ch->buf_count -= i + 2;
 
        return skb;
 }
 
+static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
+                           struct dpaa2_fd *fd,
+                           void *buf_start,
+                           u16 queue_id)
+{
+       struct dpaa2_eth_fq *fq;
+       struct rtnl_link_stats64 *percpu_stats;
+       struct dpaa2_eth_drv_stats *percpu_extras;
+       struct dpaa2_faead *faead;
+       u32 ctrl, frc;
+       int i, err;
+
+       /* Mark the egress frame annotation area as valid */
+       frc = dpaa2_fd_get_frc(fd);
+       dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+       dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+       ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+       faead = dpaa2_get_faead(buf_start, false);
+       faead->ctrl = cpu_to_le32(ctrl);
+       faead->conf_fqid = 0;
+
+       percpu_stats = this_cpu_ptr(priv->percpu_stats);
+       percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+       fq = &priv->fq[queue_id];
+       for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+               err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+                                                 priv->tx_qdid, 0,
+                                                 fq->tx_qdbin, fd);
+               if (err != -EBUSY)
+                       break;
+       }
+
+       percpu_extras->tx_portal_busy += i;
+       if (unlikely(err)) {
+               percpu_stats->tx_errors++;
+       } else {
+               percpu_stats->tx_packets++;
+               percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+       }
+
+       return err;
+}
+
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       void *vaddr;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               /* Same logic as on regular Rx path */
+               vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+               dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+                                DMA_BIDIRECTIONAL);
+               skb_free_frag(vaddr);
+       }
+}
+
+static void release_fd_buf(struct dpaa2_eth_priv *priv,
+                          struct dpaa2_eth_channel *ch,
+                          dma_addr_t addr)
+{
+       int err;
+
+       ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
+       if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
+               return;
+
+       while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+                                              ch->rel_buf_array,
+                                              ch->rel_buf_cnt)) == -EBUSY)
+               cpu_relax();
+
+       if (err)
+               free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
+
+       ch->rel_buf_cnt = 0;
+}
+
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+                            struct dpaa2_eth_channel *ch,
+                            struct dpaa2_fd *fd,
+                            u16 queue_id,
+                            void *vaddr)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       dma_addr_t addr = dpaa2_fd_get_addr(fd);
+       struct rtnl_link_stats64 *percpu_stats;
+       struct bpf_prog *xdp_prog;
+       struct xdp_buff xdp;
+       u32 xdp_act = XDP_PASS;
+
+       xdp_prog = READ_ONCE(ch->xdp_prog);
+       if (!xdp_prog)
+               return xdp_act;
+
+       percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+       xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+       xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+       /* Allow the XDP program to use the specially reserved headroom */
+       xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+
+       rcu_read_lock();
+       xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+       /* xdp.data pointer may have changed */
+       dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+       dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+       switch (xdp_act) {
+       case XDP_PASS:
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(xdp_act);
+       case XDP_ABORTED:
+       case XDP_DROP:
+               /* This is our buffer, so we can release it back to hardware */
+               release_fd_buf(priv, ch, addr);
+               percpu_stats->rx_dropped++;
+               break;
+       case XDP_TX:
+               if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) {
+                       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                                        DMA_BIDIRECTIONAL);
+                       free_rx_fd(priv, fd, vaddr);
+                       ch->buf_count--;
+               }
+               break;
+       case XDP_REDIRECT:
+               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                                DMA_BIDIRECTIONAL);
+               ch->buf_count--;
+               ch->flush = true;
+               /* Mark the actual start of the data buffer */
+               xdp.data_hard_start = vaddr;
+               if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
+                       free_rx_fd(priv, fd, vaddr);
+               break;
+       }
+
+       if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) {
+               percpu_stats->rx_packets++;
+               percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+       }
+
+       rcu_read_unlock();
+
+       return xdp_act;
+}
+
 /* Main Rx frame processing routine */
 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
                         struct dpaa2_eth_channel *ch,
                         const struct dpaa2_fd *fd,
-                        struct napi_struct *napi)
+                        struct napi_struct *napi,
+                        u16 queue_id)
 {
        dma_addr_t addr = dpaa2_fd_get_addr(fd);
        u8 fd_format = dpaa2_fd_get_format(fd);
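
The dpaa2_eth_run_xdp() helper added in the hunk above runs the BPF program attached to the channel on each single-buffer Rx frame and then acts on the verdict: XDP_PASS continues to the normal stack, XDP_DROP/XDP_ABORTED recycle the buffer to the pool, XDP_TX re-enqueues the frame for transmission, and XDP_REDIRECT hands it to xdp_do_redirect(). For orientation, a minimal XDP program of the kind this hook would execute; it is generic XDP C, nothing dpaa2-specific, and the 256-byte cut-off is only an example:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_len_filter(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        /* Drop frames longer than 256 bytes, pass everything else */
        if (data + 256 < data_end)
                return XDP_DROP;
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
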
@@ -235,14 +401,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
        struct dpaa2_fas *fas;
        void *buf_data;
        u32 status = 0;
+       u32 xdp_act;
 
        /* Tracing point */
        trace_dpaa2_rx_fd(priv->net_dev, fd);
 
        vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+       dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                               DMA_BIDIRECTIONAL);
 
-       fas = dpaa2_get_fas(vaddr);
+       fas = dpaa2_get_fas(vaddr, false);
        prefetch(fas);
        buf_data = vaddr + dpaa2_fd_get_offset(fd);
        prefetch(buf_data);
@@ -251,22 +419,41 @@ static void dpaa2_eth_rx(struct dpaa2_et
        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
        if (fd_format == dpaa2_fd_single) {
+               xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
+                                           queue_id, vaddr);
+               if (xdp_act != XDP_PASS)
+                       return;
+
+               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                                DMA_BIDIRECTIONAL);
                skb = build_linear_skb(priv, ch, fd, vaddr);
        } else if (fd_format == dpaa2_fd_sg) {
+               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                                DMA_BIDIRECTIONAL);
                skb = build_frag_skb(priv, ch, buf_data);
                skb_free_frag(vaddr);
                percpu_extras->rx_sg_frames++;
                percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
        } else {
                /* We don't support any other format */
-               goto err_frame_format;
+               goto drop_cnt;
        }
 
        if (unlikely(!skb))
-               goto err_build_skb;
+               goto drop_fd;
 
        prefetch(skb->data);
 
+       /* Get the timestamp value */
+       if (priv->ts_rx_en) {
+               struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+               u64 *ns = dpaa2_get_ts(vaddr, false);
+
+               *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
+               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+               shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+       }
+
        /* Check if we need to validate the L4 csum */
        if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
                status = le32_to_cpu(fas->status);
@@ -275,6 +462,12 @@ static void dpaa2_eth_rx(struct dpaa2_et
 
        skb->protocol = eth_type_trans(skb, priv->net_dev);
 
+       /* Record Rx queue - this will be used when picking a Tx queue to
+        * forward the frames. We're keeping flow affinity through the
+        * network stack.
+        */
+       skb_record_rx_queue(skb, queue_id);
+
        percpu_stats->rx_packets++;
        percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 
@@ -282,22 +475,74 @@ static void dpaa2_eth_rx(struct dpaa2_et
 
        return;
 
-err_build_skb:
+drop_fd:
        free_rx_fd(priv, fd, vaddr);
-err_frame_format:
+drop_cnt:
        percpu_stats->rx_dropped++;
 }
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+/* Processing of Rx frames received on the error FQ
+ * We check and print the error bits and then free the frame
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+                            struct dpaa2_eth_channel *ch,
+                            const struct dpaa2_fd *fd,
+                            struct napi_struct *napi __always_unused,
+                            u16 queue_id __always_unused)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       dma_addr_t addr = dpaa2_fd_get_addr(fd);
+       void *vaddr;
+       struct rtnl_link_stats64 *percpu_stats;
+       struct dpaa2_fas *fas;
+       u32 status = 0;
+       u32 fd_errors;
+       bool has_fas_errors = false;
+
+       vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
+       /* check frame errors in the FD field */
+       fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
+       if (likely(fd_errors)) {
+               has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
+                                !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
+               if (net_ratelimit())
+                       netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
+                                  fd_errors);
+       }
+
+       /* check frame errors in the FAS field */
+       if (has_fas_errors) {
+               fas = dpaa2_get_fas(vaddr, false);
+               status = le32_to_cpu(fas->status);
+               if (net_ratelimit())
+                       netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
+                                  status & DPAA2_FAS_RX_ERR_MASK);
+       }
+       free_rx_fd(priv, fd, vaddr);
+
+       percpu_stats = this_cpu_ptr(priv->percpu_stats);
+       percpu_stats->rx_errors++;
+       ch->buf_count--;
+}
+#endif
+
 /* Consume all frames pull-dequeued into the store. This is the simplest way to
  * make sure we don't accidentally issue another volatile dequeue which would
  * overwrite (leak) frames already in the store.
  *
+ * The number of frames is returned using the last 2 output arguments,
+ * separately for Rx and Tx confirmations.
+ *
  * Observance of NAPI budget is not our concern, leaving that to the caller.
  */
-static int consume_frames(struct dpaa2_eth_channel *ch)
+static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
+                          int *tx_conf_cleaned)
 {
        struct dpaa2_eth_priv *priv = ch->priv;
-       struct dpaa2_eth_fq *fq;
+       struct dpaa2_eth_fq *fq = NULL;
        struct dpaa2_dq *dq;
        const struct dpaa2_fd *fd;
        int cleaned = 0;
@@ -315,14 +560,60 @@ static int consume_frames(struct dpaa2_e
                }
 
                fd = dpaa2_dq_fd(dq);
-               fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
-               fq->stats.frames++;
+               prefetch(fd);
 
-               fq->consume(priv, ch, fd, &ch->napi);
+               fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+               fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
                cleaned++;
        } while (!is_last);
 
-       return cleaned;
+       if (!cleaned)
+               return false;
+
+       /* All frames brought in store by a volatile dequeue
+        * come from the same queue
+        */
+       if (fq->type == DPAA2_TX_CONF_FQ) {
+               *tx_conf_cleaned += cleaned;
+       } else {
+               *rx_cleaned += cleaned;
+               /* If we processed XDP_REDIRECT frames, flush them now */
+               /* FIXME: Since we don't actually do anything inside
+                * ndo_xdp_flush, we call it here simply for compliance
+                * reasons
+                */
+               if (ch->flush) {
+                       xdp_do_flush_map();
+                       ch->flush = false;
+               }
+       }
+
+       fq->stats.frames += cleaned;
+       ch->stats.frames += cleaned;
+
+       return true;
+}
+
+/* Configure the egress frame annotation for timestamp update */
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+       struct dpaa2_faead *faead;
+       u32 ctrl, frc;
+
+       /* Mark the egress frame annotation area as valid */
+       frc = dpaa2_fd_get_frc(fd);
+       dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+       /* Set hardware annotation size */
+       ctrl = dpaa2_fd_get_ctrl(fd);
+       dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+       /* enable UPD (update prepended data) bit in FAEAD field of
+        * hardware frame annotation area
+        */
+       ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+       faead = dpaa2_get_faead(buf_start, true);
+       faead->ctrl = cpu_to_le32(ctrl);
 }
 
 /* Create a frame descriptor based on a fragmented skb */
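
enable_tx_tstamp() above only arms the hardware annotation when priv->ts_tx_en is set and the skb requests a hardware timestamp; the flag itself is toggled through the SIOCSHWTSTAMP handler (dpaa2_eth_ts_ioctl) added further down in this patch. A sketch of the userspace side using the standard timestamping ioctl; the interface name eth0 is an assumption and CAP_NET_ADMIN is required:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        close(fd);
        return 0;
}
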
@@ -341,7 +632,6 @@ static int build_sg_fd(struct dpaa2_eth_
        int num_sg;
        int num_dma_bufs;
        struct dpaa2_eth_swa *swa;
-       struct dpaa2_fas *fas;
 
        /* Create and map scatterlist.
         * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
@@ -365,21 +655,14 @@ static int build_sg_fd(struct dpaa2_eth_
 
        /* Prepare the HW SGT structure */
        sgt_buf_size = priv->tx_data_offset +
-                      sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-       sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
+                      sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
+       sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
        if (unlikely(!sgt_buf)) {
                err = -ENOMEM;
                goto sgt_buf_alloc_failed;
        }
        sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
-
-       /* PTA from egress side is passed as is to the confirmation side so
-        * we need to clear some fields here in order to find consistent values
-        * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-        * field from the hardware annotation area
-        */
-       fas = dpaa2_get_fas(sgt_buf);
-       memset(fas, 0, DPAA2_FAS_SIZE);
+       memset(sgt_buf, 0, sgt_buf_size);
 
        sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
 
@@ -402,10 +685,11 @@ static int build_sg_fd(struct dpaa2_eth_
         * all of them on Tx Conf.
         */
        swa = (struct dpaa2_eth_swa *)sgt_buf;
-       swa->skb = skb;
-       swa->scl = scl;
-       swa->num_sg = num_sg;
-       swa->num_dma_bufs = num_dma_bufs;
+       swa->type = DPAA2_ETH_SWA_SG;
+       swa->sg.skb = skb;
+       swa->sg.scl = scl;
+       swa->sg.num_sg = num_sg;
+       swa->sg.sgt_size = sgt_buf_size;
 
        /* Separately map the SGT buffer */
        addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
@@ -417,13 +701,15 @@ static int build_sg_fd(struct dpaa2_eth_
        dpaa2_fd_set_format(fd, dpaa2_fd_sg);
        dpaa2_fd_set_addr(fd, addr);
        dpaa2_fd_set_len(fd, skb->len);
-       dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
-                         DPAA2_FD_CTRL_PTV1);
+       dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+               enable_tx_tstamp(fd, sgt_buf);
 
        return 0;
 
 dma_map_single_failed:
-       kfree(sgt_buf);
+       skb_free_frag(sgt_buf);
 sgt_buf_alloc_failed:
        dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 dma_map_sg_failed:
@@ -437,29 +723,27 @@ static int build_single_fd(struct dpaa2_
                           struct dpaa2_fd *fd)
 {
        struct device *dev = priv->net_dev->dev.parent;
-       u8 *buffer_start;
-       struct dpaa2_fas *fas;
-       struct sk_buff **skbh;
+       u8 *buffer_start, *aligned_start;
+       struct dpaa2_eth_swa *swa;
        dma_addr_t addr;
 
-       buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
-                                DPAA2_ETH_TX_BUF_ALIGN,
-                                DPAA2_ETH_TX_BUF_ALIGN);
-
-       /* PTA from egress side is passed as is to the confirmation side so
-        * we need to clear some fields here in order to find consistent values
-        * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-        * field from the hardware annotation area
+       buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
+       /* If there's enough room to align the FD address, do it.
+        * It will help hardware optimize accesses.
         */
-       fas = dpaa2_get_fas(buffer_start);
-       memset(fas, 0, DPAA2_FAS_SIZE);
+       aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+                                 DPAA2_ETH_TX_BUF_ALIGN);
+       if (aligned_start >= skb->head)
+               buffer_start = aligned_start;
 
        /* Store a backpointer to the skb at the beginning of the buffer
         * (in the private data area) such that we can release it
         * on Tx confirm
         */
-       skbh = (struct sk_buff **)buffer_start;
-       *skbh = skb;
+       swa = (struct dpaa2_eth_swa *)buffer_start;
+       swa->type = DPAA2_ETH_SWA_SINGLE;
+       swa->single.skb = skb;
 
        addr = dma_map_single(dev, buffer_start,
                              skb_tail_pointer(skb) - buffer_start,
@@ -471,8 +755,10 @@ static int build_single_fd(struct dpaa2_
        dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
        dpaa2_fd_set_len(fd, skb->len);
        dpaa2_fd_set_format(fd, dpaa2_fd_single);
-       dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
-                         DPAA2_FD_CTRL_PTV1);
+       dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+               enable_tx_tstamp(fd, buffer_start);
 
        return 0;
 }
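
free_tx_fd() in the next hunk recovers the skb (or XDP buffer) from a typed software annotation that build_single_fd() above and build_sg_fd() before it write at the start of the DMA-mapped buffer. The diff only shows the individual field accesses, so the following is a reading aid reconstructed from them under the assumption of an anonymous union; the authoritative layout lives in dpaa2-eth.h:

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

enum dpaa2_eth_swa_type {
        DPAA2_ETH_SWA_SINGLE,
        DPAA2_ETH_SWA_SG,
        DPAA2_ETH_SWA_XDP,
};

/* Sketch only: field names follow the accesses in the diff */
struct dpaa2_eth_swa_sketch {
        enum dpaa2_eth_swa_type type;
        union {
                struct {
                        struct sk_buff *skb;            /* linear Tx frame */
                } single;
                struct {
                        struct sk_buff *skb;            /* fragmented Tx frame */
                        struct scatterlist *scl;        /* mapped scatterlist */
                        int num_sg;                     /* entries in scl */
                        int sgt_size;                   /* HW SGT buffer size */
                } sg;
                struct {
                        int dma_size;                   /* XDP_TX mapping size */
                } xdp;
        };
};
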
@@ -486,92 +772,128 @@ static int build_single_fd(struct dpaa2_
  * Optionally, return the frame annotation status word (FAS), which needs
  * to be checked if we're on the confirmation path.
  */
-static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+static void free_tx_fd(struct dpaa2_eth_priv *priv,
                       const struct dpaa2_fd *fd,
-                      u32 *status)
+                      bool in_napi)
 {
        struct device *dev = priv->net_dev->dev.parent;
        dma_addr_t fd_addr;
-       struct sk_buff **skbh, *skb;
+       struct sk_buff *skb = NULL;
        unsigned char *buffer_start;
-       int unmap_size;
-       struct scatterlist *scl;
-       int num_sg, num_dma_bufs;
        struct dpaa2_eth_swa *swa;
        u8 fd_format = dpaa2_fd_get_format(fd);
-       struct dpaa2_fas *fas;
 
        fd_addr = dpaa2_fd_get_addr(fd);
-       skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
-       fas = dpaa2_get_fas(skbh);
+       buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+       swa = (struct dpaa2_eth_swa *)buffer_start;
 
        if (fd_format == dpaa2_fd_single) {
-               skb = *skbh;
-               buffer_start = (unsigned char *)skbh;
-               /* Accessing the skb buffer is safe before dma unmap, because
-                * we didn't map the actual skb shell.
-                */
-               dma_unmap_single(dev, fd_addr,
-                                skb_tail_pointer(skb) - buffer_start,
-                                DMA_BIDIRECTIONAL);
+               if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+                       skb = swa->single.skb;
+                       /* Accessing the skb buffer is safe before dma unmap,
+                        * because we didn't map the actual skb shell.
+                        */
+                       dma_unmap_single(dev, fd_addr,
+                                        skb_tail_pointer(skb) - buffer_start,
+                                        DMA_BIDIRECTIONAL);
+               } else {
+                       WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP,
+                                 "Wrong SWA type");
+                       dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+                                        DMA_BIDIRECTIONAL);
+               }
        } else if (fd_format == dpaa2_fd_sg) {
-               swa = (struct dpaa2_eth_swa *)skbh;
-               skb = swa->skb;
-               scl = swa->scl;
-               num_sg = swa->num_sg;
-               num_dma_bufs = swa->num_dma_bufs;
+               skb = swa->sg.skb;
 
                /* Unmap the scatterlist */
-               dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
-               kfree(scl);
+               dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
+               kfree(swa->sg.scl);
 
                /* Unmap the SGT buffer */
-               unmap_size = priv->tx_data_offset +
-                      sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-               dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
+               dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+                                DMA_BIDIRECTIONAL);
        } else {
-               /* Unsupported format, mark it as errored and give up */
-               if (status)
-                       *status = ~0;
+               netdev_dbg(priv->net_dev, "Invalid FD format\n");
                return;
        }
 
-       /* Read the status from the Frame Annotation after we unmap the first
-        * buffer but before we free it. The caller function is responsible
-        * for checking the status value.
-        */
-       if (status)
-               *status = le32_to_cpu(fas->status);
+       if (swa->type == DPAA2_ETH_SWA_XDP) {
+               page_frag_free(buffer_start);
+               return;
+       }
+
+       /* Get the timestamp value */
+       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+               struct skb_shared_hwtstamps shhwtstamps;
+               u64 *ns;
+
+               memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+               ns = dpaa2_get_ts(buffer_start, true);
+               *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
+               shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+               skb_tstamp_tx(skb, &shhwtstamps);
+       }
 
-       /* Free SGT buffer kmalloc'ed on tx */
+       /* Free SGT buffer allocated on tx */
        if (fd_format != dpaa2_fd_single)
-               kfree(skbh);
+               skb_free_frag(buffer_start);
 
        /* Move on with skb release */
-       dev_kfree_skb(skb);
+       napi_consume_skb(skb, in_napi);
 }
 
 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct device *dev = net_dev->dev.parent;
        struct dpaa2_fd fd;
        struct rtnl_link_stats64 *percpu_stats;
        struct dpaa2_eth_drv_stats *percpu_extras;
        struct dpaa2_eth_fq *fq;
        u16 queue_mapping;
-       int err, i;
+       unsigned int needed_headroom;
+       u8 prio;
+       int err, i, ch_id = 0;
+
+       queue_mapping = skb_get_queue_mapping(skb);
+       prio = netdev_txq_to_tc(net_dev, queue_mapping);
+
+       /* Hardware interprets priority level 0 as being the highest,
+        * so we need to do a reverse mapping to the netdev tc index
+        */
+       if (net_dev->num_tc)
+               prio = net_dev->num_tc - prio - 1;
+
+       queue_mapping %= dpaa2_eth_queue_count(priv);
+       fq = &priv->fq[queue_mapping];
+
+       /* If we're congested, stop this tx queue; transmission of
+        * the current skb happens regardless of congestion state
+        */
+       dma_sync_single_for_cpu(dev, priv->cscn_dma,
+                               DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
+               netif_stop_subqueue(net_dev, queue_mapping);
+               fq->stats.congestion_entry++;
+       }
 
        percpu_stats = this_cpu_ptr(priv->percpu_stats);
        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
-       if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+       /* For non-linear skb we don't need a minimum headroom */
+       needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+       if (skb_headroom(skb) < needed_headroom) {
                struct sk_buff *ns;
 
-               ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+               ns = skb_realloc_headroom(skb, needed_headroom);
                if (unlikely(!ns)) {
                        percpu_stats->tx_dropped++;
                        goto err_alloc_headroom;
                }
+               percpu_extras->tx_reallocs++;
+               if (skb->sk)
+                       skb_set_owner_w(ns, skb->sk);
                dev_kfree_skb(skb);
                skb = ns;
        }
@@ -605,13 +927,15 @@ static netdev_tx_t dpaa2_eth_tx(struct s
        /* Tracing point */
        trace_dpaa2_tx_fd(net_dev, &fd);
 
-       /* TxConf FQ selection primarily based on cpu affinity; this is
-        * non-migratable context, so it's safe to call smp_processor_id().
-        */
-       queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
-       fq = &priv->fq[queue_mapping];
+       if (dpaa2_eth_ceetm_is_enabled(priv)) {
+               err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
+               if (err)
+                       goto err_ceetm_classify;
+       }
+
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-               err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+               err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+                                                 priv->tx_qdid, prio,
                                                  fq->tx_qdbin, &fd);
                if (err != -EBUSY)
                        break;
@@ -620,7 +944,7 @@ static netdev_tx_t dpaa2_eth_tx(struct s
        if (unlikely(err < 0)) {
                percpu_stats->tx_errors++;
                /* Clean up everything, including freeing the skb */
-               free_tx_fd(priv, &fd, NULL);
+               free_tx_fd(priv, &fd, false);
        } else {
                percpu_stats->tx_packets++;
                percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
@@ -628,6 +952,8 @@ static netdev_tx_t dpaa2_eth_tx(struct s
 
        return NETDEV_TX_OK;
 
+err_ceetm_classify:
+       free_tx_fd(priv, &fd, false);
 err_build_fd:
 err_alloc_headroom:
        dev_kfree_skb(skb);
@@ -639,13 +965,13 @@ err_alloc_headroom:
 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
                              struct dpaa2_eth_channel *ch,
                              const struct dpaa2_fd *fd,
-                             struct napi_struct *napi __always_unused)
+                             struct napi_struct *napi __always_unused,
+                             u16 queue_id)
 {
+       struct device *dev = priv->net_dev->dev.parent;
        struct rtnl_link_stats64 *percpu_stats;
        struct dpaa2_eth_drv_stats *percpu_extras;
-       u32 status = 0;
        u32 fd_errors;
-       bool has_fas_errors = false;
 
        /* Tracing point */
        trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
@@ -654,31 +980,28 @@ static void dpaa2_eth_tx_conf(struct dpa
        percpu_extras->tx_conf_frames++;
        percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
 
-       /* Check frame errors in the FD field */
-       fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-       if (unlikely(fd_errors)) {
-               /* We only check error bits in the FAS field if corresponding
-                * FAERR bit is set in FD and the FAS field is marked as valid
-                */
-               has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
-                                !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-               if (net_ratelimit())
-                       netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
-                                  fd_errors);
+       /* Check congestion state and wake all queues if necessary */
+       if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
+               dma_sync_single_for_cpu(dev, priv->cscn_dma,
+                                       DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+               if (!dpaa2_cscn_state_congested(priv->cscn_mem))
+                       netif_tx_wake_all_queues(priv->net_dev);
        }
 
-       free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
+       /* Check frame errors in the FD field */
+       fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+       free_tx_fd(priv, fd, true);
 
        if (likely(!fd_errors))
                return;
 
+       if (net_ratelimit())
+               netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+                          fd_errors);
+
        percpu_stats = this_cpu_ptr(priv->percpu_stats);
        /* Tx-conf logically pertains to the egress path. */
        percpu_stats->tx_errors++;
-
-       if (has_fas_errors && net_ratelimit())
-               netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
-                          status & DPAA2_FAS_TX_ERR_MASK);
 }
 
 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
@@ -728,26 +1051,27 @@ static int set_tx_csum(struct dpaa2_eth_
 /* Perform a single release command to add buffers
  * to the specified buffer pool
  */
-static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+static int add_bufs(struct dpaa2_eth_priv *priv,
+                   struct dpaa2_eth_channel *ch, u16 bpid)
 {
        struct device *dev = priv->net_dev->dev.parent;
        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
        void *buf;
        dma_addr_t addr;
-       int i;
+       int i, err;
 
        for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
                /* Allocate buffer visible to WRIOP + skb shared info +
                 * alignment padding
                 */
-               buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+               buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
                if (unlikely(!buf))
                        goto err_alloc;
 
-               buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+               buf = PTR_ALIGN(buf, priv->rx_buf_align);
 
                addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-                                     DMA_FROM_DEVICE);
+                                     DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(dev, addr)))
                        goto err_map;
 
@@ -755,28 +1079,31 @@ static int add_bufs(struct dpaa2_eth_pri
 
                /* tracing point */
                trace_dpaa2_eth_buf_seed(priv->net_dev,
-                                        buf, DPAA2_ETH_BUF_RAW_SIZE,
+                                        buf, dpaa2_eth_buf_raw_size(priv),
                                         addr, DPAA2_ETH_RX_BUF_SIZE,
                                         bpid);
        }
 
 release_bufs:
-       /* In case the portal is busy, retry until successful.
-        * The buffer release function would only fail if the QBMan portal
-        * was busy, which implies portal contention (i.e. more CPUs than
-        * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
-        * there is little we can realistically do, short of giving up -
-        * in which case we'd risk depleting the buffer pool and never again
-        * receiving the Rx interrupt which would kick-start the refill logic.
-        * So just keep retrying, at the risk of being moved to ksoftirqd.
-        */
-       while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+       /* In case the portal is busy, retry until successful */
+       while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+                                              buf_array, i)) == -EBUSY)
                cpu_relax();
+
+       /* If release command failed, clean up and bail out; not much
+        * else we can do about it
+        */
+       if (err) {
+               free_bufs(priv, buf_array, i);
+               return 0;
+       }
+
        return i;
 
 err_map:
        skb_free_frag(buf);
 err_alloc:
+       /* If we managed to allocate at least some buffers, release them */
        if (i)
                goto release_bufs;
 
@@ -796,9 +1123,10 @@ static int seed_pool(struct dpaa2_eth_pr
         */
        preempt_disable();
        for (j = 0; j < priv->num_channels; j++) {
-               for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+               priv->channel[j]->buf_count = 0;
+               for (i = 0; i < priv->max_bufs_per_ch;
                     i += DPAA2_ETH_BUFS_PER_CMD) {
-                       new_count = add_bufs(priv, bpid);
+                       new_count = add_bufs(priv, priv->channel[j], bpid);
                        priv->channel[j]->buf_count += new_count;
 
                        if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -818,10 +1146,8 @@ static int seed_pool(struct dpaa2_eth_pr
  */
 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 {
-       struct device *dev = priv->net_dev->dev.parent;
        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-       void *vaddr;
-       int ret, i;
+       int ret;
 
        do {
                ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -830,27 +1156,16 @@ static void drain_bufs(struct dpaa2_eth_
                        netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
                        return;
                }
-               for (i = 0; i < ret; i++) {
-                       /* Same logic as on regular Rx path */
-                       vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
-                                                  buf_array[i]);
-                       dma_unmap_single(dev, buf_array[i],
-                                        DPAA2_ETH_RX_BUF_SIZE,
-                                        DMA_FROM_DEVICE);
-                       skb_free_frag(vaddr);
-               }
+               free_bufs(priv, buf_array, ret);
        } while (ret);
 }
 
 static void drain_pool(struct dpaa2_eth_priv *priv)
 {
-       int i;
-
+       preempt_disable();
        drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
        drain_bufs(priv, 1);
-
-       for (i = 0; i < priv->num_channels; i++)
-               priv->channel[i]->buf_count = 0;
+       preempt_enable();
 }
 
 /* Function is called from softirq context only, so we don't need to guard
@@ -862,19 +1177,19 @@ static int refill_pool(struct dpaa2_eth_
 {
        int new_count;
 
-       if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+       if (likely(ch->buf_count >= priv->refill_thresh))
                return 0;
 
        do {
-               new_count = add_bufs(priv, bpid);
+               new_count = add_bufs(priv, ch, bpid);
                if (unlikely(!new_count)) {
                        /* Out of memory; abort for now, we'll try later on */
                        break;
                }
                ch->buf_count += new_count;
-       } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+       } while (ch->buf_count < priv->max_bufs_per_ch);
 
-       if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+       if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
                return -ENOMEM;
 
        return 0;
@@ -887,7 +1202,8 @@ static int pull_channel(struct dpaa2_eth
 
        /* Retry while portal is busy */
        do {
-               err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+               err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+                                                   ch->store);
                dequeues++;
                cpu_relax();
        } while (err == -EBUSY);
@@ -902,20 +1218,21 @@ static int pull_channel(struct dpaa2_eth
 /* NAPI poll routine
  *
  * Frames are dequeued from the QMan channel associated with this NAPI context.
- * Rx, Tx confirmation and (if configured) Rx error frames all count
- * towards the NAPI budget.
+ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
+ * confirmation frames are limited by a threshold per NAPI poll cycle.
  */
 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 {
        struct dpaa2_eth_channel *ch;
-       int cleaned = 0, store_cleaned;
+       int rx_cleaned = 0, tx_conf_cleaned = 0;
+       bool store_cleaned;
        struct dpaa2_eth_priv *priv;
        int err;
 
        ch = container_of(napi, struct dpaa2_eth_channel, napi);
        priv = ch->priv;
 
-       while (cleaned < budget) {
+       do {
                err = pull_channel(ch);
                if (unlikely(err))
                        break;
@@ -923,29 +1240,29 @@ static int dpaa2_eth_poll(struct napi_st
                /* Refill pool if appropriate */
                refill_pool(priv, ch, priv->bpid);
 
-               store_cleaned = consume_frames(ch);
-               cleaned += store_cleaned;
+               store_cleaned = consume_frames(ch, &rx_cleaned,
+                                              &tx_conf_cleaned);
 
-               /* If we have enough budget left for a full store,
-                * try a new pull dequeue, otherwise we're done here
+               /* If we've either consumed the budget with Rx frames,
+                * or reached the Tx conf threshold, we're done.
                 */
-               if (store_cleaned == 0 ||
-                   cleaned > budget - DPAA2_ETH_STORE_SIZE)
-                       break;
-       }
-
-       if (cleaned < budget) {
-               napi_complete_done(napi, cleaned);
-               /* Re-enable data available notifications */
-               do {
-                       err = dpaa2_io_service_rearm(NULL, &ch->nctx);
-                       cpu_relax();
-               } while (err == -EBUSY);
-       }
+               if (rx_cleaned >= budget ||
+                   tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
+                       return budget;
+       } while (store_cleaned);
 
-       ch->stats.frames += cleaned;
+       /* We didn't consume the entire budget, finish napi and
+        * re-enable data availability notifications
+        */
+       napi_complete(napi);
+       do {
+               err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+               cpu_relax();
+       } while (err == -EBUSY);
+       WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+                 ch->nctx.desired_cpu);
 
-       return cleaned;
+       return max(rx_cleaned, 1);
 }
 
 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -1006,28 +1323,30 @@ static int dpaa2_eth_open(struct net_dev
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        int err;
 
-       err = seed_pool(priv, priv->bpid);
-       if (err) {
-               /* Not much to do; the buffer pool, though not filled up,
-                * may still contain some buffers which would enable us
-                * to limp on.
-                */
-               netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-                          priv->dpbp_dev->obj_desc.id, priv->bpid);
-       }
-
        /* We'll only start the txqs when the link is actually ready; make sure
         * we don't race against the link up notification, which may come
         * immediately after dpni_enable();
         */
        netif_tx_stop_all_queues(net_dev);
-       enable_ch_napi(priv);
+
        /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
         * return true and cause 'ip link show' to report the LOWER_UP flag,
         * even though the link notification wasn't even received.
         */
        netif_carrier_off(net_dev);
 
+       err = seed_pool(priv, priv->bpid);
+       if (err) {
+               /* Not much to do; the buffer pool, though not filled up,
+                * may still contain some buffers which would enable us
+                * to limp on.
+                */
+               netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+                          priv->dpbp_dev->obj_desc.id, priv->bpid);
+       }
+
+       priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
        err = dpni_enable(priv->mc_io, 0, priv->mc_token);
        if (err < 0) {
                netdev_err(net_dev, "dpni_enable() failed\n");
@@ -1047,48 +1366,17 @@ static int dpaa2_eth_open(struct net_dev
 
 link_state_err:
 enable_err:
-       disable_ch_napi(priv);
+       priv->refill_thresh = 0;
        drain_pool(priv);
        return err;
 }
 
-/* The DPIO store must be empty when we call this,
- * at the end of every NAPI cycle.
- */
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
-                        struct dpaa2_eth_channel *ch)
-{
-       u32 drained = 0, total = 0;
-
-       do {
-               pull_channel(ch);
-               drained = consume_frames(ch);
-               total += drained;
-       } while (drained);
-
-       return total;
-}
-
-static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
-{
-       struct dpaa2_eth_channel *ch;
-       int i;
-       u32 drained = 0;
-
-       for (i = 0; i < priv->num_channels; i++) {
-               ch = priv->channel[i];
-               drained += drain_channel(priv, ch);
-       }
-
-       return drained;
-}
-
 static int dpaa2_eth_stop(struct net_device *net_dev)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        int dpni_enabled;
-       int retries = 10;
-       u32 drained;
+       int retries = 10, i;
+       int err = 0;
 
        netif_tx_stop_all_queues(net_dev);
        netif_carrier_off(net_dev);
@@ -1105,56 +1393,24 @@ static int dpaa2_eth_stop(struct net_dev
        } while (dpni_enabled && --retries);
        if (!retries) {
                netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
-               /* Must go on and disable NAPI nonetheless, so we don't crash at
-                * the next "ifconfig up"
+               /* Must go on and finish processing pending frames, so we don't
+                * crash at the next "ifconfig up"
                 */
+               err = -ETIMEDOUT;
        }
 
-       /* Wait for NAPI to complete on every core and disable it.
-        * In particular, this will also prevent NAPI from being rescheduled if
-        * a new CDAN is serviced, effectively discarding the CDAN. We therefore
-        * don't even need to disarm the channels, except perhaps for the case
-        * of a huge coalescing value.
-        */
-       disable_ch_napi(priv);
+       priv->refill_thresh = 0;
 
-        /* Manually drain the Rx and TxConf queues */
-       drained = drain_ingress_frames(priv);
-       if (drained)
-               netdev_dbg(net_dev, "Drained %d frames.\n", drained);
+       /* Wait for all running napi poll routines to finish, so that no
+        * new refill operations are started
+        */
+       for (i = 0; i < priv->num_channels; i++)
+               napi_synchronize(&priv->channel[i]->napi);
 
        /* Empty the buffer pool */
        drain_pool(priv);
 
-       return 0;
-}
-
-static int dpaa2_eth_init(struct net_device *net_dev)
-{
-       u64 supported = 0;
-       u64 not_supported = 0;
-       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-       u32 options = priv->dpni_attrs.options;
-
-       /* Capabilities listing */
-       supported |= IFF_LIVE_ADDR_CHANGE;
-
-       if (options & DPNI_OPT_NO_MAC_FILTER)
-               not_supported |= IFF_UNICAST_FLT;
-       else
-               supported |= IFF_UNICAST_FLT;
-
-       net_dev->priv_flags |= supported;
-       net_dev->priv_flags &= ~not_supported;
-
-       /* Features */
-       net_dev->features = NETIF_F_RXCSUM |
-                           NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                           NETIF_F_SG | NETIF_F_HIGHDMA |
-                           NETIF_F_LLTX;
-       net_dev->hw_features = net_dev->features;
-
-       return 0;
+       return err;
 }
 
 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
@@ -1200,25 +1456,6 @@ static void dpaa2_eth_get_stats(struct n
        }
 }
 
-static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
-{
-       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-       int err;
-
-       /* Set the maximum Rx frame length to match the transmit side;
-        * account for L2 headers when computing the MFL
-        */
-       err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-                                       (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
-       if (err) {
-               netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
-               return err;
-       }
-
-       net_dev->mtu = mtu;
-       return 0;
-}
-
 /* Copy mac unicast addresses from @net_dev to @priv.
  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
  */
@@ -1380,16 +1617,363 @@ static int dpaa2_eth_set_features(struct
        return 0;
 }
 
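+/* Handle a SIOCSHWTSTAMP request: enable/disable hardware Tx/Rx timestamping */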
+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(dev);
+       struct hwtstamp_config config;
+
+       if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               priv->ts_tx_en = false;
+               break;
+       case HWTSTAMP_TX_ON:
+               priv->ts_tx_en = true;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+               priv->ts_rx_en = false;
+       } else {
+               priv->ts_rx_en = true;
+               /* TS is set for all frame types, not only those requested */
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+       }
+
+       return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+                       -EFAULT : 0;
+}
+
+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       if (cmd == SIOCSHWTSTAMP)
+               return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
+       return -EINVAL;
+}
+
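+/* Configure hardware buffer layouts for the Rx, Tx and Tx confirmation queues */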
+static int set_buffer_layout(struct dpaa2_eth_priv *priv)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_buffer_layout buf_layout = {0};
+       int err;
+
+       /* We need to check for WRIOP version 1.0.0, but depending on the MC
+        * version, rev1 hardware may report it as either 0.0.0 or 1.0.0,
+        * so accept both values here.
+        */
+       if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+           priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
+               priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+       else
+               priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
+       /* tx buffer */
+       buf_layout.pass_timestamp = true;
+       buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+       buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+                            DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+                                    DPNI_QUEUE_TX, &buf_layout);
+       if (err) {
+               dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
+               return err;
+       }
+
+       /* tx-confirm buffer */
+       buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+                                    DPNI_QUEUE_TX_CONFIRM, &buf_layout);
+       if (err) {
+               dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
+               return err;
+       }
+
+       /* Now that we've set our tx buffer layout, retrieve the minimum
+        * required tx data offset.
+        */
+       err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
+                                     &priv->tx_data_offset);
+       if (err) {
+               dev_err(dev, "dpni_get_tx_data_offset() failed\n");
+               return err;
+       }
+
+       if ((priv->tx_data_offset % 64) != 0)
+               dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
+                        priv->tx_data_offset);
+
+       /* rx buffer */
+       buf_layout.pass_frame_status = true;
+       buf_layout.pass_parser_result = true;
+       buf_layout.data_align = priv->rx_buf_align;
+       buf_layout.private_data_size = 0;
+       buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
+       /* If XDP program is attached, reserve extra space for
+        * potential header expansions
+        */
+       if (priv->has_xdp_prog)
+               buf_layout.data_head_room += XDP_PACKET_HEADROOM;
+       buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+                            DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+                            DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+                            DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+                            DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+                                    DPNI_QUEUE_RX, &buf_layout);
+       if (err) {
+               dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+               return err;
+       }
+
+       return 0;
+}
+
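+/* Install or remove an XDP program on all channels, reconfiguring
+ * the Rx buffer layout if the XDP state changes
+ */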
+static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpaa2_eth_channel *ch;
+       struct bpf_prog *old_prog = NULL;
+       int i, err;
+
+       /* No support for SG frames */
+       if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
+               return -EINVAL;
+
+       if (netif_running(net_dev)) {
+               err = dpaa2_eth_stop(net_dev);
+               if (err)
+                       return err;
+       }
+
+       if (prog) {
+               prog = bpf_prog_add(prog, priv->num_channels - 1);
+               if (IS_ERR(prog))
+                       return PTR_ERR(prog);
+       }
+
+       priv->has_xdp_prog = !!prog;
+
+       for (i = 0; i < priv->num_channels; i++) {
+               ch = priv->channel[i];
+               old_prog = xchg(&ch->xdp_prog, prog);
+               if (old_prog)
+                       bpf_prog_put(old_prog);
+       }
+
+       /* When turning XDP on/off, the Rx buffer layout needs to be
+        * reconfigured. The buffer pool was drained in dpaa2_eth_stop(),
+        * so we know no buffers with the old layout are still in use.
+        */
+       if (priv->has_xdp_prog != !!old_prog)
+               set_buffer_layout(priv);
+
+       if (netif_running(net_dev)) {
+               err = dpaa2_eth_open(net_dev);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return dpaa2_eth_set_xdp(dev, xdp->prog);
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = priv->has_xdp_prog;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
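+/* Transmit a frame redirected to this interface through XDP_REDIRECT */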
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, struct xdp_buff *xdp)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct device *dev = net_dev->dev.parent;
+       struct rtnl_link_stats64 *percpu_stats;
+       struct dpaa2_eth_drv_stats *percpu_extras;
+       unsigned int needed_headroom;
+       struct dpaa2_eth_swa *swa;
+       struct dpaa2_eth_fq *fq;
+       struct dpaa2_fd fd;
+       void *buffer_start, *aligned_start;
+       dma_addr_t addr;
+       int err, i;
+
+       if (!netif_running(net_dev))
+               return -ENETDOWN;
+
+       percpu_stats = this_cpu_ptr(priv->percpu_stats);
+       percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+       /* We require a minimum headroom to be able to transmit the frame.
+        * Otherwise return an error and let the original net_device handle it
+        */
+       /* TODO: Do we update i/f counters here or just on the Rx device? */
+       needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+       if (xdp->data < xdp->data_hard_start ||
+           xdp->data - xdp->data_hard_start < needed_headroom) {
+               percpu_stats->tx_dropped++;
+               return -EINVAL;
+       }
+
+       /* Setup the FD fields */
+       memset(&fd, 0, sizeof(fd));
+
+       /* Align FD address, if possible */
+       buffer_start = xdp->data - needed_headroom;
+       aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+                                 DPAA2_ETH_TX_BUF_ALIGN);
+       if (aligned_start >= xdp->data_hard_start)
+               buffer_start = aligned_start;
+
+       swa = (struct dpaa2_eth_swa *)buffer_start;
+       /* Store the info needed to unmap the buffer on Tx confirmation */
+       swa->type = DPAA2_ETH_SWA_XDP;
+       swa->xdp.dma_size = xdp->data_end - buffer_start;
+
+       addr = dma_map_single(dev, buffer_start,
+                             xdp->data_end - buffer_start,
+                             DMA_BIDIRECTIONAL);
+       if (unlikely(dma_mapping_error(dev, addr))) {
+               percpu_stats->tx_dropped++;
+               return -ENOMEM;
+       }
+
+       dpaa2_fd_set_addr(&fd, addr);
+       dpaa2_fd_set_offset(&fd, xdp->data - buffer_start);
+       dpaa2_fd_set_len(&fd, xdp->data_end - xdp->data);
+       dpaa2_fd_set_format(&fd, dpaa2_fd_single);
+       dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
+
+       fq = &priv->fq[smp_processor_id()];
+       for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+               err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+                                                 fq->tx_qdbin, &fd);
+               if (err != -EBUSY)
+                       break;
+       }
+       percpu_extras->tx_portal_busy += i;
+       if (unlikely(err < 0)) {
+               percpu_stats->tx_errors++;
+               /* let the Rx device handle the cleanup */
+               return err;
+       }
+
+       percpu_stats->tx_packets++;
+       percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+
+       return 0;
+}
+
+static void dpaa2_eth_xdp_flush(struct net_device *net_dev)
+{
+       /* We don't have hardware support for Tx batching,
+        * so we do the actual frame enqueue in ndo_xdp_xmit
+        */
+}
+
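+/* Pin each Tx queue to the CPU affine to its Tx confirmation queue (XPS) */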
+static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv)
+{
+       struct net_device *net_dev = priv->net_dev;
+       unsigned int i, num_queues;
+       struct cpumask xps_mask;
+       struct dpaa2_eth_fq *fq;
+       int err = 0;
+
+       num_queues = (net_dev->num_tc ? : 1) * dpaa2_eth_queue_count(priv);
+       for (i = 0; i < num_queues; i++) {
+               fq = &priv->fq[i % dpaa2_eth_queue_count(priv)];
+               cpumask_clear(&xps_mask);
+               cpumask_set_cpu(fq->target_cpu, &xps_mask);
+               err = netif_set_xps_queue(net_dev, &xps_mask, i);
+               if (err) {
+                       dev_info_once(net_dev->dev.parent,
+                                     "Error setting XPS queue\n");
+                       break;
+               }
+       }
+
+       return err;
+}
+
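+/* Offload mqprio: map traffic classes onto the hardware Tx queues */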
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+                             enum tc_setup_type type,
+                             void *type_data)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct tc_mqprio_qopt *mqprio = (struct tc_mqprio_qopt *)type_data;
+       int i, err = 0;
+
+       if (type != TC_SETUP_MQPRIO)
+               return -EINVAL;
+
+       if (mqprio->num_tc > dpaa2_eth_tc_count(priv)) {
+               netdev_err(net_dev, "Max %d traffic classes supported\n",
+                          dpaa2_eth_tc_count(priv));
+               return -EINVAL;
+       }
+
+       if (mqprio->num_tc == net_dev->num_tc)
+               return 0;
+
+       mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       if (!mqprio->num_tc) {
+               netdev_reset_tc(net_dev);
+               err = netif_set_real_num_tx_queues(net_dev,
+                                                  dpaa2_eth_queue_count(priv));
+               if (err)
+                       return err;
+
+               goto update_xps;
+       }
+
+       err = netdev_set_num_tc(net_dev, mqprio->num_tc);
+       if (err)
+               return err;
+
+       err = netif_set_real_num_tx_queues(net_dev, mqprio->num_tc *
+                                          dpaa2_eth_queue_count(priv));
+       if (err)
+               return err;
+
+       for (i = 0; i < mqprio->num_tc; i++) {
+               err = netdev_set_tc_queue(net_dev, i,
+                                         dpaa2_eth_queue_count(priv),
+                                         i * dpaa2_eth_queue_count(priv));
+               if (err)
+                       return err;
+       }
+
+update_xps:
+       err = dpaa2_eth_update_xps(priv);
+       return err;
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
        .ndo_open = dpaa2_eth_open,
        .ndo_start_xmit = dpaa2_eth_tx,
        .ndo_stop = dpaa2_eth_stop,
-       .ndo_init = dpaa2_eth_init,
        .ndo_set_mac_address = dpaa2_eth_set_addr,
        .ndo_get_stats64 = dpaa2_eth_get_stats,
-       .ndo_change_mtu = dpaa2_eth_change_mtu,
        .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
        .ndo_set_features = dpaa2_eth_set_features,
+       .ndo_do_ioctl = dpaa2_eth_ioctl,
+       .ndo_xdp = dpaa2_eth_xdp,
+       .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+       .ndo_xdp_flush = dpaa2_eth_xdp_flush,
+       .ndo_setup_tc = dpaa2_eth_setup_tc,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1422,34 +2006,32 @@ static struct fsl_mc_device *setup_dpcon
        err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
        if (err) {
                dev_err(dev, "dpcon_open() failed\n");
-               goto err_open;
+               goto free;
        }
 
        err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
        if (err) {
                dev_err(dev, "dpcon_reset() failed\n");
-               goto err_reset;
+               goto close;
        }
 
        err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
        if (err) {
                dev_err(dev, "dpcon_get_attributes() failed\n");
-               goto err_get_attr;
+               goto close;
        }
 
        err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
        if (err) {
                dev_err(dev, "dpcon_enable() failed\n");
-               goto err_enable;
+               goto close;
        }
 
        return dpcon;
 
-err_enable:
-err_get_attr:
-err_reset:
+close:
        dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-err_open:
+free:
        fsl_mc_object_free(dpcon);
 
        return NULL;
@@ -1502,7 +2084,14 @@ err_setup:
 static void free_channel(struct dpaa2_eth_priv *priv,
                         struct dpaa2_eth_channel *channel)
 {
+       struct bpf_prog *prog;
+
        free_dpcon(priv, channel->dpcon);
+
+       prog = READ_ONCE(channel->xdp_prog);
+       if (prog)
+               bpf_prog_put(prog);
+
        kfree(channel);
 }
 
@@ -1546,7 +2135,8 @@ static int setup_dpio(struct dpaa2_eth_p
                nctx->desired_cpu = i;
 
                /* Register the new context */
-               err = dpaa2_io_service_register(NULL, nctx);
+               channel->dpio = dpaa2_io_service_select(i);
+               err = dpaa2_io_service_register(channel->dpio, nctx);
                if (err) {
                        dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
                        /* If no affine DPIO for this core, there's probably
@@ -1586,7 +2176,7 @@ static int setup_dpio(struct dpaa2_eth_p
        return 0;
 
 err_set_cdan:
-       dpaa2_io_service_deregister(NULL, nctx);
+       dpaa2_io_service_deregister(channel->dpio, nctx);
 err_service_reg:
        free_channel(priv, channel);
 err_alloc_ch:
@@ -1609,7 +2199,7 @@ static void free_dpio(struct dpaa2_eth_p
        /* deregister CDAN notifications and free channels */
        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
-               dpaa2_io_service_deregister(NULL, &ch->nctx);
+               dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
                free_channel(priv, ch);
        }
 }
@@ -1636,8 +2226,7 @@ static void set_fq_affinity(struct dpaa2
 {
        struct device *dev = priv->net_dev->dev.parent;
        struct dpaa2_eth_fq *fq;
-       int rx_cpu, txc_cpu;
-       int i;
+       int rx_cpu, txc_cpu, i;
 
        /* For each FQ, pick one channel/CPU to deliver frames to.
         * This may well change at runtime, either through irqbalance or
@@ -1649,6 +2238,7 @@ static void set_fq_affinity(struct dpaa2
                fq = &priv->fq[i];
                switch (fq->type) {
                case DPAA2_RX_FQ:
+               case DPAA2_RX_ERR_FQ:
                        fq->target_cpu = rx_cpu;
                        rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
                        if (rx_cpu >= nr_cpu_ids)
@@ -1656,6 +2246,7 @@ static void set_fq_affinity(struct dpaa2
                        break;
                case DPAA2_TX_CONF_FQ:
                        fq->target_cpu = txc_cpu;
+
                        txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
                        if (txc_cpu >= nr_cpu_ids)
                                txc_cpu = cpumask_first(&priv->dpio_cpumask);
@@ -1665,11 +2256,13 @@ static void set_fq_affinity(struct dpaa2
                }
                fq->channel = get_affine_channel(priv, fq->target_cpu);
        }
+
+       dpaa2_eth_update_xps(priv);
 }
 
 static void setup_fqs(struct dpaa2_eth_priv *priv)
 {
-       int i;
+       int i, j;
 
        /* We have one TxConf FQ per Tx flow.
         * The number of Tx and Rx queues is the same.
@@ -1681,11 +2274,19 @@ static void setup_fqs(struct dpaa2_eth_p
                priv->fq[priv->num_fqs++].flowid = (u16)i;
        }
 
-       for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-               priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-               priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-               priv->fq[priv->num_fqs++].flowid = (u16)i;
-       }
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
+               for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
+                       priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+                       priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+                       priv->fq[priv->num_fqs].tc = (u8)i;
+                       priv->fq[priv->num_fqs++].flowid = (u16)j;
+               }
+
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+       /* We have exactly one Rx error queue per DPNI */
+       priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+       priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+#endif
 
        /* For each FQ, decide on which core to process incoming frames */
        set_fq_affinity(priv);
@@ -1735,6 +2336,9 @@ static int setup_dpbp(struct dpaa2_eth_p
        }
        priv->bpid = dpbp_attrs.bpid;
 
+       /* By default we start with flow control enabled */
+       priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
+
        return 0;
 
 err_get_attr:
@@ -1756,13 +2360,59 @@ static void free_dpbp(struct dpaa2_eth_p
        fsl_mc_object_free(priv->dpbp_dev);
 }
 
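+/* Allocate a DMA-able congestion notification (CSCN) area and enable
+ * congestion notifications on the Tx queues
+ */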
+static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
+{
+       struct dpni_congestion_notification_cfg notif_cfg = {0};
+       struct device *dev = priv->net_dev->dev.parent;
+       int err;
+
+       priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+                                      GFP_KERNEL);
+
+       if (!priv->cscn_unaligned)
+               return -ENOMEM;
+
+       priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
+       priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(dev, priv->cscn_dma)) {
+               dev_err(dev, "Error mapping CSCN memory area\n");
+               err = -ENOMEM;
+               goto err_dma_map;
+       }
+
+       notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
+       notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
+       notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
+       notif_cfg.message_ctx = (u64)priv;
+       notif_cfg.message_iova = priv->cscn_dma;
+       notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+                                     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+                                     DPNI_CONG_OPT_COHERENT_WRITE;
+       err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
+                                              DPNI_QUEUE_TX, 0, &notif_cfg);
+       if (err) {
+               dev_err(dev, "dpni_set_congestion_notification failed\n");
+               goto err_set_cong;
+       }
+
+       return 0;
+
+err_set_cong:
+       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+       kfree(priv->cscn_unaligned);
+
+       return err;
+}
+
 /* Configure the DPNI object this interface is associated with */
 static int setup_dpni(struct fsl_mc_device *ls_dev)
 {
        struct device *dev = &ls_dev->dev;
        struct dpaa2_eth_priv *priv;
        struct net_device *net_dev;
-       struct dpni_buffer_layout buf_layout = {0};
+       struct dpni_link_cfg cfg = {0};
        int err;
 
        net_dev = dev_get_drvdata(dev);
@@ -1772,7 +2422,22 @@ static int setup_dpni(struct fsl_mc_devi
        err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
        if (err) {
                dev_err(dev, "dpni_open() failed\n");
-               goto err_open;
+               return err;
+       }
+
+       /* Check if we can work with this DPNI object */
+       err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
+                                  &priv->dpni_ver_minor);
+       if (err) {
+               dev_err(dev, "dpni_get_api_version() failed\n");
+               goto close;
+       }
+       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
+               dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
+                       priv->dpni_ver_major, priv->dpni_ver_minor,
+                       DPNI_VER_MAJOR, DPNI_VER_MINOR);
+               err = -ENOTSUPP;
+               goto close;
        }
 
        ls_dev->mc_io = priv->mc_io;
@@ -1781,82 +2446,53 @@ static int setup_dpni(struct fsl_mc_devi
        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
        if (err) {
                dev_err(dev, "dpni_reset() failed\n");
-               goto err_reset;
+               goto close;
        }
 
        err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
                                  &priv->dpni_attrs);
        if (err) {
                dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
-               goto err_get_attr;
+               goto close;
        }
 
-       /* Configure buffer layouts */
-       /* rx buffer */
-       buf_layout.pass_parser_result = true;
-       buf_layout.pass_frame_status = true;
-       buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-       buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
-       buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-                            DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-                            DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-                            DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
-       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-                                    DPNI_QUEUE_RX, &buf_layout);
-       if (err) {
-               dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
-               goto err_buf_layout;
-       }
+       err = set_buffer_layout(priv);
+       if (err)
+               goto close;
 
-       /* tx buffer */
-       buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-                            DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-                                    DPNI_QUEUE_TX, &buf_layout);
-       if (err) {
-               dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
-               goto err_buf_layout;
-       }
+       /* Enable congestion notifications for Tx queues */
+       err = setup_tx_congestion(priv);
+       if (err)
+               goto close;
 
-       /* tx-confirm buffer */
-       buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
-       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-                                    DPNI_QUEUE_TX_CONFIRM, &buf_layout);
-       if (err) {
-               dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
-               goto err_buf_layout;
-       }
+       /* allocate classification rule space */
+       priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
+                                dpaa2_eth_fs_count(priv), GFP_KERNEL);
+       if (!priv->cls_rule) {
+               err = -ENOMEM;
+               goto close;
+       }
 
-       /* Now that we've set our tx buffer layout, retrieve the minimum
-        * required tx data offset.
-        */
-       err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
-                                     &priv->tx_data_offset);
+       /* Enable flow control */
+       cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
+       priv->tx_pause_frames = true;
+       err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
        if (err) {
-               dev_err(dev, "dpni_get_tx_data_offset() failed\n");
-               goto err_data_offset;
+               dev_err(dev, "dpni_set_link_cfg() failed\n");
+               goto cls_free;
        }
 
-       if ((priv->tx_data_offset % 64) != 0)
-               dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
-                        priv->tx_data_offset);
-
-       /* Accommodate software annotation space (SWA) */
-       priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
-
        return 0;
 
-err_data_offset:
-err_buf_layout:
-err_get_attr:
-err_reset:
+cls_free:
+       kfree(priv->cls_rule);
+close:
        dpni_close(priv->mc_io, 0, priv->mc_token);
-err_open:
+
        return err;
 }
 
 static void free_dpni(struct dpaa2_eth_priv *priv)
 {
+       struct device *dev = priv->net_dev->dev.parent;
        int err;
 
        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
@@ -1865,6 +2501,11 @@ static void free_dpni(struct dpaa2_eth_p
                            err);
 
        dpni_close(priv->mc_io, 0, priv->mc_token);
+
+       kfree(priv->cls_rule);
+
+       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+       kfree(priv->cscn_unaligned);
 }
 
 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
@@ -1873,11 +2514,10 @@ static int setup_rx_flow(struct dpaa2_et
        struct device *dev = priv->net_dev->dev.parent;
        struct dpni_queue queue;
        struct dpni_queue_id qid;
-       struct dpni_taildrop td;
        int err;
 
        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-                            DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+                            DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
        if (err) {
                dev_err(dev, "dpni_get_queue(RX) failed\n");
                return err;
@@ -1890,7 +2530,7 @@ static int setup_rx_flow(struct dpaa2_et
        queue.destination.priority = 1;
        queue.user_context = (u64)(uintptr_t)fq;
        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-                            DPNI_QUEUE_RX, 0, fq->flowid,
+                            DPNI_QUEUE_RX, fq->tc, fq->flowid,
                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
                             &queue);
        if (err) {
@@ -1898,15 +2538,121 @@ static int setup_rx_flow(struct dpaa2_et
                return err;
        }
 
-       td.enable = 1;
-       td.threshold = DPAA2_ETH_TAILDROP_THRESH;
-       err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
-                               DPNI_QUEUE_RX, 0, fq->flowid, &td);
-       if (err) {
-               dev_err(dev, "dpni_set_threshold() failed\n");
-               return err;
+       return 0;
+}
+
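+/* Apply the given taildrop configuration to every Rx frame queue */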
+static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
+                             struct dpni_taildrop *td)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       int i, err;
+
+       for (i = 0; i < priv->num_fqs; i++) {
+               if (priv->fq[i].type != DPAA2_RX_FQ)
+                       continue;
+
+               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+                                       DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+                                       priv->fq[i].tc, priv->fq[i].flowid,
+                                       td);
+               if (err) {
+                       dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
+                       return err;
+               }
+
+               dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
+                       (td->enable ? "Enabled" : "Disabled"),
+                       priv->fq[i].flowid, priv->fq[i].tc);
+       }
+
+       return 0;
+}
+
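+/* Apply the taildrop configuration per Rx traffic class group */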
+static int set_group_taildrop(struct dpaa2_eth_priv *priv,
+                             struct dpni_taildrop *td)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_taildrop disable_td, *tc_td;
+       int i, err;
+
+       memset(&disable_td, 0, sizeof(struct dpni_taildrop));
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
+                       /* Do not set taildrop thresholds for PFC-enabled
+                        * traffic classes. We will enable congestion
+                        * notifications for them.
+                        */
+                       tc_td = &disable_td;
+               else
+                       tc_td = td;
+
+               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+                                       DPNI_CP_GROUP, DPNI_QUEUE_RX,
+                                       i, 0, tc_td);
+               if (err) {
+                       dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
+                       return err;
+               }
+
+               dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
+                       (tc_td->enable ? "Enabled" : "Disabled"),
+                       i);
+       }
+
+       return 0;
+}
+
+/* Enable/disable Rx FQ taildrop
+ *
+ * Rx FQ taildrop is mutually exclusive with flow control and it only gets
+ * disabled when FC is active. Depending on FC status, we need to compute
+ * the maximum number of buffers in the pool differently, so use the
+ * opportunity to update max number of buffers as well.
+ */
+int set_rx_taildrop(struct dpaa2_eth_priv *priv)
+{
+       enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
+       struct dpni_taildrop td_queue, td_group;
+       int err = 0;
+
+       switch (cfg) {
+       case DPAA2_ETH_TD_NONE:
+               memset(&td_queue, 0, sizeof(struct dpni_taildrop));
+               memset(&td_group, 0, sizeof(struct dpni_taildrop));
+               priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
+                                       priv->num_channels;
+               break;
+       case DPAA2_ETH_TD_QUEUE:
+               memset(&td_group, 0, sizeof(struct dpni_taildrop));
+               td_queue.enable = 1;
+               td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
+               td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
+                                    dpaa2_eth_tc_count(priv);
+               priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
+               break;
+       case DPAA2_ETH_TD_GROUP:
+               memset(&td_queue, 0, sizeof(struct dpni_taildrop));
+               td_group.enable = 1;
+               td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
+               td_group.threshold = NAPI_POLL_WEIGHT *
+                                    dpaa2_eth_queue_count(priv);
+               priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
+                                       dpaa2_eth_tc_count(priv);
+               break;
+       default:
+               break;
        }
 
+       err = set_queue_taildrop(priv, &td_queue);
+       if (err)
+               return err;
+
+       err = set_group_taildrop(priv, &td_group);
+       if (err)
+               return err;
+
+       priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
        return 0;
 }
 
@@ -1953,23 +2699,88 @@ static int setup_tx_flow(struct dpaa2_et
        return 0;
 }
 
-/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
-static const struct dpaa2_eth_hash_fields hash_fields[] = {
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
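+/* Set up dequeueing of the Rx error queue to an affine channel */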
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+                            struct dpaa2_eth_fq *fq)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_queue q = { { 0 } };
+       struct dpni_queue_id qid;
+       u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+       int err;
+
+       err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+                            DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+       if (err) {
+               dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+               return err;
+       }
+
+       fq->fqid = qid.fqid;
+
+       q.destination.id = fq->channel->dpcon_id;
+       q.destination.type = DPNI_DEST_DPCON;
+       q.destination.priority = 1;
+       q.user_context = (u64)(uintptr_t)fq;
+       err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+                            DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+       if (err) {
+               dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+               return err;
+       }
+
+       return 0;
+}
+#endif
+
+/* default hash key fields */
+static struct dpaa2_eth_dist_fields default_dist_fields[] = {
        {
+               /* L2 header */
+               .rxnfc_field = RXH_L2DA,
+               .cls_prot = NET_PROT_ETH,
+               .cls_field = NH_FLD_ETH_DA,
+               .id = DPAA2_ETH_DIST_ETHDST,
+               .size = 6,
+       }, {
+               .cls_prot = NET_PROT_ETH,
+               .cls_field = NH_FLD_ETH_SA,
+               .id = DPAA2_ETH_DIST_ETHSRC,
+               .size = 6,
+       }, {
+               /* This is the last ethertype field parsed:
+                * depending on frame format, it can be the MAC ethertype
+                * or the VLAN etype.
+                */
+               .cls_prot = NET_PROT_ETH,
+               .cls_field = NH_FLD_ETH_TYPE,
+               .id = DPAA2_ETH_DIST_ETHTYPE,
+               .size = 2,
+       }, {
+               /* VLAN header */
+               .rxnfc_field = RXH_VLAN,
+               .cls_prot = NET_PROT_VLAN,
+               .cls_field = NH_FLD_VLAN_TCI,
+               .id = DPAA2_ETH_DIST_VLAN,
+               .size = 2,
+       }, {
                /* IP header */
                .rxnfc_field = RXH_IP_SRC,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_SRC,
+               .id = DPAA2_ETH_DIST_IPSRC,
                .size = 4,
        }, {
                .rxnfc_field = RXH_IP_DST,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_DST,
+               .id = DPAA2_ETH_DIST_IPDST,
                .size = 4,
        }, {
                .rxnfc_field = RXH_L3_PROTO,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_PROTO,
+               .id = DPAA2_ETH_DIST_IPPROTO,
                .size = 1,
        }, {
                /* Using UDP ports, this is functionally equivalent to raw
@@ -1978,90 +2789,182 @@ static const struct dpaa2_eth_hash_field
                .rxnfc_field = RXH_L4_B_0_1,
                .cls_prot = NET_PROT_UDP,
                .cls_field = NH_FLD_UDP_PORT_SRC,
+               .id = DPAA2_ETH_DIST_L4SRC,
                .size = 2,
        }, {
                .rxnfc_field = RXH_L4_B_2_3,
                .cls_prot = NET_PROT_UDP,
                .cls_field = NH_FLD_UDP_PORT_DST,
+               .id = DPAA2_ETH_DIST_L4DST,
                .size = 2,
        },
 };
 
-/* Set RX hash options
- * flags is a combination of RXH_ bits
- */
-static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
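+/* Configure Rx distribution through the legacy per-tc API; hashing and
+ * flow steering cannot be configured independently in this mode
+ */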
+static int legacy_config_dist_key(struct dpaa2_eth_priv *priv,
+                                 dma_addr_t key_iova)
 {
-       struct device *dev = net_dev->dev.parent;
-       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-       struct dpkg_profile_cfg cls_cfg;
+       struct device *dev = priv->net_dev->dev.parent;
        struct dpni_rx_tc_dist_cfg dist_cfg;
-       u8 *dma_mem;
-       int i;
-       int err = 0;
+       int i, err;
 
-       if (!dpaa2_eth_hash_enabled(priv)) {
-               dev_dbg(dev, "Hashing support is not enabled\n");
-               return 0;
+       /* In legacy mode, we can't configure flow steering independently */
+       if (!dpaa2_eth_hash_enabled(priv))
+               return -EOPNOTSUPP;
+
+       memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+       dist_cfg.key_cfg_iova = key_iova;
+       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+       if (dpaa2_eth_fs_enabled(priv)) {
+               dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
+               dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
+       } else {
+               dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
        }
 
-       memset(&cls_cfg, 0, sizeof(cls_cfg));
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
+                                         &dist_cfg);
+               if (err) {
+                       dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
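+/* Apply the given key as Rx hash distribution key on all traffic classes */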
+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_rx_dist_cfg dist_cfg;
+       int i, err;
 
-       for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
-               struct dpkg_extract *key =
-                       &cls_cfg.extracts[cls_cfg.num_extracts];
+       if (!dpaa2_eth_hash_enabled(priv))
+               return -EOPNOTSUPP;
 
-               if (!(flags & hash_fields[i].rxnfc_field))
-                       continue;
+       memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+       dist_cfg.key_cfg_iova = key_iova;
+       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+       dist_cfg.enable = true;
+
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               dist_cfg.tc = i;
+
+               err = dpni_set_rx_hash_dist(priv->mc_io, 0,
+                                           priv->mc_token, &dist_cfg);
+               if (err) {
+                       dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
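+/* Apply the given key as Rx flow steering key on all traffic classes */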
+static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_rx_dist_cfg dist_cfg;
+       int i, err;
 
-               if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
-                       dev_err(dev, "error adding key extraction rule, too many rules?\n");
-                       return -E2BIG;
+       if (!dpaa2_eth_fs_enabled(priv))
+               return -EOPNOTSUPP;
+
+       memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+       dist_cfg.key_cfg_iova = key_iova;
+       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+       dist_cfg.enable = true;
+
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               dist_cfg.tc = i;
+
+               err = dpni_set_rx_fs_dist(priv->mc_io, 0,
+                                         priv->mc_token, &dist_cfg);
+               if (err) {
+                       dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+                       return err;
                }
+       }
+
+       return 0;
+}
 
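+/* Build a key extraction profile from the requested fields and apply it
+ * as the Rx hash, flow steering or legacy distribution key
+ */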
+int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
+                          enum dpaa2_eth_rx_dist type, u32 key_fields)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpkg_profile_cfg cls_cfg;
+       struct dpkg_extract *key;
+       u32 hash_fields = 0;
+       dma_addr_t key_iova;
+       u8 *key_mem;
+       int i, err;
+
+       memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+       for (i = 0; i < priv->num_dist_fields; i++) {
+               if (!(key_fields & priv->dist_fields[i].id))
+                       continue;
+
+               key = &cls_cfg.extracts[cls_cfg.num_extracts];
                key->type = DPKG_EXTRACT_FROM_HDR;
-               key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+               key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot;
                key->extract.from_hdr.type = DPKG_FULL_FIELD;
-               key->extract.from_hdr.field = hash_fields[i].cls_field;
+               key->extract.from_hdr.field = priv->dist_fields[i].cls_field;
                cls_cfg.num_extracts++;
 
-               priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
+               hash_fields |= priv->dist_fields[i].rxnfc_field;
        }
 
-       dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-       if (!dma_mem)
+       key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+       if (!key_mem)
                return -ENOMEM;
 
-       err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+       err = dpni_prepare_key_cfg(&cls_cfg, key_mem);
        if (err) {
                dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
-               goto err_prep_key;
+               goto free_key;
        }
 
-       memset(&dist_cfg, 0, sizeof(dist_cfg));
-
-       /* Prepare for setting the rx dist */
-       dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-                                              DPAA2_CLASSIFIER_DMA_SIZE,
-                                              DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
+       key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, key_iova)) {
                dev_err(dev, "DMA mapping failed\n");
                err = -ENOMEM;
-               goto err_dma_map;
+               goto free_key;
        }
 
-       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-       dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+       switch (type) {
+       case DPAA2_ETH_RX_DIST_LEGACY:
+               err = legacy_config_dist_key(priv, key_iova);
+               break;
+       case DPAA2_ETH_RX_DIST_HASH:
+               err = config_hash_key(priv, key_iova);
+               break;
+       case DPAA2_ETH_RX_DIST_FS:
+               err = config_fs_key(priv, key_iova);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
 
-       err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-       dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-                        DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-       if (err)
-               dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
+       dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+                        DMA_TO_DEVICE);
+       if (err) {
+               if (err != -EOPNOTSUPP)
+                       dev_err(dev, "Distribution key config failed\n");
+               goto free_key;
+       }
 
-err_dma_map:
-err_prep_key:
-       kfree(dma_mem);
+       if (type != DPAA2_ETH_RX_DIST_FS)
+               priv->rx_hash_fields = hash_fields;
+
+free_key:
+       kfree(key_mem);
        return err;
 }
 
@@ -2080,6 +2983,7 @@ static int bind_dpni(struct dpaa2_eth_pr
        pools_params.num_dpbp = 1;
        pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
        pools_params.pools[0].backup_pool = 0;
+       pools_params.pools[0].priority_mask = 0xff;
        pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
        err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
        if (err) {
@@ -2087,17 +2991,36 @@ static int bind_dpni(struct dpaa2_eth_pr
                return err;
        }
 
-       /* have the interface implicitly distribute traffic based on supported
-        * header fields
+       /* Verify classification options and disable hashing and/or
+        * flow steering support in case of invalid configuration values
         */
-       err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
-       if (err)
-               netdev_err(net_dev, "Failed to configure hashing\n");
+       priv->dist_fields = default_dist_fields;
+       priv->num_dist_fields = ARRAY_SIZE(default_dist_fields);
+       check_cls_support(priv);
+
+       /* have the interface implicitly distribute traffic based on
+        * a static hash key. Also configure flow steering key, if supported.
+        * Errors here are not blocking, so just let the called function
+        * print its error message and move along.
+        */
+       if (dpaa2_eth_has_legacy_dist(priv)) {
+               dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY,
+                                      DPAA2_ETH_DIST_ALL);
+       } else {
+               dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
+                                      DPAA2_ETH_DIST_DEFAULT_HASH);
+               dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS,
+                                      DPAA2_ETH_DIST_ALL);
+       }
 
        /* Configure handling of error frames */
        err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
        err_cfg.set_frame_annotation = 1;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+       err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+#else
        err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+#endif
        err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
                                       &err_cfg);
        if (err) {
@@ -2114,6 +3037,11 @@ static int bind_dpni(struct dpaa2_eth_pr
                case DPAA2_TX_CONF_FQ:
                        err = setup_tx_flow(priv, &priv->fq[i]);
                        break;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+               case DPAA2_RX_ERR_FQ:
+                       err = setup_rx_err_flow(priv, &priv->fq[i]);
+                       break;
+#endif
                default:
                        dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
                        return -EINVAL;
@@ -2237,11 +3165,14 @@ static int netdev_init(struct net_device
 {
        struct device *dev = net_dev->dev.parent;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       u32 options = priv->dpni_attrs.options;
+       u64 supported = 0, not_supported = 0;
        u8 bcast_addr[ETH_ALEN];
        u8 num_queues;
        int err;
 
        net_dev->netdev_ops = &dpaa2_eth_ops;
+       net_dev->ethtool_ops = &dpaa2_ethtool_ops;
 
        err = set_mac_addr(priv);
        if (err)
@@ -2255,14 +3186,14 @@ static int netdev_init(struct net_device
                return err;
        }
 
-       /* Reserve enough space to align buffer as per hardware requirement;
-        * NOTE: priv->tx_data_offset MUST be initialized at this point.
-        */
-       net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
-
-       /* Set MTU limits */
-       net_dev->min_mtu = 68;
+       /* Set MTU upper limit; lower limit is default (68B) */
        net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+       err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+                                       (u16)DPAA2_ETH_MFL);
+       if (err) {
+               dev_err(dev, "dpni_set_max_frame_length() failed\n");
+               return err;
+       }
 
        /* Set actual number of queues in the net device */
        num_queues = dpaa2_eth_queue_count(priv);
@@ -2277,12 +3208,23 @@ static int netdev_init(struct net_device
                return err;
        }
 
-       /* Our .ndo_init will be called herein */
-       err = register_netdev(net_dev);
-       if (err < 0) {
-               dev_err(dev, "register_netdev() failed\n");
-               return err;
-       }
+       /* Capabilities listing */
+       supported |= IFF_LIVE_ADDR_CHANGE;
+
+       if (options & DPNI_OPT_NO_MAC_FILTER)
+               not_supported |= IFF_UNICAST_FLT;
+       else
+               supported |= IFF_UNICAST_FLT;
+
+       net_dev->priv_flags |= supported;
+       net_dev->priv_flags &= ~not_supported;
+
+       /* Features */
+       net_dev->features = NETIF_F_RXCSUM |
+                           NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                           NETIF_F_SG | NETIF_F_HIGHDMA |
+                           NETIF_F_LLTX;
+       net_dev->hw_features = net_dev->features;
 
        return 0;
 }
@@ -2303,14 +3245,9 @@ static int poll_link_state(void *arg)
        return 0;
 }
 
-static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
-{
-       return IRQ_WAKE_THREAD;
-}
-
 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
 {
-       u32 status = 0, clear = 0;
+       u32 status = ~0;
        struct device *dev = (struct device *)arg;
        struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
        struct net_device *net_dev = dev_get_drvdata(dev);
@@ -2320,18 +3257,12 @@ static irqreturn_t dpni_irq0_handler_thr
                                  DPNI_IRQ_INDEX, &status);
        if (unlikely(err)) {
                netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
-               clear = 0xffffffff;
-               goto out;
+               return IRQ_HANDLED;
        }
 
-       if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
-               clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+       if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
                link_state_update(netdev_priv(net_dev));
-       }
 
-out:
-       dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-                             DPNI_IRQ_INDEX, clear);
        return IRQ_HANDLED;
 }
 
@@ -2348,8 +3279,7 @@ static int setup_irqs(struct fsl_mc_devi
 
        irq = ls_dev->irqs[0];
        err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
-                                       dpni_irq0_handler,
-                                       dpni_irq0_handler_thread,
+                                       NULL, dpni_irq0_handler_thread,
                                        IRQF_NO_SUSPEND | IRQF_ONESHOT,
                                        dev_name(&ls_dev->dev), &ls_dev->dev);
        if (err < 0) {
@@ -2405,6 +3335,393 @@ static void del_ch_napi(struct dpaa2_eth
        }
 }
 
+/* SysFS support */
+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
+       /* No MC API for getting the shaping config. We're stateful. */
+       struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
+
+       return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
+}
+
+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf,
+                                         size_t count)
+{
+       int err, items;
+       struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
+       struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
+
+       items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
+       if (items != 2) {
+               pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
+               return -EINVAL;
+       }
+       /* Size restriction as per MC API documentation */
+       if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
+               pr_err("max_burst_size must be <= %d\n",
+                      DPAA2_ETH_MAX_BURST_SIZE);
+               return -EINVAL;
+       }
+
+       err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
+                                 &ercfg, 0);
+       if (err) {
+               dev_err(dev, "dpni_set_tx_shaping() failed\n");
+               return -EPERM;
+       }
+       /* If successful, save the current configuration for future inquiries */
+       priv->shaping_cfg = scfg;
+
+       return count;
+}
+
+static struct device_attribute dpaa2_eth_attrs[] = {
+       __ATTR(tx_shaping,
+              0600,
+              dpaa2_eth_show_tx_shaping,
+              dpaa2_eth_write_tx_shaping),
+};
+
+static void dpaa2_eth_sysfs_init(struct device *dev)
+{
+       int i, err;
+
+       for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
+               err = device_create_file(dev, &dpaa2_eth_attrs[i]);
+               if (err) {
+                       dev_err(dev, "ERROR creating sysfs file\n");
+                       goto undo;
+               }
+       }
+       return;
+
+undo:
+       while (i > 0)
+               device_remove_file(dev, &dpaa2_eth_attrs[--i]);
+}
+
+static void dpaa2_eth_sysfs_remove(struct device *dev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
+               device_remove_file(dev, &dpaa2_eth_attrs[i]);
+}
+
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
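+/* Report the PFC configuration, derived from the per-tc Rx congestion
+ * notification settings
+ */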
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+                                      struct ieee_pfc *pfc)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpni_congestion_notification_cfg notification_cfg;
+       struct dpni_link_state state;
+       int err, i;
+
+       priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
+
+       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+       if (err) {
+               netdev_err(net_dev, "ERROR %d getting link state\n", err);
+               return err;
+       }
+
+       if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
+               return 0;
+
+       priv->pfc.pfc_en = 0;
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               err = dpni_get_congestion_notification(priv->mc_io, 0,
+                                                      priv->mc_token,
+                                                      DPNI_QUEUE_RX,
+                                                      i, &notification_cfg);
+               if (err) {
+                       netdev_err(net_dev, "Error %d getting congestion notif\n",
+                                  err);
+                       return err;
+               }
+
+               if (notification_cfg.threshold_entry)
+                       priv->pfc.pfc_en |= 1 << i;
+       }
+
+       memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+
+       return 0;
+}
+
+/* Configure ingress classification based on VLAN PCP */
+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpkg_profile_cfg kg_cfg = {0};
+       struct dpni_qos_tbl_cfg qos_cfg = {0};
+       struct dpni_rule_cfg key_params;
+       u8 *params_iova, *key, *mask = NULL;
+       /* We only need the trailing 16 bits, without the TPID */
+       u8 key_size = VLAN_HLEN / 2;
+       int err = 0, i, j = 0;
+
+       if (priv->vlan_clsf_set)
+               return 0;
+
+       params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+       if (!params_iova)
+               return -ENOMEM;
+
+       kg_cfg.num_extracts = 1;
+       kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+       kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+       kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+       kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+       err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
+       if (err) {
+               dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
+               goto out_free;
+       }
+
+       /* Set QoS table */
+       qos_cfg.default_tc = 0;
+       qos_cfg.discard_on_miss = 0;
+       qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
+                                             DPAA2_CLASSIFIER_DMA_SIZE,
+                                             DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+               dev_err(dev, "%s: DMA mapping failed\n", __func__);
+               err = -ENOMEM;
+               goto out_free;
+       }
+       err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+       dma_unmap_single(dev, qos_cfg.key_cfg_iova,
+                        DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+
+       if (err) {
+               dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
+               goto out_free;
+       }
+
+       key_params.key_size = key_size;
+
+       if (dpaa2_eth_fs_mask_enabled(priv)) {
+               mask = kzalloc(key_size, GFP_KERNEL);
+               if (!mask) {
+                       err = -ENOMEM;
+                       goto out_free;
+               }
+
+               *mask = cpu_to_be16(VLAN_PRIO_MASK);
+
+               key_params.mask_iova = dma_map_single(dev, mask, key_size,
+                                                     DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, key_params.mask_iova)) {
+                       dev_err(dev, "%s: DMA mapping failed\n", __func__);
+                       err = -ENOMEM;
+                       goto out_free_mask;
+               }
+       } else {
+               key_params.mask_iova = 0;
+       }
+
+       key = kzalloc(key_size, GFP_KERNEL);
+       if (!key) {
+               err = -ENOMEM;
+               goto out_cleanup_mask;
+       }
+
+       key_params.key_iova = dma_map_single(dev, key, key_size,
+                                            DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, key_params.key_iova)) {
+               dev_err(dev, "%s: DMA mapping failed\n", __func__);
+               err = -ENOMEM;
+               goto out_free_key;
+       }
+
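+       /* Install one QoS table entry per traffic class: frames carrying
+        * VLAN PCP value i are classified to traffic class i
+        */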
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               *key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
+
+               dma_sync_single_for_device(dev, key_params.key_iova,
+                                          key_size, DMA_TO_DEVICE);
+
+               err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+                                        &key_params, i, j++);
+               if (err) {
+                       dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
+                       goto out_remove;
+               }
+       }
+
+       priv->vlan_clsf_set = true;
+       dev_dbg(dev, "Vlan PCP QoS classification set\n");
+       goto out_cleanup;
+
+out_remove:
+       /* Roll back the entries added so far, preserving the original error */
+       for (j = 0; j < i; j++) {
+               int tmp;
+
+               *key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
+
+               dma_sync_single_for_device(dev, key_params.key_iova, key_size,
+                                          DMA_TO_DEVICE);
+
+               tmp = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
+                                           &key_params);
+               if (tmp)
+                       dev_err(dev, "dpni_remove_qos_entry failed: %d\n", tmp);
+       }
+
+out_cleanup:
+       dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
+out_free_key:
+       kfree(key);
+out_cleanup_mask:
+       if (key_params.mask_iova)
+               dma_unmap_single(dev, key_params.mask_iova, key_size,
+                                DMA_TO_DEVICE);
+out_free_mask:
+       kfree(mask);
+out_free:
+       kfree(params_iova);
+       return err;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+                                      struct ieee_pfc *pfc)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpni_congestion_notification_cfg notification_cfg = {0};
+       struct dpni_link_state state = {0};
+       struct dpni_link_cfg cfg = {0};
+       struct ieee_pfc old_pfc;
+       int err = 0, i;
+
+       if (dpaa2_eth_tc_count(priv) == 1) {
+               netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
+               return 0;
+       }
+
+       /* Zero out pfc_enabled prios greater than tc_count */
+       pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
+
+       if (priv->pfc.pfc_en == pfc->pfc_en)
+               /* Same enabled mask, nothing to be done */
+               return 0;
+
+       err = set_vlan_qos(priv);
+       if (err)
+               return err;
+
+       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+       if (err) {
+               netdev_err(net_dev, "ERROR %d getting link state\n", err);
+               return err;
+       }
+
+       cfg.rate = state.rate;
+       cfg.options = state.options;
+       if (pfc->pfc_en)
+               cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+       else
+               cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+
+       err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+       if (err) {
+               netdev_err(net_dev, "ERROR %d setting link cfg\n", err);
+               return err;
+       }
+
+       memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
+       memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+
+       err = set_rx_taildrop(priv);
+       if (err)
+               goto out_restore_config;
+
+       /* configure congestion notifications */
+       notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+       notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+       notification_cfg.message_iova = 0ULL;
+       notification_cfg.message_ctx = 0ULL;
+
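+       /* For each PFC-enabled traffic class, enter the congested state (and
+        * thus assert flow control) once NAPI_POLL_WEIGHT frames are pending
+        * on its Rx queues, and exit it at half that count; zero thresholds
+        * leave the notification disabled
+        */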
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               if (dpaa2_eth_is_pfc_enabled(priv, i)) {
+                       notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
+                       notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
+               } else {
+                       notification_cfg.threshold_entry = 0;
+                       notification_cfg.threshold_exit = 0;
+               }
+
+               err = dpni_set_congestion_notification(priv->mc_io, 0,
+                                                      priv->mc_token,
+                                                      DPNI_QUEUE_RX,
+                                                      i, &notification_cfg);
+               if (err) {
+                       netdev_err(net_dev, "Error %d setting congestion notif\n",
+                                  err);
+                       goto out_restore_config;
+               }
+
+               netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
+                          (notification_cfg.threshold_entry ?
+                           "Enabled" : "Disabled"), i);
+       }
+
+       return 0;
+
+out_restore_config:
+       memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
+       return err;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+       return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+       priv->dcbx_mode = mode;
+       return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+       switch (capid) {
+       case DCB_CAP_ATTR_PFC:
+               *cap = true;
+               break;
+       case DCB_CAP_ATTR_PFC_TCS:
+               /* bitmap where each bit represents a number of traffic
+                * classes the device can be configured to use for Priority
+                * Flow Control
+                */
+               *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+               break;
+       case DCB_CAP_ATTR_DCBX:
+               *cap = priv->dcbx_mode;
+               break;
+       default:
+               *cap = false;
+               break;
+       }
+
+       return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+       .ieee_getpfc    = dpaa2_eth_dcbnl_ieee_getpfc,
+       .ieee_setpfc    = dpaa2_eth_dcbnl_ieee_setpfc,
+       .getdcbx        = dpaa2_eth_dcbnl_getdcbx,
+       .setdcbx        = dpaa2_eth_dcbnl_setdcbx,
+       .getcap         = dpaa2_eth_dcbnl_getcap,
+};
+#endif
+
 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 {
        struct device *dev;
@@ -2415,7 +3732,7 @@ static int dpaa2_eth_probe(struct fsl_mc
        dev = &dpni_dev->dev;
 
        /* Net device */
-       net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+       net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
        if (!net_dev) {
                dev_err(dev, "alloc_etherdev_mq() failed\n");
                return -ENOMEM;
@@ -2433,7 +3750,10 @@ static int dpaa2_eth_probe(struct fsl_mc
        err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
                                     &priv->mc_io);
        if (err) {
-               dev_err(dev, "MC portal allocation failed\n");
+               if (err == -ENXIO)
+                       err = -EPROBE_DEFER;
+               else
+                       dev_err(dev, "MC portal allocation failed\n");
                goto err_portal_alloc;
        }
 
@@ -2456,9 +3776,6 @@ static int dpaa2_eth_probe(struct fsl_mc
        if (err)
                goto err_bind;
 
-       /* Add a NAPI context for each channel */
-       add_ch_napi(priv);
-
        /* Percpu statistics */
        priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
        if (!priv->percpu_stats) {
@@ -2491,7 +3808,14 @@ static int dpaa2_eth_probe(struct fsl_mc
        if (err)
                goto err_alloc_rings;
 
-       net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+       net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+       priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+#endif
+
+       /* Add a NAPI context for each channel */
+       add_ch_napi(priv);
+       enable_ch_napi(priv);
 
        err = setup_irqs(dpni_dev);
        if (err) {
@@ -2499,25 +3823,41 @@ static int dpaa2_eth_probe(struct fsl_mc
                priv->poll_thread = kthread_run(poll_link_state, priv,
                                                "%s_poll_link", net_dev->name);
                if (IS_ERR(priv->poll_thread)) {
-                       netdev_err(net_dev, "Error starting polling thread\n");
+                       dev_err(dev, "Error starting polling thread\n");
                        goto err_poll_thread;
                }
                priv->do_link_poll = true;
        }
 
+       err = register_netdev(net_dev);
+       if (err < 0) {
+               dev_err(dev, "register_netdev() failed\n");
+               goto err_netdev_reg;
+       }
+
+       dpaa2_eth_sysfs_init(&net_dev->dev);
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+       dpaa2_dbg_add(priv);
+#endif
+
        dev_info(dev, "Probed interface %s\n", net_dev->name);
        return 0;
 
+err_netdev_reg:
+       if (priv->do_link_poll)
+               kthread_stop(priv->poll_thread);
+       else
+               fsl_mc_free_irqs(dpni_dev);
 err_poll_thread:
        free_rings(priv);
 err_alloc_rings:
 err_csum:
-       unregister_netdev(net_dev);
 err_netdev_init:
        free_percpu(priv->percpu_extras);
 err_alloc_percpu_extras:
        free_percpu(priv->percpu_stats);
 err_alloc_percpu_stats:
+       disable_ch_napi(priv);
        del_ch_napi(priv);
 err_bind:
        free_dpbp(priv);
@@ -2544,8 +3884,15 @@ static int dpaa2_eth_remove(struct fsl_m
        net_dev = dev_get_drvdata(dev);
        priv = netdev_priv(net_dev);
 
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+       dpaa2_dbg_remove(priv);
+#endif
+       dpaa2_eth_sysfs_remove(&net_dev->dev);
+
        unregister_netdev(net_dev);
-       dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+       disable_ch_napi(priv);
+       del_ch_napi(priv);
 
        if (priv->do_link_poll)
                kthread_stop(priv->poll_thread);
@@ -2555,8 +3902,6 @@ static int dpaa2_eth_remove(struct fsl_m
        free_rings(priv);
        free_percpu(priv->percpu_stats);
        free_percpu(priv->percpu_extras);
-
-       del_ch_napi(priv);
        free_dpbp(priv);
        free_dpio(priv);
        free_dpni(priv);
@@ -2566,6 +3911,8 @@ static int dpaa2_eth_remove(struct fsl_m
        dev_set_drvdata(dev, NULL);
        free_netdev(net_dev);
 
+       dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
        return 0;
 }
 
@@ -2588,4 +3935,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
        .match_id_table = dpaa2_eth_match_id_table
 };
 
-module_fsl_mc_driver(dpaa2_eth_driver);
+static int __init dpaa2_eth_driver_init(void)
+{
+       int err;
+
+       dpaa2_eth_dbg_init();
+       err = fsl_mc_driver_register(&dpaa2_eth_driver);
+       if (err)
+               goto out_debugfs_err;
+
+       err = dpaa2_ceetm_register();
+       if (err)
+               goto out_ceetm_err;
+
+       return 0;
+
+out_ceetm_err:
+       fsl_mc_driver_unregister(&dpaa2_eth_driver);
+out_debugfs_err:
+       dpaa2_eth_dbg_exit();
+       return err;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+       dpaa2_ceetm_unregister();
+       fsl_mc_driver_unregister(&dpaa2_eth_driver);
+       dpaa2_eth_dbg_exit();
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -33,6 +33,7 @@
 #ifndef __DPAA2_ETH_H
 #define __DPAA2_ETH_H
 
+#include <linux/dcbnl.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 
@@ -44,9 +45,17 @@
 #include "dpni-cmd.h"
 
 #include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
+
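+/* Pack a WRIOP hardware version triplet into a single integer, so that
+ * versions can be compared numerically
+ */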
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
 
 #define DPAA2_ETH_STORE_SIZE           16
 
+/* Maximum number of Tx confirmations we process on a single NAPI poll call;
+ * Tx confirmation frames take less processing time than Rx frames, so we can
+ * afford a larger budget for them.
+ */
+#define TX_CONF_PER_NAPI_POLL          256
+
 /* Maximum number of scatter-gather entries in an ingress frame,
  * considering the maximum receive frame size is 64K
  */
@@ -60,6 +69,14 @@
 /* Convert L3 MTU to L2 MFL */
 #define DPAA2_ETH_L2_MAX_FRM(mtu)      ((mtu) + VLAN_ETH_HLEN)
 
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE       0xF7FF
+
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD         7
+
 /* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
  * frames in the Rx queues (length of the current frame is not
  * taken into account when making the taildrop decision)
@@ -72,31 +89,32 @@
  * to accommodate the buffer refill delay.
  */
 #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS             (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-#define DPAA2_ETH_REFILL_THRESH                DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+#define DPAA2_ETH_NUM_BUFS_PER_CH      (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
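+/* A channel's buffer pool is due for refill once its buffer count drops
+ * one full QBMan release command (DPAA2_ETH_BUFS_PER_CMD buffers) below
+ * the per-channel maximum
+ */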
+#define DPAA2_ETH_REFILL_THRESH(priv)  \
+       ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
 
-/* Maximum number of buffers that can be acquired/released through a single
- * QBMan command
- */
-#define DPAA2_ETH_BUFS_PER_CMD         7
+/* Global buffer quota in case flow control is enabled */
+#define DPAA2_ETH_NUM_BUFS_FC          256
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_ETH_TX_BUF_ALIGN         64
 
-/* Hardware requires alignment for ingress/egress buffer addresses
- * and ingress buffer lengths.
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, only 64B alignment is required.
  */
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1    256
+#define DPAA2_ETH_RX_BUF_ALIGN         64
+
 #define DPAA2_ETH_RX_BUF_SIZE          2048
-#define DPAA2_ETH_TX_BUF_ALIGN         64
-#define DPAA2_ETH_RX_BUF_ALIGN         256
-#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
-       ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
-
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
- * buffers large enough to allow building an skb around them and also account
- * for alignment restrictions
- */
-#define DPAA2_ETH_BUF_RAW_SIZE \
-       (DPAA2_ETH_RX_BUF_SIZE + \
-       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
-       DPAA2_ETH_RX_BUF_ALIGN)
+#define DPAA2_ETH_SKB_SIZE \
+       (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
+
+/* Hardware annotation area in RX/TX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE          64
+#define DPAA2_ETH_TX_HWA_SIZE          128
 
 /* We are accommodating a skb backpointer and some S/G info
  * in the frame's software annotation. The hardware
@@ -104,12 +122,32 @@
  */
 #define DPAA2_ETH_SWA_SIZE             64
 
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+       DPAA2_ETH_SWA_SINGLE,
+       DPAA2_ETH_SWA_SG,
+       DPAA2_ETH_SWA_XDP,
+};
+
 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
 struct dpaa2_eth_swa {
-       struct sk_buff *skb;
-       struct scatterlist *scl;
-       int num_sg;
-       int num_dma_bufs;
+       enum dpaa2_eth_swa_type type;
+       union {
+               struct {
+                       struct sk_buff *skb;
+               } single;
+               struct {
+                       struct sk_buff *skb;
+                       struct scatterlist *scl;
+                       int num_sg;
+                       int sgt_size;
+               } sg;
+               struct {
+                       int dma_size;
+               } xdp;
+       };
 };
 
 /* Annotation valid bits in FD FRC */
@@ -120,23 +158,14 @@ struct dpaa2_eth_swa {
 #define DPAA2_FD_FRC_FASWOV            0x0800
 #define DPAA2_FD_FRC_FAICFDV           0x0400
 
-/* Error bits in FD CTRL */
-#define DPAA2_FD_CTRL_UFD              0x00000004
-#define DPAA2_FD_CTRL_SBE              0x00000008
-#define DPAA2_FD_CTRL_FSE              0x00000020
-#define DPAA2_FD_CTRL_FAERR            0x00000040
-
-#define DPAA2_FD_RX_ERR_MASK           (DPAA2_FD_CTRL_SBE      | \
-                                        DPAA2_FD_CTRL_FAERR)
-#define DPAA2_FD_TX_ERR_MASK           (DPAA2_FD_CTRL_UFD      | \
-                                        DPAA2_FD_CTRL_SBE      | \
-                                        DPAA2_FD_CTRL_FSE      | \
-                                        DPAA2_FD_CTRL_FAERR)
+#define DPAA2_FD_RX_ERR_MASK           (FD_CTRL_SBE | FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK           (FD_CTRL_UFD    | \
+                                        FD_CTRL_SBE    | \
+                                        FD_CTRL_FSE    | \
+                                        FD_CTRL_FAERR)
 
 /* Annotation bits in FD CTRL */
 #define DPAA2_FD_CTRL_ASAL             0x00020000      /* ASAL = 128 */
-#define DPAA2_FD_CTRL_PTA              0x00800000
-#define DPAA2_FD_CTRL_PTV1             0x00400000
 
 /* Frame annotation status */
 struct dpaa2_fas {
@@ -144,7 +173,7 @@ struct dpaa2_fas {
        u8 ppid;
        __le16 ifpid;
        __le32 status;
-} __packed;
+};
 
 /* Frame annotation status word is located in the first 8 bytes
  * of the buffer's hardware annotation area
@@ -152,11 +181,45 @@ struct dpaa2_fas {
 #define DPAA2_FAS_OFFSET               0
 #define DPAA2_FAS_SIZE                 (sizeof(struct dpaa2_fas))
 
+/* Timestamp is located in the next 8 bytes of the buffer's
+ * hardware annotation area
+ */
+#define DPAA2_TS_OFFSET                        0x8
+
+/* Frame annotation egress action descriptor */
+#define DPAA2_FAEAD_OFFSET             0x58
+
+struct dpaa2_faead {
+       __le32 conf_fqid;
+       __le32 ctrl;
+};
+
+#define DPAA2_FAEAD_A2V                        0x20000000
+#define DPAA2_FAEAD_A4V                        0x08000000
+#define DPAA2_FAEAD_UPDV               0x00001000
+#define DPAA2_FAEAD_EBDDV              0x00002000
+#define DPAA2_FAEAD_UPD                        0x00000010
+
 /* Accessors for the hardware annotation fields that we use */
-#define dpaa2_get_hwa(buf_addr) \
-       ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
-#define dpaa2_get_fas(buf_addr) \
-       (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
+{
+       return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
+       return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
+
+static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+       return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
+       return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
 
 /* Error and status bits in the frame annotation status word */
 /* Debug frame, otherwise supposed to be discarded */
@@ -203,11 +266,6 @@ struct dpaa2_fas {
                                         DPAA2_FAS_BLE          | \
                                         DPAA2_FAS_L3CE         | \
                                         DPAA2_FAS_L4CE)
-/* Tx errors */
-#define DPAA2_FAS_TX_ERR_MASK          (DPAA2_FAS_KSE          | \
-                                        DPAA2_FAS_EOFHE        | \
-                                        DPAA2_FAS_MNLE         | \
-                                        DPAA2_FAS_TIDE)
 
 /* Time in milliseconds between link state updates */
 #define DPAA2_ETH_LINK_STATE_REFRESH   1000
@@ -218,6 +276,14 @@ struct dpaa2_fas {
  */
 #define DPAA2_ETH_ENQUEUE_RETRIES      10
 
+/* Tx congestion entry & exit thresholds, in number of bytes.
+ * We allow a maximum of 512KB worth of frames pending processing on the Tx
+ * queues of an interface
+ */
+#define DPAA2_ETH_TX_CONG_ENTRY_THRESH  (512 * 1024)
+#define DPAA2_ETH_TX_CONG_EXIT_THRESH  \
+       (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10)
+
 /* Driver statistics, other than those in struct rtnl_link_stats64.
  * These are usually collected per-CPU and aggregated by ethtool.
  */
@@ -226,6 +292,7 @@ struct dpaa2_eth_drv_stats {
        __u64   tx_conf_bytes;
        __u64   tx_sg_frames;
        __u64   tx_sg_bytes;
+       __u64   tx_reallocs;
        __u64   rx_sg_frames;
        __u64   rx_sg_bytes;
        /* Enqueues retried due to portal busy */
@@ -236,6 +303,8 @@ struct dpaa2_eth_drv_stats {
 struct dpaa2_eth_fq_stats {
        /* Number of frames received on this queue */
        __u64 frames;
+       /* Number of times this queue entered congestion */
+       __u64 congestion_entry;
 };
 
 /* Per-channel statistics */
@@ -250,17 +319,23 @@ struct dpaa2_eth_ch_stats {
        __u64 pull_err;
 };
 
+#define DPAA2_ETH_MAX_TCS              8
+
 /* Maximum number of queues associated with a DPNI */
-#define DPAA2_ETH_MAX_RX_QUEUES                16
-#define DPAA2_ETH_MAX_TX_QUEUES                NR_CPUS
+#define DPAA2_ETH_MAX_RX_QUEUES                (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
+#define DPAA2_ETH_MAX_TX_QUEUES                DPNI_MAX_SENDERS
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES    1
 #define DPAA2_ETH_MAX_QUEUES           (DPAA2_ETH_MAX_RX_QUEUES + \
-                                       DPAA2_ETH_MAX_TX_QUEUES)
+                                       DPAA2_ETH_MAX_TX_QUEUES + \
+                                       DPAA2_ETH_MAX_RX_ERR_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES    (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
 
-#define DPAA2_ETH_MAX_DPCONS           NR_CPUS
+#define DPAA2_ETH_MAX_DPCONS           16
 
 enum dpaa2_eth_fq_type {
        DPAA2_RX_FQ = 0,
        DPAA2_TX_CONF_FQ,
+       DPAA2_RX_ERR_FQ
 };
 
 struct dpaa2_eth_priv;
@@ -269,6 +344,7 @@ struct dpaa2_eth_fq {
        u32 fqid;
        u32 tx_qdbin;
        u16 flowid;
+       u8 tc;
        int target_cpu;
        struct dpaa2_eth_channel *channel;
        enum dpaa2_eth_fq_type type;
@@ -276,7 +352,8 @@ struct dpaa2_eth_fq {
        void (*consume)(struct dpaa2_eth_priv *,
                        struct dpaa2_eth_channel *,
                        const struct dpaa2_fd *,
-                       struct napi_struct *);
+                       struct napi_struct *,
+                       u16 queue_id);
        struct dpaa2_eth_fq_stats stats;
 };
 
@@ -285,24 +362,53 @@ struct dpaa2_eth_channel {
        struct fsl_mc_device *dpcon;
        int dpcon_id;
        int ch_id;
-       int dpio_id;
        struct napi_struct napi;
+       struct dpaa2_io *dpio;
        struct dpaa2_io_store *store;
        struct dpaa2_eth_priv *priv;
        int buf_count;
        struct dpaa2_eth_ch_stats stats;
+       struct bpf_prog *xdp_prog;
+       u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
+       u8 rel_buf_cnt;
+       bool flush;
+};
+
+struct dpaa2_eth_cls_rule {
+       struct ethtool_rx_flow_spec fs;
+       bool in_use;
 };
 
-struct dpaa2_eth_hash_fields {
+struct dpaa2_eth_dist_fields {
        u64 rxnfc_field;
        enum net_prot cls_prot;
        int cls_field;
+       int offset;
        int size;
+       u32 id;
 };
 
 /* Driver private data */
 struct dpaa2_eth_priv {
        struct net_device *net_dev;
+       /* Standard statistics */
+       struct rtnl_link_stats64 __percpu *percpu_stats;
+       /* Extra stats, in addition to the ones known by the kernel */
+       struct dpaa2_eth_drv_stats __percpu *percpu_extras;
+       bool ts_tx_en; /* Tx timestamping enabled */
+       bool ts_rx_en; /* Rx timestamping enabled */
+       u16 tx_data_offset;
+       u16 bpid;
+       u16 tx_qdid;
+       u16 rx_buf_align;
+       struct iommu_domain *iommu_domain;
+       int max_bufs_per_ch;
+       int refill_thresh;
+       bool has_xdp_prog;
+
+       void *cscn_mem; /* Tx congestion notifications are written here */
+       void *cscn_unaligned;
+       dma_addr_t cscn_dma;
 
        u8 num_fqs;
        struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
@@ -311,51 +417,193 @@ struct dpaa2_eth_priv {
        struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
 
        struct dpni_attr dpni_attrs;
-       u16 tx_data_offset;
-
+       u16 dpni_ver_major;
+       u16 dpni_ver_minor;
        struct fsl_mc_device *dpbp_dev;
-       u16 bpid;
-       struct iommu_domain *iommu_domain;
 
-       u16 tx_qdid;
        struct fsl_mc_io *mc_io;
        /* Cores which have an affine DPIO/DPCON.
         * This is the cpu set on which Rx and Tx conf frames are processed
         */
        struct cpumask dpio_cpumask;
 
-       /* Standard statistics */
-       struct rtnl_link_stats64 __percpu *percpu_stats;
-       /* Extra stats, in addition to the ones known by the kernel */
-       struct dpaa2_eth_drv_stats __percpu *percpu_extras;
-
        u16 mc_token;
 
        struct dpni_link_state link_state;
        bool do_link_poll;
        struct task_struct *poll_thread;
 
+       /* Rx distribution (hash and flow steering) header fields
+        * supported by the driver
+        */
+       struct dpaa2_eth_dist_fields *dist_fields;
+       u8 num_dist_fields;
        /* enabled ethtool hashing bits */
        u64 rx_hash_fields;
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+       struct dpaa2_debugfs dbg;
+#endif
+       /* array of classification rules */
+       struct dpaa2_eth_cls_rule *cls_rule;
+       struct dpni_tx_shaping_cfg shaping_cfg;
+
+       u8 dcbx_mode;
+       struct ieee_pfc pfc;
+       bool vlan_clsf_set;
+       bool tx_pause_frames;
+
+       bool ceetm_en;
+};
+
+enum dpaa2_eth_rx_dist {
+       DPAA2_ETH_RX_DIST_HASH,
+       DPAA2_ETH_RX_DIST_FS,
+       DPAA2_ETH_RX_DIST_LEGACY
 };
 
-/* default Rx hash options, set during probing */
-#define DPAA2_RXH_SUPPORTED    (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
-                               | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
-                               | RXH_L4_B_2_3)
+/* Supported Rx distribution field ids */
+#define DPAA2_ETH_DIST_ETHSRC          BIT(0)
+#define DPAA2_ETH_DIST_ETHDST          BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE         BIT(2)
+#define DPAA2_ETH_DIST_VLAN            BIT(3)
+#define DPAA2_ETH_DIST_IPSRC           BIT(4)
+#define DPAA2_ETH_DIST_IPDST           BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO         BIT(6)
+#define DPAA2_ETH_DIST_L4SRC           BIT(7)
+#define DPAA2_ETH_DIST_L4DST           BIT(8)
+#define DPAA2_ETH_DIST_ALL             (~0U)
+
+/* Default Rx hash key */
+#define DPAA2_ETH_DIST_DEFAULT_HASH \
+       (DPAA2_ETH_DIST_IPPROTO | \
+        DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \
+        DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST)
 
 #define dpaa2_eth_hash_enabled(priv)   \
        ((priv)->dpni_attrs.num_queues > 1)
 
+#define dpaa2_eth_fs_enabled(priv)     \
+       (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv)        \
+       ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
+#define dpaa2_eth_fs_count(priv)       \
+       ((priv)->dpni_attrs.fs_entries)
+
 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
 #define DPAA2_CLASSIFIER_DMA_SIZE 256
 
 extern const struct ethtool_ops dpaa2_ethtool_ops;
 extern const char dpaa2_eth_drv_version[];
 
-static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
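+/* Compare the DPNI API version against a given major.minor pair; the result
+ * is positive, zero or negative, strcmp-style
+ */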
+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
+                                        u16 ver_major, u16 ver_minor)
+{
+       if (priv->dpni_ver_major == ver_major)
+               return priv->dpni_ver_minor - ver_minor;
+       return priv->dpni_ver_major - ver_major;
+}
+
+#define DPNI_DIST_KEY_VER_MAJOR                        7
+#define DPNI_DIST_KEY_VER_MINOR                        5
+
+static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv)
+{
+       return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR,
+                                      DPNI_DIST_KEY_VER_MINOR) < 0);
+}
+
+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
+ * the buffer also needs space for its shared info struct, and we need
+ * to allocate enough to accommodate hardware alignment restrictions
+ */
+static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
+{
+       return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
+}
+
+/* Total headroom needed by the hardware in Tx frame buffers */
+static inline unsigned int
+dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb)
+{
+       unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
+       /* If we don't have an skb (e.g. XDP buffer), we only need space for
+        * the software annotation area
+        */
+       if (!skb)
+               return headroom;
+
+       /* For non-linear skbs we have no headroom requirement, as we build a
+        * SG frame with a newly allocated SGT buffer
+        */
+       if (skb_is_nonlinear(skb))
+               return 0;
+
+       /* If we have Tx timestamping, need 128B hardware annotation */
+       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+               headroom += DPAA2_ETH_TX_HWA_SIZE;
+
+       return headroom;
+}
+
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios. We need to reserve enough space
+ * such that we can accommodate the maximum required Tx offset and alignment
+ * in the ingress frame buffer
+ */
+static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
+{
+       return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
+              DPAA2_ETH_RX_HWA_SIZE;
+}
+
+static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
 {
        return priv->dpni_attrs.num_queues;
 }
 
+static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
+{
+       return priv->dpni_attrs.num_tcs;
+}
+
+static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
+                                           int traffic_class)
+{
+       return priv->pfc.pfc_en & (1 << traffic_class);
+}
+
+enum dpaa2_eth_td_cfg {
+       DPAA2_ETH_TD_NONE,
+       DPAA2_ETH_TD_QUEUE,
+       DPAA2_ETH_TD_GROUP
+};
+
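+/* Select the Rx taildrop granularity: per queue group when PFC is enabled,
+ * no taildrop when plain Tx pause frames are in use, per queue otherwise
+ */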
+static inline enum dpaa2_eth_td_cfg
+dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
+{
+       bool pfc_enabled = !!(priv->pfc.pfc_en);
+
+       if (pfc_enabled)
+               return DPAA2_ETH_TD_GROUP;
+       else if (priv->tx_pause_frames)
+               return DPAA2_ETH_TD_NONE;
+       else
+               return DPAA2_ETH_TD_QUEUE;
+}
+
+static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
+{
+       return 1;
+}
+
+void check_cls_support(struct dpaa2_eth_priv *priv);
+
+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
+                          enum dpaa2_eth_rx_dist type, u32 key_fields);
+
 #endif /* __DPAA2_H */
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -1,5 +1,5 @@
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2017 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -62,6 +62,7 @@ static char dpaa2_ethtool_extras[][ETH_G
        "[drv] tx conf bytes",
        "[drv] tx sg frames",
        "[drv] tx sg bytes",
+       "[drv] tx realloc frames",
        "[drv] rx sg frames",
        "[drv] rx sg bytes",
        "[drv] enqueue portal busy",
@@ -69,6 +70,15 @@ static char dpaa2_ethtool_extras[][ETH_G
        "[drv] dequeue portal busy",
        "[drv] channel pull errors",
        "[drv] cdan",
+       "[drv] tx congestion state",
+#ifdef CONFIG_FSL_QBMAN_DEBUG
+       /* FQ stats */
+       "rx pending frames",
+       "rx pending bytes",
+       "tx conf pending frames",
+       "tx conf pending bytes",
+       "buffer count"
+#endif
 };
 
 #define DPAA2_ETH_NUM_EXTRA_STATS      ARRAY_SIZE(dpaa2_ethtool_extras)
@@ -76,10 +86,15 @@ static char dpaa2_ethtool_extras[][ETH_G
 static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
                                  struct ethtool_drvinfo *drvinfo)
 {
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
        strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, dpaa2_eth_drv_version,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
+
        strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
                sizeof(drvinfo->bus_info));
 }
@@ -113,25 +128,37 @@ out:
        return err;
 }
 
+#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR                7
+#define DPNI_DYNAMIC_LINK_SET_VER_MINOR                1
 static int
 dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
                             const struct ethtool_link_ksettings *link_settings)
 {
-       struct dpni_link_cfg cfg = {0};
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpni_link_state state = {0};
+       struct dpni_link_cfg cfg = {0};
        int err = 0;
 
-       netdev_dbg(net_dev, "Setting link parameters...");
+       /* If using an older MC version, the DPNI must be down in order to
+        * change link settings, so let the user know about it.
+        */
+       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
+                                  DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
+               if (netif_running(net_dev)) {
+                       netdev_info(net_dev, "Interface must be brought down first.\n");
+                       return -EACCES;
+               }
+       }
 
-       /* Due to a temporary MC limitation, the DPNI must be down
-        * in order to be able to change link settings. Taking steps to let
-        * the user know that.
-        */
-       if (netif_running(net_dev)) {
-               netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
-               return -EACCES;
+       /* Need to interrogate link state to get flow control params */
+       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+       if (err) {
+               netdev_err(net_dev, "Error getting link state\n");
+               goto out;
        }
 
+       cfg.options = state.options;
        cfg.rate = link_settings->base.speed;
        if (link_settings->base.autoneg == AUTONEG_ENABLE)
                cfg.options |= DPNI_LINK_OPT_AUTONEG;
@@ -149,6 +176,81 @@ dpaa2_eth_set_link_ksettings(struct net_
                 */
                netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
 
+out:
+       return err;
+}
+
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+                                    struct ethtool_pauseparam *pause)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpni_link_state state = {0};
+       int err;
+
+       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+       if (err)
+               netdev_dbg(net_dev, "Error getting link state\n");
+
+       /* Report general port autonegotiation status */
+       pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
+       pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
+       pause->tx_pause = pause->rx_pause ^
+                         !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+                                   struct ethtool_pauseparam *pause)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpni_link_state state = {0};
+       struct dpni_link_cfg cfg = {0};
+       u32 current_tx_pause;
+       int err = 0;
+
+       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+       if (err) {
+               netdev_dbg(net_dev, "Error getting link state\n");
+               goto out;
+       }
+
+       cfg.rate = state.rate;
+       cfg.options = state.options;
+       current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
+                          !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
+
+       /* We don't support changing pause frame autonegotiation separately
+        * from general port autoneg
+        */
+       if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
+               netdev_warn(net_dev,
+                           "Cannot change pause frame autoneg separately\n");
+
+       if (pause->rx_pause)
+               cfg.options |= DPNI_LINK_OPT_PAUSE;
+       else
+               cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+
+       if (pause->rx_pause ^ pause->tx_pause)
+               cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+       else
+               cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+       err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+       if (err) {
+               netdev_dbg(net_dev, "Error setting link\n");
+               goto out;
+       }
+
+       /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
+       if (current_tx_pause == pause->tx_pause)
+               goto out;
+
+       priv->tx_pause_frames = pause->tx_pause;
+       err = set_rx_taildrop(priv);
+       if (err)
+               netdev_dbg(net_dev, "Error configuring taildrop\n");
+
+out:
        return err;
 }
 
@@ -192,6 +294,13 @@ static void dpaa2_eth_get_ethtool_stats(
        int j, k, err;
        int num_cnt;
        union dpni_statistics dpni_stats;
+
+#ifdef CONFIG_FSL_QBMAN_DEBUG
+       u32 fcnt, bcnt;
+       u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+       u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+       u32 buf_cnt;
+#endif
        u64 cdan = 0;
        u64 portal_busy = 0, pull_err = 0;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -204,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(
        /* Print standard counters, from DPNI statistics */
        for (j = 0; j <= 2; j++) {
                err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
-                                         j, &dpni_stats);
+                                         j, 0, &dpni_stats);
                if (err != 0)
                        netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
                switch (j) {
@@ -240,12 +349,474 @@ static void dpaa2_eth_get_ethtool_stats(
        *(data + i++) = portal_busy;
        *(data + i++) = pull_err;
        *(data + i++) = cdan;
+
+       *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
+
+#ifdef CONFIG_FSL_QBMAN_DEBUG
+       for (j = 0; j < priv->num_fqs; j++) {
+               /* Print FQ instantaneous counts */
+               err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+                                             &fcnt, &bcnt);
+               if (err) {
+                       netdev_warn(net_dev, "FQ query error %d", err);
+                       return;
+               }
+
+               if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+                       fcnt_tx_total += fcnt;
+                       bcnt_tx_total += bcnt;
+               } else {
+                       fcnt_rx_total += fcnt;
+                       bcnt_rx_total += bcnt;
+               }
+       }
+
+       *(data + i++) = fcnt_rx_total;
+       *(data + i++) = bcnt_rx_total;
+       *(data + i++) = fcnt_tx_total;
+       *(data + i++) = bcnt_tx_total;
+
+       err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+       if (err) {
+               netdev_warn(net_dev, "Buffer count query error %d\n", err);
+               return;
+       }
+       *(data + i++) = buf_cnt;
+#endif
+}
+
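+/* Byte offset of a given header field inside the Rx classification key,
+ * or -1 if the field is not part of the key
+ */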
+static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
+{
+       int i, off = 0;
+
+       for (i = 0; i < priv->num_dist_fields; i++) {
+               if (priv->dist_fields[i].cls_prot == prot &&
+                   priv->dist_fields[i].cls_field == field)
+                       return off;
+               off += priv->dist_fields[i].size;
+       }
+
+       return -1;
+}
+
+static u8 cls_key_size(struct dpaa2_eth_priv *priv)
+{
+       u8 i, size = 0;
+
+       for (i = 0; i < priv->num_dist_fields; i++)
+               size += priv->dist_fields[i].size;
+
+       return size;
+}
+
+void check_cls_support(struct dpaa2_eth_priv *priv)
+{
+       u8 key_size = cls_key_size(priv);
+       struct device *dev = priv->net_dev->dev.parent;
+
+       if (dpaa2_eth_hash_enabled(priv)) {
+               if (priv->dpni_attrs.fs_key_size < key_size) {
+                       dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
+                                priv->dpni_attrs.fs_key_size,
+                                key_size);
+                       goto disable_fs;
+               }
+               if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
+                       dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
+                                DPKG_MAX_NUM_OF_EXTRACTS);
+                       goto disable_fs;
+               }
+       }
+
+       if (dpaa2_eth_fs_enabled(priv)) {
+               if (!dpaa2_eth_hash_enabled(priv)) {
+                       dev_info(dev, "Insufficient queues. Steering is disabled\n");
+                       goto disable_fs;
+               }
+
+               if (!dpaa2_eth_fs_mask_enabled(priv)) {
+                       dev_info(dev, "Key masks not supported. Steering is disabled\n");
+                       goto disable_fs;
+               }
+       }
+
+       return;
+
+disable_fs:
+       priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
+       priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
+}
+
+static int prep_l4_rule(struct dpaa2_eth_priv *priv,
+                       struct ethtool_tcpip4_spec *l4_value,
+                       struct ethtool_tcpip4_spec *l4_mask,
+                       void *key, void *mask, u8 l4_proto)
+{
+       int offset;
+
+       if (l4_mask->tos) {
+               netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (l4_mask->ip4src) {
+               offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
+               *(u32 *)(key + offset) = l4_value->ip4src;
+               *(u32 *)(mask + offset) = l4_mask->ip4src;
+       }
+
+       if (l4_mask->ip4dst) {
+               offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
+               *(u32 *)(key + offset) = l4_value->ip4dst;
+               *(u32 *)(mask + offset) = l4_mask->ip4dst;
+       }
+
+       if (l4_mask->psrc) {
+               offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+               *(u32 *)(key + offset) = l4_value->psrc;
+               *(u32 *)(mask + offset) = l4_mask->psrc;
+       }
+
+       if (l4_mask->pdst) {
+               offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+               *(u32 *)(key + offset) = l4_value->pdst;
+               *(u32 *)(mask + offset) = l4_mask->pdst;
+       }
+
+       /* Only apply the rule for the user-specified L4 protocol
+        * and if ethertype matches IPv4
+        */
+       offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
+       *(u16 *)(key + offset) = htons(ETH_P_IP);
+       *(u16 *)(mask + offset) = 0xFFFF;
+
+       offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
+       *(u8 *)(key + offset) = l4_proto;
+       *(u8 *)(mask + offset) = 0xFF;
+
+       /* TODO: check IP version */
+
+       return 0;
+}
+
+static int prep_eth_rule(struct dpaa2_eth_priv *priv,
+                        struct ethhdr *eth_value, struct ethhdr *eth_mask,
+                        void *key, void *mask)
+{
+       int offset;
+
+       if (eth_mask->h_proto) {
+               netdev_err(priv->net_dev, "Ethertype is not supported!\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (!is_zero_ether_addr(eth_mask->h_source)) {
+               offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
+               ether_addr_copy(key + offset, eth_value->h_source);
+               ether_addr_copy(mask + offset, eth_mask->h_source);
+       }
+
+       if (!is_zero_ether_addr(eth_mask->h_dest)) {
+               offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
+               ether_addr_copy(key + offset, eth_value->h_dest);
+               ether_addr_copy(mask + offset, eth_mask->h_dest);
+       }
+
+       return 0;
+}
+
+static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
+                            struct ethtool_usrip4_spec *uip_value,
+                            struct ethtool_usrip4_spec *uip_mask,
+                            void *key, void *mask)
+{
+       int offset;
+
+       if (uip_mask->tos)
+               return -EOPNOTSUPP;
+
+       if (uip_mask->ip4src) {
+               offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
+               *(u32 *)(key + offset) = uip_value->ip4src;
+               *(u32 *)(mask + offset) = uip_mask->ip4src;
+       }
+
+       if (uip_mask->ip4dst) {
+               offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
+               *(u32 *)(key + offset) = uip_value->ip4dst;
+               *(u32 *)(mask + offset) = uip_mask->ip4dst;
+       }
+
+       if (uip_mask->proto) {
+               offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
+               *(u32 *)(key + offset) = uip_value->proto;
+               *(u32 *)(mask + offset) = uip_mask->proto;
+       }
+       if (uip_mask->l4_4_bytes) {
+               offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+               *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
+               *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
+
+               offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+               *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
+               *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
+       }
+
+       /* Ethertype must be IP */
+       offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
+       *(u16 *)(key + offset) = htons(ETH_P_IP);
+       *(u16 *)(mask + offset) = 0xFFFF;
+
+       return 0;
+}
+
+static int prep_ext_rule(struct dpaa2_eth_priv *priv,
+                        struct ethtool_flow_ext *ext_value,
+                        struct ethtool_flow_ext *ext_mask,
+                        void *key, void *mask)
+{
+       int offset;
+
+       if (ext_mask->vlan_etype)
+               return -EOPNOTSUPP;
+
+       if (ext_mask->vlan_tci) {
+               offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+               *(u16 *)(key + offset) = ext_value->vlan_tci;
+               *(u16 *)(mask + offset) = ext_mask->vlan_tci;
+       }
+
+       return 0;
+}
+
+static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
+                            struct ethtool_flow_ext *ext_value,
+                            struct ethtool_flow_ext *ext_mask,
+                            void *key, void *mask)
+{
+       int offset;
+
+       if (!is_zero_ether_addr(ext_mask->h_dest)) {
+               offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
+               ether_addr_copy(key + offset, ext_value->h_dest);
+               ether_addr_copy(mask + offset, ext_mask->h_dest);
+       }
+
+       return 0;
+}
+
+static int prep_cls_rule(struct net_device *net_dev,
+                        struct ethtool_rx_flow_spec *fs,
+                        void *key)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       const u8 key_size = cls_key_size(priv);
+       void *msk = key + key_size;
+       int err;
+
+       memset(key, 0, key_size * 2);
+
+       switch (fs->flow_type & 0xff) {
+       case TCP_V4_FLOW:
+               err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
+                                  &fs->m_u.tcp_ip4_spec, key, msk,
+                                  IPPROTO_TCP);
+               break;
+       case UDP_V4_FLOW:
+               err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
+                                  &fs->m_u.udp_ip4_spec, key, msk,
+                                  IPPROTO_UDP);
+               break;
+       case SCTP_V4_FLOW:
+               err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
+                                  &fs->m_u.sctp_ip4_spec, key, msk,
+                                  IPPROTO_SCTP);
+               break;
+       case ETHER_FLOW:
+               err = prep_eth_rule(priv, &fs->h_u.ether_spec,
+                                   &fs->m_u.ether_spec, key, msk);
+               break;
+       case IP_USER_FLOW:
+               err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
+                                       &fs->m_u.usr_ip4_spec, key, msk);
+               break;
+       default:
+               /* TODO: AH, ESP */
+               return -EOPNOTSUPP;
+       }
+       if (err)
+               return err;
+
+       if (fs->flow_type & FLOW_EXT) {
+               err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
+               if (err)
+                       return err;
+       }
+
+       if (fs->flow_type & FLOW_MAC_EXT) {
+               err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int del_cls(struct net_device *net_dev, int location);
+
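+/* Build the key/mask pair for an ethtool flow spec and add or remove the
+ * resulting classification entry on every traffic class
+ */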
+static int do_cls(struct net_device *net_dev,
+                 struct ethtool_rx_flow_spec *fs,
+                 bool add)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct device *dev = net_dev->dev.parent;
+       const int rule_cnt = dpaa2_eth_fs_count(priv);
+       struct dpni_rule_cfg rule_cfg;
+       struct dpni_fs_action_cfg fs_act = { 0 };
+       void *dma_mem;
+       int err = 0, tc;
+
+       if (!dpaa2_eth_fs_enabled(priv)) {
+               netdev_err(net_dev, "dev does not support steering!\n");
+               /* dev doesn't support steering */
+               return -EOPNOTSUPP;
+       }
+
+       if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
+            fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
+            fs->location >= rule_cnt)
+               return -EINVAL;
+
+       /* When adding a new rule, check if its location is already taken
+        * and, if so, free the existing table entry before inserting
+        * the new one
+        */
+       if (add && (priv->cls_rule[fs->location].in_use == true))
+               del_cls(net_dev, fs->location);
+
+       memset(&rule_cfg, 0, sizeof(rule_cfg));
+       rule_cfg.key_size = cls_key_size(priv);
+
+       /* allocate twice the key size, for the actual key and for mask */
+       dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
+       if (!dma_mem)
+               return -ENOMEM;
+
+       err = prep_cls_rule(net_dev, fs, dma_mem);
+       if (err)
+               goto err_free_mem;
+
+       rule_cfg.key_iova = dma_map_single(dev, dma_mem,
+                                          rule_cfg.key_size * 2,
+                                          DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, rule_cfg.key_iova)) {
+               netdev_err(net_dev, "DMA mapping failed\n");
+               err = -ENOMEM;
+               goto err_free_mem;
+       }
+
+       rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
+
+       if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+               fs_act.options |= DPNI_FS_OPT_DISCARD;
+       else
+               fs_act.flow_id = fs->ring_cookie;
+
+       for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
+               if (add)
+                       err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+                                               tc, fs->location, &rule_cfg,
+                                               &fs_act);
+               else
+                       err = dpni_remove_fs_entry(priv->mc_io, 0,
+                                                  priv->mc_token, tc,
+                                                  &rule_cfg);
+
+               if (err)
+                       break;
+       }
+
+       dma_unmap_single(dev, rule_cfg.key_iova,
+                        rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+       if (err)
+               netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
+
+err_free_mem:
+       kfree(dma_mem);
+
+       return err;
+}
+
+static int add_cls(struct net_device *net_dev,
+                  struct ethtool_rx_flow_spec *fs)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       int err;
+
+       err = do_cls(net_dev, fs, true);
+       if (err)
+               return err;
+
+       priv->cls_rule[fs->location].in_use = true;
+       priv->cls_rule[fs->location].fs = *fs;
+
+       return 0;
+}
+
+static int del_cls(struct net_device *net_dev, int location)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       int err;
+
+       err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
+       if (err)
+               return err;
+
+       priv->cls_rule[location].in_use = false;
+
+       return 0;
+}
+
+static int set_hash(struct net_device *net_dev, u64 data)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       u32 key = 0;
+       int i;
+
+       if (data & RXH_DISCARD)
+               return -EOPNOTSUPP;
+
+       for (i = 0; i < priv->num_dist_fields; i++)
+               if (priv->dist_fields[i].rxnfc_field & data)
+                       key |= priv->dist_fields[i].id;
+
+       return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
+                              struct ethtool_rxnfc *rxnfc)
+{
+       int err = 0;
+
+       switch (rxnfc->cmd) {
+       case ETHTOOL_SRXCLSRLINS:
+               err = add_cls(net_dev, &rxnfc->fs);
+               break;
+       case ETHTOOL_SRXCLSRLDEL:
+               err = del_cls(net_dev, rxnfc->fs.location);
+               break;
+       case ETHTOOL_SRXFH:
+               err = set_hash(net_dev, rxnfc->data);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+       }
+
+       return err;
 }
 
 static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
                               struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       const int rule_cnt = dpaa2_eth_fs_count(priv);
+       int i, j;
 
        switch (rxnfc->cmd) {
        case ETHTOOL_GRXFH:
@@ -258,6 +829,33 @@ static int dpaa2_eth_get_rxnfc(struct ne
        case ETHTOOL_GRXRINGS:
                rxnfc->data = dpaa2_eth_queue_count(priv);
                break;
+
+       case ETHTOOL_GRXCLSRLCNT:
+               for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
+                       if (priv->cls_rule[i].in_use)
+                               rxnfc->rule_cnt++;
+               rxnfc->data = rule_cnt;
+               break;
+
+       case ETHTOOL_GRXCLSRULE:
+               if (rxnfc->fs.location >= rule_cnt ||
+                   !priv->cls_rule[rxnfc->fs.location].in_use)
+                       return -EINVAL;
+
+               rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
+               break;
+
+       case ETHTOOL_GRXCLSRLALL:
+               for (i = 0, j = 0; i < rule_cnt; i++) {
+                       if (!priv->cls_rule[i].in_use)
+                               continue;
+                       if (j == rxnfc->rule_cnt)
+                               return -EMSGSIZE;
+                       rule_locs[j++] = i;
+               }
+               rxnfc->rule_cnt = j;
+               rxnfc->data = rule_cnt;
+               break;
+
        default:
                return -EOPNOTSUPP;
        }
@@ -270,8 +868,11 @@ const struct ethtool_ops dpaa2_ethtool_o
        .get_link = ethtool_op_get_link,
        .get_link_ksettings = dpaa2_eth_get_link_ksettings,
        .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+       .get_pauseparam = dpaa2_eth_get_pauseparam,
+       .set_pauseparam = dpaa2_eth_set_pauseparam,
        .get_sset_count = dpaa2_eth_get_sset_count,
        .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
        .get_strings = dpaa2_eth_get_strings,
        .get_rxnfc = dpaa2_eth_get_rxnfc,
+       .set_rxnfc = dpaa2_eth_set_rxnfc,
 };
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -39,9 +39,11 @@
 #define DPNI_VER_MAJOR                         7
 #define DPNI_VER_MINOR                         0
 #define DPNI_CMD_BASE_VERSION                  1
+#define DPNI_CMD_2ND_VERSION                   2
 #define DPNI_CMD_ID_OFFSET                     4
 
 #define DPNI_CMD(id)   (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id)        (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
 
 #define DPNI_CMDID_OPEN                                        DPNI_CMD(0x801)
 #define DPNI_CMDID_CLOSE                               DPNI_CMD(0x800)
@@ -64,7 +66,7 @@
 #define DPNI_CMDID_GET_IRQ_STATUS                      DPNI_CMD(0x016)
 #define DPNI_CMDID_CLEAR_IRQ_STATUS                    DPNI_CMD(0x017)
 
-#define DPNI_CMDID_SET_POOLS                           DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS                           DPNI_CMD_V2(0x200)
 #define DPNI_CMDID_SET_ERRORS_BEHAVIOR                 DPNI_CMD(0x20B)
 
 #define DPNI_CMDID_GET_QDID                            DPNI_CMD(0x210)
@@ -73,7 +75,7 @@
 #define DPNI_CMDID_SET_MAX_FRAME_LENGTH                        DPNI_CMD(0x216)
 #define DPNI_CMDID_GET_MAX_FRAME_LENGTH                        DPNI_CMD(0x217)
 #define DPNI_CMDID_SET_LINK_CFG                                DPNI_CMD(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING                      DPNI_CMD(0x21B)
+#define DPNI_CMDID_SET_TX_SHAPING                      DPNI_CMD_V2(0x21B)
 
 #define DPNI_CMDID_SET_MCAST_PROMISC                   DPNI_CMD(0x220)
 #define DPNI_CMDID_GET_MCAST_PROMISC                   DPNI_CMD(0x221)
@@ -87,11 +89,16 @@
 
 #define DPNI_CMDID_SET_RX_TC_DIST                      DPNI_CMD(0x235)
 
+#define DPNI_CMDID_SET_QOS_TBL                         DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT                         DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT                      DPNI_CMD(0x242)
 #define DPNI_CMDID_ADD_FS_ENT                          DPNI_CMD(0x244)
 #define DPNI_CMDID_REMOVE_FS_ENT                       DPNI_CMD(0x245)
 #define DPNI_CMDID_CLR_FS_ENT                          DPNI_CMD(0x246)
 
-#define DPNI_CMDID_GET_STATISTICS                      DPNI_CMD(0x25D)
+#define DPNI_CMDID_SET_TX_PRIORITIES                   DPNI_CMD_V2(0x250)
+#define DPNI_CMDID_GET_STATISTICS                      DPNI_CMD_V2(0x25D)
+#define DPNI_CMDID_RESET_STATISTICS                    DPNI_CMD(0x25E)
 #define DPNI_CMDID_GET_QUEUE                           DPNI_CMD(0x25F)
 #define DPNI_CMDID_SET_QUEUE                           DPNI_CMD(0x260)
 #define DPNI_CMDID_GET_TAILDROP                                DPNI_CMD(0x261)
@@ -110,6 +117,9 @@
 #define DPNI_CMDID_GET_OFFLOAD                         DPNI_CMD(0x26B)
 #define DPNI_CMDID_SET_OFFLOAD                         DPNI_CMD(0x26C)
 
+#define DPNI_CMDID_SET_RX_FS_DIST                      DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST                    DPNI_CMD(0x274)
+
 /* Macros for accessing command fields smaller than 1byte */
 #define DPNI_MASK(field)       \
        GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -126,13 +136,14 @@ struct dpni_cmd_open {
 
 #define DPNI_BACKUP_POOL(val, order)   (((val) & 0x1) << (order))
 struct dpni_cmd_set_pools {
-       /* cmd word 0 */
        u8 num_dpbp;
        u8 backup_pool_mask;
        __le16 pad;
-       /* cmd word 0..4 */
-       __le32 dpbp_id[DPNI_MAX_DPBP];
-       /* cmd word 4..6 */
+       struct {
+               __le16 dpbp_id;
+               u8 priority_mask;
+               u8 pad;
+       } pool[DPNI_MAX_DPBP];
        __le16 buffer_size[DPNI_MAX_DPBP];
 };
 
@@ -303,6 +314,7 @@ struct dpni_rsp_get_tx_data_offset {
 
 struct dpni_cmd_get_statistics {
        u8 page_number;
+       u8 param;
 };
 
 struct dpni_rsp_get_statistics {
@@ -335,6 +347,22 @@ struct dpni_rsp_get_link_state {
        __le64 options;
 };
 
+#define DPNI_COUPLED_SHIFT     0
+#define DPNI_COUPLED_SIZE      1
+
+struct dpni_cmd_set_tx_shaping {
+       /* cmd word 0 */
+       __le16 tx_cr_max_burst_size;
+       __le16 tx_er_max_burst_size;
+       __le32 pad;
+       /* cmd word 1 */
+       __le32 tx_cr_rate_limit;
+       __le32 tx_er_rate_limit;
+       /* cmd word 2 */
+       /* from LSB: coupled:1 */
+       u8 coupled;
+};
+
 struct dpni_cmd_set_max_frame_length {
        __le16 max_frame_length;
 };
@@ -394,6 +422,24 @@ struct dpni_cmd_clear_mac_filters {
        u8 flags;
 };
 
+#define DPNI_SEPARATE_GRP_SHIFT 0
+#define DPNI_SEPARATE_GRP_SIZE  1
+#define DPNI_MODE_1_SHIFT              0
+#define DPNI_MODE_1_SIZE               4
+#define DPNI_MODE_2_SHIFT              4
+#define DPNI_MODE_2_SIZE               4
+
+struct dpni_cmd_set_tx_priorities {
+       __le16 flags;
+       u8 prio_group_A;
+       u8 prio_group_B;
+       __le32 pad0;
+       u8 modes[4];
+       __le32 pad1;
+       __le64 pad2;
+       __le16 delta_bandwidth[8];
+};
+
 #define DPNI_DIST_MODE_SHIFT           0
 #define DPNI_DIST_MODE_SIZE            4
 #define DPNI_MISS_ACTION_SHIFT         4
@@ -503,6 +549,63 @@ struct dpni_cmd_set_queue {
        __le64 user_context;
 };
 
+#define DPNI_DISCARD_ON_MISS_SHIFT     0
+#define DPNI_DISCARD_ON_MISS_SIZE      1
+
+struct dpni_cmd_set_qos_table {
+       __le32 pad;
+       u8 default_tc;
+       /* only the LSB */
+       u8 discard_on_miss;
+       __le16 pad1[21];
+       __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+       __le16 pad;
+       u8 tc_id;
+       u8 key_size;
+       __le16 index;
+       __le16 pad2;
+       __le64 key_iova;
+       __le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+       u8 pad1[3];
+       u8 key_size;
+       __le32 pad2;
+       __le64 key_iova;
+       __le64 mask_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+       /* cmd word 0 */
+       __le16 options;
+       u8 tc_id;
+       u8 key_size;
+       __le16 index;
+       __le16 flow_id;
+       /* cmd word 1 */
+       __le64 key_iova;
+       /* cmd word 2 */
+       __le64 mask_iova;
+       /* cmd word 3 */
+       __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+       /* cmd word 0 */
+       __le16 pad0;
+       u8 tc_id;
+       u8 key_size;
+       __le32 pad1;
+       /* cmd word 1 */
+       __le64 key_iova;
+       /* cmd word 2 */
+       __le64 mask_iova;
+};
+
 struct dpni_cmd_set_taildrop {
        /* cmd word 0 */
        u8 congestion_point;
@@ -538,4 +641,79 @@ struct dpni_rsp_get_taildrop {
        __le32 threshold;
 };
 
+struct dpni_rsp_get_api_version {
+       __le16 major;
+       __le16 minor;
+};
+
+#define DPNI_DEST_TYPE_SHIFT           0
+#define DPNI_DEST_TYPE_SIZE            4
+#define DPNI_CONG_UNITS_SHIFT          4
+#define DPNI_CONG_UNITS_SIZE           2
+
+struct dpni_cmd_set_congestion_notification {
+       /* cmd word 0 */
+       u8 qtype;
+       u8 tc;
+       u8 pad[6];
+       /* cmd word 1 */
+       __le32 dest_id;
+       __le16 notification_mode;
+       u8 dest_priority;
+       /* from LSB: dest_type: 4 units:2 */
+       u8 type_units;
+       /* cmd word 2 */
+       __le64 message_iova;
+       /* cmd word 3 */
+       __le64 message_ctx;
+       /* cmd word 4 */
+       __le32 threshold_entry;
+       __le32 threshold_exit;
+};
+
+struct dpni_cmd_get_congestion_notification {
+       /* cmd word 0 */
+       u8 qtype;
+       u8 tc;
+};
+
+struct dpni_rsp_get_congestion_notification {
+       /* cmd word 0 */
+       __le64 pad;
+       /* cmd word 1 */
+       __le32 dest_id;
+       __le16 notification_mode;
+       u8 dest_priority;
+       /* from LSB: dest_type: 4 units:2 */
+       u8 type_units;
+       /* cmd word 2 */
+       __le64 message_iova;
+       /* cmd word 3 */
+       __le64 message_ctx;
+       /* cmd word 4 */
+       __le32 threshold_entry;
+       __le32 threshold_exit;
+};
+
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT   0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE    1
+struct dpni_cmd_set_rx_fs_dist {
+       __le16 dist_size;
+       u8 enable;
+       u8 tc;
+       __le16 miss_flow_id;
+       __le16 pad;
+       __le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE  1
+struct dpni_cmd_set_rx_hash_dist {
+       __le16 dist_size;
+       u8 enable;
+       u8 tc;
+       __le32 pad;
+       __le64 key_cfg_iova;
+};
+
 #endif /* _FSL_DPNI_CMD_H */
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -122,7 +122,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
              int dpni_id,
              u16 *token)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_open *cmd_params;
 
        int err;
@@ -160,7 +160,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
               u32 cmd_flags,
               u16 token)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
@@ -188,7 +188,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
                   u16 token,
                   const struct dpni_pools_cfg *cfg)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_pools *cmd_params;
        int i;
 
@@ -199,7 +199,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
        cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
        cmd_params->num_dpbp = cfg->num_dpbp;
        for (i = 0; i < DPNI_MAX_DPBP; i++) {
-               cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+               cmd_params->pool[i].dpbp_id =
+                       cpu_to_le16(cfg->pools[i].dpbp_id);
+               cmd_params->pool[i].priority_mask =
+                       cfg->pools[i].priority_mask;
                cmd_params->buffer_size[i] =
                        cpu_to_le16(cfg->pools[i].buffer_size);
                cmd_params->backup_pool_mask |=
@@ -222,7 +225,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
                u32 cmd_flags,
                u16 token)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
@@ -245,7 +248,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
                 u32 cmd_flags,
                 u16 token)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
@@ -270,7 +273,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
                    u16 token,
                    int *en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_is_enabled *rsp_params;
        int err;
 
@@ -303,7 +306,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
               u32 cmd_flags,
               u16 token)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
@@ -335,7 +338,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
                        u8 irq_index,
                        u8 en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_irq_enable *cmd_params;
 
        /* prepare command */
@@ -366,7 +369,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
                        u8 irq_index,
                        u8 *en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_irq_enable *cmd_params;
        struct dpni_rsp_get_irq_enable *rsp_params;
 
@@ -413,7 +416,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
                      u8 irq_index,
                      u32 mask)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_irq_mask *cmd_params;
 
        /* prepare command */
@@ -447,7 +450,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
                      u8 irq_index,
                      u32 *mask)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_irq_mask *cmd_params;
        struct dpni_rsp_get_irq_mask *rsp_params;
        int err;
@@ -489,7 +492,7 @@ int dpni_get_irq_status(struct fsl_mc_io
                        u8 irq_index,
                        u32 *status)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_irq_status *cmd_params;
        struct dpni_rsp_get_irq_status *rsp_params;
        int err;
@@ -532,7 +535,7 @@ int dpni_clear_irq_status(struct fsl_mc_
                          u8 irq_index,
                          u32 status)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_clear_irq_status *cmd_params;
 
        /* prepare command */
@@ -561,7 +564,7 @@ int dpni_get_attributes(struct fsl_mc_io
                        u16 token,
                        struct dpni_attr *attr)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_attr *rsp_params;
 
        int err;
@@ -609,7 +612,7 @@ int dpni_set_errors_behavior(struct fsl_
                             u16 token,
                             struct dpni_error_cfg *cfg)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_errors_behavior *cmd_params;
 
        /* prepare command */
@@ -641,7 +644,7 @@ int dpni_get_buffer_layout(struct fsl_mc
                           enum dpni_queue_type qtype,
                           struct dpni_buffer_layout *layout)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_buffer_layout *cmd_params;
        struct dpni_rsp_get_buffer_layout *rsp_params;
        int err;
@@ -689,7 +692,7 @@ int dpni_set_buffer_layout(struct fsl_mc
                           enum dpni_queue_type qtype,
                           const struct dpni_buffer_layout *layout)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_buffer_layout *cmd_params;
 
        /* prepare command */
@@ -731,7 +734,7 @@ int dpni_set_offload(struct fsl_mc_io *m
                     enum dpni_offload type,
                     u32 config)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_offload *cmd_params;
 
        cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
@@ -750,7 +753,7 @@ int dpni_get_offload(struct fsl_mc_io *m
                     enum dpni_offload type,
                     u32 *config)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_offload *cmd_params;
        struct dpni_rsp_get_offload *rsp_params;
        int err;
@@ -792,7 +795,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
                  enum dpni_queue_type qtype,
                  u16 *qdid)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_qdid *cmd_params;
        struct dpni_rsp_get_qdid *rsp_params;
        int err;
@@ -830,7 +833,7 @@ int dpni_get_tx_data_offset(struct fsl_m
                            u16 token,
                            u16 *data_offset)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_tx_data_offset *rsp_params;
        int err;
 
@@ -865,7 +868,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
                      u16 token,
                      const struct dpni_link_cfg *cfg)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_link_cfg *cmd_params;
 
        /* prepare command */
@@ -894,7 +897,7 @@ int dpni_get_link_state(struct fsl_mc_io
                        u16 token,
                        struct dpni_link_state *state)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_link_state *rsp_params;
        int err;
 
@@ -918,6 +921,44 @@ int dpni_get_link_state(struct fsl_mc_io
 }
 
 /**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io:             Pointer to MC portal's I/O object
+ * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:             Token of DPNI object
+ * @tx_cr_shaper:      TX committed rate shaping configuration
+ * @tx_er_shaper:      TX excess rate shaping configuration
+ * @coupled:           Committed and excess rate shapers are coupled
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+                       const struct dpni_tx_shaping_cfg *tx_er_shaper,
+                       int coupled)
+{
+       struct fsl_mc_command cmd = { 0 };
+       struct dpni_cmd_set_tx_shaping *cmd_params;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
+       cmd_params->tx_cr_max_burst_size =
+                               cpu_to_le16(tx_cr_shaper->max_burst_size);
+       cmd_params->tx_er_max_burst_size =
+                               cpu_to_le16(tx_er_shaper->max_burst_size);
+       cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
+       cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
+       dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
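+
+/*
+ * Example (illustrative sketch; 'priv' stands for any caller context that
+ * holds the MC portal and DPNI token): capping egress at roughly 1 Gbps
+ * committed rate with a 64 KB burst and no excess rate shaping:
+ *
+ *        struct dpni_tx_shaping_cfg cr_shaper = { .rate_limit = 1000,
+ *                                                 .max_burst_size = 0xffff };
+ *        struct dpni_tx_shaping_cfg er_shaper = { 0 };
+ *
+ *        err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token,
+ *                                  &cr_shaper, &er_shaper, 0);
+ */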
+
+/**
  * dpni_set_max_frame_length() - Set the maximum received frame length.
  * @mc_io:     Pointer to MC portal's I/O object
  * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -933,7 +974,7 @@ int dpni_set_max_frame_length(struct fsl
                              u16 token,
                              u16 max_frame_length)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_max_frame_length *cmd_params;
 
        /* prepare command */
@@ -963,7 +1004,7 @@ int dpni_get_max_frame_length(struct fsl
                              u16 token,
                              u16 *max_frame_length)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_max_frame_length *rsp_params;
        int err;
 
@@ -998,7 +1039,7 @@ int dpni_set_multicast_promisc(struct fs
                               u16 token,
                               int en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_multicast_promisc *cmd_params;
 
        /* prepare command */
@@ -1026,7 +1067,7 @@ int dpni_get_multicast_promisc(struct fs
                               u16 token,
                               int *en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_multicast_promisc *rsp_params;
        int err;
 
@@ -1061,7 +1102,7 @@ int dpni_set_unicast_promisc(struct fsl_
                             u16 token,
                             int en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_unicast_promisc *cmd_params;
 
        /* prepare command */
@@ -1089,7 +1130,7 @@ int dpni_get_unicast_promisc(struct fsl_
                             u16 token,
                             int *en)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_unicast_promisc *rsp_params;
        int err;
 
@@ -1124,7 +1165,7 @@ int dpni_set_primary_mac_addr(struct fsl
                              u16 token,
                              const u8 mac_addr[6])
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_primary_mac_addr *cmd_params;
        int i;
 
@@ -1154,7 +1195,7 @@ int dpni_get_primary_mac_addr(struct fsl
                              u16 token,
                              u8 mac_addr[6])
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_primary_mac_addr *rsp_params;
        int i, err;
 
@@ -1193,7 +1234,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
                           u16 token,
                           u8 mac_addr[6])
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_rsp_get_port_mac_addr *rsp_params;
        int i, err;
 
@@ -1229,7 +1270,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
                      u16 token,
                      const u8 mac_addr[6])
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_add_mac_addr *cmd_params;
        int i;
 
@@ -1259,7 +1300,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
                         u16 token,
                         const u8 mac_addr[6])
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_remove_mac_addr *cmd_params;
        int i;
 
@@ -1293,7 +1334,7 @@ int dpni_clear_mac_filters(struct fsl_mc
                           int unicast,
                           int multicast)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_clear_mac_filters *cmd_params;
 
        /* prepare command */
@@ -1309,6 +1350,55 @@ int dpni_clear_mac_filters(struct fsl_mc
 }
 
 /**
+ * dpni_set_tx_priorities() - Set transmission TC priority configuration
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       Transmission selection configuration
+ *
+ * warning:    Allowed only when DPNI is disabled
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
+                          u32 cmd_flags,
+                          u16 token,
+                          const struct dpni_tx_priorities_cfg *cfg)
+{
+       struct dpni_cmd_set_tx_priorities *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+       int i;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
+       dpni_set_field(cmd_params->flags,
+                      SEPARATE_GRP,
+                      cfg->separate_groups);
+       cmd_params->prio_group_A = cfg->prio_group_A;
+       cmd_params->prio_group_B = cfg->prio_group_B;
+
+       for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
+               dpni_set_field(cmd_params->modes[i / 2],
+                              MODE_1,
+                              cfg->tc_sched[i].mode);
+               dpni_set_field(cmd_params->modes[i / 2],
+                              MODE_2,
+                              cfg->tc_sched[i + 1].mode);
+       }
+
+       for (i = 0; i < DPNI_MAX_TC; i++) {
+               cmd_params->delta_bandwidth[i] =
+                               cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
+       }
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
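+
+/*
+ * Example (illustrative sketch; must be issued while the DPNI is disabled;
+ * 'priv' stands for any caller context with the MC portal and DPNI token):
+ * weighting TC 0 and TC 1 against each other in group A and leaving the
+ * remaining TCs in strict priority (mode 0):
+ *
+ *        struct dpni_tx_priorities_cfg prio = { 0 };
+ *
+ *        prio.tc_sched[0].mode = DPNI_TX_SCHED_WEIGHTED_A;
+ *        prio.tc_sched[0].delta_bandwidth = 100;
+ *        prio.tc_sched[1].mode = DPNI_TX_SCHED_WEIGHTED_A;
+ *        prio.tc_sched[1].delta_bandwidth = 200;
+ *
+ *        err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token, &prio);
+ */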
+
+/**
  * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
  * @mc_io:     Pointer to MC portal's I/O object
  * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -1327,7 +1417,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
                        u8 tc_id,
                        const struct dpni_rx_tc_dist_cfg *cfg)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_rx_tc_dist *cmd_params;
 
        /* prepare command */
@@ -1346,6 +1436,293 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
        return mc_send_command(mc_io, &cmd);
 }
 
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpni_prepare_key_cfg() to
+ *         prepare the key_cfg_iova parameter
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+                      u32 cmd_flags,
+                      u16 token,
+                      const struct dpni_qos_tbl_cfg *cfg)
+{
+       struct dpni_cmd_set_qos_table *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+       cmd_params->default_tc = cfg->default_tc;
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+       dpni_set_field(cmd_params->discard_on_miss,
+                      DISCARD_ON_MISS,
+                      cfg->discard_on_miss);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
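+
+/*
+ * Illustrative flow (sketch only; DMA mapping and error handling omitted):
+ * build the extraction key with dpni_prepare_key_cfg(), map the resulting
+ * buffer to obtain 'key_cfg_iova', then point the QoS table at it:
+ *
+ *        struct dpni_qos_tbl_cfg qos_cfg = {
+ *                .key_cfg_iova = key_cfg_iova,
+ *                .discard_on_miss = 0,
+ *                .default_tc = 0,
+ *        };
+ *
+ *        err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+ *
+ * Individual mappings to traffic classes are then inserted with
+ * dpni_add_qos_entry().
+ */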
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       QoS rule to add
+ * @tc_id:     Traffic class selection (0-7)
+ * @index:     Location in the QoS table where to insert the entry.
+ *             Only relevant if MASKING is enabled for QoS classification on
+ *             this DPNI; it is ignored for exact match.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+                      u32 cmd_flags,
+                      u16 token,
+                      const struct dpni_rule_cfg *cfg,
+                      u8 tc_id,
+                      u16 index)
+{
+       struct dpni_cmd_add_qos_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->index = cpu_to_le16(index);
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       QoS rule to remove
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token,
+                         const struct dpni_rule_cfg *cfg)
+{
+       struct dpni_cmd_remove_qos_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ *                     (to select a flow ID)
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ * @index:     Location in the FS table where to insert the entry.
+ *             Only relevant if MASKING is enabled for QoS
+ *             classification on this DPNI; it is ignored for exact match.
+ * @cfg:       Flow steering rule to add
+ * @action:    Action to be taken as result of a classification hit
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+                     u32 cmd_flags,
+                     u16 token,
+                     u8 tc_id,
+                     u16 index,
+                     const struct dpni_rule_cfg *cfg,
+                     const struct dpni_fs_action_cfg *action)
+{
+       struct dpni_cmd_add_fs_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->index = cpu_to_le16(index);
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+       cmd_params->options = cpu_to_le16(action->options);
+       cmd_params->flow_id = cpu_to_le16(action->flow_id);
+       cmd_params->flc = cpu_to_le64(action->flc);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ *                         traffic class
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ * @cfg:       Flow steering rule to remove
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        u8 tc_id,
+                        const struct dpni_rule_cfg *cfg)
+{
+       struct dpni_cmd_remove_fs_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ *                                     notification configuration
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @qtype:     Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id:     Traffic class selection (0-7)
+ * @cfg:       Congestion notification configuration
+ *
+ * Return:     '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+                       struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       enum dpni_queue_type qtype,
+                       u8 tc_id,
+                       const struct dpni_congestion_notification_cfg *cfg)
+{
+       struct dpni_cmd_set_congestion_notification *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(
+                       DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+                       cmd_flags,
+                       token);
+       cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+       cmd_params->qtype = qtype;
+       cmd_params->tc = tc_id;
+       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+       cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+       cmd_params->dest_priority = cfg->dest_cfg.priority;
+       dpni_set_field(cmd_params->type_units, DEST_TYPE,
+                      cfg->dest_cfg.dest_type);
+       dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
+       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+       cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+       cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
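+
+/*
+ * Example (illustrative sketch, reusing the queue type and congestion unit
+ * enumerators already defined in dpni.h; 'priv' stands for any caller
+ * context with the MC portal and DPNI token): trigger flow control pause
+ * frames when more than 64 KB of Rx traffic is congested on TC 0, and
+ * release below 32 KB:
+ *
+ *        struct dpni_congestion_notification_cfg cn_cfg = {
+ *                .units = DPNI_CONGESTION_UNIT_BYTES,
+ *                .threshold_entry = 64 * 1024,
+ *                .threshold_exit = 32 * 1024,
+ *                .notification_mode = DPNI_CONG_OPT_FLOW_CONTROL,
+ *        };
+ *
+ *        err = dpni_set_congestion_notification(priv->mc_io, 0,
+ *                                               priv->mc_token,
+ *                                               DPNI_QUEUE_RX, 0, &cn_cfg);
+ */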
+
+/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ *     notification configuration
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @qtype:     Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id:     bits 7-4 contain ceetm channel index (valid only for TX);
+ *             bits 3-0 contain traffic class.
+ *             Use macro DPNI_BUILD_CH_TC() to build correct value for
+ *             tc_id parameter.
+ * @cfg:       congestion notification configuration
+ *
+ * Return:     '0' on Success; error code otherwise.
+ */
+int dpni_get_congestion_notification(
+                       struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       enum dpni_queue_type qtype,
+                       u8 tc_id,
+                       struct dpni_congestion_notification_cfg *cfg)
+{
+       struct dpni_rsp_get_congestion_notification *rsp_params;
+       struct dpni_cmd_get_congestion_notification *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(
+                               DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+                               cmd_flags,
+                               token);
+       cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
+       cmd_params->qtype = qtype;
+       cmd_params->tc = tc_id;
+
+       /* send command to mc*/
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
+       cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
+       cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+       cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+       cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+       cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+       cfg->dest_cfg.priority = rsp_params->dest_priority;
+       cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
+                                                DEST_TYPE);
+
+       return 0;
+}
+
 /**
  * dpni_set_queue() - Set queue parameters
  * @mc_io:     Pointer to MC portal's I/O object
@@ -1371,7 +1748,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
                   u8 options,
                   const struct dpni_queue *queue)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_queue *cmd_params;
 
        /* prepare command */
@@ -1419,7 +1796,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
                   struct dpni_queue *queue,
                   struct dpni_queue_id *qid)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_queue *cmd_params;
        struct dpni_rsp_get_queue *rsp_params;
        int err;
@@ -1463,6 +1840,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
  * @token:     Token of DPNI object
  * @page:      Selects the statistics page to retrieve, see
  *             DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
+ * @param:     Custom parameter for some pages used to select a certain
+ *             statistic source, for example the TC.
  * @stat:      Structure containing the statistics
  *
  * Return:     '0' on Success; Error code otherwise.
@@ -1471,9 +1850,10 @@ int dpni_get_statistics(struct fsl_mc_io
                        u32 cmd_flags,
                        u16 token,
                        u8 page,
+                       u8 param,
                        union dpni_statistics *stat)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_statistics *cmd_params;
        struct dpni_rsp_get_statistics *rsp_params;
        int i, err;
@@ -1484,6 +1864,7 @@ int dpni_get_statistics(struct fsl_mc_io
                                          token);
        cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
        cmd_params->page_number = page;
+       cmd_params->param = param;
 
        /* send command to mc */
        err = mc_send_command(mc_io, &cmd);
@@ -1499,6 +1880,29 @@ int dpni_get_statistics(struct fsl_mc_io
 }
 
 /**
+ * dpni_reset_statistics() - Clears DPNI statistics
+ * @mc_io:             Pointer to MC portal's I/O object
+ * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:             Token of DPNI object
+ *
+ * Return:  '0' on Success; Error code otherwise.
+ */
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
+                                         cmd_flags,
+                                         token);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
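+
+/*
+ * Example (illustrative sketch): reading the per-TC CEETM counters exposed
+ * in page 3 of union dpni_statistics, with 'param' selecting the traffic
+ * class, and clearing all counters afterwards:
+ *
+ *        union dpni_statistics stats;
+ *        u8 tc = 0;
+ *
+ *        err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+ *                                  3, tc, &stats);
+ *        (on success, stats.page_3.ceetm_dequeue_bytes etc. are valid)
+ *
+ *        err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
+ */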
+
+/**
  * dpni_set_taildrop() - Set taildrop per queue or TC
  * @mc_io:     Pointer to MC portal's I/O object
  * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -1506,7 +1910,10 @@ int dpni_get_statistics(struct fsl_mc_io
  * @cg_point:  Congestion point
  * @q_type:    Queue type on which the taildrop is configured.
  *             Only Rx queues are supported for now
- * @tc:                Traffic class to apply this taildrop to
+ * @tc:                bits 7-4 contain ceetm channel index (valid only for TX);
+ *             bits 3-0 contain traffic class.
+ *             Use macro DPNI_BUILD_CH_TC() to build correct value for
+ *             tc parameter.
  * @q_index:   Index of the queue if the DPNI supports multiple queues for
  *             traffic distribution. Ignored if CONGESTION_POINT is not 0.
  * @taildrop:  Taildrop structure
@@ -1522,7 +1929,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
                      u8 index,
                      struct dpni_taildrop *taildrop)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_set_taildrop *cmd_params;
 
        /* prepare command */
@@ -1550,7 +1957,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
  * @cg_point:  Congestion point
  * @q_type:    Queue type on which the taildrop is configured.
  *             Only Rx queues are supported for now
- * @tc:                Traffic class to apply this taildrop to
+ * @tc:                bits 7-4 contain ceetm channel index (valid only for TX);
+ *             bits 3-0 contain traffic class.
+ *             Use macro DPNI_BUILD_CH_TC() to build correct value for
+ *             tc parameter.
  * @q_index:   Index of the queue if the DPNI supports multiple queues for
  *             traffic distribution. Ignored if CONGESTION_POINT is not 0.
  * @taildrop:  Taildrop structure
@@ -1566,7 +1976,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
                      u8 index,
                      struct dpni_taildrop *taildrop)
 {
-       struct mc_command cmd = { 0 };
+       struct fsl_mc_command cmd = { 0 };
        struct dpni_cmd_get_taildrop *cmd_params;
        struct dpni_rsp_get_taildrop *rsp_params;
        int err;
@@ -1594,3 +2004,109 @@ int dpni_get_taildrop(struct fsl_mc_io *
 
        return 0;
 }
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 *major_ver,
+                        u16 *minor_ver)
+{
+       struct dpni_rsp_get_api_version *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+                                         cmd_flags, 0);
+
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+       *major_ver = le16_to_cpu(rsp_params->major);
+       *minor_ver = le16_to_cpu(rsp_params->minor);
+
+       return 0;
+}
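+
+/*
+ * Example (illustrative sketch): callers can check that the MC firmware is
+ * recent enough for the commands declared in dpni-cmd.h before issuing them:
+ *
+ *        u16 major, minor;
+ *
+ *        err = dpni_get_api_version(priv->mc_io, 0, &major, &minor);
+ *
+ * and compare the result against DPNI_VER_MAJOR / DPNI_VER_MINOR from
+ * dpni-cmd.h.
+ */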
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg: Distribution configuration
+ * If FS was already enabled by a previous call, the classification key is
+ * changed but all existing table rules are kept. If the existing rules do
+ * not match the new key, the results are unpredictable; it is the caller's
+ * responsibility to keep the key consistent with the rules.
+ * If cfg.enable is set to 1, the command creates a flow steering table and
+ * packets are classified according to it. Packets that miss all table
+ * rules are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command clears the flow steering table and
+ * packets are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_rx_dist_cfg *cfg)
+{
+       struct dpni_cmd_set_rx_fs_dist *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+       dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+       cmd_params->tc = cfg->tc;
+       cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg: Distribution configuration
+ * If cfg.enable is set to 1, packets are distributed using a hash function
+ * based on the key described at cfg.key_cfg_iova.
+ * If cfg.enable is set to 0, packets are sent to the queue configured by
+ * the dpni_set_rx_dist_default_queue() call.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token,
+                         const struct dpni_rx_dist_cfg *cfg)
+{
+       struct dpni_cmd_set_rx_hash_dist *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+       dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+       cmd_params->tc = cfg->tc;
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
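+
+/*
+ * Example (illustrative sketch; key preparation, DMA mapping and error
+ * handling omitted): spreading Rx traffic of TC 0 across 8 queues by hash:
+ *
+ *        struct dpni_rx_dist_cfg dist_cfg = {
+ *                .dist_size = 8,
+ *                .key_cfg_iova = key_cfg_iova,
+ *                .enable = 1,
+ *                .tc = 0,
+ *        };
+ *
+ *        err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+ *                                    &dist_cfg);
+ */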
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -52,6 +52,14 @@ struct fsl_mc_io;
  * Maximum number of buffer pools per DPNI
  */
 #define DPNI_MAX_DPBP                          8
+/**
+ * Maximum number of senders
+ */
+#define DPNI_MAX_SENDERS                       16
+/**
+ * Maximum distribution size
+ */
+#define DPNI_MAX_DIST_SIZE                     16
 
 /**
  * All traffic classes considered; see dpni_set_queue()
@@ -123,13 +131,15 @@ struct dpni_pools_cfg {
        /**
         * struct pools - Buffer pools parameters
         * @dpbp_id: DPBP object ID
+        * @priority_mask: priorities served by DPBP
         * @buffer_size: Buffer size
         * @backup_pool: Backup pool
         */
        struct {
-               int     dpbp_id;
+               u16     dpbp_id;
+               u8      priority_mask;
                u16     buffer_size;
-               int     backup_pool;
+               u8      backup_pool;
        } pools[DPNI_MAX_DPBP];
 };
 
@@ -476,6 +486,24 @@ union dpni_statistics {
                u64 egress_confirmed_frames;
        } page_2;
        /**
+        * struct page_3 - Page_3 statistics structure with values for the
+        *                 selected TC
+        * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
+        *                       dequeued
+        * @ceetm_dequeue_frames: Cumulative count of the number of frames
+        *                        dequeued
+        * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
+        *                      frames whose enqueue was rejected
+        * @ceetm_reject_frames: Cumulative count of all frame enqueues
+        *                       rejected
+        */
+       struct {
+               u64 ceetm_dequeue_bytes;
+               u64 ceetm_dequeue_frames;
+               u64 ceetm_reject_bytes;
+               u64 ceetm_reject_frames;
+       } page_3;
+       /**
         * struct raw - raw statistics structure
         */
        struct {
@@ -487,8 +515,13 @@ int dpni_get_statistics(struct fsl_mc_io
                        u32                     cmd_flags,
                        u16                     token,
                        u8                      page,
+                       u8                      param,
                        union dpni_statistics   *stat);
 
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token);
+
 /**
  * Enable auto-negotiation
  */
@@ -505,6 +538,10 @@ int dpni_get_statistics(struct fsl_mc_io
  * Enable a-symmetric pause frames
  */
 #define DPNI_LINK_OPT_ASYM_PAUSE       0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE                0x0000000000000010ULL
 
 /**
  * struct - Structure representing DPNI link configuration
@@ -538,6 +575,23 @@ int dpni_get_link_state(struct fsl_mc_io
                        u16                     token,
                        struct dpni_link_state  *state);
 
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
+ * @rate_limit: rate in Mbps
+ * @max_burst_size: burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+       u32     rate_limit;
+       u16     max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+                       const struct dpni_tx_shaping_cfg *tx_er_shaper,
+                       int coupled);
+
 int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
                              u32               cmd_flags,
                              u16               token,
@@ -639,6 +693,70 @@ int dpni_prepare_key_cfg(const struct dp
                         u8 *key_cfg_buf);
 
 /**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *             key extractions to be used as the QoS criteria by calling
+ *             dpni_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ *             '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+       u64 key_cfg_iova;
+       int discard_on_miss;
+       u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+                      u32 cmd_flags,
+                      u16 token,
+                      const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
+ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
+ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
+ */
+enum dpni_tx_schedule_mode {
+       DPNI_TX_SCHED_STRICT_PRIORITY = 0,
+       DPNI_TX_SCHED_WEIGHTED_A,
+       DPNI_TX_SCHED_WEIGHTED_B,
+};
+
+/**
+ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
+ * @mode:              Scheduling mode
+ * @delta_bandwidth:   Bandwidth represented in weights from 100 to 10000;
+ *     not applicable for 'strict-priority' mode;
+ */
+struct dpni_tx_schedule_cfg {
+       enum dpni_tx_schedule_mode mode;
+       u16 delta_bandwidth;
+};
+
+/**
+ * struct dpni_tx_priorities_cfg - Structure representing transmission
+ *                                     priorities for DPNI TCs
+ * @tc_sched:  An array of traffic-classes
+ * @prio_group_A: Priority of group A
+ * @prio_group_B: Priority of group B
+ * @separate_groups: Treat A and B groups as separate
+ */
+struct dpni_tx_priorities_cfg {
+       struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
+       u8 prio_group_A;
+       u8 prio_group_B;
+       u8 separate_groups;
+};
+
+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
+                          u32 cmd_flags,
+                          u16 token,
+                          const struct dpni_tx_priorities_cfg *cfg);
+
+/**
  * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
  * @dist_size: Set the distribution size;
  *     supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
@@ -784,6 +902,108 @@ enum dpni_congestion_point {
 };
 
 /**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id:   Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority:  Priority selection within the DPIO or DPCON channel; valid
+ *             values are 0-1 or 0-7, depending on the number of priorities
+ *             in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+       enum dpni_dest dest_type;
+       int dest_id;
+       u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER        0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT         0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
+ */
+#define DPNI_CONG_OPT_COHERENT_WRITE            0x00000004
+/**
+ * If 'dest_cfg.dest_type != DPNI_DEST_NONE', the CSCN message is sent to the
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER      0x00000008
+/**
+ * If 'dest_cfg.dest_type != DPNI_DEST_NONE', the CSCN message is sent to the
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT       0x00000010
+/**
+ * If 'dest_cfg.dest_type != DPNI_DEST_NONE', when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED  0x00000020
+/**
+ * This congestion will trigger flow control or priority flow control.
+ * This will have effect only if flow control is enabled with
+ * dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL     0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ *                                     configuration
+ * @units: Units type
+ * @threshold_entry: Above this threshold we enter a congestion state;
+ *             set it to '0' to disable it
+ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ *             must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
+ *             is contained in 'notification_mode'
+ * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+struct dpni_congestion_notification_cfg {
+       enum dpni_congestion_unit units;
+       u32 threshold_entry;
+       u32 threshold_exit;
+       u64 message_ctx;
+       u64 message_iova;
+       struct dpni_dest_cfg dest_cfg;
+       u16 notification_mode;
+};
+
+/**
+ * Compose the TC parameter for dpni_set_congestion_notification() and
+ * dpni_get_congestion_notification().
+ */
+#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
+       ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
+
+int dpni_set_congestion_notification(
+                       struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       enum dpni_queue_type qtype,
+                       u8 tc_id,
+                       const struct dpni_congestion_notification_cfg *cfg);
+
+int dpni_get_congestion_notification(
+                       struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       enum dpni_queue_type qtype,
+                       u8 tc_id,
+                       struct dpni_congestion_notification_cfg *cfg);
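+/*
+ * Illustrative usage sketch: write a CSCN message to memory when a Tx traffic
+ * class crosses a byte-count threshold. 'priv->mc_io', 'priv->mc_token' and
+ * 'cscn_iova' (a 16B-aligned DMA-able buffer) are placeholders;
+ * DPNI_CONGESTION_UNIT_BYTES and DPNI_QUEUE_TX are assumed to be the
+ * byte-unit and Tx-queue values declared earlier in this header.
+ *
+ *	struct dpni_congestion_notification_cfg cn_cfg = {
+ *		.units = DPNI_CONGESTION_UNIT_BYTES,
+ *		.threshold_entry = 64 * 1024,
+ *		.threshold_exit = 32 * 1024,
+ *		.message_iova = cscn_iova,
+ *		.message_ctx = 0,
+ *		.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ *				     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
+ *	};
+ *	u8 tc = DPNI_BUILD_CH_TC(0, 3);
+ *
+ *	err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
+ *					       DPNI_QUEUE_TX, tc, &cn_cfg);
+ */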
+
+/**
  * struct dpni_taildrop - Structure representing the taildrop
  * @enable:    Indicates whether the taildrop is active or not.
  * @units:     Indicates the unit of THRESHOLD. Queue taildrop only supports
@@ -829,4 +1049,124 @@ struct dpni_rule_cfg {
        u8      key_size;
 };
 
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 *major_ver,
+                        u16 *minor_ver);
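+/*
+ * Illustrative usage sketch: query the DPNI API version before relying on
+ * newer MC commands. 'priv->mc_io' is a placeholder for the MC portal.
+ *
+ *	u16 major, minor;
+ *
+ *	err = dpni_get_api_version(priv->mc_io, 0, &major, &minor);
+ */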
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+                      u32 cmd_flags,
+                      u16 token,
+                      const struct dpni_rule_cfg *cfg,
+                      u8 tc_id,
+                      u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token,
+                         const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token);
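+/*
+ * Illustrative usage sketch: steer frames matching a prepared key/mask pair
+ * to traffic class 1 via entry 0 of the QoS table. 'priv->mc_io',
+ * 'priv->mc_token', 'key_iova', 'mask_iova' and 'key_size' are placeholders
+ * for a probed DPNI and a rule laid out in DMA-able memory; the key_iova and
+ * mask_iova member names follow the usual dpni_rule_cfg layout and are
+ * assumptions here.
+ *
+ *	struct dpni_rule_cfg rule = {
+ *		.key_iova = key_iova,
+ *		.mask_iova = mask_iova,
+ *		.key_size = key_size,
+ *	};
+ *
+ *	err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, &rule, 1, 0);
+ */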
+
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD            0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC            0x2
+
+/**
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits of the FLC value are
+ * interpreted as follows:
+ *     - bits 0-1: indicates the number of 64 byte units of context that are
+ *     stashed. FLC value is interpreted as a memory address in this case,
+ *     excluding the 6 LS bits.
+ *     - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *     to be stashed. Annotation is placed at FD[ADDR].
+ *     - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *     stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL  0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc:       FLC value for traffic matching this rule. Please check the
+ *             Frame Descriptor section in the hardware documentation for
+ *             more information.
+ * @flow_id:   Identifies the Rx queue used for matching traffic. Supported
+ *             values are in range 0 to num_queue-1.
+ * @options:   Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+       u64 flc;
+       u16 flow_id;
+       u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+                     u32 cmd_flags,
+                     u16 token,
+                     u8 tc_id,
+                     u16 index,
+                     const struct dpni_rule_cfg *cfg,
+                     const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        u8 tc_id,
+                        const struct dpni_rule_cfg *cfg);
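+/*
+ * Illustrative usage sketch: add a flow steering rule on traffic class 0 that
+ * directs matching frames to Rx queue 2. 'rule' is a struct dpni_rule_cfg
+ * prepared as in the QoS example above; 'priv->mc_io' and 'priv->mc_token'
+ * are placeholders for a probed DPNI.
+ *
+ *	struct dpni_fs_action_cfg action = {
+ *		.flow_id = 2,
+ *		.options = 0,
+ *	};
+ *
+ *	err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, 0,
+ *				&rule, &action);
+ */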
+
+/**
+ * When used as queue_idx in dpni_set_rx_dist_default_queue(), this value
+ * signals the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP              ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
+ *             12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ *             512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *             the extractions to be used for the distribution key by calling
+ *             dpkg_prepare_key_cfg(); relevant only when enable != 0,
+ *             otherwise it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ *             hash is disabled it will be put into this queue id; use
+ *             DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ *             used only when flow steering distribution is enabled and hash
+ *             distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+       u16 dist_size;
+       u64 key_cfg_iova;
+       u8 enable;
+       u8 tc;
+       u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token,
+                         const struct dpni_rx_dist_cfg *cfg);
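+/*
+ * Illustrative usage sketch: enable flow steering distribution over 8 queues
+ * on traffic class 0 and drop any frame that misses every rule. 'priv->mc_io',
+ * 'priv->mc_token' and 'key_iova' are placeholders as in the examples above.
+ *
+ *	struct dpni_rx_dist_cfg dist_cfg = {
+ *		.dist_size = 8,
+ *		.key_cfg_iova = key_iova,
+ *		.enable = 1,
+ *		.tc = 0,
+ *		.fs_miss_flow_id = DPNI_FS_MISS_DROP,
+ *	};
+ *
+ *	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+ */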
+
 #endif /* __FSL_DPNI_H */