From 6ec4d0cf0b0e5e41abc91012db4ebff7d415a92b Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Tue, 30 Oct 2018 18:26:13 +0800
Subject: [PATCH 08/40] dpaa2-ethernet: support layerscape

This is an integrated patch of dpaa2-ethernet for
layerscape

Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
 drivers/staging/fsl-dpaa2/Kconfig             |    7 +
 drivers/staging/fsl-dpaa2/ethernet/Makefile   |    2 +
 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c      | 1240 +++++++++
 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h      |  183 ++
 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c    |  357 +++
 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h    |   60 +
 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c    | 2335 +++++++++++++----
 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h    |  388 ++-
 .../fsl-dpaa2/ethernet/dpaa2-ethtool.c        |  625 ++++-
 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h |  192 +-
 drivers/staging/fsl-dpaa2/ethernet/dpni.c     |  604 ++++-
 drivers/staging/fsl-dpaa2/ethernet/dpni.h     |  344 ++-
 12 files changed, 5723 insertions(+), 614 deletions(-)
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h

--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
 	  Ethernet driver for Freescale DPAA2 SoCs, using the
 	  Freescale MC bus driver
 
+config FSL_DPAA2_ETH_CEETM
+	depends on NET_SCHED
+	bool "DPAA2 Ethernet CEETM QoS"
+	default n
+	---help---
+	  Enable QoS offloading support through the CEETM hardware block.
+
 if FSL_DPAA2_ETH
 config FSL_DPAA2_ETH_USE_ERR_QUEUE
 	bool "Enable Rx error queue"
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
@@ -5,6 +5,8 @@
 obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
 
 fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
 
 # Needed by the tracing framework
 CFLAGS_dpaa2-eth.o := -I$(src)
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
@@ -0,0 +1,1240 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "dpaa2-eth-ceetm.h"
+#include "dpaa2-eth.h"
+
+#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
+/* Conversion formula from userspace passed Bps to expected Mbit */
+#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
+
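The shift above is a power-of-two approximation of the exact Bps-to-Mbit/s conversion, `rate * 8 / 10^6 = rate / 125000`: dividing by `1 << 17 = 131072` instead reads roughly 4.7% low. A quick userspace sanity check of the approximation (editor's sketch, not part of the patch):

#include <stdio.h>

/* Illustration only: compare the exact conversion with the >> 17 shift
 * used by dpaa2_eth_bps_to_mbit() for a 1 Gbit/s rate.
 */
int main(void)
{
	unsigned long rate = 125000000UL;	/* 1 Gbit/s in bytes/s */

	printf("exact: %lu Mbit/s\n", rate / 125000);	/* prints 1000 */
	printf("shift: %lu Mbit/s\n", rate >> 17);	/* prints 953 */
	return 0;
}
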
+static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
+	[DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
+	[DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
+};
+
+struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
+
+static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
+					   struct dpni_tx_shaping_cfg *scfg,
+					   struct dpni_tx_shaping_cfg *ecfg,
+					   int coupled, int ch_id)
+{
+	int err = 0;
+
+	netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
+		   ch_id, scfg->rate_limit);
+	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
+				  ecfg, coupled);
+	if (err)
+		netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
+
+	return err;
+}
+
+static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
+					     int ch_id)
+{
+	struct dpni_tx_shaping_cfg cfg = { 0 };
+
+	return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
+}
+
+static inline int
+dpaa2_eth_update_shaping_cfg(struct net_device *dev,
+			     struct dpaa2_ceetm_shaping_cfg cfg,
+			     struct dpni_tx_shaping_cfg *scfg,
+			     struct dpni_tx_shaping_cfg *ecfg)
+{
+	scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
+	ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
+
+	if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
+		netdev_err(dev, "Committed burst size must be under %d\n",
+			   DPAA2_ETH_MAX_BURST_SIZE);
+		return -EINVAL;
+	}
+
+	scfg->max_burst_size = cfg.cbs;
+
+	if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
+		netdev_err(dev, "Excess burst size must be under %d\n",
+			   DPAA2_ETH_MAX_BURST_SIZE);
+		return -EINVAL;
+	}
+
+	ecfg->max_burst_size = cfg.ebs;
+
+	if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
+		netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+enum update_tx_prio {
+	DPAA2_ETH_ADD_CQ,
+	DPAA2_ETH_DEL_CQ,
+};
+
+/* Normalize weights based on max passed value */
+static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
+{
+	struct dpni_tx_schedule_cfg *sched_cfg;
+	struct dpaa2_ceetm_class *cl;
+	u32 qpri;
+	u16 weight_max = 0, increment;
+	int i;
+
+	/* Check the boundaries of the provided values */
+	for (i = 0; i < priv->clhash.hashsize; i++)
+		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
+			weight_max = (weight_max == 0 ? cl->prio.weight :
+				     (weight_max < cl->prio.weight ?
+				      cl->prio.weight : weight_max));
+
+	/* If there are no elements, there's nothing to do */
+	if (weight_max == 0)
+		return 0;
+
+	increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
+		    weight_max;
+
+	for (i = 0; i < priv->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
+			if (cl->prio.mode == STRICT_PRIORITY)
+				continue;
+
+			qpri = cl->prio.qpri;
+			sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
+
+			sched_cfg->delta_bandwidth =
+				DPAA2_CEETM_MIN_WEIGHT +
+				(cl->prio.weight * increment);
+
+			pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
+				 __func__, qpri, sched_cfg->delta_bandwidth);
+		}
+	}
+
+	return 0;
+}
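As a concrete check of the normalization arithmetic above (editor's sketch; the weight constants mirror the patch, the class weights are illustrative):

#include <stdio.h>

#define DPAA2_CEETM_MIN_WEIGHT 100
#define DPAA2_CEETM_MAX_WEIGHT 24800

/* Three WBFS classes with user weights 1, 2 and 4: weight_max = 4, so
 * increment = (24800 - 100) / 4 = 6175 and the delta_bandwidth values
 * programmed into the DPNI become 6275, 12450 and 24800.
 */
int main(void)
{
	unsigned short weights[] = { 1, 2, 4 }, weight_max = 4;
	unsigned short increment =
		(DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) / weight_max;

	for (int i = 0; i < 3; i++)
		printf("weight %u -> delta_bandwidth %u\n", weights[i],
		       DPAA2_CEETM_MIN_WEIGHT + weights[i] * increment);
	return 0;
}
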
+
+static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
+					   struct dpaa2_ceetm_class *cl,
+					   enum update_tx_prio type)
+{
+	struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
+	struct dpni_congestion_notification_cfg notif_cfg = {0};
+	struct dpni_tx_schedule_cfg *sched_cfg;
+	struct dpni_taildrop td = {0};
+	u8 ch_id = 0, tc_id = 0;
+	u32 qpri = 0;
+	int err = 0;
+
+	qpri = cl->prio.qpri;
+	tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
+
+	switch (type) {
+	case DPAA2_ETH_ADD_CQ:
+		/* Disable congestion notifications */
+		notif_cfg.threshold_entry = 0;
+		notif_cfg.threshold_exit = 0;
+		err = dpni_set_congestion_notification(priv->mc_io, 0,
+						       priv->mc_token,
+						       DPNI_QUEUE_TX, tc_id,
+						       &notif_cfg);
+		if (err) {
+			netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
+				   err);
+			return err;
+		}
+		/* Enable taildrop */
+		td.enable = 1;
+		td.units = DPNI_CONGESTION_UNIT_FRAMES;
+		td.threshold = DPAA2_CEETM_TD_THRESHOLD;
+		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+					DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
+					0, &td);
+		if (err) {
+			netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
+				   err);
+			return err;
+		}
+		break;
+	case DPAA2_ETH_DEL_CQ:
+		/* Disable taildrop */
+		td.enable = 0;
+		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+					DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
+					0, &td);
+		if (err) {
+			netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
+				   err);
+			return err;
+		}
+		/* Enable congestion notifications */
+		notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
+		notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
+		notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
+		notif_cfg.message_ctx = (u64)priv;
+		notif_cfg.message_iova = priv->cscn_dma;
+		notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+					      DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+					      DPNI_CONG_OPT_COHERENT_WRITE;
+		err = dpni_set_congestion_notification(priv->mc_io, 0,
+						       priv->mc_token,
+						       DPNI_QUEUE_TX, tc_id,
+						       &notif_cfg);
+		if (err) {
+			netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
+				   err);
+			return err;
+		}
+		break;
+	}
+
+	/* We can zero out the structure in the tx_prio_conf array */
+	if (type == DPAA2_ETH_DEL_CQ) {
+		sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
+		memset(sched_cfg, 0, sizeof(*sched_cfg));
+	}
+
+	/* Normalize priorities */
+	err = dpaa2_eth_normalize_tx_prio(sch);
+
+	/* Debug print goes here */
+	print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     &sch->prio.tx_prio_cfg,
+			     sizeof(sch->prio.tx_prio_cfg), 0);
+
+	/* Call dpni_set_tx_priorities for the entire prio qdisc */
+	err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
+				     &sch->prio.tx_prio_cfg);
+	if (err)
+		netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
+			   err);
+
+	return err;
+}
+
+static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
+{
+	priv->ceetm_en = true;
+}
+
+static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
+{
+	priv->ceetm_en = false;
+}
+
+/* Find class in qdisc hash table using given handle */
+static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
+							 struct Qdisc *sch)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct Qdisc_class_common *clc;
+
+	pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
+		 __func__, handle, sch->handle);
+
+	clc = qdisc_class_find(&priv->clhash, handle);
+	return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
+}
+
+/* Insert a class in the qdisc's class hash */
+static void dpaa2_ceetm_link_class(struct Qdisc *sch,
+				   struct Qdisc_class_hash *clhash,
+				   struct Qdisc_class_common *common)
+{
+	sch_tree_lock(sch);
+	qdisc_class_hash_insert(clhash, common);
+	sch_tree_unlock(sch);
+	qdisc_class_hash_grow(sch, clhash);
+}
+
+/* Destroy a ceetm class */
+static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
+				    struct dpaa2_ceetm_class *cl)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+	if (!cl)
+		return;
+
+	pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
+		 __func__, cl->common.classid, sch->handle);
+
+	/* Recurse into child first */
+	if (cl->child) {
+		qdisc_destroy(cl->child);
+		cl->child = NULL;
+	}
+
+	switch (cl->type) {
+	case CEETM_ROOT:
+		if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
+			netdev_err(dev, "Error resetting channel shaping\n");
+
+		break;
+
+	case CEETM_PRIO:
+		if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
+			netdev_err(dev, "Error resetting tx_priorities\n");
+
+		if (cl->prio.cstats)
+			free_percpu(cl->prio.cstats);
+
+		break;
+	}
+
+	tcf_block_put(cl->block);
+	kfree(cl);
+}
+
+/* Destroy a ceetm qdisc */
+static void dpaa2_ceetm_destroy(struct Qdisc *sch)
+{
+	unsigned int i;
+	struct hlist_node *next;
+	struct dpaa2_ceetm_class *cl;
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+
+	pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
+		 __func__, sch->handle);
+
+	/* All filters need to be removed before destroying the classes */
+	tcf_block_put(priv->block);
+
+	for (i = 0; i < priv->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
+			tcf_block_put(cl->block);
+	}
+
+	for (i = 0; i < priv->clhash.hashsize; i++) {
+		hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
+					  common.hnode)
+			dpaa2_ceetm_cls_destroy(sch, cl);
+	}
+
+	qdisc_class_hash_destroy(&priv->clhash);
+
+	switch (priv->type) {
+	case CEETM_ROOT:
+		dpaa2_eth_ceetm_disable(priv_eth);
+
+		if (priv->root.qstats)
+			free_percpu(priv->root.qstats);
+
+		if (!priv->root.qdiscs)
+			break;
+
+		/* Destroy the pfifo qdiscs in case they haven't been attached
+		 * to the netdev queues yet.
+		 */
+		for (i = 0; i < dev->num_tx_queues; i++)
+			if (priv->root.qdiscs[i])
+				qdisc_destroy(priv->root.qdiscs[i]);
+
+		kfree(priv->root.qdiscs);
+		break;
+
+	case CEETM_PRIO:
+		if (priv->prio.parent)
+			priv->prio.parent->child = NULL;
+		break;
+	}
+}
+
+static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct Qdisc *qdisc;
+	unsigned int ntx, i;
+	struct nlattr *nest;
+	struct dpaa2_ceetm_tc_qopt qopt;
+	struct dpaa2_ceetm_qdisc_stats *qstats;
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	sch_tree_lock(sch);
+	memset(&qopt, 0, sizeof(qopt));
+	qopt.type = priv->type;
+	qopt.shaped = priv->shaped;
+
+	switch (priv->type) {
+	case CEETM_ROOT:
+		/* Gather statistics from the underlying pfifo qdiscs */
+		sch->q.qlen = 0;
+		memset(&sch->bstats, 0, sizeof(sch->bstats));
+		memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+		for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+			qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+			sch->q.qlen += qdisc->q.qlen;
+			sch->bstats.bytes += qdisc->bstats.bytes;
+			sch->bstats.packets += qdisc->bstats.packets;
+			sch->qstats.qlen += qdisc->qstats.qlen;
+			sch->qstats.backlog += qdisc->qstats.backlog;
+			sch->qstats.drops += qdisc->qstats.drops;
+			sch->qstats.requeues += qdisc->qstats.requeues;
+			sch->qstats.overlimits += qdisc->qstats.overlimits;
+		}
+
+		for_each_online_cpu(i) {
+			qstats = per_cpu_ptr(priv->root.qstats, i);
+			sch->qstats.drops += qstats->drops;
+		}
+
+		break;
+
+	case CEETM_PRIO:
+		qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
+		qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
+		qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
+		break;
+
+	default:
+		pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+		sch_tree_unlock(sch);
+		return -EINVAL;
+	}
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (!nest)
+		goto nla_put_failure;
+	if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
+		goto nla_put_failure;
+	nla_nest_end(skb, nest);
+
+	sch_tree_unlock(sch);
+	return skb->len;
+
+nla_put_failure:
+	sch_tree_unlock(sch);
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
+				   struct dpaa2_ceetm_qdisc *priv,
+				   struct dpaa2_ceetm_tc_qopt *qopt)
+{
+	/* TODO: Once LX2 support is added */
+	/* priv->shaped = parent_cl->shaped; */
+	priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
+	priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
+	priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
+
+	return 0;
+}
+
+/* Edit a ceetm qdisc */
+static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
+	struct dpaa2_ceetm_tc_qopt *qopt;
+	int err;
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
+			       dpaa2_ceetm_policy, NULL);
+	if (err < 0) {
+		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+		       "nla_parse_nested");
+		return err;
+	}
+
+	if (!tb[DPAA2_CEETM_TCA_QOPS]) {
+		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+		       "tb");
+		return -EINVAL;
+	}
+
+	if (TC_H_MIN(sch->handle)) {
+		pr_err("CEETM: a qdisc should not have a minor\n");
+		return -EINVAL;
+	}
+
+	qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
+	if (priv->type != qopt->type) {
+		pr_err("CEETM: qdisc %X is not of the provided type\n",
+		       sch->handle);
+		return -EINVAL;
+	}
+
+	switch (priv->type) {
+	case CEETM_PRIO:
+		err = dpaa2_ceetm_change_prio(sch, priv, qopt);
+		break;
+	default:
+		pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+/* Configure a root ceetm qdisc */
+static int dpaa2_ceetm_init_root(struct Qdisc *sch,
+				 struct dpaa2_ceetm_qdisc *priv,
+				 struct dpaa2_ceetm_tc_qopt *qopt)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+	struct netdev_queue *dev_queue;
+	unsigned int i, parent_id;
+	struct Qdisc *qdisc;
+	int err;
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	/* Validate inputs */
+	if (sch->parent != TC_H_ROOT) {
+		pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
+		tcf_block_put(priv->block);
+		qdisc_class_hash_destroy(&priv->clhash);
+		return -EINVAL;
+	}
+
+	/* Pre-allocate underlying pfifo qdiscs.
+	 *
+	 * We want to offload shaping and scheduling decisions to the hardware.
+	 * The pfifo qdiscs will be attached to the netdev queues and will
+	 * guide the traffic from the IP stack down to the driver with minimum
+	 * interference.
+	 *
+	 * The CEETM qdiscs and classes will be crossed when the traffic
+	 * reaches the driver.
+	 */
+	priv->root.qdiscs = kcalloc(dev->num_tx_queues,
+				    sizeof(priv->root.qdiscs[0]),
+				    GFP_KERNEL);
+	if (!priv->root.qdiscs) {
+		err = -ENOMEM;
+		goto err_init_root;
+	}
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		dev_queue = netdev_get_tx_queue(dev, i);
+		parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
+				      TC_H_MIN(i + PFIFO_MIN_OFFSET));
+
+		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+					  parent_id);
+		if (!qdisc) {
+			err = -ENOMEM;
+			goto err_init_root;
+		}
+
+		priv->root.qdiscs[i] = qdisc;
+		qdisc->flags |= TCQ_F_ONETXQUEUE;
+	}
+
+	sch->flags |= TCQ_F_MQROOT;
+
+	priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
+	if (!priv->root.qstats) {
+		pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+		       __func__);
+		err = -ENOMEM;
+		goto err_init_root;
+	}
+
+	dpaa2_eth_ceetm_enable(priv_eth);
+	return 0;
+
+err_init_root:
+	dpaa2_ceetm_destroy(sch);
+	return err;
+}
+
+/* Configure a prio ceetm qdisc */
+static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
+				 struct dpaa2_ceetm_qdisc *priv,
+				 struct dpaa2_ceetm_tc_qopt *qopt)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_ceetm_class *parent_cl;
+	struct Qdisc *parent_qdisc;
+	int err;
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	if (sch->parent == TC_H_ROOT) {
+		pr_err("CEETM: a prio ceetm qdisc can not be root\n");
+		err = -EINVAL;
+		goto err_init_prio;
+	}
+
+	parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+	if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+		pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
+		err = -EINVAL;
+		goto err_init_prio;
+	}
+
+	/* Obtain the parent root ceetm_class */
+	parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
+
+	if (!parent_cl || parent_cl->type != CEETM_ROOT) {
+		pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
+		err = -EINVAL;
+		goto err_init_prio;
+	}
+
+	priv->prio.parent = parent_cl;
+	parent_cl->child = sch;
+
+	err = dpaa2_ceetm_change_prio(sch, priv, qopt);
+
+	return 0;
+
+err_init_prio:
+	dpaa2_ceetm_destroy(sch);
+	return err;
+}
+
+/* Configure a generic ceetm qdisc */
+static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
+	struct dpaa2_ceetm_tc_qopt *qopt;
+	int err;
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	if (!netif_is_multiqueue(dev))
+		return -EOPNOTSUPP;
+
+	err = tcf_block_get(&priv->block, &priv->filter_list);
+	if (err) {
+		pr_err("CEETM: unable to get tcf_block\n");
+		return err;
+	}
+
+	if (!opt) {
+		pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
+			       dpaa2_ceetm_policy, NULL);
+	if (err < 0) {
+		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+		       "nla_parse_nested");
+		return err;
+	}
+
+	if (!tb[DPAA2_CEETM_TCA_QOPS]) {
+		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+		       "tb");
+		return -EINVAL;
+	}
+
+	if (TC_H_MIN(sch->handle)) {
+		pr_err("CEETM: a qdisc should not have a minor\n");
+		return -EINVAL;
+	}
+
+	qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
+	/* Initialize the class hash list. Each qdisc has its own class hash */
+	err = qdisc_class_hash_init(&priv->clhash);
+	if (err < 0) {
+		pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
+		       __func__);
+		return err;
+	}
+
+	priv->type = qopt->type;
+	priv->shaped = qopt->shaped;
+
+	switch (priv->type) {
+	case CEETM_ROOT:
+		err = dpaa2_ceetm_init_root(sch, priv, qopt);
+		break;
+	case CEETM_PRIO:
+		err = dpaa2_ceetm_init_prio(sch, priv, qopt);
+		break;
+	default:
+		pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
+		dpaa2_ceetm_destroy(sch);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+/* Attach the underlying pfifo qdiscs */
+static void dpaa2_ceetm_attach(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct Qdisc *qdisc, *old_qdisc;
+	unsigned int i;
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		qdisc = priv->root.qdiscs[i];
+		old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+		if (old_qdisc)
+			qdisc_destroy(old_qdisc);
+	}
+
+	/* Remove the references to the pfifo qdiscs since the kernel will
+	 * destroy them when needed. No cleanup from our part is required from
+	 * this point on.
+	 */
+	kfree(priv->root.qdiscs);
+	priv->root.qdiscs = NULL;
+}
+
+static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
+{
+	struct dpaa2_ceetm_class *cl;
+
+	pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
+		 __func__, classid, sch->handle);
+	cl = dpaa2_ceetm_find(classid, sch);
+
+	return (unsigned long)cl;
+}
+
+static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
+				       struct dpaa2_ceetm_tc_copt *copt,
+				       struct net_device *dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
+	int err = 0;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
+		 cl->common.classid);
+
+	if (!cl->shaped)
+		return 0;
+
+	if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
+					 &scfg, &ecfg))
+		return -EINVAL;
+
+	err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
+				       copt->shaping_cfg.coupled,
+				       cl->root.ch_id);
+	if (err)
+		return err;
+
+	memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
+	       sizeof(struct dpaa2_ceetm_shaping_cfg));
+
+	return err;
+}
+
+static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
+				       struct dpaa2_ceetm_tc_copt *copt,
+				       struct net_device *dev)
+{
+	struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
+	struct dpni_tx_schedule_cfg *sched_cfg;
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	int err;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
+		 __func__, cl->common.classid, copt->mode, copt->weight);
+
+	if (!cl->prio.cstats) {
+		cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
+		if (!cl->prio.cstats) {
+			pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+			       __func__);
+			return -ENOMEM;
+		}
+	}
+
+	cl->prio.mode = copt->mode;
+	cl->prio.weight = copt->weight;
+
+	sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
+
+	switch (copt->mode) {
+	case STRICT_PRIORITY:
+		sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
+		break;
+	case WEIGHTED_A:
+		sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
+		break;
+	case WEIGHTED_B:
+		sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
+		break;
+	}
+
+	err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
+
+	return err;
+}
+
+/* Add a new ceetm class */
+static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
+			       struct dpaa2_ceetm_tc_copt *copt,
+			       unsigned long *arg)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+	struct dpaa2_ceetm_class *cl;
+	int err;
+
+	if (copt->type == CEETM_ROOT &&
+	    priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
+		pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
+		       dpaa2_eth_ch_count(priv_eth),
+		       dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
+		return -EINVAL;
+	}
+
+	if (copt->type == CEETM_PRIO &&
+	    priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
+		pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
+		       dpaa2_eth_tc_count(priv_eth),
+		       dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
+		return -EINVAL;
+	}
+
+	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+	if (!cl)
+		return -ENOMEM;
+
+	err = tcf_block_get(&cl->block, &cl->filter_list);
+	if (err) {
+		pr_err("%s: Unable to set new root class\n", __func__);
+		goto out_free;
+	}
+
+	cl->common.classid = classid;
+	cl->parent = sch;
+	cl->child = NULL;
+
+	/* Add class handle in Qdisc */
+	dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
+
+	cl->shaped = copt->shaped;
+	cl->type = copt->type;
+
+	/* Claim a CEETM channel / tc - DPAA2. will assume transition from
+	 * classid to qdid/qpri, starting from qdid / qpri 0
+	 */
+	switch (copt->type) {
+	case CEETM_ROOT:
+		cl->root.ch_id = classid - sch->handle - 1;
+		err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
+		break;
+	case CEETM_PRIO:
+		cl->prio.qpri = classid - sch->handle - 1;
+		err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
+		break;
+	}
+
+	if (err) {
+		pr_err("%s: Unable to set new %s class\n", __func__,
+		       (copt->type == CEETM_ROOT ? "root" : "prio"));
+		goto out_free;
+	}
+
+	switch (copt->type) {
+	case CEETM_ROOT:
+		pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
+			 __func__, classid, cl->root.ch_id);
+		break;
+	case CEETM_PRIO:
+		pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
+			 __func__, classid, cl->prio.qpri);
+		break;
+	}
+
+	*arg = (unsigned long)cl;
+	return 0;
+
+out_free:
+	kfree(cl);
+	return err;
+}
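The `classid - sch->handle - 1` arithmetic above relies on tc handles being packed as `(major << 16) | minor`, with the qdisc handle carrying minor 0; the subtraction simply turns class minor n into hardware index n - 1. A userspace check of the mapping (editor's sketch; the handle values are illustrative):

#include <stdio.h>

/* Root qdisc 1:0 with classes 1:1, 1:2, ...: class 1:1 claims channel
 * (or qpri) 0, class 1:2 claims 1, and so on. Minors up to 0x20 are
 * reserved for channels, which is why PFIFO_MIN_OFFSET in the header
 * starts the pfifo handles at 0x21.
 */
int main(void)
{
	unsigned int sch_handle = 0x10000;	/* qdisc 1:0 */

	for (unsigned int minor = 1; minor <= 3; minor++) {
		unsigned int classid = sch_handle | minor;

		printf("class 1:%x -> index %u\n", minor,
		       classid - sch_handle - 1);
	}
	return 0;
}
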
+
+/* Add or configure a ceetm class */
+static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
+				  struct nlattr **tca, unsigned long *arg)
+{
+	struct dpaa2_ceetm_qdisc *priv;
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
+	struct nlattr *opt = tca[TCA_OPTIONS];
+	struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
+	struct dpaa2_ceetm_tc_copt *copt;
+	struct net_device *dev = qdisc_dev(sch);
+	int err;
+
+	pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
+		 __func__, classid, sch->handle);
+
+	if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+		pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
+		return -EINVAL;
+	}
+
+	priv = qdisc_priv(sch);
+
+	if (!opt) {
+		pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
+			       dpaa2_ceetm_policy, NULL);
+	if (err < 0) {
+		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+		       "nla_parse_nested");
+		return -EINVAL;
+	}
+
+	if (!tb[DPAA2_CEETM_TCA_COPT]) {
+		pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
+		       "tb");
+		return -EINVAL;
+	}
+
+	copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
+
+	/* Configure an existing ceetm class */
+	if (cl) {
+		if (copt->type != cl->type) {
+			pr_err("CEETM: class %X is not of the provided type\n",
+			       cl->common.classid);
+			return -EINVAL;
+		}
+
+		switch (copt->type) {
+		case CEETM_ROOT:
+			return dpaa2_ceetm_cls_change_root(cl, copt, dev);
+		case CEETM_PRIO:
+			return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
+
+		default:
+			pr_err(KBUILD_BASENAME " : %s : invalid class\n",
+			       __func__);
+			return -EINVAL;
+		}
+	}
+
+	return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
+}
+
+static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct dpaa2_ceetm_class *cl;
+	unsigned int i;
+
+	pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < priv->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
+			if (arg->count < arg->skip) {
+				arg->count++;
+				continue;
+			}
+			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
+				arg->stop = 1;
+				return;
+			}
+			arg->count++;
+		}
+	}
+}
+
+static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
+				struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+	struct nlattr *nest;
+	struct dpaa2_ceetm_tc_copt copt;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+		 __func__, cl->common.classid, sch->handle);
+
+	sch_tree_lock(sch);
+
+	tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
+	tcm->tcm_handle = cl->common.classid;
+
+	memset(&copt, 0, sizeof(copt));
+
+	copt.shaped = cl->shaped;
+	copt.type = cl->type;
+
+	switch (cl->type) {
+	case CEETM_ROOT:
+		if (cl->child)
+			tcm->tcm_info = cl->child->handle;
+
+		memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
+		       sizeof(struct dpaa2_ceetm_shaping_cfg));
+
+		break;
+
+	case CEETM_PRIO:
+		if (cl->child)
+			tcm->tcm_info = cl->child->handle;
+
+		copt.mode = cl->prio.mode;
+		copt.weight = cl->prio.weight;
+
+		break;
+	}
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (!nest)
+		goto nla_put_failure;
+	if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
+		goto nla_put_failure;
+	nla_nest_end(skb, nest);
+	sch_tree_unlock(sch);
+	return skb->len;
+
+nla_put_failure:
+	sch_tree_unlock(sch);
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+		 __func__, cl->common.classid, sch->handle);
+
+	sch_tree_lock(sch);
+	qdisc_class_hash_remove(&priv->clhash, &cl->common);
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+/* Get the class' child qdisc, if any */
+static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
+		 __func__, cl->common.classid, sch->handle);
+
+	switch (cl->type) {
+	case CEETM_ROOT:
+	case CEETM_PRIO:
+		return cl->child;
+	}
+
+	return NULL;
+}
+
+static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
+				 struct Qdisc *new, struct Qdisc **old)
+{
+	if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+		pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
+				      struct gnet_dump *d)
+{
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+	struct gnet_stats_basic_packed tmp_bstats;
+	struct dpaa2_ceetm_tc_xstats xstats;
+	union dpni_statistics dpni_stats;
+	struct net_device *dev = qdisc_dev(sch);
+	struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
+	u8 ch_id = 0;
+	int err;
+
+	memset(&xstats, 0, sizeof(xstats));
+	memset(&tmp_bstats, 0, sizeof(tmp_bstats));
+
+	if (cl->type == CEETM_ROOT)
+		return 0;
+
+	err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
+				  DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
+				  &dpni_stats);
+	if (err)
+		netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
+
+	xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
+	xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
+	xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
+	xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
+
+	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
+					       unsigned long arg)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+		 cl ? cl->common.classid : 0, sch->handle);
+	return cl ? cl->block : priv->block;
+}
+
+static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
+					  unsigned long parent,
+					  u32 classid)
+{
+	struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+		 cl ? cl->common.classid : 0, sch->handle);
+	return (unsigned long)cl;
+}
+
+static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
+{
+	struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
+	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
+		 cl ? cl->common.classid : 0, sch->handle);
+}
+
+const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
+	.graft = dpaa2_ceetm_cls_graft,
+	.leaf = dpaa2_ceetm_cls_leaf,
+	.find = dpaa2_ceetm_cls_find,
+	.change = dpaa2_ceetm_cls_change,
+	.delete = dpaa2_ceetm_cls_delete,
+	.walk = dpaa2_ceetm_cls_walk,
+	.tcf_block = dpaa2_ceetm_tcf_block,
+	.bind_tcf = dpaa2_ceetm_tcf_bind,
+	.unbind_tcf = dpaa2_ceetm_tcf_unbind,
+	.dump = dpaa2_ceetm_cls_dump,
+	.dump_stats = dpaa2_ceetm_cls_dump_stats,
+};
+
+struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
+	.id = "ceetm",
+	.priv_size = sizeof(struct dpaa2_ceetm_qdisc),
+	.cl_ops = &dpaa2_ceetm_cls_ops,
+	.init = dpaa2_ceetm_init,
+	.destroy = dpaa2_ceetm_destroy,
+	.change = dpaa2_ceetm_change,
+	.dump = dpaa2_ceetm_dump,
+	.attach = dpaa2_ceetm_attach,
+	.owner = THIS_MODULE,
+};
+
+/* Run the filters and classifiers attached to the qdisc on the provided skb */
+int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+			 int *qdid, u8 *qpri)
+{
+	struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
+	struct dpaa2_ceetm_class *cl = NULL;
+	struct tcf_result res;
+	struct tcf_proto *tcf;
+	int result;
+
+	tcf = rcu_dereference_bh(priv->filter_list);
+	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_QUEUED:
+		case TC_ACT_STOLEN:
+		case TC_ACT_SHOT:
+			/* No valid class found due to action */
+			return -1;
+		}
+#endif
+		cl = (void *)res.class;
+		if (!cl) {
+			/* The filter leads to the qdisc */
+			if (res.classid == sch->handle)
+				return 0;
+
+			cl = dpaa2_ceetm_find(res.classid, sch);
+			/* The filter leads to an invalid class */
+			if (!cl)
+				break;
+		}
+
+		/* The class might have its own filters attached */
+		tcf = rcu_dereference_bh(cl->filter_list);
+	}
+
+	/* No valid class found */
+	if (!cl)
+		return 0;
+
+	switch (cl->type) {
+	case CEETM_ROOT:
+		*qdid = cl->root.ch_id;
+
+		/* The root class does not have a child prio qdisc */
+		if (!cl->child)
+			return 0;
+
+		/* Run the prio qdisc classifiers */
+		return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
+
+	case CEETM_PRIO:
+		*qpri = cl->prio.qpri;
+		break;
+	}
+
+	return 0;
+}
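For context, this classifier is meant to be invoked from the driver's Tx path once CEETM is enabled, so the resolved qdid/qpri can steer the frame to the right hardware queue. A minimal sketch of such a call site (editor's assumption — the real hook lives in dpaa2-eth.c, which this excerpt does not show; the default values are illustrative):

/* Editor's sketch only. Assumes the caller runs in softirq context with
 * the RCU read lock held, as ndo_start_xmit does, and that the root
 * ceetm qdisc is reachable through the device's first Tx queue.
 */
static void example_ceetm_pick_queue(struct sk_buff *skb,
				     struct net_device *dev,
				     struct dpaa2_eth_priv *priv)
{
	struct Qdisc *root =
		rcu_dereference_bh(netdev_get_tx_queue(dev, 0)->qdisc);
	int qdid = 0;	/* queuing destination: channel 0 by default */
	u8 qpri = 0;	/* class queue priority within the channel */

	if (dpaa2_eth_ceetm_is_enabled(priv))
		dpaa2_ceetm_classify(skb, root, &qdid, &qpri);

	/* qdid/qpri would now be folded into the frame enqueue command */
}
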
+
+int __init dpaa2_ceetm_register(void)
+{
+	int err = 0;
+
+	pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
+
+	err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
+	if (unlikely(err))
+		pr_err(KBUILD_MODNAME
+		       ": %s:%hu:%s(): register_qdisc() = %d\n",
+		       KBUILD_BASENAME ".c", __LINE__, __func__, err);
+
+	return err;
+}
+
+void __exit dpaa2_ceetm_unregister(void)
+{
+	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+		 KBUILD_BASENAME ".c", __func__);
+
+	unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
+}
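These register/unregister helpers are intended to be called from the dpaa2-eth module's own init and exit paths rather than building CEETM as a separate module; a sketch of that wiring (editor's assumption — the actual call sites are in dpaa2-eth.c, not shown in this hunk, and dpaa2_eth_driver_init is a hypothetical name):

/* Editor's sketch of the expected module wiring. When
 * CONFIG_FSL_DPAA2_ETH_CEETM is off, dpaa2_ceetm_register() is the
 * inline stub from dpaa2-eth-ceetm.h and this reduces to a no-op.
 */
static int __init dpaa2_eth_driver_init(void)
{
	int err;

	err = dpaa2_ceetm_register();
	if (err)
		return err;

	/* ... register the fsl_mc driver itself here ... */
	return 0;
}
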
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA2_ETH_CEETM_H
+#define __DPAA2_ETH_CEETM_H
+
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+#include <net/netlink.h>
+
+#include "dpaa2-eth.h"
+
+/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
+ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
+ * are reserved for the maximum 32 CEETM channels (majors and minors are in
+ * hex).
+ */
+#define PFIFO_MIN_OFFSET 0x21
+
+#define DPAA2_CEETM_MIN_WEIGHT 100
+#define DPAA2_CEETM_MAX_WEIGHT 24800
+
+#define DPAA2_CEETM_TD_THRESHOLD 1000
+
+enum wbfs_group_type {
+	WBFS_GRP_A,
+	WBFS_GRP_B,
+	WBFS_GRP_LARGE
+};
+
+enum {
+	DPAA2_CEETM_TCA_UNSPEC,
+	DPAA2_CEETM_TCA_COPT,
+	DPAA2_CEETM_TCA_QOPS,
+	DPAA2_CEETM_TCA_MAX,
+};
+
+/* CEETM configuration types */
+enum dpaa2_ceetm_type {
+	CEETM_ROOT = 1,
+	CEETM_PRIO,
+};
+
+enum {
+	STRICT_PRIORITY = 0,
+	WEIGHTED_A,
+	WEIGHTED_B,
+};
+
+struct dpaa2_ceetm_shaping_cfg {
+	__u64 cir; /* committed information rate */
+	__u64 eir; /* excess information rate */
+	__u16 cbs; /* committed burst size */
+	__u16 ebs; /* excess burst size */
+	__u8 coupled; /* shaper coupling */
+};
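An illustration of how this structure is expected to be filled by the time it reaches dpaa2_eth_update_shaping_cfg() (editor's sketch, not part of the patch — the rates and burst values are made up):

/* Rates are bytes per second; dpaa2_eth_bps_to_mbit() in
 * dpaa2-eth-ceetm.c later converts them to Mbit/s for the MC firmware.
 * Burst sizes are bytes and must stay under DPAA2_ETH_MAX_BURST_SIZE
 * (defined in dpaa2-eth.h, not shown in this excerpt).
 */
static inline void example_fill_shaping(struct dpaa2_ceetm_shaping_cfg *cfg)
{
	cfg->cir = 100000000ULL / 8;	/* CIR: 100 Mbit/s as bytes/s */
	cfg->eir = 50000000ULL / 8;	/* EIR: 50 Mbit/s as bytes/s */
	cfg->cbs = 8000;		/* committed burst, bytes */
	cfg->ebs = 8000;		/* excess burst, bytes */
	cfg->coupled = 1;		/* valid only when both rates are set */
}
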
+
+extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
+
+struct dpaa2_ceetm_class;
+struct dpaa2_ceetm_qdisc_stats;
+struct dpaa2_ceetm_class_stats;
+
+/* corresponds to CEETM shaping at LNI level */
+struct dpaa2_root_q {
+	struct Qdisc **qdiscs;
+	struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
+};
+
+/* corresponds to the number of priorities a channel serves */
+struct dpaa2_prio_q {
+	struct dpaa2_ceetm_class *parent;
+	struct dpni_tx_priorities_cfg tx_prio_cfg;
+};
+
+struct dpaa2_ceetm_qdisc {
+	struct Qdisc_class_hash clhash;
+	struct tcf_proto *filter_list; /* qdisc attached filters */
+	struct tcf_block *block;
+
+	enum dpaa2_ceetm_type type; /* ROOT/PRIO */
+	bool shaped;
+	union {
+		struct dpaa2_root_q root;
+		struct dpaa2_prio_q prio;
+	};
+};
+
+/* CEETM Qdisc configuration parameters */
+struct dpaa2_ceetm_tc_qopt {
+	enum dpaa2_ceetm_type type;
+	__u16 shaped;
+	__u8 prio_group_A;
+	__u8 prio_group_B;
+	__u8 separate_groups;
+};
+
+/* root class - corresponds to a channel */
+struct dpaa2_root_c {
+	struct dpaa2_ceetm_shaping_cfg shaping_cfg;
+	u32 ch_id;
+};
+
+/* prio class - corresponds to a strict priority queue (group) */
+struct dpaa2_prio_c {
+	struct dpaa2_ceetm_class_stats __percpu *cstats;
+	u32 qpri;
+	u8 mode;
+	u16 weight;
+};
+
+struct dpaa2_ceetm_class {
+	struct Qdisc_class_common common;
+	struct tcf_proto *filter_list; /* class attached filters */
+	struct tcf_block *block;
+	struct Qdisc *parent;
+	struct Qdisc *child;
+
+	enum dpaa2_ceetm_type type; /* ROOT/PRIO */
+	bool shaped;
+	union {
+		struct dpaa2_root_c root;
+		struct dpaa2_prio_c prio;
+	};
+};
+
+/* CEETM Class configuration parameters */
+struct dpaa2_ceetm_tc_copt {
+	enum dpaa2_ceetm_type type;
+	struct dpaa2_ceetm_shaping_cfg shaping_cfg;
+	__u16 shaped;
+	__u8 mode;
+	__u16 weight;
+};
+
+/* CEETM stats */
+struct dpaa2_ceetm_qdisc_stats {
+	__u32 drops;
+};
+
+struct dpaa2_ceetm_class_stats {
+	/* Software counters */
+	struct gnet_stats_basic_packed bstats;
+	__u32 ern_drop_count;
+	__u32 congested_count;
+};
+
+struct dpaa2_ceetm_tc_xstats {
+	__u64 ceetm_dequeue_bytes;
+	__u64 ceetm_dequeue_frames;
+	__u64 ceetm_reject_bytes;
+	__u64 ceetm_reject_frames;
+};
+
+#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
+int __init dpaa2_ceetm_register(void);
+void __exit dpaa2_ceetm_unregister(void);
+int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+			 int *qdid, u8 *qpri);
+#else
+static inline int dpaa2_ceetm_register(void)
+{
+	return 0;
+}
+
+static inline void dpaa2_ceetm_unregister(void) {}
+
+static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
+				       int *qdid, u8 *qpri)
+{
+	return 0;
+}
+#endif
+
+static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
+{
+	return priv->ceetm_en;
+}
+
+#endif
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
@@ -0,0 +1,357 @@
+
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	struct rtnl_link_stats64 *stats;
+	struct dpaa2_eth_drv_stats *extras;
+	int i;
+
+	seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+		   "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+		   "Tx SG", "Tx realloc", "Enq busy");
+
+	for_each_online_cpu(i) {
+		stats = per_cpu_ptr(priv->percpu_stats, i);
+		extras = per_cpu_ptr(priv->percpu_extras, i);
+		seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+			   i,
+			   stats->rx_packets,
+			   stats->rx_errors,
+			   extras->rx_sg_frames,
+			   stats->tx_packets,
+			   stats->tx_errors,
+			   extras->tx_conf_frames,
+			   extras->tx_sg_frames,
+			   extras->tx_reallocs,
+			   extras->tx_portal_busy);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_cpu_show, priv
||
1573 | + if (err < 0) |
||
1574 | + netdev_err(priv->net_dev, "single_open() failed\n"); |
||
1575 | + |
||
1576 | + return err; |
||
1577 | +} |
||
1578 | + |
||
1579 | +static const struct file_operations dpaa2_dbg_cpu_ops = { |
||
1580 | + .open = dpaa2_dbg_cpu_open, |
||
1581 | + .read = seq_read, |
||
1582 | + .llseek = seq_lseek, |
||
1583 | + .release = single_release, |
||
1584 | +}; |
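The open/show/fops triplet above is the stock seq_file single_open idiom; it repeats verbatim for the FQ and channel files below. Its skeleton, with placeholder names (illustrative, not patch code):

/* "foo" is a placeholder name, not an identifier from this patch */
static int foo_show(struct seq_file *file, void *offset)
{
	struct dpaa2_eth_priv *priv = file->private;

	seq_printf(file, "state for %s\n", priv->net_dev->name);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, inode->i_private);
}

static const struct file_operations foo_ops = {
	.open = foo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};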
||
1585 | + |
||
1586 | +static char *fq_type_to_str(struct dpaa2_eth_fq *fq) |
||
1587 | +{ |
||
1588 | + switch (fq->type) { |
||
1589 | + case DPAA2_RX_FQ: |
||
1590 | + return "Rx"; |
||
1591 | + case DPAA2_TX_CONF_FQ: |
||
1592 | + return "Tx conf"; |
||
1593 | + case DPAA2_RX_ERR_FQ: |
||
1594 | + return "Rx err"; |
||
1595 | + default: |
||
1596 | + return "N/A"; |
||
1597 | + } |
||
1598 | +} |
||
1599 | + |
||
1600 | +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) |
||
1601 | +{ |
||
1602 | + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; |
||
1603 | + struct dpaa2_eth_fq *fq; |
||
1604 | + u32 fcnt, bcnt; |
||
1605 | + int i, err; |
||
1606 | + |
||
1607 | + seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name); |
||
1608 | + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", |
||
1609 | + "VFQID", "CPU", "Traffic Class", "Type", "Frames", |
||
1610 | + "Pending frames", "Congestion"); |
||
1611 | + |
||
1612 | + for (i = 0; i < priv->num_fqs; i++) { |
||
1613 | + fq = &priv->fq[i]; |
||
1614 | + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); |
||
1615 | + if (err) |
||
1616 | + fcnt = 0; |
||
1617 | + |
||
1618 | + /* A lot of queues, no use displaying the zero-traffic ones */ |
||
1619 | + if (!fq->stats.frames && !fcnt) |
||
1620 | + continue; |
||
1621 | + |
||
1622 | + seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n", |
||
1623 | + fq->fqid, |
||
1624 | + fq->target_cpu, |
||
1625 | + fq->tc, |
||
1626 | + fq_type_to_str(fq), |
||
1627 | + fq->stats.frames, |
||
1628 | + fcnt, |
||
1629 | + fq->stats.congestion_entry); |
||
1630 | + } |
||
1631 | + |
||
1632 | + return 0; |
||
1633 | +} |
||
1634 | + |
||
1635 | +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) |
||
1636 | +{ |
||
1637 | + int err; |
||
1638 | + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; |
||
1639 | + |
||
1640 | + err = single_open(file, dpaa2_dbg_fqs_show, priv); |
||
1641 | + if (err < 0) |
||
1642 | + netdev_err(priv->net_dev, "single_open() failed\n"); |
||
1643 | + |
||
1644 | + return err; |
||
1645 | +} |
||
1646 | + |
||
1647 | +static const struct file_operations dpaa2_dbg_fq_ops = { |
||
1648 | + .open = dpaa2_dbg_fqs_open, |
||
1649 | + .read = seq_read, |
||
1650 | + .llseek = seq_lseek, |
||
1651 | + .release = single_release, |
||
1652 | +}; |
||
1653 | + |
||
1654 | +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) |
||
1655 | +{ |
||
1656 | + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; |
||
1657 | + struct dpaa2_eth_channel *ch; |
||
1658 | + int i; |
||
1659 | + |
||
1660 | + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); |
||
1661 | + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", |
||
1662 | + "CHID", "CPU", "Deq busy", "Frames", "CDANs", |
||
1663 | + "Avg frm/CDAN", "Buf count"); |
||
1664 | + |
||
1665 | + for (i = 0; i < priv->num_channels; i++) { |
||
1666 | + ch = priv->channel[i]; |
||
1667 | + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n", |
||
1668 | + ch->ch_id, |
||
1669 | + ch->nctx.desired_cpu, |
||
1670 | + ch->stats.dequeue_portal_busy, |
||
1671 | + ch->stats.frames, |
||
1672 | + ch->stats.cdan, |
||
1673 | + ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0, |
||
1674 | + ch->buf_count); |
||
1675 | + } |
||
1676 | + |
||
1677 | + return 0; |
||
1678 | +} |
||
1679 | + |
||
1680 | +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) |
||
1681 | +{ |
||
1682 | + int err; |
||
1683 | + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; |
||
1684 | + |
||
1685 | + err = single_open(file, dpaa2_dbg_ch_show, priv); |
||
1686 | + if (err < 0) |
||
1687 | + netdev_err(priv->net_dev, "single_open() failed\n"); |
||
1688 | + |
||
1689 | + return err; |
||
1690 | +} |
||
1691 | + |
||
1692 | +static const struct file_operations dpaa2_dbg_ch_ops = { |
||
1693 | + .open = dpaa2_dbg_ch_open, |
||
1694 | + .read = seq_read, |
||
1695 | + .llseek = seq_lseek, |
||
1696 | + .release = single_release, |
||
1697 | +}; |
||
1698 | + |
||
1699 | +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, |
||
1700 | + size_t count, loff_t *offset) |
||
1701 | +{ |
||
1702 | + struct dpaa2_eth_priv *priv = file->private_data; |
||
1703 | + struct rtnl_link_stats64 *percpu_stats; |
||
1704 | + struct dpaa2_eth_drv_stats *percpu_extras; |
||
1705 | + struct dpaa2_eth_fq *fq; |
||
1706 | + struct dpaa2_eth_channel *ch; |
||
1707 | + int i; |
||
1708 | + |
||
1709 | + for_each_online_cpu(i) { |
||
1710 | + percpu_stats = per_cpu_ptr(priv->percpu_stats, i); |
||
1711 | + memset(percpu_stats, 0, sizeof(*percpu_stats)); |
||
1712 | + |
||
1713 | + percpu_extras = per_cpu_ptr(priv->percpu_extras, i); |
||
1714 | + memset(percpu_extras, 0, sizeof(*percpu_extras)); |
||
1715 | + } |
||
1716 | + |
||
1717 | + for (i = 0; i < priv->num_fqs; i++) { |
||
1718 | + fq = &priv->fq[i]; |
||
1719 | + memset(&fq->stats, 0, sizeof(fq->stats)); |
||
1720 | + } |
||
1721 | + |
||
1722 | + for (i = 0; i < priv->num_channels; i++) { |
||
1723 | + ch = priv->channel[i]; |
||
1724 | + memset(&ch->stats, 0, sizeof(ch->stats)); |
||
1725 | + } |
||
1726 | + |
||
1727 | + return count; |
||
1728 | +} |
||
1729 | + |
||
1730 | +static const struct file_operations dpaa2_dbg_reset_ops = { |
||
1731 | + .open = simple_open, |
||
1732 | + .write = dpaa2_dbg_reset_write, |
||
1733 | +}; |
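This reset file (and the MC counterpart that follows) is a write-only trigger; the written bytes are ignored and only the write itself matters. A usage sketch (the interface name is a placeholder):

/* Illustrative usage, not part of the patch:
 *   echo 1 > /sys/kernel/debug/dpaa2-eth/eth0/reset_stats
 * Any value works; dpaa2_dbg_reset_write() never reads the buffer.
 */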
||
1734 | + |
||
1735 | +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file, |
||
1736 | + const char __user *buf, |
||
1737 | + size_t count, loff_t *offset) |
||
1738 | +{ |
||
1739 | + struct dpaa2_eth_priv *priv = file->private_data; |
||
1740 | + int err; |
||
1741 | + |
||
1742 | + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token); |
||
1743 | + if (err) |
||
1744 | + netdev_err(priv->net_dev, |
||
1745 | + "dpni_reset_statistics() failed %d\n", err); |
||
1746 | + |
||
1747 | + return count; |
||
1748 | +} |
||
1749 | + |
||
1750 | +static const struct file_operations dpaa2_dbg_reset_mc_ops = { |
||
1751 | + .open = simple_open, |
||
1752 | + .write = dpaa2_dbg_reset_mc_write, |
||
1753 | +}; |
||
1754 | + |
||
1755 | +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) |
||
1756 | +{ |
||
1757 | + if (!dpaa2_dbg_root) |
||
1758 | + return; |
||
1759 | + |
||
1760 | + /* Create a directory for the interface */ |
||
1761 | + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, |
||
1762 | + dpaa2_dbg_root); |
||
1763 | + if (!priv->dbg.dir) { |
||
1764 | + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); |
||
1765 | + return; |
||
1766 | + } |
||
1767 | + |
||
1768 | + /* per-cpu stats file */ |
||
1769 | + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444, |
||
1770 | + priv->dbg.dir, priv, |
||
1771 | + &dpaa2_dbg_cpu_ops); |
||
1772 | + if (!priv->dbg.cpu_stats) { |
||
1773 | + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); |
||
1774 | + goto err_cpu_stats; |
||
1775 | + } |
||
1776 | + |
||
1777 | + /* per-fq stats file */ |
||
1778 | + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444, |
||
1779 | + priv->dbg.dir, priv, |
||
1780 | + &dpaa2_dbg_fq_ops); |
||
1781 | + if (!priv->dbg.fq_stats) { |
||
1782 | + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); |
||
1783 | + goto err_fq_stats; |
||
1784 | + } |
||
1785 | + |
||
1786 | + /* per-channel stats file */ |
||
1787 | + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444, |
||
1788 | + priv->dbg.dir, priv, |
||
1789 | + &dpaa2_dbg_ch_ops); |
||
1790 | + if (!priv->dbg.ch_stats) { |
||
1791 | + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); |
||
1792 | + goto err_ch_stats; |
||
1793 | + } |
||
1794 | + |
||
1795 | + /* reset stats */ |
||
1796 | + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200, |
||
1797 | + priv->dbg.dir, priv, |
||
1798 | + &dpaa2_dbg_reset_ops); |
||
1799 | + if (!priv->dbg.reset_stats) { |
||
1800 | + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); |
||
1801 | + goto err_reset_stats; |
||
1802 | + } |
||
1803 | + |
||
1804 | + /* reset MC stats */ |
||
1805 | + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats", |
||
1806 | + 0222, priv->dbg.dir, priv, |
||
1807 | + &dpaa2_dbg_reset_mc_ops); |
||
1808 | + if (!priv->dbg.reset_mc_stats) { |
||
1809 | + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); |
||
1810 | + goto err_reset_mc_stats; |
||
1811 | + } |
||
1812 | + |
||
1813 | + return; |
||
1814 | + |
||
1815 | +err_reset_mc_stats: |
||
1816 | + debugfs_remove(priv->dbg.reset_stats); |
||
1817 | +err_reset_stats: |
||
1818 | + debugfs_remove(priv->dbg.ch_stats); |
||
1819 | +err_ch_stats: |
||
1820 | + debugfs_remove(priv->dbg.fq_stats); |
||
1821 | +err_fq_stats: |
||
1822 | + debugfs_remove(priv->dbg.cpu_stats); |
||
1823 | +err_cpu_stats: |
||
1824 | + debugfs_remove(priv->dbg.dir); |
||
1825 | +} |
||
1826 | + |
||
1827 | +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) |
||
1828 | +{ |
||
1829 | + debugfs_remove(priv->dbg.reset_mc_stats); |
||
1830 | + debugfs_remove(priv->dbg.reset_stats); |
||
1831 | + debugfs_remove(priv->dbg.fq_stats); |
||
1832 | + debugfs_remove(priv->dbg.ch_stats); |
||
1833 | + debugfs_remove(priv->dbg.cpu_stats); |
||
1834 | + debugfs_remove(priv->dbg.dir); |
||
1835 | +} |
||
1836 | + |
||
1837 | +void dpaa2_eth_dbg_init(void) |
||
1838 | +{ |
||
1839 | + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); |
||
1840 | + if (!dpaa2_dbg_root) { |
||
1841 | + pr_err("DPAA2-ETH: debugfs create failed\n"); |
||
1842 | + return; |
||
1843 | + } |
||
1844 | + |
||
1845 | + pr_info("DPAA2-ETH: debugfs created\n"); |
||
1846 | +} |
||
1847 | + |
||
1848 | +void __exit dpaa2_eth_dbg_exit(void) |
||
1849 | +{ |
||
1850 | + debugfs_remove(dpaa2_dbg_root); |
||
1851 | +} |
||
1852 | --- /dev/null |
||
1853 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h |
||
1854 | @@ -0,0 +1,60 @@ |
||
1855 | +/* Copyright 2015 Freescale Semiconductor Inc. |
||
1856 | + * |
||
1857 | + * Redistribution and use in source and binary forms, with or without |
||
1858 | + * modification, are permitted provided that the following conditions are met: |
||
1859 | + * * Redistributions of source code must retain the above copyright |
||
1860 | + * notice, this list of conditions and the following disclaimer. |
||
1861 | + * * Redistributions in binary form must reproduce the above copyright |
||
1862 | + * notice, this list of conditions and the following disclaimer in the |
||
1863 | + * documentation and/or other materials provided with the distribution. |
||
1864 | + * * Neither the name of Freescale Semiconductor nor the |
||
1865 | + * names of its contributors may be used to endorse or promote products |
||
1866 | + * derived from this software without specific prior written permission. |
||
1867 | + * |
||
1868 | + * |
||
1869 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
1870 | + * GNU General Public License ("GPL") as published by the Free Software |
||
1871 | + * Foundation, either version 2 of that License or (at your option) any |
||
1872 | + * later version. |
||
1873 | + * |
||
1874 | + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
||
1875 | + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
||
1876 | + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
||
1877 | + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
||
1878 | + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
||
1879 | + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
||
1880 | + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
||
1881 | + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||
1882 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
||
1883 | + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||
1884 | + */ |
||
1885 | + |
||
1886 | +#ifndef DPAA2_ETH_DEBUGFS_H |
||
1887 | +#define DPAA2_ETH_DEBUGFS_H |
||
1888 | + |
||
1889 | +#include <linux/dcache.h> |
||
1890 | + |
||
1891 | +struct dpaa2_eth_priv; |
||
1892 | + |
||
1893 | +struct dpaa2_debugfs { |
||
1894 | + struct dentry *dir; |
||
1895 | + struct dentry *fq_stats; |
||
1896 | + struct dentry *ch_stats; |
||
1897 | + struct dentry *cpu_stats; |
||
1898 | + struct dentry *reset_stats; |
||
1899 | + struct dentry *reset_mc_stats; |
||
1900 | +}; |
||
1901 | + |
||
1902 | +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS |
||
1903 | +void dpaa2_eth_dbg_init(void); |
||
1904 | +void dpaa2_eth_dbg_exit(void); |
||
1905 | +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); |
||
1906 | +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); |
||
1907 | +#else |
||
1908 | +static inline void dpaa2_eth_dbg_init(void) {} |
||
1909 | +static inline void dpaa2_eth_dbg_exit(void) {} |
||
1910 | +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} |
||
1911 | +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} |
||
1912 | +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ |
||
1913 | + |
||
1914 | +#endif /* DPAA2_ETH_DEBUGFS_H */ |
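With the no-op stubs above, the rest of the driver can call these hooks unconditionally. A sketch of the expected call sites (hypothetical function names, for illustration):

/* Illustrative only: no #ifdef is needed around the debugfs hooks */
static int example_probe_tail(struct dpaa2_eth_priv *priv)
{
	dpaa2_dbg_add(priv);	/* compiles to an empty stub without DEBUGFS */
	return 0;
}

static void example_remove_head(struct dpaa2_eth_priv *priv)
{
	dpaa2_dbg_remove(priv);
}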
||
1915 | --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c |
||
1916 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c |
||
1917 | @@ -38,9 +38,14 @@ |
||
1918 | #include <linux/msi.h> |
||
1919 | #include <linux/kthread.h> |
||
1920 | #include <linux/iommu.h> |
||
1921 | - |
||
1922 | +#include <linux/net_tstamp.h> |
||
1923 | +#include <linux/bpf.h> |
||
1924 | +#include <linux/filter.h> |
||
1925 | +#include <linux/atomic.h> |
||
1926 | +#include <net/sock.h> |
||
1927 | #include "../../fsl-mc/include/mc.h" |
||
1928 | #include "dpaa2-eth.h" |
||
1929 | +#include "dpaa2-eth-ceetm.h" |
||
1930 | |||
1931 | /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files |
||
1932 | * using trace events only need to #include <trace/events/sched.h> |
||
1933 | @@ -104,13 +109,15 @@ static void free_rx_fd(struct dpaa2_eth_ |
||
1934 | /* We don't support any other format */ |
||
1935 | return; |
||
1936 | |||
1937 | - /* For S/G frames, we first need to free all SG entries */ |
||
1938 | + /* For S/G frames, we first need to free all SG entries |
||
1939 | + * except the first one, which was taken care of already |
||
1940 | + */ |
||
1941 | sgt = vaddr + dpaa2_fd_get_offset(fd); |
||
1942 | - for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { |
||
1943 | + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { |
||
1944 | addr = dpaa2_sg_get_addr(&sgt[i]); |
||
1945 | sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); |
||
1946 | dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, |
||
1947 | - DMA_FROM_DEVICE); |
||
1948 | + DMA_BIDIRECTIONAL); |
||
1949 | |||
1950 | skb_free_frag(sg_vaddr); |
||
1951 | if (dpaa2_sg_is_final(&sgt[i])) |
||
1952 | @@ -133,8 +140,7 @@ static struct sk_buff *build_linear_skb( |
||
1953 | |||
1954 | ch->buf_count--; |
||
1955 | |||
1956 | - skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + |
||
1957 | - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); |
||
1958 | + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); |
||
1959 | if (unlikely(!skb)) |
||
1960 | return NULL; |
||
1961 | |||
1962 | @@ -170,15 +176,19 @@ static struct sk_buff *build_frag_skb(st |
||
1963 | sg_addr = dpaa2_sg_get_addr(sge); |
||
1964 | sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); |
||
1965 | dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, |
||
1966 | - DMA_FROM_DEVICE); |
||
1967 | + DMA_BIDIRECTIONAL); |
||
1968 | |||
1969 | sg_length = dpaa2_sg_get_len(sge); |
||
1970 | |||
1971 | if (i == 0) { |
||
1972 | /* We build the skb around the first data buffer */ |
||
1973 | - skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + |
||
1974 | - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); |
||
1975 | + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); |
||
1976 | if (unlikely(!skb)) { |
||
1977 | + /* Free the first SG entry now, since we already |
||
1978 | + * unmapped it and obtained the virtual address |
||
1979 | + */ |
||
1980 | + skb_free_frag(sg_vaddr); |
||
1981 | + |
||
1982 | /* We still need to subtract the buffers used |
||
1983 | * by this FD from our software counter |
||
1984 | */ |
||
1985 | @@ -213,17 +223,173 @@ static struct sk_buff *build_frag_skb(st |
||
1986 | break; |
||
1987 | } |
||
1988 | |||
1989 | + WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); |
||
1990 | + |
||
1991 | /* Count all data buffers + SG table buffer */ |
||
1992 | ch->buf_count -= i + 2; |
||
1993 | |||
1994 | return skb; |
||
1995 | } |
||
1996 | |||
1997 | +static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv, |
||
1998 | + struct dpaa2_fd *fd, |
||
1999 | + void *buf_start, |
||
2000 | + u16 queue_id) |
||
2001 | +{ |
||
2002 | + struct dpaa2_eth_fq *fq; |
||
2003 | + struct rtnl_link_stats64 *percpu_stats; |
||
2004 | + struct dpaa2_eth_drv_stats *percpu_extras; |
||
2005 | + struct dpaa2_faead *faead; |
||
2006 | + u32 ctrl, frc; |
||
2007 | + int i, err; |
||
2008 | + |
||
2009 | + /* Mark the egress frame annotation area as valid */ |
||
2010 | + frc = dpaa2_fd_get_frc(fd); |
||
2011 | + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); |
||
2012 | + dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); |
||
2013 | + |
||
2014 | + ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; |
||
2015 | + faead = dpaa2_get_faead(buf_start, false); |
||
2016 | + faead->ctrl = cpu_to_le32(ctrl); |
||
2017 | + faead->conf_fqid = 0; |
||
2018 | + |
||
2019 | + percpu_stats = this_cpu_ptr(priv->percpu_stats); |
||
2020 | + percpu_extras = this_cpu_ptr(priv->percpu_extras); |
||
2021 | + |
||
2022 | + fq = &priv->fq[queue_id]; |
||
2023 | + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
||
2024 | + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, |
||
2025 | + priv->tx_qdid, 0, |
||
2026 | + fq->tx_qdbin, fd); |
||
2027 | + if (err != -EBUSY) |
||
2028 | + break; |
||
2029 | + } |
||
2030 | + |
||
2031 | + percpu_extras->tx_portal_busy += i; |
||
2032 | + if (unlikely(err)) { |
||
2033 | + percpu_stats->tx_errors++; |
||
2034 | + } else { |
||
2035 | + percpu_stats->tx_packets++; |
||
2036 | + percpu_stats->tx_bytes += dpaa2_fd_get_len(fd); |
||
2037 | + } |
||
2038 | + |
||
2039 | + return err; |
||
2040 | +} |
||
2041 | + |
||
2042 | +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) |
||
2043 | +{ |
||
2044 | + struct device *dev = priv->net_dev->dev.parent; |
||
2045 | + void *vaddr; |
||
2046 | + int i; |
||
2047 | + |
||
2048 | + for (i = 0; i < count; i++) { |
||
2049 | + /* Same logic as on regular Rx path */ |
||
2050 | + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); |
||
2051 | + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, |
||
2052 | + DMA_BIDIRECTIONAL); |
||
2053 | + skb_free_frag(vaddr); |
||
2054 | + } |
||
2055 | +} |
||
2056 | + |
||
2057 | +static void release_fd_buf(struct dpaa2_eth_priv *priv, |
||
2058 | + struct dpaa2_eth_channel *ch, |
||
2059 | + dma_addr_t addr) |
||
2060 | +{ |
||
2061 | + int err; |
||
2062 | + |
||
2063 | + ch->rel_buf_array[ch->rel_buf_cnt++] = addr; |
||
2064 | + if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD)) |
||
2065 | + return; |
||
2066 | + |
||
2067 | + while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, |
||
2068 | + ch->rel_buf_array, |
||
2069 | + ch->rel_buf_cnt)) == -EBUSY) |
||
2070 | + cpu_relax(); |
||
2071 | + |
||
2072 | + if (err) |
||
2073 | + free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt); |
||
2074 | + |
||
2075 | + ch->rel_buf_cnt = 0; |
||
2076 | +} |
||
2077 | + |
||
2078 | +static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, |
||
2079 | + struct dpaa2_eth_channel *ch, |
||
2080 | + struct dpaa2_fd *fd, |
||
2081 | + u16 queue_id, |
||
2082 | + void *vaddr) |
||
2083 | +{ |
||
2084 | + struct device *dev = priv->net_dev->dev.parent; |
||
2085 | + dma_addr_t addr = dpaa2_fd_get_addr(fd); |
||
2086 | + struct rtnl_link_stats64 *percpu_stats; |
||
2087 | + struct bpf_prog *xdp_prog; |
||
2088 | + struct xdp_buff xdp; |
||
2089 | + u32 xdp_act = XDP_PASS; |
||
2090 | + |
||
2091 | + xdp_prog = READ_ONCE(ch->xdp_prog); |
||
2092 | + if (!xdp_prog) |
||
2093 | + return xdp_act; |
||
2094 | + |
||
2095 | + percpu_stats = this_cpu_ptr(priv->percpu_stats); |
||
2096 | + |
||
2097 | + xdp.data = vaddr + dpaa2_fd_get_offset(fd); |
||
2098 | + xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); |
||
2099 | + /* Allow the XDP program to use the specially reserved headroom */ |
||
2100 | + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; |
||
2101 | + |
||
2102 | + rcu_read_lock(); |
||
2103 | + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); |
||
2104 | + |
||
2105 | + /* xdp.data pointer may have changed */ |
||
2106 | + dpaa2_fd_set_offset(fd, xdp.data - vaddr); |
||
2107 | + dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); |
||
2108 | + |
||
2109 | + switch (xdp_act) { |
||
2110 | + case XDP_PASS: |
||
2111 | + break; |
||
2112 | + default: |
||
2113 | + bpf_warn_invalid_xdp_action(xdp_act); |
||
2114 | + case XDP_ABORTED: |
||
2115 | + case XDP_DROP: |
||
2116 | + /* This is our buffer, so we can release it back to hardware */ |
||
2117 | + release_fd_buf(priv, ch, addr); |
||
2118 | + percpu_stats->rx_dropped++; |
||
2119 | + break; |
||
2120 | + case XDP_TX: |
||
2121 | + if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) { |
||
2122 | + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, |
||
2123 | + DMA_BIDIRECTIONAL); |
||
2124 | + free_rx_fd(priv, fd, vaddr); |
||
2125 | + ch->buf_count--; |
||
2126 | + } |
||
2127 | + break; |
||
2128 | + case XDP_REDIRECT: |
||
2129 | + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, |
||
2130 | + DMA_BIDIRECTIONAL); |
||
2131 | + ch->buf_count--; |
||
2132 | + ch->flush = true; |
||
2133 | + /* Mark the actual start of the data buffer */ |
||
2134 | + xdp.data_hard_start = vaddr; |
||
2135 | + if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog)) |
||
2136 | + free_rx_fd(priv, fd, vaddr); |
||
2137 | + break; |
||
2138 | + } |
||
2139 | + |
||
2140 | + if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) { |
||
2141 | + percpu_stats->rx_packets++; |
||
2142 | + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); |
||
2143 | + } |
||
2144 | + |
||
2145 | + rcu_read_unlock(); |
||
2146 | + |
||
2147 | + return xdp_act; |
||
2148 | +} |
||
2149 | + |
||
2150 | /* Main Rx frame processing routine */ |
||
2151 | static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, |
||
2152 | struct dpaa2_eth_channel *ch, |
||
2153 | const struct dpaa2_fd *fd, |
||
2154 | - struct napi_struct *napi) |
||
2155 | + struct napi_struct *napi, |
||
2156 | + u16 queue_id) |
||
2157 | { |
||
2158 | dma_addr_t addr = dpaa2_fd_get_addr(fd); |
||
2159 | u8 fd_format = dpaa2_fd_get_format(fd); |
||
2160 | @@ -235,14 +401,16 @@ static void dpaa2_eth_rx(struct dpaa2_et |
||
2161 | struct dpaa2_fas *fas; |
||
2162 | void *buf_data; |
||
2163 | u32 status = 0; |
||
2164 | + u32 xdp_act; |
||
2165 | |||
2166 | /* Tracing point */ |
||
2167 | trace_dpaa2_rx_fd(priv->net_dev, fd); |
||
2168 | |||
2169 | vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); |
||
2170 | - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); |
||
2171 | + dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, |
||
2172 | + DMA_BIDIRECTIONAL); |
||
2173 | |||
2174 | - fas = dpaa2_get_fas(vaddr); |
||
2175 | + fas = dpaa2_get_fas(vaddr, false); |
||
2176 | prefetch(fas); |
||
2177 | buf_data = vaddr + dpaa2_fd_get_offset(fd); |
||
2178 | prefetch(buf_data); |
||
2179 | @@ -251,22 +419,41 @@ static void dpaa2_eth_rx(struct dpaa2_et |
||
2180 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
||
2181 | |||
2182 | if (fd_format == dpaa2_fd_single) { |
||
2183 | + xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd, |
||
2184 | + queue_id, vaddr); |
||
2185 | + if (xdp_act != XDP_PASS) |
||
2186 | + return; |
||
2187 | + |
||
2188 | + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, |
||
2189 | + DMA_BIDIRECTIONAL); |
||
2190 | skb = build_linear_skb(priv, ch, fd, vaddr); |
||
2191 | } else if (fd_format == dpaa2_fd_sg) { |
||
2192 | + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, |
||
2193 | + DMA_BIDIRECTIONAL); |
||
2194 | skb = build_frag_skb(priv, ch, buf_data); |
||
2195 | skb_free_frag(vaddr); |
||
2196 | percpu_extras->rx_sg_frames++; |
||
2197 | percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); |
||
2198 | } else { |
||
2199 | /* We don't support any other format */ |
||
2200 | - goto err_frame_format; |
||
2201 | + goto drop_cnt; |
||
2202 | } |
||
2203 | |||
2204 | if (unlikely(!skb)) |
||
2205 | - goto err_build_skb; |
||
2206 | + goto drop_fd; |
||
2207 | |||
2208 | prefetch(skb->data); |
||
2209 | |||
2210 | + /* Get the timestamp value */ |
||
2211 | + if (priv->ts_rx_en) { |
||
2212 | + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
||
2213 | + u64 *ns = dpaa2_get_ts(vaddr, false); |
||
2214 | + |
||
2215 | + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); |
||
2216 | + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
||
2217 | + shhwtstamps->hwtstamp = ns_to_ktime(*ns); |
||
2218 | + } |
||
2219 | + |
||
2220 | /* Check if we need to validate the L4 csum */ |
||
2221 | if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { |
||
2222 | status = le32_to_cpu(fas->status); |
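As an aside on the Rx timestamp block added above: the value read through dpaa2_get_ts() is a raw tick count at the WRIOP nominal frequency, converted to nanoseconds in place. Restated as a sketch:

/* Illustrative restatement of the conversion above */
u64 ticks = le64_to_cpup(ns);			/* raw counter */
u64 nanoseconds = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * ticks;
shhwtstamps->hwtstamp = ns_to_ktime(nanoseconds);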
||
2223 | @@ -275,6 +462,12 @@ static void dpaa2_eth_rx(struct dpaa2_et |
||
2224 | |||
2225 | skb->protocol = eth_type_trans(skb, priv->net_dev); |
||
2226 | |||
2227 | + /* Record Rx queue - this will be used when picking a Tx queue to |
||
2228 | + * forward the frames. We're keeping flow affinity through the |
||
2229 | + * network stack. |
||
2230 | + */ |
||
2231 | + skb_record_rx_queue(skb, queue_id); |
||
2232 | + |
||
2233 | percpu_stats->rx_packets++; |
||
2234 | percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); |
||
2235 | |||
2236 | @@ -282,22 +475,74 @@ static void dpaa2_eth_rx(struct dpaa2_et |
||
2237 | |||
2238 | return; |
||
2239 | |||
2240 | -err_build_skb: |
||
2241 | +drop_fd: |
||
2242 | free_rx_fd(priv, fd, vaddr); |
||
2243 | -err_frame_format: |
||
2244 | +drop_cnt: |
||
2245 | percpu_stats->rx_dropped++; |
||
2246 | } |
||
2247 | |||
2248 | +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE |
||
2249 | +/* Processing of Rx frames received on the error FQ |
||
2250 | + * We check and print the error bits and then free the frame |
||
2251 | + */ |
||
2252 | +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, |
||
2253 | + struct dpaa2_eth_channel *ch, |
||
2254 | + const struct dpaa2_fd *fd, |
||
2255 | + struct napi_struct *napi __always_unused, |
||
2256 | + u16 queue_id __always_unused) |
||
2257 | +{ |
||
2258 | + struct device *dev = priv->net_dev->dev.parent; |
||
2259 | + dma_addr_t addr = dpaa2_fd_get_addr(fd); |
||
2260 | + void *vaddr; |
||
2261 | + struct rtnl_link_stats64 *percpu_stats; |
||
2262 | + struct dpaa2_fas *fas; |
||
2263 | + u32 status = 0; |
||
2264 | + u32 fd_errors; |
||
2265 | + bool has_fas_errors = false; |
||
2266 | + |
||
2267 | + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); |
||
2268 | + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); |
||
2269 | + |
||
2270 | + /* check frame errors in the FD field */ |
||
2271 | + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK; |
||
2272 | + if (likely(fd_errors)) { |
||
2273 | + has_fas_errors = (fd_errors & FD_CTRL_FAERR) && |
||
2274 | + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); |
||
2275 | + if (net_ratelimit()) |
||
2276 | + netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n", |
||
2277 | + fd_errors); |
||
2278 | + } |
||
2279 | + |
||
2280 | + /* check frame errors in the FAS field */ |
||
2281 | + if (has_fas_errors) { |
||
2282 | + fas = dpaa2_get_fas(vaddr, false); |
||
2283 | + status = le32_to_cpu(fas->status); |
||
2284 | + if (net_ratelimit()) |
||
2285 | + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n", |
||
2286 | + status & DPAA2_FAS_RX_ERR_MASK); |
||
2287 | + } |
||
2288 | + free_rx_fd(priv, fd, vaddr); |
||
2289 | + |
||
2290 | + percpu_stats = this_cpu_ptr(priv->percpu_stats); |
||
2291 | + percpu_stats->rx_errors++; |
||
2292 | + ch->buf_count--; |
||
2293 | +} |
||
2294 | +#endif |
||
2295 | + |
||
2296 | /* Consume all frames pull-dequeued into the store. This is the simplest way to |
||
2297 | * make sure we don't accidentally issue another volatile dequeue which would |
||
2298 | * overwrite (leak) frames already in the store. |
||
2299 | * |
||
2300 | + * The number of frames is returned using the last 2 output arguments, |
||
2301 | + * separately for Rx and Tx confirmations. |
||
2302 | + * |
||
2303 | * Observance of NAPI budget is not our concern, leaving that to the caller. |
||
2304 | */ |
||
2305 | -static int consume_frames(struct dpaa2_eth_channel *ch) |
||
2306 | +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned, |
||
2307 | + int *tx_conf_cleaned) |
||
2308 | { |
||
2309 | struct dpaa2_eth_priv *priv = ch->priv; |
||
2310 | - struct dpaa2_eth_fq *fq; |
||
2311 | + struct dpaa2_eth_fq *fq = NULL; |
||
2312 | struct dpaa2_dq *dq; |
||
2313 | const struct dpaa2_fd *fd; |
||
2314 | int cleaned = 0; |
||
2315 | @@ -315,14 +560,60 @@ static int consume_frames(struct dpaa2_e |
||
2316 | } |
||
2317 | |||
2318 | fd = dpaa2_dq_fd(dq); |
||
2319 | - fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); |
||
2320 | - fq->stats.frames++; |
||
2321 | + prefetch(fd); |
||
2322 | |||
2323 | - fq->consume(priv, ch, fd, &ch->napi); |
||
2324 | + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); |
||
2325 | + fq->consume(priv, ch, fd, &ch->napi, fq->flowid); |
||
2326 | cleaned++; |
||
2327 | } while (!is_last); |
||
2328 | |||
2329 | - return cleaned; |
||
2330 | + if (!cleaned) |
||
2331 | + return false; |
||
2332 | + |
||
2333 | + /* All frames brought in store by a volatile dequeue |
||
2334 | + * come from the same queue |
||
2335 | + */ |
||
2336 | + if (fq->type == DPAA2_TX_CONF_FQ) { |
||
2337 | + *tx_conf_cleaned += cleaned; |
||
2338 | + } else { |
||
2339 | + *rx_cleaned += cleaned; |
||
2340 | + /* If we processed XDP_REDIRECT frames, flush them now */ |
||
2341 | + /* FIXME: Since we don't actually do anything inside |
||
2342 | + * ndo_xdp_flush, we call it here simply for compliance |
||
2343 | + * reasons |
||
2344 | + */ |
||
2345 | + if (ch->flush) { |
||
2346 | + xdp_do_flush_map(); |
||
2347 | + ch->flush = false; |
||
2348 | + } |
||
2349 | + } |
||
2350 | + |
||
2351 | + fq->stats.frames += cleaned; |
||
2352 | + ch->stats.frames += cleaned; |
||
2353 | + |
||
2354 | + return true; |
||
2355 | +} |
||
2356 | + |
||
2357 | +/* Configure the egress frame annotation for timestamp update */ |
||
2358 | +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) |
||
2359 | +{ |
||
2360 | + struct dpaa2_faead *faead; |
||
2361 | + u32 ctrl, frc; |
||
2362 | + |
||
2363 | + /* Mark the egress frame annotation area as valid */ |
||
2364 | + frc = dpaa2_fd_get_frc(fd); |
||
2365 | + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); |
||
2366 | + |
||
2367 | + /* Set hardware annotation size */ |
||
2368 | + ctrl = dpaa2_fd_get_ctrl(fd); |
||
2369 | + dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); |
||
2370 | + |
||
2371 | + /* enable UPD (update prepanded data) bit in FAEAD field of |
||
2372 | + * hardware frame annotation area |
||
2373 | + */ |
||
2374 | + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; |
||
2375 | + faead = dpaa2_get_faead(buf_start, true); |
||
2376 | + faead->ctrl = cpu_to_le32(ctrl); |
||
2377 | } |
||
2378 | |||
2379 | /* Create a frame descriptor based on a fragmented skb */ |
||
2380 | @@ -341,7 +632,6 @@ static int build_sg_fd(struct dpaa2_eth_ |
||
2381 | int num_sg; |
||
2382 | int num_dma_bufs; |
||
2383 | struct dpaa2_eth_swa *swa; |
||
2384 | - struct dpaa2_fas *fas; |
||
2385 | |||
2386 | /* Create and map scatterlist. |
||
2387 | * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have |
||
2388 | @@ -365,21 +655,14 @@ static int build_sg_fd(struct dpaa2_eth_ |
||
2389 | |||
2390 | /* Prepare the HW SGT structure */ |
||
2391 | sgt_buf_size = priv->tx_data_offset + |
||
2392 | - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); |
||
2393 | - sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); |
||
2394 | + sizeof(struct dpaa2_sg_entry) * num_dma_bufs; |
||
2395 | + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); |
||
2396 | if (unlikely(!sgt_buf)) { |
||
2397 | err = -ENOMEM; |
||
2398 | goto sgt_buf_alloc_failed; |
||
2399 | } |
||
2400 | sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); |
||
2401 | - |
||
2402 | - /* PTA from egress side is passed as is to the confirmation side so |
||
2403 | - * we need to clear some fields here in order to find consistent values |
||
2404 | - * on TX confirmation. We are clearing FAS (Frame Annotation Status) |
||
2405 | - * field from the hardware annotation area |
||
2406 | - */ |
||
2407 | - fas = dpaa2_get_fas(sgt_buf); |
||
2408 | - memset(fas, 0, DPAA2_FAS_SIZE); |
||
2409 | + memset(sgt_buf, 0, sgt_buf_size); |
||
2410 | |||
2411 | sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); |
||
2412 | |||
2413 | @@ -402,10 +685,11 @@ static int build_sg_fd(struct dpaa2_eth_ |
||
2414 | * all of them on Tx Conf. |
||
2415 | */ |
||
2416 | swa = (struct dpaa2_eth_swa *)sgt_buf; |
||
2417 | - swa->skb = skb; |
||
2418 | - swa->scl = scl; |
||
2419 | - swa->num_sg = num_sg; |
||
2420 | - swa->num_dma_bufs = num_dma_bufs; |
||
2421 | + swa->type = DPAA2_ETH_SWA_SG; |
||
2422 | + swa->sg.skb = skb; |
||
2423 | + swa->sg.scl = scl; |
||
2424 | + swa->sg.num_sg = num_sg; |
||
2425 | + swa->sg.sgt_size = sgt_buf_size; |
||
2426 | |||
2427 | /* Separately map the SGT buffer */ |
||
2428 | addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); |
||
2429 | @@ -417,13 +701,15 @@ static int build_sg_fd(struct dpaa2_eth_ |
||
2430 | dpaa2_fd_set_format(fd, dpaa2_fd_sg); |
||
2431 | dpaa2_fd_set_addr(fd, addr); |
||
2432 | dpaa2_fd_set_len(fd, skb->len); |
||
2433 | - dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | |
||
2434 | - DPAA2_FD_CTRL_PTV1); |
||
2435 | + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
||
2436 | + |
||
2437 | + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) |
||
2438 | + enable_tx_tstamp(fd, sgt_buf); |
||
2439 | |||
2440 | return 0; |
||
2441 | |||
2442 | dma_map_single_failed: |
||
2443 | - kfree(sgt_buf); |
||
2444 | + skb_free_frag(sgt_buf); |
||
2445 | sgt_buf_alloc_failed: |
||
2446 | dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); |
||
2447 | dma_map_sg_failed: |
||
2448 | @@ -437,29 +723,27 @@ static int build_single_fd(struct dpaa2_ |
||
2449 | struct dpaa2_fd *fd) |
||
2450 | { |
||
2451 | struct device *dev = priv->net_dev->dev.parent; |
||
2452 | - u8 *buffer_start; |
||
2453 | - struct dpaa2_fas *fas; |
||
2454 | - struct sk_buff **skbh; |
||
2455 | + u8 *buffer_start, *aligned_start; |
||
2456 | + struct dpaa2_eth_swa *swa; |
||
2457 | dma_addr_t addr; |
||
2458 | |||
2459 | - buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - |
||
2460 | - DPAA2_ETH_TX_BUF_ALIGN, |
||
2461 | - DPAA2_ETH_TX_BUF_ALIGN); |
||
2462 | - |
||
2463 | - /* PTA from egress side is passed as is to the confirmation side so |
||
2464 | - * we need to clear some fields here in order to find consistent values |
||
2465 | - * on TX confirmation. We are clearing FAS (Frame Annotation Status) |
||
2466 | - * field from the hardware annotation area |
||
2467 | + buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); |
||
2468 | + |
||
2469 | + /* If there's enough room to align the FD address, do it. |
||
2470 | + * It will help hardware optimize accesses. |
||
2471 | */ |
||
2472 | - fas = dpaa2_get_fas(buffer_start); |
||
2473 | - memset(fas, 0, DPAA2_FAS_SIZE); |
||
2474 | + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, |
||
2475 | + DPAA2_ETH_TX_BUF_ALIGN); |
||
2476 | + if (aligned_start >= skb->head) |
||
2477 | + buffer_start = aligned_start; |
||
2478 | |||
2479 | /* Store a backpointer to the skb at the beginning of the buffer |
||
2480 | * (in the private data area) such that we can release it |
||
2481 | * on Tx confirm |
||
2482 | */ |
||
2483 | - skbh = (struct sk_buff **)buffer_start; |
||
2484 | - *skbh = skb; |
||
2485 | + swa = (struct dpaa2_eth_swa *)buffer_start; |
||
2486 | + swa->type = DPAA2_ETH_SWA_SINGLE; |
||
2487 | + swa->single.skb = skb; |
||
2488 | |||
2489 | addr = dma_map_single(dev, buffer_start, |
||
2490 | skb_tail_pointer(skb) - buffer_start, |
||
2491 | @@ -471,8 +755,10 @@ static int build_single_fd(struct dpaa2_ |
||
2492 | dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); |
||
2493 | dpaa2_fd_set_len(fd, skb->len); |
||
2494 | dpaa2_fd_set_format(fd, dpaa2_fd_single); |
||
2495 | - dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | |
||
2496 | - DPAA2_FD_CTRL_PTV1); |
||
2497 | + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
||
2498 | + |
||
2499 | + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) |
||
2500 | + enable_tx_tstamp(fd, buffer_start); |
||
2501 | |||
2502 | return 0; |
||
2503 | } |
||
2504 | @@ -486,92 +772,128 @@ static int build_single_fd(struct dpaa2_ |
||
2505 | * Optionally, return the frame annotation status word (FAS), which needs |
||
2506 | * to be checked if we're on the confirmation path. |
||
2507 | */ |
||
2508 | -static void free_tx_fd(const struct dpaa2_eth_priv *priv, |
||
2509 | +static void free_tx_fd(struct dpaa2_eth_priv *priv, |
||
2510 | const struct dpaa2_fd *fd, |
||
2511 | - u32 *status) |
||
2512 | + bool in_napi) |
||
2513 | { |
||
2514 | struct device *dev = priv->net_dev->dev.parent; |
||
2515 | dma_addr_t fd_addr; |
||
2516 | - struct sk_buff **skbh, *skb; |
||
2517 | + struct sk_buff *skb = NULL; |
||
2518 | unsigned char *buffer_start; |
||
2519 | - int unmap_size; |
||
2520 | - struct scatterlist *scl; |
||
2521 | - int num_sg, num_dma_bufs; |
||
2522 | struct dpaa2_eth_swa *swa; |
||
2523 | u8 fd_format = dpaa2_fd_get_format(fd); |
||
2524 | - struct dpaa2_fas *fas; |
||
2525 | |||
2526 | fd_addr = dpaa2_fd_get_addr(fd); |
||
2527 | - skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); |
||
2528 | - fas = dpaa2_get_fas(skbh); |
||
2529 | + buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); |
||
2530 | + swa = (struct dpaa2_eth_swa *)buffer_start; |
||
2531 | |||
2532 | if (fd_format == dpaa2_fd_single) { |
||
2533 | - skb = *skbh; |
||
2534 | - buffer_start = (unsigned char *)skbh; |
||
2535 | - /* Accessing the skb buffer is safe before dma unmap, because |
||
2536 | - * we didn't map the actual skb shell. |
||
2537 | - */ |
||
2538 | - dma_unmap_single(dev, fd_addr, |
||
2539 | - skb_tail_pointer(skb) - buffer_start, |
||
2540 | - DMA_BIDIRECTIONAL); |
||
2541 | + if (swa->type == DPAA2_ETH_SWA_SINGLE) { |
||
2542 | + skb = swa->single.skb; |
||
2543 | + /* Accessing the skb buffer is safe before dma unmap, |
||
2544 | + * because we didn't map the actual skb shell. |
||
2545 | + */ |
||
2546 | + dma_unmap_single(dev, fd_addr, |
||
2547 | + skb_tail_pointer(skb) - buffer_start, |
||
2548 | + DMA_BIDIRECTIONAL); |
||
2549 | + } else { |
||
2550 | + WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, |
||
2551 | + "Wrong SWA type"); |
||
2552 | + dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, |
||
2553 | + DMA_BIDIRECTIONAL); |
||
2554 | + } |
||
2555 | } else if (fd_format == dpaa2_fd_sg) { |
||
2556 | - swa = (struct dpaa2_eth_swa *)skbh; |
||
2557 | - skb = swa->skb; |
||
2558 | - scl = swa->scl; |
||
2559 | - num_sg = swa->num_sg; |
||
2560 | - num_dma_bufs = swa->num_dma_bufs; |
||
2561 | + skb = swa->sg.skb; |
||
2562 | |||
2563 | /* Unmap the scatterlist */ |
||
2564 | - dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); |
||
2565 | - kfree(scl); |
||
2566 | + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL); |
||
2567 | + kfree(swa->sg.scl); |
||
2568 | |||
2569 | /* Unmap the SGT buffer */ |
||
2570 | - unmap_size = priv->tx_data_offset + |
||
2571 | - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); |
||
2572 | - dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL); |
||
2573 | + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, |
||
2574 | + DMA_BIDIRECTIONAL); |
||
2575 | } else { |
||
2576 | - /* Unsupported format, mark it as errored and give up */ |
||
2577 | - if (status) |
||
2578 | - *status = ~0; |
||
2579 | + netdev_dbg(priv->net_dev, "Invalid FD format\n"); |
||
2580 | return; |
||
2581 | } |
||
2582 | |||
2583 | - /* Read the status from the Frame Annotation after we unmap the first |
||
2584 | - * buffer but before we free it. The caller function is responsible |
||
2585 | - * for checking the status value. |
||
2586 | - */ |
||
2587 | - if (status) |
||
2588 | - *status = le32_to_cpu(fas->status); |
||
2589 | + if (swa->type == DPAA2_ETH_SWA_XDP) { |
||
2590 | + page_frag_free(buffer_start); |
||
2591 | + return; |
||
2592 | + } |
||
2593 | + |
||
2594 | + /* Get the timestamp value */ |
||
2595 | + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { |
||
2596 | + struct skb_shared_hwtstamps shhwtstamps; |
||
2597 | + u64 *ns; |
||
2598 | + |
||
2599 | + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
||
2600 | + |
||
2601 | + ns = dpaa2_get_ts(buffer_start, true); |
||
2602 | + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); |
||
2603 | + shhwtstamps.hwtstamp = ns_to_ktime(*ns); |
||
2604 | + skb_tstamp_tx(skb, &shhwtstamps); |
||
2605 | + } |
||
2606 | |||
2607 | - /* Free SGT buffer kmalloc'ed on tx */ |
||
2608 | + /* Free SGT buffer allocated on tx */ |
||
2609 | if (fd_format != dpaa2_fd_single) |
||
2610 | - kfree(skbh); |
||
2611 | + skb_free_frag(buffer_start); |
||
2612 | |||
2613 | /* Move on with skb release */ |
||
2614 | - dev_kfree_skb(skb); |
||
2615 | + napi_consume_skb(skb, in_napi); |
||
2616 | } |
||
2617 | |||
2618 | static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) |
||
2619 | { |
||
2620 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
2621 | + struct device *dev = net_dev->dev.parent; |
||
2622 | struct dpaa2_fd fd; |
||
2623 | struct rtnl_link_stats64 *percpu_stats; |
||
2624 | struct dpaa2_eth_drv_stats *percpu_extras; |
||
2625 | struct dpaa2_eth_fq *fq; |
||
2626 | u16 queue_mapping; |
||
2627 | - int err, i; |
||
2628 | + unsigned int needed_headroom; |
||
2629 | + u8 prio; |
||
2630 | + int err, i, ch_id = 0; |
||
2631 | + |
||
2632 | + queue_mapping = skb_get_queue_mapping(skb); |
||
2633 | + prio = netdev_txq_to_tc(net_dev, queue_mapping); |
||
2634 | + |
||
2635 | + /* Hardware interprets priority level 0 as being the highest, |
||
2636 | + * so we need to do a reverse mapping to the netdev tc index |
||
2637 | + */ |
||
2638 | + if (net_dev->num_tc) |
||
2639 | + prio = net_dev->num_tc - prio - 1; |
||
2640 | + |
||
2641 | + queue_mapping %= dpaa2_eth_queue_count(priv); |
||
2642 | + fq = &priv->fq[queue_mapping]; |
||
2643 | + |
||
2644 | + /* If we're congested, stop this tx queue; transmission of |
||
2645 | + * the current skb happens regardless of congestion state |
||
2646 | + */ |
||
2647 | + dma_sync_single_for_cpu(dev, priv->cscn_dma, |
||
2648 | + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); |
||
2649 | + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { |
||
2650 | + netif_stop_subqueue(net_dev, queue_mapping); |
||
2651 | + fq->stats.congestion_entry++; |
||
2652 | + } |
||
2653 | |||
2654 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
||
2655 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
||
2656 | |||
2657 | - if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { |
||
2658 | + /* For non-linear skb we don't need a minimum headroom */ |
||
2659 | + needed_headroom = dpaa2_eth_needed_headroom(priv, skb); |
||
2660 | + if (skb_headroom(skb) < needed_headroom) { |
||
2661 | struct sk_buff *ns; |
||
2662 | |||
2663 | - ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); |
||
2664 | + ns = skb_realloc_headroom(skb, needed_headroom); |
||
2665 | if (unlikely(!ns)) { |
||
2666 | percpu_stats->tx_dropped++; |
||
2667 | goto err_alloc_headroom; |
||
2668 | } |
||
2669 | + percpu_extras->tx_reallocs++; |
||
2670 | + if (skb->sk) |
||
2671 | + skb_set_owner_w(ns, skb->sk); |
||
2672 | dev_kfree_skb(skb); |
||
2673 | skb = ns; |
||
2674 | } |
||
2675 | @@ -605,13 +927,15 @@ static netdev_tx_t dpaa2_eth_tx(struct s |
||
2676 | /* Tracing point */ |
||
2677 | trace_dpaa2_tx_fd(net_dev, &fd); |
||
2678 | |||
2679 | - /* TxConf FQ selection primarily based on cpu affinity; this is |
||
2680 | - * non-migratable context, so it's safe to call smp_processor_id(). |
||
2681 | - */ |
||
2682 | - queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv); |
||
2683 | - fq = &priv->fq[queue_mapping]; |
||
2684 | + if (dpaa2_eth_ceetm_is_enabled(priv)) { |
||
2685 | + err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio); |
||
2686 | + if (err) |
||
2687 | + goto err_ceetm_classify; |
||
2688 | + } |
||
2689 | + |
||
2690 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
||
2691 | - err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, |
||
2692 | + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, |
||
2693 | + priv->tx_qdid, prio, |
||
2694 | fq->tx_qdbin, &fd); |
||
2695 | if (err != -EBUSY) |
||
2696 | break; |
||
2697 | @@ -620,7 +944,7 @@ static netdev_tx_t dpaa2_eth_tx(struct s |
||
2698 | if (unlikely(err < 0)) { |
||
2699 | percpu_stats->tx_errors++; |
||
2700 | /* Clean up everything, including freeing the skb */ |
||
2701 | - free_tx_fd(priv, &fd, NULL); |
||
2702 | + free_tx_fd(priv, &fd, false); |
||
2703 | } else { |
||
2704 | percpu_stats->tx_packets++; |
||
2705 | percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); |
||
2706 | @@ -628,6 +952,8 @@ static netdev_tx_t dpaa2_eth_tx(struct s |
||
2707 | |||
2708 | return NETDEV_TX_OK; |
||
2709 | |||
2710 | +err_ceetm_classify: |
||
2711 | + free_tx_fd(priv, &fd, false); |
||
2712 | err_build_fd: |
||
2713 | err_alloc_headroom: |
||
2714 | dev_kfree_skb(skb); |
||
2715 | @@ -639,13 +965,13 @@ err_alloc_headroom: |
||
2716 | static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, |
||
2717 | struct dpaa2_eth_channel *ch, |
||
2718 | const struct dpaa2_fd *fd, |
||
2719 | - struct napi_struct *napi __always_unused) |
||
2720 | + struct napi_struct *napi __always_unused, |
||
2721 | + u16 queue_id) |
||
2722 | { |
||
2723 | + struct device *dev = priv->net_dev->dev.parent; |
||
2724 | struct rtnl_link_stats64 *percpu_stats; |
||
2725 | struct dpaa2_eth_drv_stats *percpu_extras; |
||
2726 | - u32 status = 0; |
||
2727 | u32 fd_errors; |
||
2728 | - bool has_fas_errors = false; |
||
2729 | |||
2730 | /* Tracing point */ |
||
2731 | trace_dpaa2_tx_conf_fd(priv->net_dev, fd); |
||
2732 | @@ -654,31 +980,28 @@ static void dpaa2_eth_tx_conf(struct dpa |
||
2733 | percpu_extras->tx_conf_frames++; |
||
2734 | percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); |
||
2735 | |||
2736 | - /* Check frame errors in the FD field */ |
||
2737 | - fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; |
||
2738 | - if (unlikely(fd_errors)) { |
||
2739 | - /* We only check error bits in the FAS field if corresponding |
||
2740 | - * FAERR bit is set in FD and the FAS field is marked as valid |
||
2741 | - */ |
||
2742 | - has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) && |
||
2743 | - !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); |
||
2744 | - if (net_ratelimit()) |
||
2745 | - netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", |
||
2746 | - fd_errors); |
||
2747 | + /* Check congestion state and wake all queues if necessary */ |
||
2748 | + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) { |
||
2749 | + dma_sync_single_for_cpu(dev, priv->cscn_dma, |
||
2750 | + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); |
||
2751 | + if (!dpaa2_cscn_state_congested(priv->cscn_mem)) |
||
2752 | + netif_tx_wake_all_queues(priv->net_dev); |
||
2753 | } |
||
2754 | |||
2755 | - free_tx_fd(priv, fd, has_fas_errors ? &status : NULL); |
||
2756 | + /* Check frame errors in the FD field */ |
||
2757 | + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; |
||
2758 | + free_tx_fd(priv, fd, true); |
||
2759 | |||
2760 | if (likely(!fd_errors)) |
||
2761 | return; |
||
2762 | |||
2763 | + if (net_ratelimit()) |
||
2764 | + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", |
||
2765 | + fd_errors); |
||
2766 | + |
||
2767 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
||
2768 | /* Tx-conf logically pertains to the egress path. */ |
||
2769 | percpu_stats->tx_errors++; |
||
2770 | - |
||
2771 | - if (has_fas_errors && net_ratelimit()) |
||
2772 | - netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n", |
||
2773 | - status & DPAA2_FAS_TX_ERR_MASK); |
||
2774 | } |
||
2775 | |||
2776 | static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) |
||
2777 | @@ -728,26 +1051,27 @@ static int set_tx_csum(struct dpaa2_eth_ |
||
2778 | /* Perform a single release command to add buffers |
||
2779 | * to the specified buffer pool |
||
2780 | */ |
||
2781 | -static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) |
||
2782 | +static int add_bufs(struct dpaa2_eth_priv *priv, |
||
2783 | + struct dpaa2_eth_channel *ch, u16 bpid) |
||
2784 | { |
||
2785 | struct device *dev = priv->net_dev->dev.parent; |
||
2786 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
||
2787 | void *buf; |
||
2788 | dma_addr_t addr; |
||
2789 | - int i; |
||
2790 | + int i, err; |
||
2791 | |||
2792 | for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { |
||
2793 | /* Allocate buffer visible to WRIOP + skb shared info + |
||
2794 | * alignment padding |
||
2795 | */ |
||
2796 | - buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); |
||
2797 | + buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); |
||
2798 | if (unlikely(!buf)) |
||
2799 | goto err_alloc; |
||
2800 | |||
2801 | - buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); |
||
2802 | + buf = PTR_ALIGN(buf, priv->rx_buf_align); |
||
2803 | |||
2804 | addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, |
||
2805 | - DMA_FROM_DEVICE); |
||
2806 | + DMA_BIDIRECTIONAL); |
||
2807 | if (unlikely(dma_mapping_error(dev, addr))) |
||
2808 | goto err_map; |
||
2809 | |||
2810 | @@ -755,28 +1079,31 @@ static int add_bufs(struct dpaa2_eth_pri |
||
2811 | |||
2812 | /* tracing point */ |
||
2813 | trace_dpaa2_eth_buf_seed(priv->net_dev, |
||
2814 | - buf, DPAA2_ETH_BUF_RAW_SIZE, |
||
2815 | + buf, dpaa2_eth_buf_raw_size(priv), |
||
2816 | addr, DPAA2_ETH_RX_BUF_SIZE, |
||
2817 | bpid); |
||
2818 | } |
||
2819 | |||
2820 | release_bufs: |
||
2821 | - /* In case the portal is busy, retry until successful. |
||
2822 | - * The buffer release function would only fail if the QBMan portal |
||
2823 | - * was busy, which implies portal contention (i.e. more CPUs than |
||
2824 | - * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, |
||
2825 | - * there is little we can realistically do, short of giving up - |
||
2826 | - * in which case we'd risk depleting the buffer pool and never again |
||
2827 | - * receiving the Rx interrupt which would kick-start the refill logic. |
||
2828 | - * So just keep retrying, at the risk of being moved to ksoftirqd. |
||
2829 | - */ |
||
2830 | - while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) |
||
2831 | + /* In case the portal is busy, retry until successful */ |
||
2832 | + while ((err = dpaa2_io_service_release(ch->dpio, bpid, |
||
2833 | + buf_array, i)) == -EBUSY) |
||
2834 | cpu_relax(); |
||
2835 | + |
||
2836 | + /* If release command failed, clean up and bail out; not much |
||
2837 | + * else we can do about it |
||
2838 | + */ |
||
2839 | + if (err) { |
||
2840 | + free_bufs(priv, buf_array, i); |
||
2841 | + return 0; |
||
2842 | + } |
||
2843 | + |
||
2844 | return i; |
||
2845 | |||
2846 | err_map: |
||
2847 | skb_free_frag(buf); |
||
2848 | err_alloc: |
||
2849 | + /* If we managed to allocate at least some buffers, release them */ |
||
2850 | if (i) |
||
2851 | goto release_bufs; |
||
2852 | |||
@@ -796,9 +1123,10 @@ static int seed_pool(struct dpaa2_eth_pr
 	 */
 	preempt_disable();
 	for (j = 0; j < priv->num_channels; j++) {
-		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+		priv->channel[j]->buf_count = 0;
+		for (i = 0; i < priv->max_bufs_per_ch;
 		     i += DPAA2_ETH_BUFS_PER_CMD) {
-			new_count = add_bufs(priv, bpid);
+			new_count = add_bufs(priv, priv->channel[j], bpid);
 			priv->channel[j]->buf_count += new_count;
 
 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -818,10 +1146,8 @@ static int seed_pool(struct dpaa2_eth_pr
  */
 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 {
-	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *vaddr;
-	int ret, i;
+	int ret;
 
 	do {
 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -830,27 +1156,16 @@ static void drain_bufs(struct dpaa2_eth_
 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
 			return;
 		}
-		for (i = 0; i < ret; i++) {
-			/* Same logic as on regular Rx path */
-			vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
-						   buf_array[i]);
-			dma_unmap_single(dev, buf_array[i],
-					 DPAA2_ETH_RX_BUF_SIZE,
-					 DMA_FROM_DEVICE);
-			skb_free_frag(vaddr);
-		}
+		free_bufs(priv, buf_array, ret);
 	} while (ret);
 }
 
 static void drain_pool(struct dpaa2_eth_priv *priv)
 {
-	int i;
-
+	preempt_disable();
 	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
 	drain_bufs(priv, 1);
-
-	for (i = 0; i < priv->num_channels; i++)
-		priv->channel[i]->buf_count = 0;
+	preempt_enable();
 }
 
 /* Function is called from softirq context only, so we don't need to guard
@@ -862,19 +1177,19 @@ static int refill_pool(struct dpaa2_eth_
 {
 	int new_count;
 
-	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+	if (likely(ch->buf_count >= priv->refill_thresh))
 		return 0;
 
 	do {
-		new_count = add_bufs(priv, bpid);
+		new_count = add_bufs(priv, ch, bpid);
 		if (unlikely(!new_count)) {
 			/* Out of memory; abort for now, we'll try later on */
 			break;
 		}
 		ch->buf_count += new_count;
-	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+	} while (ch->buf_count < priv->max_bufs_per_ch);
 
-	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+	if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
 		return -ENOMEM;
 
 	return 0;
@@ -887,7 +1202,8 @@ static int pull_channel(struct dpaa2_eth
 
 	/* Retry while portal is busy */
 	do {
-		err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+						    ch->store);
 		dequeues++;
 		cpu_relax();
 	} while (err == -EBUSY);
@@ -902,20 +1218,21 @@ static int pull_channel(struct dpaa2_eth
 /* NAPI poll routine
  *
  * Frames are dequeued from the QMan channel associated with this NAPI context.
- * Rx, Tx confirmation and (if configured) Rx error frames all count
- * towards the NAPI budget.
+ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
+ * confirmation frames are limited by a threshold per NAPI poll cycle.
  */
 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 {
 	struct dpaa2_eth_channel *ch;
-	int cleaned = 0, store_cleaned;
+	int rx_cleaned = 0, tx_conf_cleaned = 0;
+	bool store_cleaned;
 	struct dpaa2_eth_priv *priv;
 	int err;
 
 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
 	priv = ch->priv;
 
-	while (cleaned < budget) {
+	do {
 		err = pull_channel(ch);
 		if (unlikely(err))
 			break;
@@ -923,29 +1240,29 @@ static int dpaa2_eth_poll(struct napi_st
 		/* Refill pool if appropriate */
 		refill_pool(priv, ch, priv->bpid);
 
-		store_cleaned = consume_frames(ch);
-		cleaned += store_cleaned;
+		store_cleaned = consume_frames(ch, &rx_cleaned,
+					       &tx_conf_cleaned);
 
-		/* If we have enough budget left for a full store,
-		 * try a new pull dequeue, otherwise we're done here
+		/* If we've either consumed the budget with Rx frames,
+		 * or reached the Tx conf threshold, we're done.
 		 */
-		if (store_cleaned == 0 ||
-		    cleaned > budget - DPAA2_ETH_STORE_SIZE)
-			break;
-	}
-
-	if (cleaned < budget) {
-		napi_complete_done(napi, cleaned);
-		/* Re-enable data available notifications */
-		do {
-			err = dpaa2_io_service_rearm(NULL, &ch->nctx);
-			cpu_relax();
-		} while (err == -EBUSY);
-	}
+		if (rx_cleaned >= budget ||
+		    tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
+			return budget;
+	} while (store_cleaned);
 
-	ch->stats.frames += cleaned;
+	/* We didn't consume the entire budget, so finish NAPI polling and
+	 * re-enable data availability notifications
+	 */
+	napi_complete(napi);
+	do {
+		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+		cpu_relax();
+	} while (err == -EBUSY);
+	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+		  ch->nctx.desired_cpu);
 
-	return cleaned;
+	return max(rx_cleaned, 1);
 }

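The reworked poll routine follows the standard NAPI contract: return the full budget to stay scheduled, or complete NAPI and re-arm event notifications once work is exhausted. A minimal sketch of that contract, independent of the DPAA2 specifics (example_rx() and example_rearm_irq() are hypothetical driver helpers):

	static int example_rx(struct napi_struct *napi, int budget);	/* hypothetical */
	static void example_rearm_irq(struct napi_struct *napi);	/* hypothetical */

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_rx(napi, budget);

		if (work_done < budget) {
			/* No more frames: stop polling, re-enable events */
			napi_complete(napi);
			example_rearm_irq(napi);
		}

		/* Returning 'budget' keeps this poll routine scheduled */
		return work_done;
	}

The dpaa2-eth version deviates only in that Tx confirmations are bounded by their own per-cycle threshold rather than the shared budget.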
 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -1006,28 +1323,30 @@ static int dpaa2_eth_open(struct net_dev
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	int err;
 
-	err = seed_pool(priv, priv->bpid);
-	if (err) {
-		/* Not much to do; the buffer pool, though not filled up,
-		 * may still contain some buffers which would enable us
-		 * to limp on.
-		 */
-		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-			   priv->dpbp_dev->obj_desc.id, priv->bpid);
-	}
-
 	/* We'll only start the txqs when the link is actually ready; make sure
 	 * we don't race against the link up notification, which may come
 	 * immediately after dpni_enable();
 	 */
 	netif_tx_stop_all_queues(net_dev);
-	enable_ch_napi(priv);
+
 	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
 	 * return true and cause 'ip link show' to report the LOWER_UP flag,
 	 * even though the link notification wasn't even received.
 	 */
 	netif_carrier_off(net_dev);
 
+	err = seed_pool(priv, priv->bpid);
+	if (err) {
+		/* Not much to do; the buffer pool, though not filled up,
+		 * may still contain some buffers which would enable us
+		 * to limp on.
+		 */
+		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+			   priv->dpbp_dev->obj_desc.id, priv->bpid);
+	}
+
+	priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
 	if (err < 0) {
 		netdev_err(net_dev, "dpni_enable() failed\n");
@@ -1047,48 +1366,17 @@ static int dpaa2_eth_open(struct net_dev
 
 link_state_err:
 enable_err:
-	disable_ch_napi(priv);
+	priv->refill_thresh = 0;
 	drain_pool(priv);
 	return err;
 }
 
-/* The DPIO store must be empty when we call this,
- * at the end of every NAPI cycle.
- */
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
-			 struct dpaa2_eth_channel *ch)
-{
-	u32 drained = 0, total = 0;
-
-	do {
-		pull_channel(ch);
-		drained = consume_frames(ch);
-		total += drained;
-	} while (drained);
-
-	return total;
-}
-
-static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
-{
-	struct dpaa2_eth_channel *ch;
-	int i;
-	u32 drained = 0;
-
-	for (i = 0; i < priv->num_channels; i++) {
-		ch = priv->channel[i];
-		drained += drain_channel(priv, ch);
-	}
-
-	return drained;
-}
-
 static int dpaa2_eth_stop(struct net_device *net_dev)
 {
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	int dpni_enabled;
-	int retries = 10;
-	u32 drained;
+	int retries = 10, i;
+	int err = 0;
 
 	netif_tx_stop_all_queues(net_dev);
 	netif_carrier_off(net_dev);
@@ -1105,56 +1393,24 @@ static int dpaa2_eth_stop(struct net_dev
 	} while (dpni_enabled && --retries);
 	if (!retries) {
 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
-		/* Must go on and disable NAPI nonetheless, so we don't crash at
-		 * the next "ifconfig up"
+		/* Must go on and finish processing pending frames, so we don't
+		 * crash at the next "ifconfig up"
 		 */
+		err = -ETIMEDOUT;
 	}
 
-	/* Wait for NAPI to complete on every core and disable it.
-	 * In particular, this will also prevent NAPI from being rescheduled if
-	 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
-	 * don't even need to disarm the channels, except perhaps for the case
-	 * of a huge coalescing value.
-	 */
-	disable_ch_napi(priv);
+	priv->refill_thresh = 0;
 
-	/* Manually drain the Rx and TxConf queues */
-	drained = drain_ingress_frames(priv);
-	if (drained)
-		netdev_dbg(net_dev, "Drained %d frames.\n", drained);
+	/* Wait for all running napi poll routines to finish, so that no
+	 * new refill operations are started
+	 */
+	for (i = 0; i < priv->num_channels; i++)
+		napi_synchronize(&priv->channel[i]->napi);
 
 	/* Empty the buffer pool */
 	drain_pool(priv);
 
-	return 0;
-}
-
-static int dpaa2_eth_init(struct net_device *net_dev)
-{
-	u64 supported = 0;
-	u64 not_supported = 0;
-	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-	u32 options = priv->dpni_attrs.options;
-
-	/* Capabilities listing */
-	supported |= IFF_LIVE_ADDR_CHANGE;
-
-	if (options & DPNI_OPT_NO_MAC_FILTER)
-		not_supported |= IFF_UNICAST_FLT;
-	else
-		supported |= IFF_UNICAST_FLT;
-
-	net_dev->priv_flags |= supported;
-	net_dev->priv_flags &= ~not_supported;
-
-	/* Features */
-	net_dev->features = NETIF_F_RXCSUM |
-			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			    NETIF_F_SG | NETIF_F_HIGHDMA |
-			    NETIF_F_LLTX;
-	net_dev->hw_features = net_dev->features;
-
-	return 0;
+	return err;
 }
 
 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
@@ -1200,25 +1456,6 @@ static void dpaa2_eth_get_stats(struct n
 	}
 }
 
-static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
-{
-	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-	int err;
-
-	/* Set the maximum Rx frame length to match the transmit side;
-	 * account for L2 headers when computing the MFL
-	 */
-	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-					(u16)DPAA2_ETH_L2_MAX_FRM(mtu));
-	if (err) {
-		netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
-		return err;
-	}
-
-	net_dev->mtu = mtu;
-	return 0;
-}
-
 /* Copy mac unicast addresses from @net_dev to @priv.
  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
  */
@@ -1380,16 +1617,363 @@ static int dpaa2_eth_set_features(struct
 	return 0;
 }
 
+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config config;
+
+	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		priv->ts_tx_en = false;
+		break;
+	case HWTSTAMP_TX_ON:
+		priv->ts_tx_en = true;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+		priv->ts_rx_en = false;
+	} else {
+		priv->ts_rx_en = true;
+		/* TS is set for all frame types, not only those requested */
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+	}
+
+	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	if (cmd == SIOCSHWTSTAMP)
+		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
+	return -EINVAL;
+}
+
3248 | +static int set_buffer_layout(struct dpaa2_eth_priv *priv) |
||
3249 | +{ |
||
3250 | + struct device *dev = priv->net_dev->dev.parent; |
||
3251 | + struct dpni_buffer_layout buf_layout = {0}; |
||
3252 | + int err; |
||
3253 | + |
||
3254 | + /* We need to check for WRIOP version 1.0.0, but depending on the MC |
||
3255 | + * version, this number is not always provided correctly on rev1. |
||
3256 | + * We need to check for both alternatives in this situation. |
||
3257 | + */ |
||
3258 | + if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || |
||
3259 | + priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) |
||
3260 | + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; |
||
3261 | + else |
||
3262 | + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; |
||
3263 | + |
||
3264 | + /* tx buffer */ |
||
3265 | + buf_layout.pass_timestamp = true; |
||
3266 | + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; |
||
3267 | + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP | |
||
3268 | + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; |
||
3269 | + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
||
3270 | + DPNI_QUEUE_TX, &buf_layout); |
||
3271 | + if (err) { |
||
3272 | + dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); |
||
3273 | + return err; |
||
3274 | + } |
||
3275 | + |
||
3276 | + /* tx-confirm buffer */ |
||
3277 | + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
||
3278 | + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
||
3279 | + DPNI_QUEUE_TX_CONFIRM, &buf_layout); |
||
3280 | + if (err) { |
||
3281 | + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); |
||
3282 | + return err; |
||
3283 | + } |
||
3284 | + |
||
3285 | + /* Now that we've set our tx buffer layout, retrieve the minimum |
||
3286 | + * required tx data offset. |
||
3287 | + */ |
||
3288 | + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, |
||
3289 | + &priv->tx_data_offset); |
||
3290 | + if (err) { |
||
3291 | + dev_err(dev, "dpni_get_tx_data_offset() failed\n"); |
||
3292 | + return err; |
||
3293 | + } |
||
3294 | + |
||
3295 | + if ((priv->tx_data_offset % 64) != 0) |
||
3296 | + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", |
||
3297 | + priv->tx_data_offset); |
||
3298 | + |
||
3299 | + /* rx buffer */ |
||
3300 | + buf_layout.pass_frame_status = true; |
||
3301 | + buf_layout.pass_parser_result = true; |
||
3302 | + buf_layout.data_align = priv->rx_buf_align; |
||
3303 | + buf_layout.private_data_size = 0; |
||
3304 | + buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv); |
||
3305 | + /* If XDP program is attached, reserve extra space for |
||
3306 | + * potential header expansions |
||
3307 | + */ |
||
3308 | + if (priv->has_xdp_prog) |
||
3309 | + buf_layout.data_head_room += XDP_PACKET_HEADROOM; |
||
3310 | + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | |
||
3311 | + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | |
||
3312 | + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | |
||
3313 | + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | |
||
3314 | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
||
3315 | + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
||
3316 | + DPNI_QUEUE_RX, &buf_layout); |
||
3317 | + if (err) { |
||
3318 | + dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); |
||
3319 | + return err; |
||
3320 | + } |
||
3321 | + |
||
3322 | + return 0; |
||
3323 | +} |
||
3324 | + |
||
3325 | +static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog) |
||
3326 | +{ |
||
3327 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
3328 | + struct dpaa2_eth_channel *ch; |
||
3329 | + struct bpf_prog *old_prog = NULL; |
||
3330 | + int i, err; |
||
3331 | + |
||
3332 | + /* No support for SG frames */ |
||
3333 | + if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE) |
||
3334 | + return -EINVAL; |
||
3335 | + |
||
3336 | + if (netif_running(net_dev)) { |
||
3337 | + err = dpaa2_eth_stop(net_dev); |
||
3338 | + if (err) |
||
3339 | + return err; |
||
3340 | + } |
||
3341 | + |
||
3342 | + if (prog) { |
||
3343 | + prog = bpf_prog_add(prog, priv->num_channels - 1); |
||
3344 | + if (IS_ERR(prog)) |
||
3345 | + return PTR_ERR(prog); |
||
3346 | + } |
||
3347 | + |
||
3348 | + priv->has_xdp_prog = !!prog; |
||
3349 | + |
||
3350 | + for (i = 0; i < priv->num_channels; i++) { |
||
3351 | + ch = priv->channel[i]; |
||
3352 | + old_prog = xchg(&ch->xdp_prog, prog); |
||
3353 | + if (old_prog) |
||
3354 | + bpf_prog_put(old_prog); |
||
3355 | + } |
||
3356 | + |
||
3357 | + /* When turning XDP on/off we need to do some reconfiguring |
||
3358 | + * of the Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, |
||
3359 | + * so we are sure no old format buffers will be used from now on |
||
3360 | + */ |
||
3361 | + if (priv->has_xdp_prog != !!old_prog) |
||
3362 | + set_buffer_layout(priv); |
||
3363 | + |
||
3364 | + if (netif_running(net_dev)) { |
||
3365 | + err = dpaa2_eth_open(net_dev); |
||
3366 | + if (err) |
||
3367 | + return err; |
||
3368 | + } |
||
3369 | + |
||
3370 | + return 0; |
||
3371 | +} |
||
3372 | + |
||
3373 | +static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp) |
||
3374 | +{ |
||
3375 | + struct dpaa2_eth_priv *priv = netdev_priv(dev); |
||
3376 | + |
||
3377 | + switch (xdp->command) { |
||
3378 | + case XDP_SETUP_PROG: |
||
3379 | + return dpaa2_eth_set_xdp(dev, xdp->prog); |
||
3380 | + case XDP_QUERY_PROG: |
||
3381 | + xdp->prog_attached = priv->has_xdp_prog; |
||
3382 | + return 0; |
||
3383 | + default: |
||
3384 | + return -EINVAL; |
||
3385 | + } |
||
3386 | +} |
||
3387 | + |
||
3388 | +static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, struct xdp_buff *xdp) |
||
3389 | +{ |
||
3390 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
3391 | + struct device *dev = net_dev->dev.parent; |
||
3392 | + struct rtnl_link_stats64 *percpu_stats; |
||
3393 | + struct dpaa2_eth_drv_stats *percpu_extras; |
||
3394 | + unsigned int needed_headroom; |
||
3395 | + struct dpaa2_eth_swa *swa; |
||
3396 | + struct dpaa2_eth_fq *fq; |
||
3397 | + struct dpaa2_fd fd; |
||
3398 | + void *buffer_start, *aligned_start; |
||
3399 | + dma_addr_t addr; |
||
3400 | + int err, i; |
||
3401 | + |
||
3402 | + if (!netif_running(net_dev)) |
||
3403 | + return -ENETDOWN; |
||
3404 | + |
||
3405 | + /* We require a minimum headroom to be able to transmit the frame. |
||
3406 | + * Otherwise return an error and let the original net_device handle it |
||
3407 | + */ |
||
3408 | + /* TODO: Do we update i/f counters here or just on the Rx device? */ |
||
3409 | + needed_headroom = dpaa2_eth_needed_headroom(priv, NULL); |
||
3410 | + if (xdp->data < xdp->data_hard_start || |
||
3411 | + xdp->data - xdp->data_hard_start < needed_headroom) { |
||
3412 | + percpu_stats->tx_dropped++; |
||
3413 | + return -EINVAL; |
||
3414 | + } |
||
3415 | + |
||
3416 | + percpu_stats = this_cpu_ptr(priv->percpu_stats); |
||
3417 | + percpu_extras = this_cpu_ptr(priv->percpu_extras); |
||
3418 | + |
||
3419 | + /* Setup the FD fields */ |
||
3420 | + memset(&fd, 0, sizeof(fd)); |
||
3421 | + |
||
3422 | + /* Align FD address, if possible */ |
||
3423 | + buffer_start = xdp->data - needed_headroom; |
||
3424 | + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, |
||
3425 | + DPAA2_ETH_TX_BUF_ALIGN); |
||
3426 | + if (aligned_start >= xdp->data_hard_start) |
||
3427 | + buffer_start = aligned_start; |
||
3428 | + |
||
3429 | + swa = (struct dpaa2_eth_swa *)buffer_start; |
||
3430 | + /* fill in necessary fields here */ |
||
3431 | + swa->type = DPAA2_ETH_SWA_XDP; |
||
3432 | + swa->xdp.dma_size = xdp->data_end - buffer_start; |
||
3433 | + |
||
3434 | + addr = dma_map_single(dev, buffer_start, |
||
3435 | + xdp->data_end - buffer_start, |
||
3436 | + DMA_BIDIRECTIONAL); |
||
3437 | + if (unlikely(dma_mapping_error(dev, addr))) { |
||
3438 | + percpu_stats->tx_dropped++; |
||
3439 | + return -ENOMEM; |
||
3440 | + } |
||
3441 | + |
||
3442 | + dpaa2_fd_set_addr(&fd, addr); |
||
3443 | + dpaa2_fd_set_offset(&fd, xdp->data - buffer_start); |
||
3444 | + dpaa2_fd_set_len(&fd, xdp->data_end - xdp->data); |
||
3445 | + dpaa2_fd_set_format(&fd, dpaa2_fd_single); |
||
3446 | + dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); |
||
3447 | + |
||
3448 | + fq = &priv->fq[smp_processor_id()]; |
||
3449 | + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
||
3450 | + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, |
||
3451 | + fq->tx_qdbin, &fd); |
||
3452 | + if (err != -EBUSY) |
||
3453 | + break; |
||
3454 | + } |
||
3455 | + percpu_extras->tx_portal_busy += i; |
||
3456 | + if (unlikely(err < 0)) { |
||
3457 | + percpu_stats->tx_errors++; |
||
3458 | + /* let the Rx device handle the cleanup */ |
||
3459 | + return err; |
||
3460 | + } |
||
3461 | + |
||
3462 | + percpu_stats->tx_packets++; |
||
3463 | + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); |
||
3464 | + |
||
3465 | + return 0; |
||
3466 | +} |
||
3467 | + |
||
3468 | +static void dpaa2_eth_xdp_flush(struct net_device *net_dev) |
||
3469 | +{ |
||
3470 | + /* We don't have hardware support for Tx batching, |
||
3471 | + * so we do the actual frame enqueue in ndo_xdp_xmit |
||
3472 | + */ |
||
3473 | +} |
||
3474 | + |
||
3475 | +static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv) |
||
3476 | +{ |
||
3477 | + struct net_device *net_dev = priv->net_dev; |
||
3478 | + unsigned int i, num_queues; |
||
3479 | + struct cpumask xps_mask; |
||
3480 | + struct dpaa2_eth_fq *fq; |
||
3481 | + int err = 0; |
||
3482 | + |
||
3483 | + num_queues = (net_dev->num_tc ? : 1) * dpaa2_eth_queue_count(priv); |
||
3484 | + for (i = 0; i < num_queues; i++) { |
||
3485 | + fq = &priv->fq[i % dpaa2_eth_queue_count(priv)]; |
||
3486 | + cpumask_clear(&xps_mask); |
||
3487 | + cpumask_set_cpu(fq->target_cpu, &xps_mask); |
||
3488 | + err = netif_set_xps_queue(net_dev, &xps_mask, i); |
||
3489 | + if (err) { |
||
3490 | + dev_info_once(net_dev->dev.parent, |
||
3491 | + "Error setting XPS queue\n"); |
||
3492 | + break; |
||
3493 | + } |
||
3494 | + } |
||
3495 | + |
||
3496 | + return err; |
||
3497 | +} |
||
3498 | + |
||
3499 | +static int dpaa2_eth_setup_tc(struct net_device *net_dev, |
||
3500 | + enum tc_setup_type type, |
||
3501 | + void *type_data) |
||
3502 | +{ |
||
3503 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
3504 | + struct tc_mqprio_qopt *mqprio = (struct tc_mqprio_qopt *)type_data; |
||
3505 | + int i, err = 0; |
||
3506 | + |
||
3507 | + if (type != TC_SETUP_MQPRIO) |
||
3508 | + return -EINVAL; |
||
3509 | + |
||
3510 | + if (mqprio->num_tc > dpaa2_eth_tc_count(priv)) { |
||
3511 | + netdev_err(net_dev, "Max %d traffic classes supported\n", |
||
3512 | + dpaa2_eth_tc_count(priv)); |
||
3513 | + return -EINVAL; |
||
3514 | + } |
||
3515 | + |
||
3516 | + if (mqprio->num_tc == net_dev->num_tc) |
||
3517 | + return 0; |
||
3518 | + |
||
3519 | + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
||
3520 | + |
||
3521 | + if (!mqprio->num_tc) { |
||
3522 | + netdev_reset_tc(net_dev); |
||
3523 | + err = netif_set_real_num_tx_queues(net_dev, |
||
3524 | + dpaa2_eth_queue_count(priv)); |
||
3525 | + if (err) |
||
3526 | + return err; |
||
3527 | + |
||
3528 | + goto update_xps; |
||
3529 | + } |
||
3530 | + |
||
3531 | + err = netdev_set_num_tc(net_dev, mqprio->num_tc); |
||
3532 | + if (err) |
||
3533 | + return err; |
||
3534 | + |
||
3535 | + err = netif_set_real_num_tx_queues(net_dev, mqprio->num_tc * |
||
3536 | + dpaa2_eth_queue_count(priv)); |
||
3537 | + if (err) |
||
3538 | + return err; |
||
3539 | + |
||
3540 | + for (i = 0; i < mqprio->num_tc; i++) { |
||
3541 | + err = netdev_set_tc_queue(net_dev, i, |
||
3542 | + dpaa2_eth_queue_count(priv), |
||
3543 | + i * dpaa2_eth_queue_count(priv)); |
||
3544 | + if (err) |
||
3545 | + return err; |
||
3546 | + } |
||
3547 | + |
||
3548 | +update_xps: |
||
3549 | + err = dpaa2_eth_update_xps(priv); |
||
3550 | + return err; |
||
3551 | +} |
||
3552 | + |
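With mqprio enabled, the netdev exposes num_tc * queue_count Tx queues, and update_xps() pins queue i to the CPU serving hardware flow i % queue_count. A worked sketch of the mapping this implies (the concrete counts are illustrative):

	/* E.g. with 8 hardware flows and 4 traffic classes the netdev has 32
	 * Tx queues; queue q maps to (tc = q / 8, flow = q % 8), and inherits
	 * the XPS affinity of flow q % 8.
	 */
	static void example_queue_map(unsigned int q, unsigned int queue_count,
				      unsigned int *tc, unsigned int *flow)
	{
		*tc = q / queue_count;
		*flow = q % queue_count;
	}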
 static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_open = dpaa2_eth_open,
 	.ndo_start_xmit = dpaa2_eth_tx,
 	.ndo_stop = dpaa2_eth_stop,
-	.ndo_init = dpaa2_eth_init,
 	.ndo_set_mac_address = dpaa2_eth_set_addr,
 	.ndo_get_stats64 = dpaa2_eth_get_stats,
-	.ndo_change_mtu = dpaa2_eth_change_mtu,
 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
 	.ndo_set_features = dpaa2_eth_set_features,
+	.ndo_do_ioctl = dpaa2_eth_ioctl,
+	.ndo_xdp = dpaa2_eth_xdp,
+	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+	.ndo_xdp_flush = dpaa2_eth_xdp_flush,
+	.ndo_setup_tc = dpaa2_eth_setup_tc,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1422,34 +2006,32 @@ static struct fsl_mc_device *setup_dpcon
 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
 	if (err) {
 		dev_err(dev, "dpcon_open() failed\n");
-		goto err_open;
+		goto free;
 	}
 
 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
 	if (err) {
 		dev_err(dev, "dpcon_reset() failed\n");
-		goto err_reset;
+		goto close;
 	}
 
 	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
 	if (err) {
 		dev_err(dev, "dpcon_get_attributes() failed\n");
-		goto err_get_attr;
+		goto close;
 	}
 
 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
 	if (err) {
 		dev_err(dev, "dpcon_enable() failed\n");
-		goto err_enable;
+		goto close;
 	}
 
 	return dpcon;
 
-err_enable:
-err_get_attr:
-err_reset:
+close:
 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-err_open:
+free:
 	fsl_mc_object_free(dpcon);
 
 	return NULL;
@@ -1502,7 +2084,14 @@ err_setup:
 static void free_channel(struct dpaa2_eth_priv *priv,
 			 struct dpaa2_eth_channel *channel)
 {
+	struct bpf_prog *prog;
+
 	free_dpcon(priv, channel->dpcon);
+
+	prog = READ_ONCE(channel->xdp_prog);
+	if (prog)
+		bpf_prog_put(prog);
+
 	kfree(channel);
 }
 
@@ -1546,7 +2135,8 @@ static int setup_dpio(struct dpaa2_eth_p
 		nctx->desired_cpu = i;
 
 		/* Register the new context */
-		err = dpaa2_io_service_register(NULL, nctx);
+		channel->dpio = dpaa2_io_service_select(i);
+		err = dpaa2_io_service_register(channel->dpio, nctx);
 		if (err) {
 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
 			/* If no affine DPIO for this core, there's probably
@@ -1586,7 +2176,7 @@ static int setup_dpio(struct dpaa2_eth_p
 	return 0;
 
 err_set_cdan:
-	dpaa2_io_service_deregister(NULL, nctx);
+	dpaa2_io_service_deregister(channel->dpio, nctx);
 err_service_reg:
 	free_channel(priv, channel);
 err_alloc_ch:
@@ -1609,7 +2199,7 @@ static void free_dpio(struct dpaa2_eth_p
 	/* deregister CDAN notifications and free channels */
 	for (i = 0; i < priv->num_channels; i++) {
 		ch = priv->channel[i];
-		dpaa2_io_service_deregister(NULL, &ch->nctx);
+		dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
 		free_channel(priv, ch);
 	}
 }
@@ -1636,8 +2226,7 @@ static void set_fq_affinity(struct dpaa2
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpaa2_eth_fq *fq;
-	int rx_cpu, txc_cpu;
-	int i;
+	int rx_cpu, txc_cpu, i;
 
 	/* For each FQ, pick one channel/CPU to deliver frames to.
 	 * This may well change at runtime, either through irqbalance or
@@ -1649,6 +2238,7 @@ static void set_fq_affinity(struct dpaa2
 		fq = &priv->fq[i];
 		switch (fq->type) {
 		case DPAA2_RX_FQ:
+		case DPAA2_RX_ERR_FQ:
 			fq->target_cpu = rx_cpu;
 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
 			if (rx_cpu >= nr_cpu_ids)
@@ -1656,6 +2246,7 @@ static void set_fq_affinity(struct dpaa2
 			break;
 		case DPAA2_TX_CONF_FQ:
 			fq->target_cpu = txc_cpu;
+
 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
 			if (txc_cpu >= nr_cpu_ids)
 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
@@ -1665,11 +2256,13 @@ static void set_fq_affinity(struct dpaa2
 		}
 		fq->channel = get_affine_channel(priv, fq->target_cpu);
 	}
+
+	dpaa2_eth_update_xps(priv);
 }
 
 static void setup_fqs(struct dpaa2_eth_priv *priv)
 {
-	int i;
+	int i, j;
 
 	/* We have one TxConf FQ per Tx flow.
 	 * The number of Tx and Rx queues is the same.
@@ -1681,11 +2274,19 @@ static void setup_fqs(struct dpaa2_eth_p
 		priv->fq[priv->num_fqs++].flowid = (u16)i;
 	}
 
-	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-		priv->fq[priv->num_fqs++].flowid = (u16)i;
-	}
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
+		for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
+			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+			priv->fq[priv->num_fqs].tc = (u8)i;
+			priv->fq[priv->num_fqs++].flowid = (u16)j;
+		}
+
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+	/* We have exactly one Rx error queue per DPNI */
+	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+#endif
 
 	/* For each FQ, decide on which core to process incoming frames */
 	set_fq_affinity(priv);
@@ -1735,6 +2336,9 @@ static int setup_dpbp(struct dpaa2_eth_p
 	}
 	priv->bpid = dpbp_attrs.bpid;
 
+	/* By default we start with flow control enabled */
+	priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
+
 	return 0;
 
 err_get_attr:
@@ -1756,13 +2360,59 @@ static void free_dpbp(struct dpaa2_eth_p
 	fsl_mc_object_free(priv->dpbp_dev);
 }
 
+static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
+{
+	struct dpni_congestion_notification_cfg notif_cfg = {0};
+	struct device *dev = priv->net_dev->dev.parent;
+	int err;
+
+	priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+				       GFP_KERNEL);
+
+	if (!priv->cscn_unaligned)
+		return -ENOMEM;
+
+	priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
+	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, priv->cscn_dma)) {
+		dev_err(dev, "Error mapping CSCN memory area\n");
+		err = -ENOMEM;
+		goto err_dma_map;
+	}
+
+	notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
+	notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
+	notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
+	notif_cfg.message_ctx = (u64)priv;
+	notif_cfg.message_iova = priv->cscn_dma;
+	notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+				      DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+				      DPNI_CONG_OPT_COHERENT_WRITE;
+	err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
+					       DPNI_QUEUE_TX, 0, &notif_cfg);
+	if (err) {
+		dev_err(dev, "dpni_set_congestion_notification failed\n");
+		goto err_set_cong;
+	}
+
+	return 0;
+
+err_set_cong:
+	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+	kfree(priv->cscn_unaligned);
+
+	return err;
+}
+
3780 | + |
||
3781 | /* Configure the DPNI object this interface is associated with */ |
||
3782 | static int setup_dpni(struct fsl_mc_device *ls_dev) |
||
3783 | { |
||
3784 | struct device *dev = &ls_dev->dev; |
||
3785 | struct dpaa2_eth_priv *priv; |
||
3786 | struct net_device *net_dev; |
||
3787 | - struct dpni_buffer_layout buf_layout = {0}; |
||
3788 | + struct dpni_link_cfg cfg = {0}; |
||
3789 | int err; |
||
3790 | |||
3791 | net_dev = dev_get_drvdata(dev); |
||
3792 | @@ -1772,7 +2422,22 @@ static int setup_dpni(struct fsl_mc_devi |
||
3793 | err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); |
||
3794 | if (err) { |
||
3795 | dev_err(dev, "dpni_open() failed\n"); |
||
3796 | - goto err_open; |
||
3797 | + return err; |
||
3798 | + } |
||
3799 | + |
||
3800 | + /* Check if we can work with this DPNI object */ |
||
3801 | + err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, |
||
3802 | + &priv->dpni_ver_minor); |
||
3803 | + if (err) { |
||
3804 | + dev_err(dev, "dpni_get_api_version() failed\n"); |
||
3805 | + goto close; |
||
3806 | + } |
||
3807 | + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { |
||
3808 | + dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", |
||
3809 | + priv->dpni_ver_major, priv->dpni_ver_minor, |
||
3810 | + DPNI_VER_MAJOR, DPNI_VER_MINOR); |
||
3811 | + err = -ENOTSUPP; |
||
3812 | + goto close; |
||
3813 | } |
||
3814 | |||
3815 | ls_dev->mc_io = priv->mc_io; |
||
3816 | @@ -1781,82 +2446,53 @@ static int setup_dpni(struct fsl_mc_devi |
||
3817 | err = dpni_reset(priv->mc_io, 0, priv->mc_token); |
||
3818 | if (err) { |
||
3819 | dev_err(dev, "dpni_reset() failed\n"); |
||
3820 | - goto err_reset; |
||
3821 | + goto close; |
||
3822 | } |
||
3823 | |||
3824 | err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, |
||
3825 | &priv->dpni_attrs); |
||
3826 | if (err) { |
||
3827 | dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); |
||
3828 | - goto err_get_attr; |
||
3829 | + goto close; |
||
3830 | } |
||
3831 | |||
3832 | - /* Configure buffer layouts */ |
||
3833 | - /* rx buffer */ |
||
3834 | - buf_layout.pass_parser_result = true; |
||
3835 | - buf_layout.pass_frame_status = true; |
||
3836 | - buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; |
||
3837 | - buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; |
||
3838 | - buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | |
||
3839 | - DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | |
||
3840 | - DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | |
||
3841 | - DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; |
||
3842 | - err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
||
3843 | - DPNI_QUEUE_RX, &buf_layout); |
||
3844 | - if (err) { |
||
3845 | - dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); |
||
3846 | - goto err_buf_layout; |
||
3847 | - } |
||
3848 | + err = set_buffer_layout(priv); |
||
3849 | + if (err) |
||
3850 | + goto close; |
||
3851 | |||
3852 | - /* tx buffer */ |
||
3853 | - buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | |
||
3854 | - DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; |
||
3855 | - err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
||
3856 | - DPNI_QUEUE_TX, &buf_layout); |
||
3857 | - if (err) { |
||
3858 | - dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); |
||
3859 | - goto err_buf_layout; |
||
3860 | - } |
||
3861 | + /* Enable congestion notifications for Tx queues */ |
||
3862 | + err = setup_tx_congestion(priv); |
||
3863 | + if (err) |
||
3864 | + goto close; |
||
3865 | |||
3866 | - /* tx-confirm buffer */ |
||
3867 | - buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; |
||
3868 | - err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
||
3869 | - DPNI_QUEUE_TX_CONFIRM, &buf_layout); |
||
3870 | - if (err) { |
||
3871 | - dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); |
||
3872 | - goto err_buf_layout; |
||
3873 | - } |
||
3874 | + /* allocate classification rule space */ |
||
3875 | + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * |
||
3876 | + dpaa2_eth_fs_count(priv), GFP_KERNEL); |
||
3877 | + if (!priv->cls_rule) |
||
3878 | + goto close; |
||
3879 | |||
3880 | - /* Now that we've set our tx buffer layout, retrieve the minimum |
||
3881 | - * required tx data offset. |
||
3882 | - */ |
||
3883 | - err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, |
||
3884 | - &priv->tx_data_offset); |
||
3885 | + /* Enable flow control */ |
||
3886 | + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE; |
||
3887 | + priv->tx_pause_frames = true; |
||
3888 | + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); |
||
3889 | if (err) { |
||
3890 | - dev_err(dev, "dpni_get_tx_data_offset() failed\n"); |
||
3891 | - goto err_data_offset; |
||
3892 | + dev_err(dev, "dpni_set_link_cfg() failed\n"); |
||
3893 | + goto cls_free; |
||
3894 | } |
||
3895 | |||
3896 | - if ((priv->tx_data_offset % 64) != 0) |
||
3897 | - dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", |
||
3898 | - priv->tx_data_offset); |
||
3899 | - |
||
3900 | - /* Accommodate software annotation space (SWA) */ |
||
3901 | - priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; |
||
3902 | - |
||
3903 | return 0; |
||
3904 | |||
3905 | -err_data_offset: |
||
3906 | -err_buf_layout: |
||
3907 | -err_get_attr: |
||
3908 | -err_reset: |
||
3909 | +cls_free: |
||
3910 | + kfree(priv->cls_rule); |
||
3911 | +close: |
||
3912 | dpni_close(priv->mc_io, 0, priv->mc_token); |
||
3913 | -err_open: |
||
3914 | + |
||
3915 | return err; |
||
3916 | } |
||
3917 | |||
3918 | static void free_dpni(struct dpaa2_eth_priv *priv) |
||
3919 | { |
||
3920 | + struct device *dev = priv->net_dev->dev.parent; |
||
3921 | int err; |
||
3922 | |||
3923 | err = dpni_reset(priv->mc_io, 0, priv->mc_token); |
||
3924 | @@ -1865,6 +2501,11 @@ static void free_dpni(struct dpaa2_eth_p |
||
3925 | err); |
||
3926 | |||
3927 | dpni_close(priv->mc_io, 0, priv->mc_token); |
||
3928 | + |
||
3929 | + kfree(priv->cls_rule); |
||
3930 | + |
||
3931 | + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); |
||
3932 | + kfree(priv->cscn_unaligned); |
||
3933 | } |
||
3934 | |||
3935 | static int setup_rx_flow(struct dpaa2_eth_priv *priv, |
||
3936 | @@ -1873,11 +2514,10 @@ static int setup_rx_flow(struct dpaa2_et |
||
3937 | struct device *dev = priv->net_dev->dev.parent; |
||
3938 | struct dpni_queue queue; |
||
3939 | struct dpni_queue_id qid; |
||
3940 | - struct dpni_taildrop td; |
||
3941 | int err; |
||
3942 | |||
3943 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, |
||
3944 | - DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); |
||
3945 | + DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); |
||
3946 | if (err) { |
||
3947 | dev_err(dev, "dpni_get_queue(RX) failed\n"); |
||
3948 | return err; |
||
3949 | @@ -1890,7 +2530,7 @@ static int setup_rx_flow(struct dpaa2_et |
||
3950 | queue.destination.priority = 1; |
||
3951 | queue.user_context = (u64)(uintptr_t)fq; |
||
3952 | err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, |
||
3953 | - DPNI_QUEUE_RX, 0, fq->flowid, |
||
3954 | + DPNI_QUEUE_RX, fq->tc, fq->flowid, |
||
3955 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, |
||
3956 | &queue); |
||
3957 | if (err) { |
||
3958 | @@ -1898,15 +2538,121 @@ static int setup_rx_flow(struct dpaa2_et |
||
3959 | return err; |
||
3960 | } |
||
3961 | |||
3962 | - td.enable = 1; |
||
3963 | - td.threshold = DPAA2_ETH_TAILDROP_THRESH; |
||
3964 | - err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, |
||
3965 | - DPNI_QUEUE_RX, 0, fq->flowid, &td); |
||
3966 | - if (err) { |
||
3967 | - dev_err(dev, "dpni_set_threshold() failed\n"); |
||
3968 | - return err; |
||
3969 | + return 0; |
||
3970 | +} |
||
3971 | + |
||
3972 | +static int set_queue_taildrop(struct dpaa2_eth_priv *priv, |
||
3973 | + struct dpni_taildrop *td) |
||
3974 | +{ |
||
3975 | + struct device *dev = priv->net_dev->dev.parent; |
||
3976 | + int i, err; |
||
3977 | + |
||
3978 | + for (i = 0; i < priv->num_fqs; i++) { |
||
3979 | + if (priv->fq[i].type != DPAA2_RX_FQ) |
||
3980 | + continue; |
||
3981 | + |
||
3982 | + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, |
||
3983 | + DPNI_CP_QUEUE, DPNI_QUEUE_RX, |
||
3984 | + priv->fq[i].tc, priv->fq[i].flowid, |
||
3985 | + td); |
||
3986 | + if (err) { |
||
3987 | + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); |
||
3988 | + return err; |
||
3989 | + } |
||
3990 | + |
||
3991 | + dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n", |
||
3992 | + (td->enable ? "Enabled" : "Disabled"), |
||
3993 | + priv->fq[i].flowid, priv->fq[i].tc); |
||
3994 | + } |
||
3995 | + |
||
3996 | + return 0; |
||
3997 | +} |
||
3998 | + |
||
3999 | +static int set_group_taildrop(struct dpaa2_eth_priv *priv, |
||
4000 | + struct dpni_taildrop *td) |
||
4001 | +{ |
||
4002 | + struct device *dev = priv->net_dev->dev.parent; |
||
4003 | + struct dpni_taildrop disable_td, *tc_td; |
||
4004 | + int i, err; |
||
4005 | + |
||
4006 | + memset(&disable_td, 0, sizeof(struct dpni_taildrop)); |
||
4007 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4008 | + if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i)) |
||
4009 | + /* Do not set taildrop thresholds for PFC-enabled |
||
4010 | + * traffic classes. We will enable congestion |
||
4011 | + * notifications for them. |
||
4012 | + */ |
||
4013 | + tc_td = &disable_td; |
||
4014 | + else |
||
4015 | + tc_td = td; |
||
4016 | + |
||
4017 | + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, |
||
4018 | + DPNI_CP_GROUP, DPNI_QUEUE_RX, |
||
4019 | + i, 0, tc_td); |
||
4020 | + if (err) { |
||
4021 | + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); |
||
4022 | + return err; |
||
4023 | + } |
||
4024 | + |
||
4025 | + dev_dbg(dev, "%s taildrop for Rx group tc %d\n", |
||
4026 | + (tc_td->enable ? "Enabled" : "Disabled"), |
||
4027 | + i); |
||
4028 | + } |
||
4029 | + |
||
4030 | + return 0; |
||
4031 | +} |
||
4032 | + |
||
4033 | +/* Enable/disable Rx FQ taildrop |
||
4034 | + * |
||
4035 | + * Rx FQ taildrop is mutually exclusive with flow control and it only gets |
||
4036 | + * disabled when FC is active. Depending on FC status, we need to compute |
||
4037 | + * the maximum number of buffers in the pool differently, so use the |
||
4038 | + * opportunity to update max number of buffers as well. |
||
4039 | + */ |
||
4040 | +int set_rx_taildrop(struct dpaa2_eth_priv *priv) |
||
4041 | +{ |
||
4042 | + enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv); |
||
4043 | + struct dpni_taildrop td_queue, td_group; |
||
4044 | + int err = 0; |
||
4045 | + |
||
4046 | + switch (cfg) { |
||
4047 | + case DPAA2_ETH_TD_NONE: |
||
4048 | + memset(&td_queue, 0, sizeof(struct dpni_taildrop)); |
||
4049 | + memset(&td_group, 0, sizeof(struct dpni_taildrop)); |
||
4050 | + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / |
||
4051 | + priv->num_channels; |
||
4052 | + break; |
||
4053 | + case DPAA2_ETH_TD_QUEUE: |
||
4054 | + memset(&td_group, 0, sizeof(struct dpni_taildrop)); |
||
4055 | + td_queue.enable = 1; |
||
4056 | + td_queue.units = DPNI_CONGESTION_UNIT_BYTES; |
||
4057 | + td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH / |
||
4058 | + dpaa2_eth_tc_count(priv); |
||
4059 | + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH; |
||
4060 | + break; |
||
4061 | + case DPAA2_ETH_TD_GROUP: |
||
4062 | + memset(&td_queue, 0, sizeof(struct dpni_taildrop)); |
||
4063 | + td_group.enable = 1; |
||
4064 | + td_group.units = DPNI_CONGESTION_UNIT_FRAMES; |
||
4065 | + td_group.threshold = NAPI_POLL_WEIGHT * |
||
4066 | + dpaa2_eth_queue_count(priv); |
||
4067 | + priv->max_bufs_per_ch = NAPI_POLL_WEIGHT * |
||
4068 | + dpaa2_eth_tc_count(priv); |
||
4069 | + break; |
||
4070 | + default: |
||
4071 | + break; |
||
4072 | } |
||
4073 | |||
4074 | + err = set_queue_taildrop(priv, &td_queue); |
||
4075 | + if (err) |
||
4076 | + return err; |
||
4077 | + |
||
4078 | + err = set_group_taildrop(priv, &td_group); |
||
4079 | + if (err) |
||
4080 | + return err; |
||
4081 | + |
||
4082 | + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv); |
||
4083 | + |
||
4084 | return 0; |
||
4085 | } |
||
4086 | |||
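As a concrete illustration of the threshold arithmetic above: with per-queue taildrop, the byte budget DPAA2_ETH_TAILDROP_THRESH is split evenly across traffic classes, so each of the queue_count * tc_count Rx FQs gets THRESH / tc_count bytes; with group taildrop, each traffic class group is budgeted NAPI_POLL_WEIGHT frames per queue. A sketch with illustrative counts (NAPI_POLL_WEIGHT is 64 in mainline kernels):

	/* E.g. 8 Rx queues: each traffic class group may hold at most
	 * 64 * 8 = 512 frames before taildrop kicks in.
	 */
	static u32 example_group_taildrop_frames(unsigned int queue_count)
	{
		return NAPI_POLL_WEIGHT * queue_count;
	}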
@@ -1953,23 +2699,88 @@ static int setup_tx_flow(struct dpaa2_et
 	return 0;
 }
 
-/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
-static const struct dpaa2_eth_hash_fields hash_fields[] = {
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue q = { { 0 } };
+	struct dpni_queue_id qid;
+	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+	int err;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	q.destination.id = fq->channel->dpcon_id;
+	q.destination.type = DPNI_DEST_DPCON;
+	q.destination.priority = 1;
+	q.user_context = (u64)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+	if (err) {
+		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+		return err;
+	}
+
+	return 0;
+}
+#endif
+
+/* default hash key fields */
+static struct dpaa2_eth_dist_fields default_dist_fields[] = {
 	{
+		/* L2 header */
+		.rxnfc_field = RXH_L2DA,
+		.cls_prot = NET_PROT_ETH,
+		.cls_field = NH_FLD_ETH_DA,
+		.id = DPAA2_ETH_DIST_ETHDST,
+		.size = 6,
+	}, {
+		.cls_prot = NET_PROT_ETH,
+		.cls_field = NH_FLD_ETH_SA,
+		.id = DPAA2_ETH_DIST_ETHSRC,
+		.size = 6,
+	}, {
+		/* This is the last ethertype field parsed:
+		 * depending on frame format, it can be the MAC ethertype
+		 * or the VLAN etype.
+		 */
+		.cls_prot = NET_PROT_ETH,
+		.cls_field = NH_FLD_ETH_TYPE,
+		.id = DPAA2_ETH_DIST_ETHTYPE,
+		.size = 2,
+	}, {
+		/* VLAN header */
+		.rxnfc_field = RXH_VLAN,
+		.cls_prot = NET_PROT_VLAN,
+		.cls_field = NH_FLD_VLAN_TCI,
+		.id = DPAA2_ETH_DIST_VLAN,
+		.size = 2,
+	}, {
 		/* IP header */
 		.rxnfc_field = RXH_IP_SRC,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_SRC,
+		.id = DPAA2_ETH_DIST_IPSRC,
 		.size = 4,
 	}, {
 		.rxnfc_field = RXH_IP_DST,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_DST,
+		.id = DPAA2_ETH_DIST_IPDST,
 		.size = 4,
 	}, {
 		.rxnfc_field = RXH_L3_PROTO,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_PROTO,
+		.id = DPAA2_ETH_DIST_IPPROTO,
 		.size = 1,
 	}, {
 		/* Using UDP ports, this is functionally equivalent to raw
@@ -1978,90 +2789,182 @@ static const struct dpaa2_eth_hash_field
 		.rxnfc_field = RXH_L4_B_0_1,
 		.cls_prot = NET_PROT_UDP,
 		.cls_field = NH_FLD_UDP_PORT_SRC,
+		.id = DPAA2_ETH_DIST_L4SRC,
 		.size = 2,
 	}, {
 		.rxnfc_field = RXH_L4_B_2_3,
 		.cls_prot = NET_PROT_UDP,
 		.cls_field = NH_FLD_UDP_PORT_DST,
+		.id = DPAA2_ETH_DIST_L4DST,
 		.size = 2,
 	},
 };

||
4194 | - * flags is a combination of RXH_ bits |
||
4195 | - */ |
||
4196 | -static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) |
||
4197 | +static int legacy_config_dist_key(struct dpaa2_eth_priv *priv, |
||
4198 | + dma_addr_t key_iova) |
||
4199 | { |
||
4200 | - struct device *dev = net_dev->dev.parent; |
||
4201 | - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4202 | - struct dpkg_profile_cfg cls_cfg; |
||
4203 | + struct device *dev = priv->net_dev->dev.parent; |
||
4204 | struct dpni_rx_tc_dist_cfg dist_cfg; |
||
4205 | - u8 *dma_mem; |
||
4206 | - int i; |
||
4207 | - int err = 0; |
||
4208 | + int i, err; |
||
4209 | |||
4210 | - if (!dpaa2_eth_hash_enabled(priv)) { |
||
4211 | - dev_dbg(dev, "Hashing support is not enabled\n"); |
||
4212 | - return 0; |
||
4213 | + /* In legacy mode, we can't configure flow steering independently */ |
||
4214 | + if (!dpaa2_eth_hash_enabled(priv)) |
||
4215 | + return -EOPNOTSUPP; |
||
4216 | + |
||
4217 | + memset(&dist_cfg, 0, sizeof(dist_cfg)); |
||
4218 | + |
||
4219 | + dist_cfg.key_cfg_iova = key_iova; |
||
4220 | + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
||
4221 | + if (dpaa2_eth_fs_enabled(priv)) { |
||
4222 | + dist_cfg.dist_mode = DPNI_DIST_MODE_FS; |
||
4223 | + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; |
||
4224 | + } else { |
||
4225 | + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; |
||
4226 | } |
||
4227 | |||
4228 | - memset(&cls_cfg, 0, sizeof(cls_cfg)); |
||
4229 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4230 | + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i, |
||
4231 | + &dist_cfg); |
||
4232 | + if (err) { |
||
4233 | + dev_err(dev, "dpni_set_rx_tc_dist failed\n"); |
||
4234 | + return err; |
||
4235 | + } |
||
4236 | + } |
||
4237 | + |
||
4238 | + return 0; |
||
4239 | +} |
||
4240 | + |
||
4241 | +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova) |
||
4242 | +{ |
||
4243 | + struct device *dev = priv->net_dev->dev.parent; |
||
4244 | + struct dpni_rx_dist_cfg dist_cfg; |
||
4245 | + int i, err; |
||
4246 | |||
4247 | - for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { |
||
4248 | - struct dpkg_extract *key = |
||
4249 | - &cls_cfg.extracts[cls_cfg.num_extracts]; |
||
4250 | + if (!dpaa2_eth_hash_enabled(priv)) |
||
4251 | + return -EOPNOTSUPP; |
||
4252 | |||
4253 | - if (!(flags & hash_fields[i].rxnfc_field)) |
||
4254 | - continue; |
||
4255 | + memset(&dist_cfg, 0, sizeof(dist_cfg)); |
||
4256 | + |
||
4257 | + dist_cfg.key_cfg_iova = key_iova; |
||
4258 | + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
||
4259 | + dist_cfg.enable = true; |
||
4260 | + |
||
4261 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4262 | + dist_cfg.tc = i; |
||
4263 | + |
||
4264 | + err = dpni_set_rx_hash_dist(priv->mc_io, 0, |
||
4265 | + priv->mc_token, &dist_cfg); |
||
4266 | + if (err) { |
||
4267 | + dev_err(dev, "dpni_set_rx_hash_dist failed\n"); |
||
4268 | + return err; |
||
4269 | + } |
||
4270 | + } |
||
4271 | + |
||
4272 | + return 0; |
||
4273 | +} |
||
4274 | + |
||
4275 | +static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova) |
||
4276 | +{ |
||
4277 | + struct device *dev = priv->net_dev->dev.parent; |
||
4278 | + struct dpni_rx_dist_cfg dist_cfg; |
||
4279 | + int i, err; |
||
4280 | |||
4281 | - if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { |
||
4282 | - dev_err(dev, "error adding key extraction rule, too many rules?\n"); |
||
4283 | - return -E2BIG; |
||
4284 | + if (!dpaa2_eth_fs_enabled(priv)) |
||
4285 | + return -EOPNOTSUPP; |
||
4286 | + |
||
4287 | + memset(&dist_cfg, 0, sizeof(dist_cfg)); |
||
4288 | + |
||
4289 | + dist_cfg.key_cfg_iova = key_iova; |
||
4290 | + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
||
4291 | + dist_cfg.enable = true; |
||
4292 | + |
||
4293 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4294 | + dist_cfg.tc = i; |
||
4295 | + |
||
4296 | + err = dpni_set_rx_fs_dist(priv->mc_io, 0, |
||
4297 | + priv->mc_token, &dist_cfg); |
||
4298 | + if (err) { |
||
4299 | + dev_err(dev, "dpni_set_rx_fs_dist failed\n"); |
||
4300 | + return err; |
||
4301 | } |
||
4302 | + } |
||
4303 | + |
||
4304 | + return 0; |
||
4305 | +} |
||
4306 | |||
4307 | +int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv, |
||
4308 | + enum dpaa2_eth_rx_dist type, u32 key_fields) |
||
4309 | +{ |
||
4310 | + struct device *dev = priv->net_dev->dev.parent; |
||
4311 | + struct dpkg_profile_cfg cls_cfg; |
||
4312 | + struct dpkg_extract *key; |
||
4313 | + u32 hash_fields = 0; |
||
4314 | + dma_addr_t key_iova; |
||
4315 | + u8 *key_mem; |
||
4316 | + int i, err; |
||
4317 | + |
||
4318 | + memset(&cls_cfg, 0, sizeof(cls_cfg)); |
||
4319 | + |
||
4320 | + for (i = 0; i < priv->num_dist_fields; i++) { |
||
4321 | + if (!(key_fields & priv->dist_fields[i].id)) |
||
4322 | + continue; |
||
4323 | + |
||
4324 | + key = &cls_cfg.extracts[cls_cfg.num_extracts]; |
||
4325 | key->type = DPKG_EXTRACT_FROM_HDR; |
||
4326 | - key->extract.from_hdr.prot = hash_fields[i].cls_prot; |
||
4327 | + key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot; |
||
4328 | key->extract.from_hdr.type = DPKG_FULL_FIELD; |
||
4329 | - key->extract.from_hdr.field = hash_fields[i].cls_field; |
||
4330 | + key->extract.from_hdr.field = priv->dist_fields[i].cls_field; |
||
4331 | cls_cfg.num_extracts++; |
||
4332 | |||
4333 | - priv->rx_hash_fields |= hash_fields[i].rxnfc_field; |
||
4334 | + hash_fields |= priv->dist_fields[i].rxnfc_field; |
||
4335 | } |
||
4336 | |||
4337 | - dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
||
4338 | - if (!dma_mem) |
||
4339 | + key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
||
4340 | + if (!key_mem) |
||
4341 | return -ENOMEM; |
||
4342 | |||
4343 | - err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); |
||
4344 | + err = dpni_prepare_key_cfg(&cls_cfg, key_mem); |
||
4345 | if (err) { |
||
4346 | dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); |
||
4347 | - goto err_prep_key; |
||
4348 | + goto free_key; |
||
4349 | } |
||
4350 | |||
4351 | - memset(&dist_cfg, 0, sizeof(dist_cfg)); |
||
4352 | - |
||
4353 | - /* Prepare for setting the rx dist */ |
||
4354 | - dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, |
||
4355 | - DPAA2_CLASSIFIER_DMA_SIZE, |
||
4356 | - DMA_TO_DEVICE); |
||
4357 | - if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { |
||
4358 | + key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE, |
||
4359 | + DMA_TO_DEVICE); |
||
4360 | + if (dma_mapping_error(dev, key_iova)) { |
||
4361 | dev_err(dev, "DMA mapping failed\n"); |
||
4362 | err = -ENOMEM; |
||
4363 | - goto err_dma_map; |
||
4364 | + goto free_key; |
||
4365 | } |
||
4366 | |||
4367 | - dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
||
4368 | - dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; |
||
4369 | + switch (type) { |
||
4370 | + case DPAA2_ETH_RX_DIST_LEGACY: |
||
4371 | + err = legacy_config_dist_key(priv, key_iova); |
||
4372 | + break; |
||
4373 | + case DPAA2_ETH_RX_DIST_HASH: |
||
4374 | + err = config_hash_key(priv, key_iova); |
||
4375 | + break; |
||
4376 | + case DPAA2_ETH_RX_DIST_FS: |
||
4377 | + err = config_fs_key(priv, key_iova); |
||
4378 | + break; |
||
4379 | + default: |
||
4380 | + err = -EINVAL; |
||
4381 | + break; |
||
4382 | + } |
||
4383 | |||
4384 | - err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); |
||
4385 | - dma_unmap_single(dev, dist_cfg.key_cfg_iova, |
||
4386 | - DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); |
||
4387 | - if (err) |
||
4388 | - dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); |
||
4389 | + dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, |
||
4390 | + DMA_TO_DEVICE); |
||
4391 | + if (err) { |
||
4392 | + if (err != -EOPNOTSUPP) |
||
4393 | + dev_err(dev, "Distribution key config failed\n"); |
||
4394 | + goto free_key; |
||
4395 | + } |
||
4396 | |||
4397 | -err_dma_map: |
||
4398 | -err_prep_key: |
||
4399 | - kfree(dma_mem); |
||
4400 | + if (type != DPAA2_ETH_RX_DIST_FS) |
||
4401 | + priv->rx_hash_fields = hash_fields; |
||
4402 | + |
||
4403 | +free_key: |
||
4404 | + kfree(key_mem); |
||
4405 | return err; |
||
4406 | } |
||
4407 | |||
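The helper above routes to one of the three per-traffic-class configuration paths depending on the requested distribution type. A minimal sketch of a caller, assuming a hypothetical rxh_to_dist() helper that translates ethtool RXH_* flags into the driver's DPAA2_ETH_DIST_* field ids:

	/* Sketch only: reconfigure the Rx hash key from ethtool flags.
	 * rxh_to_dist() is a hypothetical translation helper, not part
	 * of this patch.
	 */
	static int example_set_rx_hash(struct dpaa2_eth_priv *priv, u64 rxh)
	{
		u32 fields = rxh_to_dist(rxh);

		return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
					      fields);
	}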
4408 | @@ -2080,6 +2983,7 @@ static int bind_dpni(struct dpaa2_eth_pr |
||
4409 | pools_params.num_dpbp = 1; |
||
4410 | pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; |
||
4411 | pools_params.pools[0].backup_pool = 0; |
||
4412 | + pools_params.pools[0].priority_mask = 0xff; |
||
4413 | pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; |
||
4414 | err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); |
||
4415 | if (err) { |
||
4416 | @@ -2087,17 +2991,36 @@ static int bind_dpni(struct dpaa2_eth_pr |
||
4417 | return err; |
||
4418 | } |
||
4419 | |||
4420 | - /* have the interface implicitly distribute traffic based on supported |
||
4421 | - * header fields |
||
4422 | + /* Verify classification options and disable hashing and/or |
||
4423 | + * flow steering support in case of invalid configuration values |
||
4424 | */ |
||
4425 | - err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); |
||
4426 | - if (err) |
||
4427 | - netdev_err(net_dev, "Failed to configure hashing\n"); |
||
4428 | + priv->dist_fields = default_dist_fields; |
||
4429 | + priv->num_dist_fields = ARRAY_SIZE(default_dist_fields); |
||
4430 | + check_cls_support(priv); |
||
4431 | + |
||
4432 | + /* have the interface implicitly distribute traffic based on |
||
4433 | + * a static hash key. Also configure flow steering key, if supported. |
||
4434 | + * Errors here are not blocking, so just let the called function |
||
4435 | + * print its error message and move along. |
||
4436 | + */ |
||
4437 | + if (dpaa2_eth_has_legacy_dist(priv)) { |
||
4438 | + dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY, |
||
4439 | + DPAA2_ETH_DIST_ALL); |
||
4440 | + } else { |
||
4441 | + dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, |
||
4442 | + DPAA2_ETH_DIST_DEFAULT_HASH); |
||
4443 | + dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS, |
||
4444 | + DPAA2_ETH_DIST_ALL); |
||
4445 | + } |
||
4446 | |||
4447 | /* Configure handling of error frames */ |
||
4448 | err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; |
||
4449 | err_cfg.set_frame_annotation = 1; |
||
4450 | +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE |
||
4451 | + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; |
||
4452 | +#else |
||
4453 | err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; |
||
4454 | +#endif |
||
4455 | err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, |
||
4456 | &err_cfg); |
||
4457 | if (err) { |
||
4458 | @@ -2114,6 +3037,11 @@ static int bind_dpni(struct dpaa2_eth_pr |
||
4459 | case DPAA2_TX_CONF_FQ: |
||
4460 | err = setup_tx_flow(priv, &priv->fq[i]); |
||
4461 | break; |
||
4462 | +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE |
||
4463 | + case DPAA2_RX_ERR_FQ: |
||
4464 | + err = setup_rx_err_flow(priv, &priv->fq[i]); |
||
4465 | + break; |
||
4466 | +#endif |
||
4467 | default: |
||
4468 | dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); |
||
4469 | return -EINVAL; |
||
4470 | @@ -2237,11 +3165,14 @@ static int netdev_init(struct net_device |
||
4471 | { |
||
4472 | struct device *dev = net_dev->dev.parent; |
||
4473 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4474 | + u32 options = priv->dpni_attrs.options; |
||
4475 | + u64 supported = 0, not_supported = 0; |
||
4476 | u8 bcast_addr[ETH_ALEN]; |
||
4477 | u8 num_queues; |
||
4478 | int err; |
||
4479 | |||
4480 | net_dev->netdev_ops = &dpaa2_eth_ops; |
||
4481 | + net_dev->ethtool_ops = &dpaa2_ethtool_ops; |
||
4482 | |||
4483 | err = set_mac_addr(priv); |
||
4484 | if (err) |
||
4485 | @@ -2255,14 +3186,14 @@ static int netdev_init(struct net_device |
||
4486 | return err; |
||
4487 | } |
||
4488 | |||
4489 | - /* Reserve enough space to align buffer as per hardware requirement; |
||
4490 | - * NOTE: priv->tx_data_offset MUST be initialized at this point. |
||
4491 | - */ |
||
4492 | - net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); |
||
4493 | - |
||
4494 | - /* Set MTU limits */ |
||
4495 | - net_dev->min_mtu = 68; |
||
4496 | + /* Set MTU upper limit; lower limit is default (68B) */ |
||
4497 | net_dev->max_mtu = DPAA2_ETH_MAX_MTU; |
||
4498 | + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, |
||
4499 | + (u16)DPAA2_ETH_MFL); |
||
4500 | + if (err) { |
||
4501 | + dev_err(dev, "dpni_set_max_frame_length() failed\n"); |
||
4502 | + return err; |
||
4503 | + } |
||
4504 | |||
4505 | /* Set actual number of queues in the net device */ |
||
4506 | num_queues = dpaa2_eth_queue_count(priv); |
||
4507 | @@ -2277,12 +3208,23 @@ static int netdev_init(struct net_device |
||
4508 | return err; |
||
4509 | } |
||
4510 | |||
4511 | - /* Our .ndo_init will be called herein */ |
||
4512 | - err = register_netdev(net_dev); |
||
4513 | - if (err < 0) { |
||
4514 | - dev_err(dev, "register_netdev() failed\n"); |
||
4515 | - return err; |
||
4516 | - } |
||
4517 | + /* Capabilities listing */ |
||
4518 | + supported |= IFF_LIVE_ADDR_CHANGE; |
||
4519 | + |
||
4520 | + if (options & DPNI_OPT_NO_MAC_FILTER) |
||
4521 | + not_supported |= IFF_UNICAST_FLT; |
||
4522 | + else |
||
4523 | + supported |= IFF_UNICAST_FLT; |
||
4524 | + |
||
4525 | + net_dev->priv_flags |= supported; |
||
4526 | + net_dev->priv_flags &= ~not_supported; |
||
4527 | + |
||
4528 | + /* Features */ |
||
4529 | + net_dev->features = NETIF_F_RXCSUM | |
||
4530 | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
||
4531 | + NETIF_F_SG | NETIF_F_HIGHDMA | |
||
4532 | + NETIF_F_LLTX; |
||
4533 | + net_dev->hw_features = net_dev->features; |
||
4534 | |||
4535 | return 0; |
||
4536 | } |
||
4537 | @@ -2303,14 +3245,9 @@ static int poll_link_state(void *arg) |
||
4538 | return 0; |
||
4539 | } |
||
4540 | |||
4541 | -static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) |
||
4542 | -{ |
||
4543 | - return IRQ_WAKE_THREAD; |
||
4544 | -} |
||
4545 | - |
||
4546 | static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) |
||
4547 | { |
||
4548 | - u32 status = 0, clear = 0; |
||
4549 | + u32 status = ~0; |
||
4550 | struct device *dev = (struct device *)arg; |
||
4551 | struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); |
||
4552 | struct net_device *net_dev = dev_get_drvdata(dev); |
||
4553 | @@ -2320,18 +3257,12 @@ static irqreturn_t dpni_irq0_handler_thr |
||
4554 | DPNI_IRQ_INDEX, &status); |
||
4555 | if (unlikely(err)) { |
||
4556 | netdev_err(net_dev, "Can't get irq status (err %d)\n", err); |
||
4557 | - clear = 0xffffffff; |
||
4558 | - goto out; |
||
4559 | + return IRQ_HANDLED; |
||
4560 | } |
||
4561 | |||
4562 | - if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { |
||
4563 | - clear |= DPNI_IRQ_EVENT_LINK_CHANGED; |
||
4564 | + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) |
||
4565 | link_state_update(netdev_priv(net_dev)); |
||
4566 | - } |
||
4567 | |||
4568 | -out: |
||
4569 | - dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, |
||
4570 | - DPNI_IRQ_INDEX, clear); |
||
4571 | return IRQ_HANDLED; |
||
4572 | } |
||
4573 | |||
4574 | @@ -2348,8 +3279,7 @@ static int setup_irqs(struct fsl_mc_devi |
||
4575 | |||
4576 | irq = ls_dev->irqs[0]; |
||
4577 | err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, |
||
4578 | - dpni_irq0_handler, |
||
4579 | - dpni_irq0_handler_thread, |
||
4580 | + NULL, dpni_irq0_handler_thread, |
||
4581 | IRQF_NO_SUSPEND | IRQF_ONESHOT, |
||
4582 | dev_name(&ls_dev->dev), &ls_dev->dev); |
||
4583 | if (err < 0) { |
||
4584 | @@ -2405,6 +3335,393 @@ static void del_ch_napi(struct dpaa2_eth |
||
4585 | } |
||
4586 | } |
||
4587 | |||
4588 | +/* SysFS support */ |
||
4589 | +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, |
||
4590 | + struct device_attribute *attr, |
||
4591 | + char *buf) |
||
4592 | +{ |
||
4593 | + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); |
||
4594 | + /* No MC API for getting the shaping config. We're stateful. */ |
||
4595 | + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; |
||
4596 | + |
||
4597 | + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); |
||
4598 | +} |
||
4599 | + |
||
4600 | +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, |
||
4601 | + struct device_attribute *attr, |
||
4602 | + const char *buf, |
||
4603 | + size_t count) |
||
4604 | +{ |
||
4605 | + int err, items; |
||
4606 | + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); |
||
4607 | + struct dpni_tx_shaping_cfg scfg, ercfg = { 0 }; |
||
4608 | + |
||
4609 | + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); |
||
4610 | + if (items != 2) { |
||
4611 | + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); |
||
4612 | + return -EINVAL; |
||
4613 | + } |
||
4614 | + /* Size restriction as per MC API documentation */ |
||
4615 | + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) { |
||
4616 | + pr_err("max_burst_size must be <= %d\n", |
||
4617 | + DPAA2_ETH_MAX_BURST_SIZE); |
||
4618 | + return -EINVAL; |
||
4619 | + } |
||
4620 | + |
||
4621 | + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg, |
||
4622 | + &ercfg, 0); |
||
4623 | + if (err) { |
||
4624 | + dev_err(dev, "dpni_set_tx_shaping() failed\n"); |
||
4625 | + return -EPERM; |
||
4626 | + } |
||
4627 | + /* If successful, save the current configuration for future inquiries */ |
||
4628 | + priv->shaping_cfg = scfg; |
||
4629 | + |
||
4630 | + return count; |
||
4631 | +} |
||
4632 | + |
||
4633 | +static struct device_attribute dpaa2_eth_attrs[] = { |
||
4634 | + __ATTR(tx_shaping, |
||
4635 | + 0600, |
||
4636 | + dpaa2_eth_show_tx_shaping, |
||
4637 | + dpaa2_eth_write_tx_shaping), |
||
4638 | +}; |
||
4639 | + |
||
4640 | +static void dpaa2_eth_sysfs_init(struct device *dev) |
||
4641 | +{ |
||
4642 | + int i, err; |
||
4643 | + |
||
4644 | + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { |
||
4645 | + err = device_create_file(dev, &dpaa2_eth_attrs[i]); |
||
4646 | + if (err) { |
||
4647 | + dev_err(dev, "ERROR creating sysfs file\n"); |
||
4648 | + goto undo; |
||
4649 | + } |
||
4650 | + } |
||
4651 | + return; |
||
4652 | + |
||
4653 | +undo: |
||
4654 | + while (i > 0) |
||
4655 | + device_remove_file(dev, &dpaa2_eth_attrs[--i]); |
||
4656 | +} |
||
4657 | + |
||
4658 | +static void dpaa2_eth_sysfs_remove(struct device *dev) |
||
4659 | +{ |
||
4660 | + int i; |
||
4661 | + |
||
4662 | + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) |
||
4663 | + device_remove_file(dev, &dpaa2_eth_attrs[i]); |
||
4664 | +} |
||
4665 | + |
||
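Because the attribute is created on the netdevice's struct device, tx_shaping appears under /sys/class/net/<ifname>/. A small userspace sketch of driving it (the interface name and values are assumptions, not part of the patch):

	/* Sketch: request a 1000 Mbps rate limit with an 8192 byte max
	 * burst size, matching the "rate_limit max_burst_size" format
	 * parsed by dpaa2_eth_write_tx_shaping() above.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/class/net/eth0/tx_shaping", O_WRONLY);

		if (fd < 0)
			return 1;
		dprintf(fd, "1000 8192\n");
		close(fd);
		return 0;
	}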
4666 | +#ifdef CONFIG_FSL_DPAA2_ETH_DCB |
||
4667 | +static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev, |
||
4668 | + struct ieee_pfc *pfc) |
||
4669 | +{ |
||
4670 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4671 | + struct dpni_congestion_notification_cfg notification_cfg; |
||
4672 | + struct dpni_link_state state; |
||
4673 | + int err, i; |
||
4674 | + |
||
4675 | + priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv); |
||
4676 | + |
||
4677 | + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); |
||
4678 | + if (err) { |
||
4679 | + netdev_err(net_dev, "ERROR %d getting link state", err); |
||
4680 | + return err; |
||
4681 | + } |
||
4682 | + |
||
4683 | + if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE)) |
||
4684 | + return 0; |
||
4685 | + |
||
4686 | + priv->pfc.pfc_en = 0; |
||
4687 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4688 | + err = dpni_get_congestion_notification(priv->mc_io, 0, |
||
4689 | + priv->mc_token, |
||
4690 | + DPNI_QUEUE_RX, |
||
4691 | + i, ¬ification_cfg); |
||
4692 | + if (err) { |
||
4693 | + netdev_err(net_dev, "Error %d getting congestion notif", |
||
4694 | + err); |
||
4695 | + return err; |
||
4696 | + } |
||
4697 | + |
||
4698 | + if (notification_cfg.threshold_entry) |
||
4699 | + priv->pfc.pfc_en |= 1 << i; |
||
4700 | + } |
||
4701 | + |
||
4702 | + memcpy(pfc, &priv->pfc, sizeof(priv->pfc)); |
||
4703 | + |
||
4704 | + return 0; |
||
4705 | +} |
||
4706 | + |
||
4707 | +/* Configure ingress classification based on VLAN PCP */ |
||
4708 | +static int set_vlan_qos(struct dpaa2_eth_priv *priv) |
||
4709 | +{ |
||
4710 | + struct device *dev = priv->net_dev->dev.parent; |
||
4711 | + struct dpkg_profile_cfg kg_cfg = {0}; |
||
4712 | + struct dpni_qos_tbl_cfg qos_cfg = {0}; |
||
4713 | + struct dpni_rule_cfg key_params; |
||
4714 | + u8 *params_iova, *key, *mask = NULL; |
||
4715 | + /* We only need the trailing 16 bits, without the TPID */ |
||
4716 | + u8 key_size = VLAN_HLEN / 2; |
||
4717 | + int err = 0, i, j = 0; |
||
4718 | + |
||
4719 | + if (priv->vlan_clsf_set) |
||
4720 | + return 0; |
||
4721 | + |
||
4722 | + params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
||
4723 | + if (!params_iova) |
||
4724 | + return -ENOMEM; |
||
4725 | + |
||
4726 | + kg_cfg.num_extracts = 1; |
||
4727 | + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; |
||
4728 | + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; |
||
4729 | + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; |
||
4730 | + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; |
||
4731 | + |
||
4732 | + err = dpni_prepare_key_cfg(&kg_cfg, params_iova); |
||
4733 | + if (err) { |
||
4734 | + dev_err(dev, "dpni_prepare_key_cfg failed: %d\n", err); |
||
4735 | + goto out_free; |
||
4736 | + } |
||
4737 | + |
||
4738 | + /* Set QoS table */ |
||
4739 | + qos_cfg.default_tc = 0; |
||
4740 | + qos_cfg.discard_on_miss = 0; |
||
4741 | + qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova, |
||
4742 | + DPAA2_CLASSIFIER_DMA_SIZE, |
||
4743 | + DMA_TO_DEVICE); |
||
4744 | + if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { |
||
4745 | + dev_err(dev, "%s: DMA mapping failed\n", __func__); |
||
4746 | + err = -ENOMEM; |
||
4747 | + goto out_free; |
||
4748 | + } |
||
4749 | + err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); |
||
4750 | + dma_unmap_single(dev, qos_cfg.key_cfg_iova, |
||
4751 | + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); |
||
4752 | + |
||
4753 | + if (err) { |
||
4754 | + dev_err(dev, "dpni_set_qos_table failed: %d\n", err); |
||
4755 | + goto out_free; |
||
4756 | + } |
||
4757 | + |
||
4758 | + key_params.key_size = key_size; |
||
4759 | + |
||
4760 | + if (dpaa2_eth_fs_mask_enabled(priv)) { |
||
4761 | + mask = kzalloc(key_size, GFP_KERNEL); |
||
4762 | + if (!mask) { err = -ENOMEM; goto out_free; } |
||
4764 | + |
||
4765 | + *mask = cpu_to_be16(VLAN_PRIO_MASK); |
||
4766 | + |
||
4767 | + key_params.mask_iova = dma_map_single(dev, mask, key_size, |
||
4768 | + DMA_TO_DEVICE); |
||
4769 | + if (dma_mapping_error(dev, key_params.mask_iova)) { |
||
4770 | + dev_err(dev, "%s: DMA mapping failed\n", __func__); |
||
4771 | + err = -ENOMEM; |
||
4772 | + goto out_free_mask; |
||
4773 | + } |
||
4774 | + } else { |
||
4775 | + key_params.mask_iova = 0; |
||
4776 | + } |
||
4777 | + |
||
4778 | + key = kzalloc(key_size, GFP_KERNEL); |
||
4779 | + if (!key) { err = -ENOMEM; goto out_cleanup_mask; } |
||
4781 | + |
||
4782 | + key_params.key_iova = dma_map_single(dev, key, key_size, |
||
4783 | + DMA_TO_DEVICE); |
||
4784 | + if (dma_mapping_error(dev, key_params.key_iova)) { |
||
4785 | + dev_err(dev, "%s: DMA mapping failed\n", __func__); |
||
4786 | + err = -ENOMEM; |
||
4787 | + goto out_free_key; |
||
4788 | + } |
||
4789 | + |
||
4790 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4791 | + *key = cpu_to_be16(i << VLAN_PRIO_SHIFT); |
||
4792 | + |
||
4793 | + dma_sync_single_for_device(dev, key_params.key_iova, |
||
4794 | + key_size, DMA_TO_DEVICE); |
||
4795 | + |
||
4796 | + err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, |
||
4797 | + &key_params, i, j++); |
||
4798 | + if (err) { |
||
4799 | + dev_err(dev, "dpni_add_qos_entry failed: %d\n", err); |
||
4800 | + goto out_remove; |
||
4801 | + } |
||
4802 | + } |
||
4803 | + |
||
4804 | + priv->vlan_clsf_set = true; |
||
4805 | + dev_dbg(dev, "VLAN PCP QoS classification set\n"); |
||
4806 | + goto out_cleanup; |
||
4807 | + |
||
4808 | +out_remove: |
||
4809 | + for (j = 0; j < i; j++) { |
||
4810 | + *key = cpu_to_be16(j << VLAN_PRIO_SHIFT); |
||
4811 | + |
||
4812 | + dma_sync_single_for_device(dev, key_params.key_iova, key_size, |
||
4813 | + DMA_TO_DEVICE); |
||
4814 | + |
||
4815 | + err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token, |
||
4816 | + &key_params); |
||
4817 | + if (err) |
||
4818 | + dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err); |
||
4819 | + } |
||
4820 | + |
||
4821 | +out_cleanup: |
||
4822 | + dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE); |
||
4823 | +out_free_key: |
||
4824 | + kfree(key); |
||
4825 | +out_cleanup_mask: |
||
4826 | + if (key_params.mask_iova) |
||
4827 | + dma_unmap_single(dev, key_params.mask_iova, key_size, |
||
4828 | + DMA_TO_DEVICE); |
||
4829 | +out_free_mask: |
||
4830 | + kfree(mask); |
||
4831 | +out_free: |
||
4832 | + kfree(params_iova); |
||
4833 | + return err; |
||
4834 | +} |
||
4835 | + |
||
4836 | +static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev, |
||
4837 | + struct ieee_pfc *pfc) |
||
4838 | +{ |
||
4839 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4840 | + struct dpni_congestion_notification_cfg notification_cfg = {0}; |
||
4841 | + struct dpni_link_state state = {0}; |
||
4842 | + struct dpni_link_cfg cfg = {0}; |
||
4843 | + struct ieee_pfc old_pfc; |
||
4844 | + int err = 0, i; |
||
4845 | + |
||
4846 | + if (dpaa2_eth_tc_count(priv) == 1) { |
||
4847 | + netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n"); |
||
4848 | + return 0; |
||
4849 | + } |
||
4850 | + |
||
4851 | + /* Zero out pfc_enabled prios greater than tc_count */ |
||
4852 | + pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1; |
||
4853 | + |
||
4854 | + if (priv->pfc.pfc_en == pfc->pfc_en) |
||
4855 | + /* Same enabled mask, nothing to be done */ |
||
4856 | + return 0; |
||
4857 | + |
||
4858 | + err = set_vlan_qos(priv); |
||
4859 | + if (err) |
||
4860 | + return err; |
||
4861 | + |
||
4862 | + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); |
||
4863 | + if (err) { |
||
4864 | + netdev_err(net_dev, "ERROR %d getting link state", err); |
||
4865 | + return err; |
||
4866 | + } |
||
4867 | + |
||
4868 | + cfg.rate = state.rate; |
||
4869 | + cfg.options = state.options; |
||
4870 | + if (pfc->pfc_en) |
||
4871 | + cfg.options |= DPNI_LINK_OPT_PFC_PAUSE; |
||
4872 | + else |
||
4873 | + cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE; |
||
4874 | + |
||
4875 | + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); |
||
4876 | + if (err) { |
||
4877 | + netdev_err(net_dev, "ERROR %d setting link cfg", err); |
||
4878 | + return err; |
||
4879 | + } |
||
4880 | + |
||
4881 | + memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc)); |
||
4882 | + memcpy(&priv->pfc, pfc, sizeof(priv->pfc)); |
||
4883 | + |
||
4884 | + err = set_rx_taildrop(priv); |
||
4885 | + if (err) |
||
4886 | + goto out_restore_config; |
||
4887 | + |
||
4888 | + /* configure congestion notifications */ |
||
4889 | + notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL; |
||
4890 | + notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; |
||
4891 | + notification_cfg.message_iova = 0ULL; |
||
4892 | + notification_cfg.message_ctx = 0ULL; |
||
4893 | + |
||
4894 | + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
||
4895 | + if (dpaa2_eth_is_pfc_enabled(priv, i)) { |
||
4896 | + notification_cfg.threshold_entry = NAPI_POLL_WEIGHT; |
||
4897 | + notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2; |
||
4898 | + } else { |
||
4899 | + notification_cfg.threshold_entry = 0; |
||
4900 | + notification_cfg.threshold_exit = 0; |
||
4901 | + } |
||
4902 | + |
||
4903 | + err = dpni_set_congestion_notification(priv->mc_io, 0, |
||
4904 | + priv->mc_token, |
||
4905 | + DPNI_QUEUE_RX, |
||
4906 | + i, ¬ification_cfg); |
||
4907 | + if (err) { |
||
4908 | + netdev_err(net_dev, "Error %d setting congestion notif", |
||
4909 | + err); |
||
4910 | + goto out_restore_config; |
||
4911 | + } |
||
4912 | + |
||
4913 | + netdev_dbg(net_dev, "%s congestion notifications for tc %d\n", |
||
4914 | + (notification_cfg.threshold_entry ? |
||
4915 | + "Enabled" : "Disabled"), i); |
||
4916 | + } |
||
4917 | + |
||
4918 | + return 0; |
||
4919 | + |
||
4920 | +out_restore_config: |
||
4921 | + memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc)); |
||
4922 | + return err; |
||
4923 | +} |
||
4924 | + |
||
4925 | +static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev) |
||
4926 | +{ |
||
4927 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4928 | + |
||
4929 | + return priv->dcbx_mode; |
||
4930 | +} |
||
4931 | + |
||
4932 | +static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode) |
||
4933 | +{ |
||
4934 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4935 | + |
||
4936 | + priv->dcbx_mode = mode; |
||
4937 | + return 0; |
||
4938 | +} |
||
4939 | + |
||
4940 | +static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap) |
||
4941 | +{ |
||
4942 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
4943 | + |
||
4944 | + switch (capid) { |
||
4945 | + case DCB_CAP_ATTR_PFC: |
||
4946 | + *cap = true; |
||
4947 | + break; |
||
4948 | + case DCB_CAP_ATTR_PFC_TCS: |
||
4949 | + /* bitmap where each bit represents a number of traffic |
||
4950 | + * classes the device can be configured to use for Priority |
||
4951 | + * Flow Control |
||
4952 | + */ |
||
4953 | + *cap = 1 << (dpaa2_eth_tc_count(priv) - 1); |
||
4954 | + break; |
||
4955 | + case DCB_CAP_ATTR_DCBX: |
||
4956 | + *cap = priv->dcbx_mode; |
||
4957 | + break; |
||
4958 | + default: |
||
4959 | + *cap = false; |
||
4960 | + break; |
||
4961 | + } |
||
4962 | + |
||
4963 | + return 0; |
||
4964 | +} |
||
4965 | + |
||
4966 | +const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = { |
||
4967 | + .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc, |
||
4968 | + .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc, |
||
4969 | + .getdcbx = dpaa2_eth_dcbnl_getdcbx, |
||
4970 | + .setdcbx = dpaa2_eth_dcbnl_setdcbx, |
||
4971 | + .getcap = dpaa2_eth_dcbnl_getcap, |
||
4972 | +}; |
||
4973 | +#endif |
||
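For reference, the dcbnl core hands ieee_setpfc() a struct ieee_pfc filled in from the user's request; a sketch of what such a request might carry (the traffic class choice is illustrative):

	/* Sketch: ask for PFC on traffic classes 0 and 2. The handler
	 * above masks pfc_en against the DPNI's actual TC count.
	 */
	struct ieee_pfc pfc = {
		.pfc_cap = 8,			/* assumed TC count */
		.pfc_en = BIT(0) | BIT(2),
	};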
4974 | + |
||
4975 | static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) |
||
4976 | { |
||
4977 | struct device *dev; |
||
4978 | @@ -2415,7 +3732,7 @@ static int dpaa2_eth_probe(struct fsl_mc |
||
4979 | dev = &dpni_dev->dev; |
||
4980 | |||
4981 | /* Net device */ |
||
4982 | - net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); |
||
4983 | + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES); |
||
4984 | if (!net_dev) { |
||
4985 | dev_err(dev, "alloc_etherdev_mq() failed\n"); |
||
4986 | return -ENOMEM; |
||
4987 | @@ -2433,7 +3750,10 @@ static int dpaa2_eth_probe(struct fsl_mc |
||
4988 | err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, |
||
4989 | &priv->mc_io); |
||
4990 | if (err) { |
||
4991 | - dev_err(dev, "MC portal allocation failed\n"); |
||
4992 | + if (err == -ENXIO) |
||
4993 | + err = -EPROBE_DEFER; |
||
4994 | + else |
||
4995 | + dev_err(dev, "MC portal allocation failed\n"); |
||
4996 | goto err_portal_alloc; |
||
4997 | } |
||
4998 | |||
4999 | @@ -2456,9 +3776,6 @@ static int dpaa2_eth_probe(struct fsl_mc |
||
5000 | if (err) |
||
5001 | goto err_bind; |
||
5002 | |||
5003 | - /* Add a NAPI context for each channel */ |
||
5004 | - add_ch_napi(priv); |
||
5005 | - |
||
5006 | /* Percpu statistics */ |
||
5007 | priv->percpu_stats = alloc_percpu(*priv->percpu_stats); |
||
5008 | if (!priv->percpu_stats) { |
||
5009 | @@ -2491,7 +3808,14 @@ static int dpaa2_eth_probe(struct fsl_mc |
||
5010 | if (err) |
||
5011 | goto err_alloc_rings; |
||
5012 | |||
5013 | - net_dev->ethtool_ops = &dpaa2_ethtool_ops; |
||
5014 | +#ifdef CONFIG_FSL_DPAA2_ETH_DCB |
||
5015 | + net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops; |
||
5016 | + priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; |
||
5017 | +#endif |
||
5018 | + |
||
5019 | + /* Add a NAPI context for each channel */ |
||
5020 | + add_ch_napi(priv); |
||
5021 | + enable_ch_napi(priv); |
||
5022 | |||
5023 | err = setup_irqs(dpni_dev); |
||
5024 | if (err) { |
||
5025 | @@ -2499,25 +3823,41 @@ static int dpaa2_eth_probe(struct fsl_mc |
||
5026 | priv->poll_thread = kthread_run(poll_link_state, priv, |
||
5027 | "%s_poll_link", net_dev->name); |
||
5028 | if (IS_ERR(priv->poll_thread)) { |
||
5029 | - netdev_err(net_dev, "Error starting polling thread\n"); |
||
5030 | + dev_err(dev, "Error starting polling thread\n"); |
||
5031 | goto err_poll_thread; |
||
5032 | } |
||
5033 | priv->do_link_poll = true; |
||
5034 | } |
||
5035 | |||
5036 | + err = register_netdev(net_dev); |
||
5037 | + if (err < 0) { |
||
5038 | + dev_err(dev, "register_netdev() failed\n"); |
||
5039 | + goto err_netdev_reg; |
||
5040 | + } |
||
5041 | + |
||
5042 | + dpaa2_eth_sysfs_init(&net_dev->dev); |
||
5043 | +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS |
||
5044 | + dpaa2_dbg_add(priv); |
||
5045 | +#endif |
||
5046 | + |
||
5047 | dev_info(dev, "Probed interface %s\n", net_dev->name); |
||
5048 | return 0; |
||
5049 | |||
5050 | +err_netdev_reg: |
||
5051 | + if (priv->do_link_poll) |
||
5052 | + kthread_stop(priv->poll_thread); |
||
5053 | + else |
||
5054 | + fsl_mc_free_irqs(dpni_dev); |
||
5055 | err_poll_thread: |
||
5056 | free_rings(priv); |
||
5057 | err_alloc_rings: |
||
5058 | err_csum: |
||
5059 | - unregister_netdev(net_dev); |
||
5060 | err_netdev_init: |
||
5061 | free_percpu(priv->percpu_extras); |
||
5062 | err_alloc_percpu_extras: |
||
5063 | free_percpu(priv->percpu_stats); |
||
5064 | err_alloc_percpu_stats: |
||
5065 | + disable_ch_napi(priv); |
||
5066 | del_ch_napi(priv); |
||
5067 | err_bind: |
||
5068 | free_dpbp(priv); |
||
5069 | @@ -2544,8 +3884,15 @@ static int dpaa2_eth_remove(struct fsl_m |
||
5070 | net_dev = dev_get_drvdata(dev); |
||
5071 | priv = netdev_priv(net_dev); |
||
5072 | |||
5073 | +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS |
||
5074 | + dpaa2_dbg_remove(priv); |
||
5075 | +#endif |
||
5076 | + dpaa2_eth_sysfs_remove(&net_dev->dev); |
||
5077 | + |
||
5078 | unregister_netdev(net_dev); |
||
5079 | - dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); |
||
5080 | + |
||
5081 | + disable_ch_napi(priv); |
||
5082 | + del_ch_napi(priv); |
||
5083 | |||
5084 | if (priv->do_link_poll) |
||
5085 | kthread_stop(priv->poll_thread); |
||
5086 | @@ -2555,8 +3902,6 @@ static int dpaa2_eth_remove(struct fsl_m |
||
5087 | free_rings(priv); |
||
5088 | free_percpu(priv->percpu_stats); |
||
5089 | free_percpu(priv->percpu_extras); |
||
5090 | - |
||
5091 | - del_ch_napi(priv); |
||
5092 | free_dpbp(priv); |
||
5093 | free_dpio(priv); |
||
5094 | free_dpni(priv); |
||
5095 | @@ -2566,6 +3911,8 @@ static int dpaa2_eth_remove(struct fsl_m |
||
5096 | dev_set_drvdata(dev, NULL); |
||
5097 | free_netdev(net_dev); |
||
5098 | |||
5099 | + dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); |
||
5100 | + |
||
5101 | return 0; |
||
5102 | } |
||
5103 | |||
5104 | @@ -2588,4 +3935,34 @@ static struct fsl_mc_driver dpaa2_eth_dr |
||
5105 | .match_id_table = dpaa2_eth_match_id_table |
||
5106 | }; |
||
5107 | |||
5108 | -module_fsl_mc_driver(dpaa2_eth_driver); |
||
5109 | +static int __init dpaa2_eth_driver_init(void) |
||
5110 | +{ |
||
5111 | + int err; |
||
5112 | + |
||
5113 | + dpaa2_eth_dbg_init(); |
||
5114 | + err = fsl_mc_driver_register(&dpaa2_eth_driver); |
||
5115 | + if (err) |
||
5116 | + goto out_debugfs_err; |
||
5117 | + |
||
5118 | + err = dpaa2_ceetm_register(); |
||
5119 | + if (err) |
||
5120 | + goto out_ceetm_err; |
||
5121 | + |
||
5122 | + return 0; |
||
5123 | + |
||
5124 | +out_ceetm_err: |
||
5125 | + fsl_mc_driver_unregister(&dpaa2_eth_driver); |
||
5126 | +out_debugfs_err: |
||
5127 | + dpaa2_eth_dbg_exit(); |
||
5128 | + return err; |
||
5129 | +} |
||
5130 | + |
||
5131 | +static void __exit dpaa2_eth_driver_exit(void) |
||
5132 | +{ |
||
5133 | + dpaa2_ceetm_unregister(); |
||
5134 | + fsl_mc_driver_unregister(&dpaa2_eth_driver); |
||
5135 | + dpaa2_eth_dbg_exit(); |
||
5136 | +} |
||
5137 | + |
||
5138 | +module_init(dpaa2_eth_driver_init); |
||
5139 | +module_exit(dpaa2_eth_driver_exit); |
||
5140 | --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h |
||
5141 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h |
||
5142 | @@ -33,6 +33,7 @@ |
||
5143 | #ifndef __DPAA2_ETH_H |
||
5144 | #define __DPAA2_ETH_H |
||
5145 | |||
5146 | +#include <linux/dcbnl.h> |
||
5147 | #include <linux/netdevice.h> |
||
5148 | #include <linux/if_vlan.h> |
||
5149 | |||
5150 | @@ -44,9 +45,17 @@ |
||
5151 | #include "dpni-cmd.h" |
||
5152 | |||
5153 | #include "dpaa2-eth-trace.h" |
||
5154 | +#include "dpaa2-eth-debugfs.h" |
||
5155 | + |
||
5156 | +#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) |
||
5157 | |||
5158 | #define DPAA2_ETH_STORE_SIZE 16 |
||
5159 | |||
5160 | +/* We set a max threshold for how many Tx confirmations we should process |
||
5161 | + * on a NAPI poll call, since they take less processing time. |
||
5162 | + */ |
||
5163 | +#define TX_CONF_PER_NAPI_POLL 256 |
||
5164 | + |
||
5165 | /* Maximum number of scatter-gather entries in an ingress frame, |
||
5166 | * considering the maximum receive frame size is 64K |
||
5167 | */ |
||
5168 | @@ -60,6 +69,14 @@ |
||
5169 | /* Convert L3 MTU to L2 MFL */ |
||
5170 | #define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) |
||
5171 | |||
5172 | +/* Maximum burst size value for Tx shaping */ |
||
5173 | +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF |
||
5174 | + |
||
5175 | +/* Maximum number of buffers that can be acquired/released through a single |
||
5176 | + * QBMan command |
||
5177 | + */ |
||
5178 | +#define DPAA2_ETH_BUFS_PER_CMD 7 |
||
5179 | + |
||
5180 | /* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo |
||
5181 | * frames in the Rx queues (length of the current frame is not |
||
5182 | * taken into account when making the taildrop decision) |
||
5183 | @@ -72,31 +89,32 @@ |
||
5184 | * to accommodate the buffer refill delay. |
||
5185 | */ |
||
5186 | #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) |
||
5187 | -#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) |
||
5188 | -#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE |
||
5189 | +#define DPAA2_ETH_NUM_BUFS_PER_CH (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) |
||
5190 | +#define DPAA2_ETH_REFILL_THRESH(priv) \ |
||
5191 | + ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD) |
||
5192 | |||
5193 | -/* Maximum number of buffers that can be acquired/released through a single |
||
5194 | - * QBMan command |
||
5195 | - */ |
||
5196 | -#define DPAA2_ETH_BUFS_PER_CMD 7 |
||
5197 | +/* Global buffer quota in case flow control is enabled */ |
||
5198 | +#define DPAA2_ETH_NUM_BUFS_FC 256 |
||
5199 | + |
||
5200 | +/* Hardware requires alignment for ingress/egress buffer addresses */ |
||
5201 | +#define DPAA2_ETH_TX_BUF_ALIGN 64 |
||
5202 | |||
5203 | -/* Hardware requires alignment for ingress/egress buffer addresses |
||
5204 | - * and ingress buffer lengths. |
||
5205 | +/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned |
||
5206 | + * to 256B. For newer revisions, the requirement is only for 64B alignment |
||
5207 | */ |
||
5208 | +#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256 |
||
5209 | +#define DPAA2_ETH_RX_BUF_ALIGN 64 |
||
5210 | + |
||
5211 | #define DPAA2_ETH_RX_BUF_SIZE 2048 |
||
5212 | -#define DPAA2_ETH_TX_BUF_ALIGN 64 |
||
5213 | -#define DPAA2_ETH_RX_BUF_ALIGN 256 |
||
5214 | -#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ |
||
5215 | - ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) |
||
5216 | - |
||
5217 | -/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress |
||
5218 | - * buffers large enough to allow building an skb around them and also account |
||
5219 | - * for alignment restrictions |
||
5220 | - */ |
||
5221 | -#define DPAA2_ETH_BUF_RAW_SIZE \ |
||
5222 | - (DPAA2_ETH_RX_BUF_SIZE + \ |
||
5223 | - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ |
||
5224 | - DPAA2_ETH_RX_BUF_ALIGN) |
||
5225 | +#define DPAA2_ETH_SKB_SIZE \ |
||
5226 | + (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
||
5227 | + |
||
5228 | +/* PTP nominal frequency 1GHz */ |
||
5229 | +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 |
||
5230 | + |
||
5231 | +/* Hardware annotation area in RX/TX buffers */ |
||
5232 | +#define DPAA2_ETH_RX_HWA_SIZE 64 |
||
5233 | +#define DPAA2_ETH_TX_HWA_SIZE 128 |
||
5234 | |||
5235 | /* We are accommodating a skb backpointer and some S/G info |
||
5236 | * in the frame's software annotation. The hardware |
||
5237 | @@ -104,12 +122,32 @@ |
||
5238 | */ |
||
5239 | #define DPAA2_ETH_SWA_SIZE 64 |
||
5240 | |||
5241 | +/* We store different information in the software annotation area of a Tx frame |
||
5242 | + * based on what type of frame it is |
||
5243 | + */ |
||
5244 | +enum dpaa2_eth_swa_type { |
||
5245 | + DPAA2_ETH_SWA_SINGLE, |
||
5246 | + DPAA2_ETH_SWA_SG, |
||
5247 | + DPAA2_ETH_SWA_XDP, |
||
5248 | +}; |
||
5249 | + |
||
5250 | /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ |
||
5251 | struct dpaa2_eth_swa { |
||
5252 | - struct sk_buff *skb; |
||
5253 | - struct scatterlist *scl; |
||
5254 | - int num_sg; |
||
5255 | - int num_dma_bufs; |
||
5256 | + enum dpaa2_eth_swa_type type; |
||
5257 | + union { |
||
5258 | + struct { |
||
5259 | + struct sk_buff *skb; |
||
5260 | + } single; |
||
5261 | + struct { |
||
5262 | + struct sk_buff *skb; |
||
5263 | + struct scatterlist *scl; |
||
5264 | + int num_sg; |
||
5265 | + int sgt_size; |
||
5266 | + } sg; |
||
5267 | + struct { |
||
5268 | + int dma_size; |
||
5269 | + } xdp; |
||
5270 | + }; |
||
5271 | }; |
||
5272 | |||
5273 | /* Annotation valid bits in FD FRC */ |
||
5274 | @@ -120,23 +158,14 @@ struct dpaa2_eth_swa { |
||
5275 | #define DPAA2_FD_FRC_FASWOV 0x0800 |
||
5276 | #define DPAA2_FD_FRC_FAICFDV 0x0400 |
||
5277 | |||
5278 | -/* Error bits in FD CTRL */ |
||
5279 | -#define DPAA2_FD_CTRL_UFD 0x00000004 |
||
5280 | -#define DPAA2_FD_CTRL_SBE 0x00000008 |
||
5281 | -#define DPAA2_FD_CTRL_FSE 0x00000020 |
||
5282 | -#define DPAA2_FD_CTRL_FAERR 0x00000040 |
||
5283 | - |
||
5284 | -#define DPAA2_FD_RX_ERR_MASK (DPAA2_FD_CTRL_SBE | \ |
||
5285 | - DPAA2_FD_CTRL_FAERR) |
||
5286 | -#define DPAA2_FD_TX_ERR_MASK (DPAA2_FD_CTRL_UFD | \ |
||
5287 | - DPAA2_FD_CTRL_SBE | \ |
||
5288 | - DPAA2_FD_CTRL_FSE | \ |
||
5289 | - DPAA2_FD_CTRL_FAERR) |
||
5290 | +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR) |
||
5291 | +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \ |
||
5292 | + FD_CTRL_SBE | \ |
||
5293 | + FD_CTRL_FSE | \ |
||
5294 | + FD_CTRL_FAERR) |
||
5295 | |||
5296 | /* Annotation bits in FD CTRL */ |
||
5297 | #define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ |
||
5298 | -#define DPAA2_FD_CTRL_PTA 0x00800000 |
||
5299 | -#define DPAA2_FD_CTRL_PTV1 0x00400000 |
||
5300 | |||
5301 | /* Frame annotation status */ |
||
5302 | struct dpaa2_fas { |
||
5303 | @@ -144,7 +173,7 @@ struct dpaa2_fas { |
||
5304 | u8 ppid; |
||
5305 | __le16 ifpid; |
||
5306 | __le32 status; |
||
5307 | -} __packed; |
||
5308 | +}; |
||
5309 | |||
5310 | /* Frame annotation status word is located in the first 8 bytes |
||
5311 | * of the buffer's hardware annoatation area |
||
5312 | @@ -152,11 +181,45 @@ struct dpaa2_fas { |
||
5313 | #define DPAA2_FAS_OFFSET 0 |
||
5314 | #define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas)) |
||
5315 | |||
5316 | +/* Timestamp is located in the next 8 bytes of the buffer's |
||
5317 | + * hardware annotation area |
||
5318 | + */ |
||
5319 | +#define DPAA2_TS_OFFSET 0x8 |
||
5320 | + |
||
5321 | +/* Frame annotation egress action descriptor */ |
||
5322 | +#define DPAA2_FAEAD_OFFSET 0x58 |
||
5323 | + |
||
5324 | +struct dpaa2_faead { |
||
5325 | + __le32 conf_fqid; |
||
5326 | + __le32 ctrl; |
||
5327 | +}; |
||
5328 | + |
||
5329 | +#define DPAA2_FAEAD_A2V 0x20000000 |
||
5330 | +#define DPAA2_FAEAD_A4V 0x08000000 |
||
5331 | +#define DPAA2_FAEAD_UPDV 0x00001000 |
||
5332 | +#define DPAA2_FAEAD_EBDDV 0x00002000 |
||
5333 | +#define DPAA2_FAEAD_UPD 0x00000010 |
||
5334 | + |
||
5335 | /* Accessors for the hardware annotation fields that we use */ |
||
5336 | -#define dpaa2_get_hwa(buf_addr) \ |
||
5337 | - ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE) |
||
5338 | -#define dpaa2_get_fas(buf_addr) \ |
||
5339 | - (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET) |
||
5340 | +static inline void *dpaa2_get_hwa(void *buf_addr, bool swa) |
||
5341 | +{ |
||
5342 | + return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0); |
||
5343 | +} |
||
5344 | + |
||
5345 | +static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa) |
||
5346 | +{ |
||
5347 | + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET; |
||
5348 | +} |
||
5349 | + |
||
5350 | +static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa) |
||
5351 | +{ |
||
5352 | + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET; |
||
5353 | +} |
||
5354 | + |
||
5355 | +static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa) |
||
5356 | +{ |
||
5357 | + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET; |
||
5358 | +} |
||
5359 | |||
5360 | /* Error and status bits in the frame annotation status word */ |
||
5361 | /* Debug frame, otherwise supposed to be discarded */ |
||
5362 | @@ -203,11 +266,6 @@ struct dpaa2_fas { |
||
5363 | DPAA2_FAS_BLE | \ |
||
5364 | DPAA2_FAS_L3CE | \ |
||
5365 | DPAA2_FAS_L4CE) |
||
5366 | -/* Tx errors */ |
||
5367 | -#define DPAA2_FAS_TX_ERR_MASK (DPAA2_FAS_KSE | \ |
||
5368 | - DPAA2_FAS_EOFHE | \ |
||
5369 | - DPAA2_FAS_MNLE | \ |
||
5370 | - DPAA2_FAS_TIDE) |
||
5371 | |||
5372 | /* Time in milliseconds between link state updates */ |
||
5373 | #define DPAA2_ETH_LINK_STATE_REFRESH 1000 |
||
5374 | @@ -218,6 +276,14 @@ struct dpaa2_fas { |
||
5375 | */ |
||
5376 | #define DPAA2_ETH_ENQUEUE_RETRIES 10 |
||
5377 | |||
5378 | +/* Tx congestion entry & exit thresholds, in number of bytes. |
||
5379 | + * We allow a maximum of 512KB worth of frames pending processing on the Tx |
||
5380 | + * queues of an interface |
||
5381 | + */ |
||
5382 | +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024) |
||
5383 | +#define DPAA2_ETH_TX_CONG_EXIT_THRESH \ |
||
5384 | + (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10) |
||
5385 | + |
||
5386 | /* Driver statistics, other than those in struct rtnl_link_stats64. |
||
5387 | * These are usually collected per-CPU and aggregated by ethtool. |
||
5388 | */ |
||
5389 | @@ -226,6 +292,7 @@ struct dpaa2_eth_drv_stats { |
||
5390 | __u64 tx_conf_bytes; |
||
5391 | __u64 tx_sg_frames; |
||
5392 | __u64 tx_sg_bytes; |
||
5393 | + __u64 tx_reallocs; |
||
5394 | __u64 rx_sg_frames; |
||
5395 | __u64 rx_sg_bytes; |
||
5396 | /* Enqueues retried due to portal busy */ |
||
5397 | @@ -236,6 +303,8 @@ struct dpaa2_eth_drv_stats { |
||
5398 | struct dpaa2_eth_fq_stats { |
||
5399 | /* Number of frames received on this queue */ |
||
5400 | __u64 frames; |
||
5401 | + /* Number of times this queue entered congestion */ |
||
5402 | + __u64 congestion_entry; |
||
5403 | }; |
||
5404 | |||
5405 | /* Per-channel statistics */ |
||
5406 | @@ -250,17 +319,23 @@ struct dpaa2_eth_ch_stats { |
||
5407 | __u64 pull_err; |
||
5408 | }; |
||
5409 | |||
5410 | +#define DPAA2_ETH_MAX_TCS 8 |
||
5411 | + |
||
5412 | /* Maximum number of queues associated with a DPNI */ |
||
5413 | -#define DPAA2_ETH_MAX_RX_QUEUES 16 |
||
5414 | -#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS |
||
5415 | +#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS) |
||
5416 | +#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS |
||
5417 | +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 |
||
5418 | #define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ |
||
5419 | - DPAA2_ETH_MAX_TX_QUEUES) |
||
5420 | + DPAA2_ETH_MAX_TX_QUEUES + \ |
||
5421 | + DPAA2_ETH_MAX_RX_ERR_QUEUES) |
||
5422 | +#define DPAA2_ETH_MAX_NETDEV_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS) |
||
5423 | |||
5424 | -#define DPAA2_ETH_MAX_DPCONS NR_CPUS |
||
5425 | +#define DPAA2_ETH_MAX_DPCONS 16 |
||
5426 | |||
5427 | enum dpaa2_eth_fq_type { |
||
5428 | DPAA2_RX_FQ = 0, |
||
5429 | DPAA2_TX_CONF_FQ, |
||
5430 | + DPAA2_RX_ERR_FQ |
||
5431 | }; |
||
5432 | |||
5433 | struct dpaa2_eth_priv; |
||
5434 | @@ -269,6 +344,7 @@ struct dpaa2_eth_fq { |
||
5435 | u32 fqid; |
||
5436 | u32 tx_qdbin; |
||
5437 | u16 flowid; |
||
5438 | + u8 tc; |
||
5439 | int target_cpu; |
||
5440 | struct dpaa2_eth_channel *channel; |
||
5441 | enum dpaa2_eth_fq_type type; |
||
5442 | @@ -276,7 +352,8 @@ struct dpaa2_eth_fq { |
||
5443 | void (*consume)(struct dpaa2_eth_priv *, |
||
5444 | struct dpaa2_eth_channel *, |
||
5445 | const struct dpaa2_fd *, |
||
5446 | - struct napi_struct *); |
||
5447 | + struct napi_struct *, |
||
5448 | + u16 queue_id); |
||
5449 | struct dpaa2_eth_fq_stats stats; |
||
5450 | }; |
||
5451 | |||
5452 | @@ -285,24 +362,53 @@ struct dpaa2_eth_channel { |
||
5453 | struct fsl_mc_device *dpcon; |
||
5454 | int dpcon_id; |
||
5455 | int ch_id; |
||
5456 | - int dpio_id; |
||
5457 | struct napi_struct napi; |
||
5458 | + struct dpaa2_io *dpio; |
||
5459 | struct dpaa2_io_store *store; |
||
5460 | struct dpaa2_eth_priv *priv; |
||
5461 | int buf_count; |
||
5462 | struct dpaa2_eth_ch_stats stats; |
||
5463 | + struct bpf_prog *xdp_prog; |
||
5464 | + u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
||
5465 | + u8 rel_buf_cnt; |
||
5466 | + bool flush; |
||
5467 | +}; |
||
5468 | + |
||
5469 | +struct dpaa2_eth_cls_rule { |
||
5470 | + struct ethtool_rx_flow_spec fs; |
||
5471 | + bool in_use; |
||
5472 | }; |
||
5473 | |||
5474 | -struct dpaa2_eth_hash_fields { |
||
5475 | +struct dpaa2_eth_dist_fields { |
||
5476 | u64 rxnfc_field; |
||
5477 | enum net_prot cls_prot; |
||
5478 | int cls_field; |
||
5479 | + int offset; |
||
5480 | int size; |
||
5481 | + u32 id; |
||
5482 | }; |
||
5483 | |||
5484 | /* Driver private data */ |
||
5485 | struct dpaa2_eth_priv { |
||
5486 | struct net_device *net_dev; |
||
5487 | + /* Standard statistics */ |
||
5488 | + struct rtnl_link_stats64 __percpu *percpu_stats; |
||
5489 | + /* Extra stats, in addition to the ones known by the kernel */ |
||
5490 | + struct dpaa2_eth_drv_stats __percpu *percpu_extras; |
||
5491 | + bool ts_tx_en; /* Tx timestamping enabled */ |
||
5492 | + bool ts_rx_en; /* Rx timestamping enabled */ |
||
5493 | + u16 tx_data_offset; |
||
5494 | + u16 bpid; |
||
5495 | + u16 tx_qdid; |
||
5496 | + u16 rx_buf_align; |
||
5497 | + struct iommu_domain *iommu_domain; |
||
5498 | + int max_bufs_per_ch; |
||
5499 | + int refill_thresh; |
||
5500 | + bool has_xdp_prog; |
||
5501 | + |
||
5502 | + void *cscn_mem; /* Tx congestion notifications are written here */ |
||
5503 | + void *cscn_unaligned; |
||
5504 | + dma_addr_t cscn_dma; |
||
5505 | |||
5506 | u8 num_fqs; |
||
5507 | struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; |
||
5508 | @@ -311,51 +417,193 @@ struct dpaa2_eth_priv { |
||
5509 | struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; |
||
5510 | |||
5511 | struct dpni_attr dpni_attrs; |
||
5512 | - u16 tx_data_offset; |
||
5513 | - |
||
5514 | + u16 dpni_ver_major; |
||
5515 | + u16 dpni_ver_minor; |
||
5516 | struct fsl_mc_device *dpbp_dev; |
||
5517 | - u16 bpid; |
||
5518 | - struct iommu_domain *iommu_domain; |
||
5519 | |||
5520 | - u16 tx_qdid; |
||
5521 | struct fsl_mc_io *mc_io; |
||
5522 | /* Cores which have an affine DPIO/DPCON. |
||
5523 | * This is the cpu set on which Rx and Tx conf frames are processed |
||
5524 | */ |
||
5525 | struct cpumask dpio_cpumask; |
||
5526 | |||
5527 | - /* Standard statistics */ |
||
5528 | - struct rtnl_link_stats64 __percpu *percpu_stats; |
||
5529 | - /* Extra stats, in addition to the ones known by the kernel */ |
||
5530 | - struct dpaa2_eth_drv_stats __percpu *percpu_extras; |
||
5531 | - |
||
5532 | u16 mc_token; |
||
5533 | |||
5534 | struct dpni_link_state link_state; |
||
5535 | bool do_link_poll; |
||
5536 | struct task_struct *poll_thread; |
||
5537 | |||
5538 | + /* Rx distribution (hash and flow steering) header fields |
||
5539 | + * supported by the driver |
||
5540 | + */ |
||
5541 | + struct dpaa2_eth_dist_fields *dist_fields; |
||
5542 | + u8 num_dist_fields; |
||
5543 | /* enabled ethtool hashing bits */ |
||
5544 | u64 rx_hash_fields; |
||
5545 | +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS |
||
5546 | + struct dpaa2_debugfs dbg; |
||
5547 | +#endif |
||
5548 | + /* array of classification rules */ |
||
5549 | + struct dpaa2_eth_cls_rule *cls_rule; |
||
5550 | + struct dpni_tx_shaping_cfg shaping_cfg; |
||
5551 | + |
||
5552 | + u8 dcbx_mode; |
||
5553 | + struct ieee_pfc pfc; |
||
5554 | + bool vlan_clsf_set; |
||
5555 | + bool tx_pause_frames; |
||
5556 | + |
||
5557 | + bool ceetm_en; |
||
5558 | +}; |
||
5559 | + |
||
5560 | +enum dpaa2_eth_rx_dist { |
||
5561 | + DPAA2_ETH_RX_DIST_HASH, |
||
5562 | + DPAA2_ETH_RX_DIST_FS, |
||
5563 | + DPAA2_ETH_RX_DIST_LEGACY |
||
5564 | }; |
||
5565 | |||
5566 | -/* default Rx hash options, set during probing */ |
||
5567 | -#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ |
||
5568 | - | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ |
||
5569 | - | RXH_L4_B_2_3) |
||
5570 | +/* Supported Rx distribution field ids */ |
||
5571 | +#define DPAA2_ETH_DIST_ETHSRC BIT(0) |
||
5572 | +#define DPAA2_ETH_DIST_ETHDST BIT(1) |
||
5573 | +#define DPAA2_ETH_DIST_ETHTYPE BIT(2) |
||
5574 | +#define DPAA2_ETH_DIST_VLAN BIT(3) |
||
5575 | +#define DPAA2_ETH_DIST_IPSRC BIT(4) |
||
5576 | +#define DPAA2_ETH_DIST_IPDST BIT(5) |
||
5577 | +#define DPAA2_ETH_DIST_IPPROTO BIT(6) |
||
5578 | +#define DPAA2_ETH_DIST_L4SRC BIT(7) |
||
5579 | +#define DPAA2_ETH_DIST_L4DST BIT(8) |
||
5580 | +#define DPAA2_ETH_DIST_ALL (~0U) |
||
5581 | + |
||
5582 | +/* Default Rx hash key */ |
||
5583 | +#define DPAA2_ETH_DIST_DEFAULT_HASH \ |
||
5584 | + (DPAA2_ETH_DIST_IPPROTO | \ |
||
5585 | + DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \ |
||
5586 | + DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST) |
||
5587 | |||
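Callers combine these ids into the key_fields bitmask passed to dpaa2_eth_set_dist_key(); a brief sketch (the field selection is illustrative):

	/* Sketch: hash Rx traffic on IP addresses and L4 ports only */
	u32 fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST |
		     DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST;
	int err = dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, fields);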
5588 | #define dpaa2_eth_hash_enabled(priv) \ |
||
5589 | ((priv)->dpni_attrs.num_queues > 1) |
||
5590 | |||
5591 | +#define dpaa2_eth_fs_enabled(priv) \ |
||
5592 | + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS)) |
||
5593 | + |
||
5594 | +#define dpaa2_eth_fs_mask_enabled(priv) \ |
||
5595 | + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING) |
||
5596 | + |
||
5597 | +#define dpaa2_eth_fs_count(priv) \ |
||
5598 | + ((priv)->dpni_attrs.fs_entries) |
||
5599 | + |
||
5600 | /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */ |
||
5601 | #define DPAA2_CLASSIFIER_DMA_SIZE 256 |
||
5602 | |||
5603 | extern const struct ethtool_ops dpaa2_ethtool_ops; |
||
5604 | extern const char dpaa2_eth_drv_version[]; |
||
5605 | |||
5606 | -static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) |
||
5607 | +static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, |
||
5608 | + u16 ver_major, u16 ver_minor) |
||
5609 | +{ |
||
5610 | + if (priv->dpni_ver_major == ver_major) |
||
5611 | + return priv->dpni_ver_minor - ver_minor; |
||
5612 | + return priv->dpni_ver_major - ver_major; |
||
5613 | +} |
||
5614 | + |
||
5615 | +#define DPNI_DIST_KEY_VER_MAJOR 7 |
||
5616 | +#define DPNI_DIST_KEY_VER_MINOR 5 |
||
5617 | + |
||
5618 | +static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv) |
||
5619 | +{ |
||
5620 | + return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR, |
||
5621 | + DPNI_DIST_KEY_VER_MINOR) < 0); |
||
5622 | +} |
||
5623 | + |
||
5624 | +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around |
||
5625 | + * the buffer also needs space for its shared info struct, and we need |
||
5626 | + * to allocate enough to accommodate hardware alignment restrictions |
||
5627 | + */ |
||
5628 | +static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv) |
||
5629 | +{ |
||
5630 | + return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align; |
||
5631 | +} |
||
5632 | + |
||
5633 | +/* Total headroom needed by the hardware in Tx frame buffers */ |
||
5634 | +static inline unsigned int |
||
5635 | +dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb) |
||
5636 | +{ |
||
5637 | + unsigned int headroom = DPAA2_ETH_SWA_SIZE; |
||
5638 | + |
||
5639 | + /* If we don't have an skb (e.g. XDP buffer), we only need space for |
||
5640 | + * the software annotation area |
||
5641 | + */ |
||
5642 | + if (!skb) |
||
5643 | + return headroom; |
||
5644 | + |
||
5645 | + /* For non-linear skbs we have no headroom requirement, as we build a |
||
5646 | + * SG frame with a newly allocated SGT buffer |
||
5647 | + */ |
||
5648 | + if (skb_is_nonlinear(skb)) |
||
5649 | + return 0; |
||
5650 | + |
||
5651 | + /* If we have Tx timestamping, need 128B hardware annotation */ |
||
5652 | + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) |
||
5653 | + headroom += DPAA2_ETH_TX_HWA_SIZE; |
||
5654 | + |
||
5655 | + return headroom; |
||
5656 | +} |
||
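A quick worked example of the helper above, using the constants defined earlier: a linear skb with Tx timestamping requested needs DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_HWA_SIZE = 64 + 128 = 192 bytes of headroom, a linear skb without timestamping needs 64 bytes, and a nonlinear skb needs none. A sketch of how a Tx path might use it:

	/* Sketch: decide whether a Tx skb must be reallocated to gain
	 * enough headroom for the hardware.
	 */
	unsigned int needed = dpaa2_eth_needed_headroom(priv, skb);
	bool must_realloc = skb_headroom(skb) < needed;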
5657 | + |
||
5658 | +/* Extra headroom space requested to hardware, in order to make sure there's |
||
5659 | + * no realloc'ing in forwarding scenarios. We need to reserve enough space |
||
5660 | + * such that we can accommodate the maximum required Tx offset and alignment |
||
5661 | + * in the ingress frame buffer |
||
5662 | + */ |
||
5663 | +static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv) |
||
5664 | +{ |
||
5665 | + return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - |
||
5666 | + DPAA2_ETH_RX_HWA_SIZE; |
||
5667 | +} |
||
5668 | + |
||
5669 | +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) |
||
5670 | { |
||
5671 | return priv->dpni_attrs.num_queues; |
||
5672 | } |
||
5673 | |||
5674 | +static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv) |
||
5675 | +{ |
||
5676 | + return priv->dpni_attrs.num_tcs; |
||
5677 | +} |
||
5678 | + |
||
5679 | +static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv, |
||
5680 | + int traffic_class) |
||
5681 | +{ |
||
5682 | + return priv->pfc.pfc_en & (1 << traffic_class); |
||
5683 | +} |
||
5684 | + |
||
5685 | +enum dpaa2_eth_td_cfg { |
||
5686 | + DPAA2_ETH_TD_NONE, |
||
5687 | + DPAA2_ETH_TD_QUEUE, |
||
5688 | + DPAA2_ETH_TD_GROUP |
||
5689 | +}; |
||
5690 | + |
||
5691 | +static inline enum dpaa2_eth_td_cfg |
||
5692 | +dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv) |
||
5693 | +{ |
||
5694 | + bool pfc_enabled = !!(priv->pfc.pfc_en); |
||
5695 | + |
||
5696 | + if (pfc_enabled) |
||
5697 | + return DPAA2_ETH_TD_GROUP; |
||
5698 | + else if (priv->tx_pause_frames) |
||
5699 | + return DPAA2_ETH_TD_NONE; |
||
5700 | + else |
||
5701 | + return DPAA2_ETH_TD_QUEUE; |
||
5702 | +} |
||
5703 | + |
||
5704 | +static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv) |
||
5705 | +{ |
||
5706 | + return 1; |
||
5707 | +} |
||
5708 | + |
||
5709 | +void check_cls_support(struct dpaa2_eth_priv *priv); |
||
5710 | + |
||
5711 | +int set_rx_taildrop(struct dpaa2_eth_priv *priv); |
||
5712 | + |
||
5713 | +int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv, |
||
5714 | + enum dpaa2_eth_rx_dist type, u32 key_fields); |
||
5715 | + |
||
5716 | #endif /* __DPAA2_H */ |
||
5717 | --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c |
||
5718 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c |
||
5719 | @@ -1,5 +1,5 @@ |
||
5720 | /* Copyright 2014-2016 Freescale Semiconductor Inc. |
||
5721 | - * Copyright 2016 NXP |
||
5722 | + * Copyright 2016-2017 NXP |
||
5723 | * |
||
5724 | * Redistribution and use in source and binary forms, with or without |
||
5725 | * modification, are permitted provided that the following conditions are met: |
||
5726 | @@ -62,6 +62,7 @@ static char dpaa2_ethtool_extras[][ETH_G |
||
5727 | "[drv] tx conf bytes", |
||
5728 | "[drv] tx sg frames", |
||
5729 | "[drv] tx sg bytes", |
||
5730 | + "[drv] tx realloc frames", |
||
5731 | "[drv] rx sg frames", |
||
5732 | "[drv] rx sg bytes", |
||
5733 | "[drv] enqueue portal busy", |
||
5734 | @@ -69,6 +70,15 @@ static char dpaa2_ethtool_extras[][ETH_G |
||
5735 | "[drv] dequeue portal busy", |
||
5736 | "[drv] channel pull errors", |
||
5737 | "[drv] cdan", |
||
5738 | + "[drv] tx congestion state", |
||
5739 | +#ifdef CONFIG_FSL_QBMAN_DEBUG |
||
5740 | + /* FQ stats */ |
||
5741 | + "rx pending frames", |
||
5742 | + "rx pending bytes", |
||
5743 | + "tx conf pending frames", |
||
5744 | + "tx conf pending bytes", |
||
5745 | + "buffer count" |
||
5746 | +#endif |
||
5747 | }; |
||
5748 | |||
5749 | #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) |
||
5750 | @@ -76,10 +86,15 @@ static char dpaa2_ethtool_extras[][ETH_G |
||
5751 | static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, |
||
5752 | struct ethtool_drvinfo *drvinfo) |
||
5753 | { |
||
5754 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
5755 | + |
||
5756 | strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); |
||
5757 | strlcpy(drvinfo->version, dpaa2_eth_drv_version, |
||
5758 | sizeof(drvinfo->version)); |
||
5759 | - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); |
||
5760 | + |
||
5761 | + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), |
||
5762 | + "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); |
||
5763 | + |
||
5764 | strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), |
||
5765 | sizeof(drvinfo->bus_info)); |
||
5766 | } |
||
5767 | @@ -113,25 +128,37 @@ out: |
||
5768 | return err; |
||
5769 | } |
||
5770 | |||
5771 | +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7 |
||
5772 | +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1 |
||
5773 | static int |
||
5774 | dpaa2_eth_set_link_ksettings(struct net_device *net_dev, |
||
5775 | const struct ethtool_link_ksettings *link_settings) |
||
5776 | { |
||
5777 | - struct dpni_link_cfg cfg = {0}; |
||
5778 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
5779 | + struct dpni_link_state state = {0}; |
||
5780 | + struct dpni_link_cfg cfg = {0}; |
||
5781 | int err = 0; |
||
5782 | |||
5783 | - netdev_dbg(net_dev, "Setting link parameters..."); |
||
5784 | + /* If using an older MC version, the DPNI must be down |
||
5785 | + * in order to change link settings, so let the user know if
||
5786 | + * the interface is still up.
||
5787 | + */ |
||
5788 | + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR, |
||
5789 | + DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) { |
||
5790 | + if (netif_running(net_dev)) { |
||
5791 | + netdev_info(net_dev, "Interface must be brought down first.\n"); |
||
5792 | + return -EACCES; |
||
5793 | + } |
||
5794 | + } |
||
5795 | |||
5796 | - /* Due to a temporary MC limitation, the DPNI must be down |
||
5797 | - * in order to be able to change link settings. Taking steps to let |
||
5798 | - * the user know that. |
||
5799 | - */ |
||
5800 | - if (netif_running(net_dev)) { |
||
5801 | - netdev_info(net_dev, "Sorry, interface must be brought down first.\n"); |
||
5802 | - return -EACCES; |
||
5803 | + /* Need to interrogate link state to get flow control params */ |
||
5804 | + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); |
||
5805 | + if (err) { |
||
5806 | + netdev_err(net_dev, "Error getting link state\n"); |
||
5807 | + goto out; |
||
5808 | } |
||
5809 | |||
5810 | + cfg.options = state.options; |
||
5811 | cfg.rate = link_settings->base.speed; |
||
5812 | if (link_settings->base.autoneg == AUTONEG_ENABLE) |
||
5813 | cfg.options |= DPNI_LINK_OPT_AUTONEG; |
||
5814 | @@ -149,6 +176,81 @@ dpaa2_eth_set_link_ksettings(struct net_ |
||
5815 | */ |
||
5816 | netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err); |
||
5817 | |||
5818 | +out: |
||
5819 | + return err; |
||
5820 | +} |
||
5821 | + |
||
5822 | +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, |
||
5823 | + struct ethtool_pauseparam *pause) |
||
5824 | +{ |
||
5825 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
5826 | + struct dpni_link_state state = {0}; |
||
5827 | + int err; |
||
5828 | + |
||
5829 | + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); |
||
5830 | + if (err) |
||
5831 | + netdev_dbg(net_dev, "Error getting link state\n"); |
||
5832 | + |
||
5833 | + /* Report general port autonegotiation status */ |
||
5834 | + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG); |
||
5835 | + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE); |
||
5836 | + pause->tx_pause = pause->rx_pause ^ |
||
5837 | + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE); |
||
5838 | +} |
||
5839 | + |
||
5840 | +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, |
||
5841 | + struct ethtool_pauseparam *pause) |
||
5842 | +{ |
||
5843 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
5844 | + struct dpni_link_state state = {0}; |
||
5845 | + struct dpni_link_cfg cfg = {0}; |
||
5846 | + u32 current_tx_pause; |
||
5847 | + int err = 0; |
||
5848 | + |
||
5849 | + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); |
||
5850 | + if (err) { |
||
5851 | + netdev_dbg(net_dev, "Error getting link state\n"); |
||
5852 | + goto out; |
||
5853 | + } |
||
5854 | + |
||
5855 | + cfg.rate = state.rate; |
||
5856 | + cfg.options = state.options; |
||
5857 | + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^ |
||
5858 | + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE); |
||
5859 | + |
||
5860 | + /* We don't support changing pause frame autonegotiation separately |
||
5861 | + * from general port autoneg |
||
5862 | + */ |
||
5863 | + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG)) |
||
5864 | + netdev_warn(net_dev, |
||
5865 | + "Cannot change pause frame autoneg separately\n"); |
||
5866 | + |
||
5867 | + if (pause->rx_pause) |
||
5868 | + cfg.options |= DPNI_LINK_OPT_PAUSE; |
||
5869 | + else |
||
5870 | + cfg.options &= ~DPNI_LINK_OPT_PAUSE; |
||
5871 | + |
||
5872 | + if (pause->rx_pause ^ pause->tx_pause) |
||
5873 | + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; |
||
5874 | + else |
||
5875 | + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; |
||
5876 | + |
||
5877 | + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); |
||
5878 | + if (err) { |
||
5879 | + netdev_dbg(net_dev, "Error setting link\n"); |
||
5880 | + goto out; |
||
5881 | + } |
||
5882 | + |
||
5883 | + /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */ |
||
5884 | + if (current_tx_pause == pause->tx_pause) |
||
5885 | + goto out; |
||
5886 | + |
||
5887 | + priv->tx_pause_frames = pause->tx_pause; |
||
5888 | + err = set_rx_taildrop(priv); |
||
5889 | + if (err) |
||
5890 | + netdev_dbg(net_dev, "Error configuring taildrop\n"); |
||
5891 | + |
||
5892 | +out: |
||
5893 | return err; |
||
5894 | } |
||
5895 | |||
5896 | @@ -192,6 +294,13 @@ static void dpaa2_eth_get_ethtool_stats( |
||
5897 | int j, k, err; |
||
5898 | int num_cnt; |
||
5899 | union dpni_statistics dpni_stats; |
||
5900 | + |
||
5901 | +#ifdef CONFIG_FSL_QBMAN_DEBUG |
||
5902 | + u32 fcnt, bcnt; |
||
5903 | + u32 fcnt_rx_total = 0, fcnt_tx_total = 0; |
||
5904 | + u32 bcnt_rx_total = 0, bcnt_tx_total = 0; |
||
5905 | + u32 buf_cnt; |
||
5906 | +#endif |
||
5907 | u64 cdan = 0; |
||
5908 | u64 portal_busy = 0, pull_err = 0; |
||
5909 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
5910 | @@ -204,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats( |
||
5911 | /* Print standard counters, from DPNI statistics */ |
||
5912 | for (j = 0; j <= 2; j++) { |
||
5913 | err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, |
||
5914 | - j, &dpni_stats); |
||
5915 | + j, 0, &dpni_stats); |
||
5916 | if (err != 0) |
||
5917 | netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); |
||
5918 | switch (j) { |
||
5919 | @@ -240,12 +349,474 @@ static void dpaa2_eth_get_ethtool_stats( |
||
5920 | *(data + i++) = portal_busy; |
||
5921 | *(data + i++) = pull_err; |
||
5922 | *(data + i++) = cdan; |
||
5923 | + |
||
5924 | + *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem); |
||
5925 | + |
||
5926 | +#ifdef CONFIG_FSL_QBMAN_DEBUG |
||
5927 | + for (j = 0; j < priv->num_fqs; j++) { |
||
5928 | + /* Print FQ instantaneous counts */ |
||
5929 | + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, |
||
5930 | + &fcnt, &bcnt); |
||
5931 | + if (err) { |
||
5932 | + netdev_warn(net_dev, "FQ query error %d\n", err);
||
5933 | + return; |
||
5934 | + } |
||
5935 | + |
||
5936 | + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { |
||
5937 | + fcnt_tx_total += fcnt; |
||
5938 | + bcnt_tx_total += bcnt; |
||
5939 | + } else { |
||
5940 | + fcnt_rx_total += fcnt; |
||
5941 | + bcnt_rx_total += bcnt; |
||
5942 | + } |
||
5943 | + } |
||
5944 | + |
||
5945 | + *(data + i++) = fcnt_rx_total; |
||
5946 | + *(data + i++) = bcnt_rx_total; |
||
5947 | + *(data + i++) = fcnt_tx_total; |
||
5948 | + *(data + i++) = bcnt_tx_total; |
||
5949 | + |
||
5950 | + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); |
||
5951 | + if (err) { |
||
5952 | + netdev_warn(net_dev, "Buffer count query error %d\n", err); |
||
5953 | + return; |
||
5954 | + } |
||
5955 | + *(data + i++) = buf_cnt; |
||
5956 | +#endif |
||
5957 | +} |
||
5958 | + |
||
5959 | +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) |
||
5960 | +{ |
||
5961 | + int i, off = 0; |
||
5962 | + |
||
5963 | + for (i = 0; i < priv->num_dist_fields; i++) { |
||
5964 | + if (priv->dist_fields[i].cls_prot == prot && |
||
5965 | + priv->dist_fields[i].cls_field == field) |
||
5966 | + return off; |
||
5967 | + off += priv->dist_fields[i].size; |
||
5968 | + } |
||
5969 | + |
||
5970 | + return -1; |
||
5971 | +} |
||
5972 | + |
||
5973 | +static u8 cls_key_size(struct dpaa2_eth_priv *priv) |
||
5974 | +{ |
||
5975 | + u8 i, size = 0; |
||
5976 | + |
||
5977 | + for (i = 0; i < priv->num_dist_fields; i++) |
||
5978 | + size += priv->dist_fields[i].size; |
||
5979 | + |
||
5980 | + return size; |
||
5981 | +} |
||
5982 | + |
||
5983 | +void check_cls_support(struct dpaa2_eth_priv *priv) |
||
5984 | +{ |
||
5985 | + u8 key_size = cls_key_size(priv); |
||
5986 | + struct device *dev = priv->net_dev->dev.parent; |
||
5987 | + |
||
5988 | + if (dpaa2_eth_hash_enabled(priv)) { |
||
5989 | + if (priv->dpni_attrs.fs_key_size < key_size) { |
||
5990 | + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", |
||
5991 | + priv->dpni_attrs.fs_key_size, |
||
5992 | + key_size); |
||
5993 | + goto disable_fs; |
||
5994 | + } |
||
5995 | + if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) { |
||
5996 | + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n", |
||
5997 | + DPKG_MAX_NUM_OF_EXTRACTS); |
||
5998 | + goto disable_fs; |
||
5999 | + } |
||
6000 | + } |
||
6001 | + |
||
6002 | + if (dpaa2_eth_fs_enabled(priv)) { |
||
6003 | + if (!dpaa2_eth_hash_enabled(priv)) { |
||
6004 | + dev_info(dev, "Insufficient queues. Steering is disabled\n"); |
||
6005 | + goto disable_fs; |
||
6006 | + } |
||
6007 | + |
||
6008 | + if (!dpaa2_eth_fs_mask_enabled(priv)) { |
||
6009 | + dev_info(dev, "Key masks not supported. Steering is disabled\n"); |
||
6010 | + goto disable_fs; |
||
6011 | + } |
||
6012 | + } |
||
6013 | + |
||
6014 | + return; |
||
6015 | + |
||
6016 | +disable_fs: |
||
6017 | + priv->dpni_attrs.options |= DPNI_OPT_NO_FS; |
||
6018 | + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING; |
||
6019 | +} |
||
6020 | + |
||
6021 | +static int prep_l4_rule(struct dpaa2_eth_priv *priv, |
||
6022 | + struct ethtool_tcpip4_spec *l4_value, |
||
6023 | + struct ethtool_tcpip4_spec *l4_mask, |
||
6024 | + void *key, void *mask, u8 l4_proto) |
||
6025 | +{ |
||
6026 | + int offset; |
||
6027 | + |
||
6028 | + if (l4_mask->tos) { |
||
6029 | + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); |
||
6030 | + return -EOPNOTSUPP; |
||
6031 | + } |
||
6032 | + |
||
6033 | + if (l4_mask->ip4src) { |
||
6034 | + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); |
||
6035 | + *(u32 *)(key + offset) = l4_value->ip4src; |
||
6036 | + *(u32 *)(mask + offset) = l4_mask->ip4src; |
||
6037 | + } |
||
6038 | + |
||
6039 | + if (l4_mask->ip4dst) { |
||
6040 | + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); |
||
6041 | + *(u32 *)(key + offset) = l4_value->ip4dst; |
||
6042 | + *(u32 *)(mask + offset) = l4_mask->ip4dst; |
||
6043 | + } |
||
6044 | + |
||
6045 | + if (l4_mask->psrc) { |
||
6046 | + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); |
||
6047 | + *(u32 *)(key + offset) = l4_value->psrc; |
||
6048 | + *(u32 *)(mask + offset) = l4_mask->psrc; |
||
6049 | + } |
||
6050 | + |
||
6051 | + if (l4_mask->pdst) { |
||
6052 | + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); |
||
6053 | + *(u32 *)(key + offset) = l4_value->pdst; |
||
6054 | + *(u32 *)(mask + offset) = l4_mask->pdst; |
||
6055 | + } |
||
6056 | + |
||
6057 | + /* Only apply the rule for the user-specified L4 protocol |
||
6058 | + * and if ethertype matches IPv4 |
||
6059 | + */ |
||
6060 | + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); |
||
6061 | + *(u16 *)(key + offset) = htons(ETH_P_IP); |
||
6062 | + *(u16 *)(mask + offset) = 0xFFFF; |
||
6063 | + |
||
6064 | + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); |
||
6065 | + *(u8 *)(key + offset) = l4_proto; |
||
6066 | + *(u8 *)(mask + offset) = 0xFF; |
||
6067 | + |
||
6068 | + /* TODO: check IP version */ |
||
6069 | + |
||
6070 | + return 0; |
||
6071 | +} |
||
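The anchoring pattern above (force the ethertype and IP protocol into the key, then add only the user-masked fields) generalizes to every rule type in this file. A stand-alone sketch under a hypothetical fixed layout; the real offsets come from cls_key_off() over priv->dist_fields, and byte order handling is simplified here.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical flattened layout, for illustration only */
    #define OFF_ETHTYPE 0 /* 2 bytes */
    #define OFF_IPPROTO 2 /* 1 byte  */
    #define OFF_DPORT   3 /* 2 bytes */
    #define KEY_SIZE    5

    /* "TCP to port 80": ethertype and IP proto anchor the rule (fully
     * masked), the port is the user-requested match, the rest wildcards. */
    static void build_tcp_dport_rule(uint8_t *key, uint8_t *mask)
    {
            memset(key, 0, KEY_SIZE);
            memset(mask, 0, KEY_SIZE);

            key[OFF_ETHTYPE] = 0x08;      /* ETH_P_IP, big endian */
            key[OFF_ETHTYPE + 1] = 0x00;
            memset(mask + OFF_ETHTYPE, 0xff, 2);

            key[OFF_IPPROTO] = 6;         /* IPPROTO_TCP */
            mask[OFF_IPPROTO] = 0xff;

            key[OFF_DPORT] = 0x00;        /* port 80, big endian */
            key[OFF_DPORT + 1] = 80;
            memset(mask + OFF_DPORT, 0xff, 2);
    }

    int main(void)
    {
            uint8_t key[KEY_SIZE], mask[KEY_SIZE];
            int i;

            build_tcp_dport_rule(key, mask);
            for (i = 0; i < KEY_SIZE; i++)
                    printf("byte %d: key=%02x mask=%02x\n", i, key[i], mask[i]);
            return 0;
    }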
6072 | + |
||
6073 | +static int prep_eth_rule(struct dpaa2_eth_priv *priv, |
||
6074 | + struct ethhdr *eth_value, struct ethhdr *eth_mask, |
||
6075 | + void *key, void *mask) |
||
6076 | +{ |
||
6077 | + int offset; |
||
6078 | + |
||
6079 | + if (eth_mask->h_proto) { |
||
6080 | + netdev_err(priv->net_dev, "Ethertype is not supported!\n"); |
||
6081 | + return -EOPNOTSUPP; |
||
6082 | + } |
||
6083 | + |
||
6084 | + if (!is_zero_ether_addr(eth_mask->h_source)) { |
||
6085 | + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); |
||
6086 | + ether_addr_copy(key + offset, eth_value->h_source); |
||
6087 | + ether_addr_copy(mask + offset, eth_mask->h_source); |
||
6088 | + } |
||
6089 | + |
||
6090 | + if (!is_zero_ether_addr(eth_mask->h_dest)) { |
||
6091 | + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); |
||
6092 | + ether_addr_copy(key + offset, eth_value->h_dest); |
||
6093 | + ether_addr_copy(mask + offset, eth_mask->h_dest); |
||
6094 | + } |
||
6095 | + |
||
6096 | + return 0; |
||
6097 | +} |
||
6098 | + |
||
6099 | +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, |
||
6100 | + struct ethtool_usrip4_spec *uip_value, |
||
6101 | + struct ethtool_usrip4_spec *uip_mask, |
||
6102 | + void *key, void *mask) |
||
6103 | +{ |
||
6104 | + int offset; |
||
6105 | + |
||
6106 | + if (uip_mask->tos) |
||
6107 | + return -EOPNOTSUPP; |
||
6108 | + |
||
6109 | + if (uip_mask->ip4src) { |
||
6110 | + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); |
||
6111 | + *(u32 *)(key + offset) = uip_value->ip4src; |
||
6112 | + *(u32 *)(mask + offset) = uip_mask->ip4src; |
||
6113 | + } |
||
6114 | + |
||
6115 | + if (uip_mask->ip4dst) { |
||
6116 | + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); |
||
6117 | + *(u32 *)(key + offset) = uip_value->ip4dst; |
||
6118 | + *(u32 *)(mask + offset) = uip_mask->ip4dst; |
||
6119 | + } |
||
6120 | + |
||
6121 | + if (uip_mask->proto) { |
||
6122 | + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); |
||
6123 | + *(u32 *)(key + offset) = uip_value->proto; |
||
6124 | + *(u32 *)(mask + offset) = uip_mask->proto; |
||
6125 | + } |
||
6126 | + if (uip_mask->l4_4_bytes) { |
||
6127 | + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); |
||
6128 | + *(u16 *)(key + offset) = uip_value->l4_4_bytes >> 16;
||
6129 | + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes >> 16;
||
6130 | + |
||
6131 | + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); |
||
6132 | + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF; |
||
6133 | + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF; |
||
6134 | + } |
||
6135 | + |
||
6136 | + /* Ethertype must be IP */ |
||
6137 | + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); |
||
6138 | + *(u16 *)(key + offset) = htons(ETH_P_IP); |
||
6139 | + *(u16 *)(mask + offset) = 0xFFFF; |
||
6140 | + |
||
6141 | + return 0; |
||
6142 | +} |
||
6143 | + |
||
6144 | +static int prep_ext_rule(struct dpaa2_eth_priv *priv, |
||
6145 | + struct ethtool_flow_ext *ext_value, |
||
6146 | + struct ethtool_flow_ext *ext_mask, |
||
6147 | + void *key, void *mask) |
||
6148 | +{ |
||
6149 | + int offset; |
||
6150 | + |
||
6151 | + if (ext_mask->vlan_etype) |
||
6152 | + return -EOPNOTSUPP; |
||
6153 | + |
||
6154 | + if (ext_mask->vlan_tci) { |
||
6155 | + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI); |
||
6156 | + *(u16 *)(key + offset) = ext_value->vlan_tci; |
||
6157 | + *(u16 *)(mask + offset) = ext_mask->vlan_tci; |
||
6158 | + } |
||
6159 | + |
||
6160 | + return 0; |
||
6161 | +} |
||
6162 | + |
||
6163 | +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv, |
||
6164 | + struct ethtool_flow_ext *ext_value, |
||
6165 | + struct ethtool_flow_ext *ext_mask, |
||
6166 | + void *key, void *mask) |
||
6167 | +{ |
||
6168 | + int offset; |
||
6169 | + |
||
6170 | + if (!is_zero_ether_addr(ext_mask->h_dest)) { |
||
6171 | + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); |
||
6172 | + ether_addr_copy(key + offset, ext_value->h_dest); |
||
6173 | + ether_addr_copy(mask + offset, ext_mask->h_dest); |
||
6174 | + } |
||
6175 | + |
||
6176 | + return 0; |
||
6177 | +} |
||
6178 | + |
||
6179 | +static int prep_cls_rule(struct net_device *net_dev, |
||
6180 | + struct ethtool_rx_flow_spec *fs, |
||
6181 | + void *key) |
||
6182 | +{ |
||
6183 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
6184 | + const u8 key_size = cls_key_size(priv); |
||
6185 | + void *msk = key + key_size; |
||
6186 | + int err; |
||
6187 | + |
||
6188 | + memset(key, 0, key_size * 2); |
||
6189 | + |
||
6190 | + switch (fs->flow_type & 0xff) { |
||
6191 | + case TCP_V4_FLOW: |
||
6192 | + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec, |
||
6193 | + &fs->m_u.tcp_ip4_spec, key, msk, |
||
6194 | + IPPROTO_TCP); |
||
6195 | + break; |
||
6196 | + case UDP_V4_FLOW: |
||
6197 | + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec, |
||
6198 | + &fs->m_u.udp_ip4_spec, key, msk, |
||
6199 | + IPPROTO_UDP); |
||
6200 | + break; |
||
6201 | + case SCTP_V4_FLOW: |
||
6202 | + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec, |
||
6203 | + &fs->m_u.sctp_ip4_spec, key, msk, |
||
6204 | + IPPROTO_SCTP); |
||
6205 | + break; |
||
6206 | + case ETHER_FLOW: |
||
6207 | + err = prep_eth_rule(priv, &fs->h_u.ether_spec, |
||
6208 | + &fs->m_u.ether_spec, key, msk); |
||
6209 | + break; |
||
6210 | + case IP_USER_FLOW: |
||
6211 | + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec, |
||
6212 | + &fs->m_u.usr_ip4_spec, key, msk); |
||
6213 | + break; |
||
6214 | + default: |
||
6215 | + /* TODO: AH, ESP */ |
||
6216 | + return -EOPNOTSUPP; |
||
6217 | + } |
||
6218 | + if (err) |
||
6219 | + return err; |
||
6220 | + |
||
6221 | + if (fs->flow_type & FLOW_EXT) { |
||
6222 | + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); |
||
6223 | + if (err) |
||
6224 | + return err; |
||
6225 | + } |
||
6226 | + |
||
6227 | + if (fs->flow_type & FLOW_MAC_EXT) { |
||
6228 | + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); |
||
6229 | + if (err) |
||
6230 | + return err; |
||
6231 | + } |
||
6232 | + |
||
6233 | + return 0; |
||
6234 | +} |
||
6235 | + |
||
6236 | +static int del_cls(struct net_device *net_dev, int location); |
||
6237 | + |
||
6238 | +static int do_cls(struct net_device *net_dev, |
||
6239 | + struct ethtool_rx_flow_spec *fs, |
||
6240 | + bool add) |
||
6241 | +{ |
||
6242 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
6243 | + struct device *dev = net_dev->dev.parent; |
||
6244 | + const int rule_cnt = dpaa2_eth_fs_count(priv); |
||
6245 | + struct dpni_rule_cfg rule_cfg; |
||
6246 | + struct dpni_fs_action_cfg fs_act = { 0 }; |
||
6247 | + void *dma_mem; |
||
6248 | + int err = 0, tc; |
||
6249 | + |
||
6250 | + if (!dpaa2_eth_fs_enabled(priv)) { |
||
6251 | + netdev_err(net_dev, "dev does not support steering!\n"); |
||
6252 | + /* dev doesn't support steering */ |
||
6253 | + return -EOPNOTSUPP; |
||
6254 | + } |
||
6255 | + |
||
6256 | + if ((fs->ring_cookie != RX_CLS_FLOW_DISC && |
||
6257 | + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || |
||
6258 | + fs->location >= rule_cnt) |
||
6259 | + return -EINVAL; |
||
6260 | + |
||
6261 | + /* When adding a new rule, check if the location is available
||
6262 | + * and if not, free the existing table entry before inserting |
||
6263 | + * the new one |
||
6264 | + */ |
||
6265 | + if (add && (priv->cls_rule[fs->location].in_use == true)) |
||
6266 | + del_cls(net_dev, fs->location); |
||
6267 | + |
||
6268 | + memset(&rule_cfg, 0, sizeof(rule_cfg)); |
||
6269 | + rule_cfg.key_size = cls_key_size(priv); |
||
6270 | + |
||
6271 | + /* allocate twice the key size, for the actual key and for mask */ |
||
6272 | + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); |
||
6273 | + if (!dma_mem) |
||
6274 | + return -ENOMEM; |
||
6275 | + |
||
6276 | + err = prep_cls_rule(net_dev, fs, dma_mem); |
||
6277 | + if (err) |
||
6278 | + goto err_free_mem; |
||
6279 | + |
||
6280 | + rule_cfg.key_iova = dma_map_single(dev, dma_mem, |
||
6281 | + rule_cfg.key_size * 2, |
||
6282 | + DMA_TO_DEVICE); |
||
6283 | + |
||
6284 | + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; |
||
6285 | + |
||
6286 | + if (fs->ring_cookie == RX_CLS_FLOW_DISC) |
||
6287 | + fs_act.options |= DPNI_FS_OPT_DISCARD; |
||
6288 | + else |
||
6289 | + fs_act.flow_id = fs->ring_cookie; |
||
6290 | + |
||
6291 | + for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) { |
||
6292 | + if (add) |
||
6293 | + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, |
||
6294 | + tc, fs->location, &rule_cfg, |
||
6295 | + &fs_act); |
||
6296 | + else |
||
6297 | + err = dpni_remove_fs_entry(priv->mc_io, 0, |
||
6298 | + priv->mc_token, tc, |
||
6299 | + &rule_cfg); |
||
6300 | + |
||
6301 | + if (err) |
||
6302 | + break; |
||
6303 | + } |
||
6304 | + |
||
6305 | + dma_unmap_single(dev, rule_cfg.key_iova, |
||
6306 | + rule_cfg.key_size * 2, DMA_TO_DEVICE); |
||
6307 | + |
||
6308 | + if (err) |
||
6309 | + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); |
||
6310 | + |
||
6311 | +err_free_mem: |
||
6312 | + kfree(dma_mem); |
||
6313 | + |
||
6314 | + return err; |
||
6315 | +} |
||
6316 | + |
||
6317 | +static int add_cls(struct net_device *net_dev, |
||
6318 | + struct ethtool_rx_flow_spec *fs) |
||
6319 | +{ |
||
6320 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
6321 | + int err; |
||
6322 | + |
||
6323 | + err = do_cls(net_dev, fs, true); |
||
6324 | + if (err) |
||
6325 | + return err; |
||
6326 | + |
||
6327 | + priv->cls_rule[fs->location].in_use = true; |
||
6328 | + priv->cls_rule[fs->location].fs = *fs; |
||
6329 | + |
||
6330 | + return 0; |
||
6331 | +} |
||
6332 | + |
||
6333 | +static int del_cls(struct net_device *net_dev, int location) |
||
6334 | +{ |
||
6335 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
6336 | + int err; |
||
6337 | + |
||
6338 | + err = do_cls(net_dev, &priv->cls_rule[location].fs, false); |
||
6339 | + if (err) |
||
6340 | + return err; |
||
6341 | + |
||
6342 | + priv->cls_rule[location].in_use = false; |
||
6343 | + |
||
6344 | + return 0; |
||
6345 | +} |
||
6346 | + |
||
6347 | +static int set_hash(struct net_device *net_dev, u64 data) |
||
6348 | +{ |
||
6349 | + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
6350 | + u32 key = 0; |
||
6351 | + int i; |
||
6352 | + |
||
6353 | + if (data & RXH_DISCARD) |
||
6354 | + return -EOPNOTSUPP; |
||
6355 | + |
||
6356 | + for (i = 0; i < priv->num_dist_fields; i++) |
||
6357 | + if (priv->dist_fields[i].rxnfc_field & data) |
||
6358 | + key |= priv->dist_fields[i].id; |
||
6359 | + |
||
6360 | + return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key); |
||
6361 | +} |
||
6362 | + |
||
6363 | +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, |
||
6364 | + struct ethtool_rxnfc *rxnfc) |
||
6365 | +{ |
||
6366 | + int err = 0; |
||
6367 | + |
||
6368 | + switch (rxnfc->cmd) { |
||
6369 | + case ETHTOOL_SRXCLSRLINS: |
||
6370 | + err = add_cls(net_dev, &rxnfc->fs); |
||
6371 | + break; |
||
6372 | + case ETHTOOL_SRXCLSRLDEL: |
||
6373 | + err = del_cls(net_dev, rxnfc->fs.location); |
||
6374 | + break; |
||
6375 | + case ETHTOOL_SRXFH: |
||
6376 | + err = set_hash(net_dev, rxnfc->data); |
||
6377 | + break; |
||
6378 | + default: |
||
6379 | + err = -EOPNOTSUPP; |
||
6380 | + } |
||
6381 | + |
||
6382 | + return err; |
||
6383 | } |
||
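These handlers are reached through the standard SIOCETHTOOL ioctl; a command such as `ethtool -N <if> flow-type udp4 dst-port 5000 action 2` exercises the ETHTOOL_SRXCLSRLINS case above. A user-space sketch of the same request follows; the interface name, port, queue, and slot are examples and error handling is trimmed.

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <linux/types.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* Steer UDP/IPv4 traffic with destination port 5000 to Rx queue 2,
     * storing the rule at table slot 'location'. */
    static int add_udp_dport_rule(const char *ifname, __u32 location)
    {
            struct ethtool_rxnfc nfc;
            struct ifreq ifr;
            int fd, err;

            fd = socket(AF_INET, SOCK_DGRAM, 0);
            if (fd < 0)
                    return -1;

            memset(&nfc, 0, sizeof(nfc));
            nfc.cmd = ETHTOOL_SRXCLSRLINS;
            nfc.fs.flow_type = UDP_V4_FLOW;
            nfc.fs.h_u.udp_ip4_spec.pdst = htons(5000);
            nfc.fs.m_u.udp_ip4_spec.pdst = 0xffff; /* match the full port */
            nfc.fs.ring_cookie = 2;                /* target Rx queue */
            nfc.fs.location = location;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&nfc;

            err = ioctl(fd, SIOCETHTOOL, &ifr);
            close(fd);
            return err;
    }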
6384 | |||
6385 | static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, |
||
6386 | struct ethtool_rxnfc *rxnfc, u32 *rule_locs) |
||
6387 | { |
||
6388 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
||
6389 | + const int rule_cnt = dpaa2_eth_fs_count(priv); |
||
6390 | + int i, j; |
||
6391 | |||
6392 | switch (rxnfc->cmd) { |
||
6393 | case ETHTOOL_GRXFH: |
||
6394 | @@ -258,6 +829,33 @@ static int dpaa2_eth_get_rxnfc(struct ne |
||
6395 | case ETHTOOL_GRXRINGS: |
||
6396 | rxnfc->data = dpaa2_eth_queue_count(priv); |
||
6397 | break; |
||
6398 | + |
||
6399 | + case ETHTOOL_GRXCLSRLCNT: |
||
6400 | + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) |
||
6401 | + if (priv->cls_rule[i].in_use) |
||
6402 | + rxnfc->rule_cnt++; |
||
6403 | + rxnfc->data = rule_cnt; |
||
6404 | + break; |
||
6405 | + |
||
6406 | + case ETHTOOL_GRXCLSRULE: |
||
6407 | + if (!priv->cls_rule[rxnfc->fs.location].in_use) |
||
6408 | + return -EINVAL; |
||
6409 | + |
||
6410 | + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; |
||
6411 | + break; |
||
6412 | + |
||
6413 | + case ETHTOOL_GRXCLSRLALL: |
||
6414 | + for (i = 0, j = 0; i < rule_cnt; i++) { |
||
6415 | + if (!priv->cls_rule[i].in_use) |
||
6416 | + continue; |
||
6417 | + if (j == rxnfc->rule_cnt) |
||
6418 | + return -EMSGSIZE; |
||
6419 | + rule_locs[j++] = i; |
||
6420 | + } |
||
6421 | + rxnfc->rule_cnt = j; |
||
6422 | + rxnfc->data = rule_cnt; |
||
6423 | + break; |
||
6424 | + |
||
6425 | default: |
||
6426 | return -EOPNOTSUPP; |
||
6427 | } |
||
6428 | @@ -270,8 +868,11 @@ const struct ethtool_ops dpaa2_ethtool_o |
||
6429 | .get_link = ethtool_op_get_link, |
||
6430 | .get_link_ksettings = dpaa2_eth_get_link_ksettings, |
||
6431 | .set_link_ksettings = dpaa2_eth_set_link_ksettings, |
||
6432 | + .get_pauseparam = dpaa2_eth_get_pauseparam, |
||
6433 | + .set_pauseparam = dpaa2_eth_set_pauseparam, |
||
6434 | .get_sset_count = dpaa2_eth_get_sset_count, |
||
6435 | .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, |
||
6436 | .get_strings = dpaa2_eth_get_strings, |
||
6437 | .get_rxnfc = dpaa2_eth_get_rxnfc, |
||
6438 | + .set_rxnfc = dpaa2_eth_set_rxnfc, |
||
6439 | }; |
||
6440 | --- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h |
||
6441 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h |
||
6442 | @@ -39,9 +39,11 @@ |
||
6443 | #define DPNI_VER_MAJOR 7 |
||
6444 | #define DPNI_VER_MINOR 0 |
||
6445 | #define DPNI_CMD_BASE_VERSION 1 |
||
6446 | +#define DPNI_CMD_2ND_VERSION 2 |
||
6447 | #define DPNI_CMD_ID_OFFSET 4 |
||
6448 | |||
6449 | #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) |
||
6450 | +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION) |
||
6451 | |||
6452 | #define DPNI_CMDID_OPEN DPNI_CMD(0x801) |
||
6453 | #define DPNI_CMDID_CLOSE DPNI_CMD(0x800) |
||
6454 | @@ -64,7 +66,7 @@ |
||
6455 | #define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) |
||
6456 | #define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) |
||
6457 | |||
6458 | -#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) |
||
6459 | +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200) |
||
6460 | #define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) |
||
6461 | |||
6462 | #define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) |
||
6463 | @@ -73,7 +75,7 @@ |
||
6464 | #define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) |
||
6465 | #define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) |
||
6466 | #define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A) |
||
6467 | -#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B) |
||
6468 | +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B) |
||
6469 | |||
6470 | #define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) |
||
6471 | #define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) |
||
6472 | @@ -87,11 +89,16 @@ |
||
6473 | |||
6474 | #define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) |
||
6475 | |||
6476 | +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240) |
||
6477 | +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241) |
||
6478 | +#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242) |
||
6479 | #define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) |
||
6480 | #define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) |
||
6481 | #define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) |
||
6482 | |||
6483 | -#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D) |
||
6484 | +#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250) |
||
6485 | +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D) |
||
6486 | +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E) |
||
6487 | #define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F) |
||
6488 | #define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260) |
||
6489 | #define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261) |
||
6490 | @@ -110,6 +117,9 @@ |
||
6491 | #define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) |
||
6492 | #define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) |
||
6493 | |||
6494 | +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273) |
||
6495 | +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274) |
||
6496 | + |
||
6497 | /* Macros for accessing command fields smaller than 1byte */ |
||
6498 | #define DPNI_MASK(field) \ |
||
6499 | GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ |
||
6500 | @@ -126,13 +136,14 @@ struct dpni_cmd_open { |
||
6501 | |||
6502 | #define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) |
||
6503 | struct dpni_cmd_set_pools { |
||
6504 | - /* cmd word 0 */ |
||
6505 | u8 num_dpbp; |
||
6506 | u8 backup_pool_mask; |
||
6507 | __le16 pad; |
||
6508 | - /* cmd word 0..4 */ |
||
6509 | - __le32 dpbp_id[DPNI_MAX_DPBP]; |
||
6510 | - /* cmd word 4..6 */ |
||
6511 | + struct { |
||
6512 | + __le16 dpbp_id; |
||
6513 | + u8 priority_mask; |
||
6514 | + u8 pad; |
||
6515 | + } pool[DPNI_MAX_DPBP]; |
||
6516 | __le16 buffer_size[DPNI_MAX_DPBP]; |
||
6517 | }; |
||
6518 | |||
6519 | @@ -303,6 +314,7 @@ struct dpni_rsp_get_tx_data_offset { |
||
6520 | |||
6521 | struct dpni_cmd_get_statistics { |
||
6522 | u8 page_number; |
||
6523 | + u8 param; |
||
6524 | }; |
||
6525 | |||
6526 | struct dpni_rsp_get_statistics { |
||
6527 | @@ -335,6 +347,22 @@ struct dpni_rsp_get_link_state { |
||
6528 | __le64 options; |
||
6529 | }; |
||
6530 | |||
6531 | +#define DPNI_COUPLED_SHIFT 0 |
||
6532 | +#define DPNI_COUPLED_SIZE 1 |
||
6533 | + |
||
6534 | +struct dpni_cmd_set_tx_shaping { |
||
6535 | + /* cmd word 0 */ |
||
6536 | + __le16 tx_cr_max_burst_size; |
||
6537 | + __le16 tx_er_max_burst_size; |
||
6538 | + __le32 pad; |
||
6539 | + /* cmd word 1 */ |
||
6540 | + __le32 tx_cr_rate_limit; |
||
6541 | + __le32 tx_er_rate_limit; |
||
6542 | + /* cmd word 2 */ |
||
6543 | + /* from LSB: coupled:1 */ |
||
6544 | + u8 coupled; |
||
6545 | +}; |
||
6546 | + |
||
6547 | struct dpni_cmd_set_max_frame_length { |
||
6548 | __le16 max_frame_length; |
||
6549 | }; |
||
6550 | @@ -394,6 +422,24 @@ struct dpni_cmd_clear_mac_filters { |
||
6551 | u8 flags; |
||
6552 | }; |
||
6553 | |||
6554 | +#define DPNI_SEPARATE_GRP_SHIFT 0 |
||
6555 | +#define DPNI_SEPARATE_GRP_SIZE 1 |
||
6556 | +#define DPNI_MODE_1_SHIFT 0 |
||
6557 | +#define DPNI_MODE_1_SIZE 4 |
||
6558 | +#define DPNI_MODE_2_SHIFT 4 |
||
6559 | +#define DPNI_MODE_2_SIZE 4 |
||
6560 | + |
||
6561 | +struct dpni_cmd_set_tx_priorities { |
||
6562 | + __le16 flags; |
||
6563 | + u8 prio_group_A; |
||
6564 | + u8 prio_group_B; |
||
6565 | + __le32 pad0; |
||
6566 | + u8 modes[4]; |
||
6567 | + __le32 pad1; |
||
6568 | + __le64 pad2; |
||
6569 | + __le16 delta_bandwidth[8]; |
||
6570 | +}; |
||
6571 | + |
||
6572 | #define DPNI_DIST_MODE_SHIFT 0 |
||
6573 | #define DPNI_DIST_MODE_SIZE 4 |
||
6574 | #define DPNI_MISS_ACTION_SHIFT 4 |
||
6575 | @@ -503,6 +549,63 @@ struct dpni_cmd_set_queue { |
||
6576 | __le64 user_context; |
||
6577 | }; |
||
6578 | |||
6579 | +#define DPNI_DISCARD_ON_MISS_SHIFT 0 |
||
6580 | +#define DPNI_DISCARD_ON_MISS_SIZE 1 |
||
6581 | + |
||
6582 | +struct dpni_cmd_set_qos_table { |
||
6583 | + __le32 pad; |
||
6584 | + u8 default_tc; |
||
6585 | + /* only the LSB */ |
||
6586 | + u8 discard_on_miss; |
||
6587 | + __le16 pad1[21]; |
||
6588 | + __le64 key_cfg_iova; |
||
6589 | +}; |
||
6590 | + |
||
6591 | +struct dpni_cmd_add_qos_entry { |
||
6592 | + __le16 pad; |
||
6593 | + u8 tc_id; |
||
6594 | + u8 key_size; |
||
6595 | + __le16 index; |
||
6596 | + __le16 pad2; |
||
6597 | + __le64 key_iova; |
||
6598 | + __le64 mask_iova; |
||
6599 | +}; |
||
6600 | + |
||
6601 | +struct dpni_cmd_remove_qos_entry { |
||
6602 | + u8 pad1[3]; |
||
6603 | + u8 key_size; |
||
6604 | + __le32 pad2; |
||
6605 | + __le64 key_iova; |
||
6606 | + __le64 mask_iova; |
||
6607 | +}; |
||
6608 | + |
||
6609 | +struct dpni_cmd_add_fs_entry { |
||
6610 | + /* cmd word 0 */ |
||
6611 | + __le16 options; |
||
6612 | + u8 tc_id; |
||
6613 | + u8 key_size; |
||
6614 | + __le16 index; |
||
6615 | + __le16 flow_id; |
||
6616 | + /* cmd word 1 */ |
||
6617 | + __le64 key_iova; |
||
6618 | + /* cmd word 2 */ |
||
6619 | + __le64 mask_iova; |
||
6620 | + /* cmd word 3 */ |
||
6621 | + __le64 flc; |
||
6622 | +}; |
||
6623 | + |
||
6624 | +struct dpni_cmd_remove_fs_entry { |
||
6625 | + /* cmd word 0 */ |
||
6626 | + __le16 pad0; |
||
6627 | + u8 tc_id; |
||
6628 | + u8 key_size; |
||
6629 | + __le32 pad1; |
||
6630 | + /* cmd word 1 */ |
||
6631 | + __le64 key_iova; |
||
6632 | + /* cmd word 2 */ |
||
6633 | + __le64 mask_iova; |
||
6634 | +}; |
||
6635 | + |
||
6636 | struct dpni_cmd_set_taildrop { |
||
6637 | /* cmd word 0 */ |
||
6638 | u8 congestion_point; |
||
6639 | @@ -538,4 +641,79 @@ struct dpni_rsp_get_taildrop { |
||
6640 | __le32 threshold; |
||
6641 | }; |
||
6642 | |||
6643 | +struct dpni_rsp_get_api_version { |
||
6644 | + u16 major; |
||
6645 | + u16 minor; |
||
6646 | +}; |
||
6647 | + |
||
6648 | +#define DPNI_DEST_TYPE_SHIFT 0 |
||
6649 | +#define DPNI_DEST_TYPE_SIZE 4 |
||
6650 | +#define DPNI_CONG_UNITS_SHIFT 4 |
||
6651 | +#define DPNI_CONG_UNITS_SIZE 2 |
||
6652 | + |
||
6653 | +struct dpni_cmd_set_congestion_notification { |
||
6654 | + /* cmd word 0 */ |
||
6655 | + u8 qtype; |
||
6656 | + u8 tc; |
||
6657 | + u8 pad[6]; |
||
6658 | + /* cmd word 1 */ |
||
6659 | + __le32 dest_id; |
||
6660 | + __le16 notification_mode; |
||
6661 | + u8 dest_priority; |
||
6662 | + /* from LSB: dest_type: 4 units:2 */ |
||
6663 | + u8 type_units; |
||
6664 | + /* cmd word 2 */ |
||
6665 | + __le64 message_iova; |
||
6666 | + /* cmd word 3 */ |
||
6667 | + __le64 message_ctx; |
||
6668 | + /* cmd word 4 */ |
||
6669 | + __le32 threshold_entry; |
||
6670 | + __le32 threshold_exit; |
||
6671 | +}; |
||
6672 | + |
||
6673 | +struct dpni_cmd_get_congestion_notification { |
||
6674 | + /* cmd word 0 */ |
||
6675 | + u8 qtype; |
||
6676 | + u8 tc; |
||
6677 | +}; |
||
6678 | + |
||
6679 | +struct dpni_rsp_get_congestion_notification { |
||
6680 | + /* cmd word 0 */ |
||
6681 | + __le64 pad; |
||
6682 | + /* cmd word 1 */ |
||
6683 | + __le32 dest_id; |
||
6684 | + __le16 notification_mode; |
||
6685 | + u8 dest_priority; |
||
6686 | + /* from LSB: dest_type: 4 units:2 */ |
||
6687 | + u8 type_units; |
||
6688 | + /* cmd word 2 */ |
||
6689 | + __le64 message_iova; |
||
6690 | + /* cmd word 3 */ |
||
6691 | + __le64 message_ctx; |
||
6692 | + /* cmd word 4 */ |
||
6693 | + __le32 threshold_entry; |
||
6694 | + __le32 threshold_exit; |
||
6695 | +}; |
||
6696 | + |
||
6697 | +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 |
||
6698 | +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 |
||
6699 | +struct dpni_cmd_set_rx_fs_dist { |
||
6700 | + __le16 dist_size; |
||
6701 | + u8 enable; |
||
6702 | + u8 tc; |
||
6703 | + __le16 miss_flow_id; |
||
6704 | + __le16 pad; |
||
6705 | + __le64 key_cfg_iova; |
||
6706 | +}; |
||
6707 | + |
||
6708 | +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 |
||
6709 | +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 |
||
6710 | +struct dpni_cmd_set_rx_hash_dist { |
||
6711 | + __le16 dist_size; |
||
6712 | + u8 enable; |
||
6713 | + u8 tc; |
||
6714 | + __le32 pad; |
||
6715 | + __le64 key_cfg_iova; |
||
6716 | +}; |
||
6717 | + |
||
6718 | #endif /* _FSL_DPNI_CMD_H */ |
||
6719 | --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c |
||
6720 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c |
||
6721 | @@ -122,7 +122,7 @@ int dpni_open(struct fsl_mc_io *mc_io, |
||
6722 | int dpni_id, |
||
6723 | u16 *token) |
||
6724 | { |
||
6725 | - struct mc_command cmd = { 0 }; |
||
6726 | + struct fsl_mc_command cmd = { 0 }; |
||
6727 | struct dpni_cmd_open *cmd_params; |
||
6728 | |||
6729 | int err; |
||
6730 | @@ -160,7 +160,7 @@ int dpni_close(struct fsl_mc_io *mc_io, |
||
6731 | u32 cmd_flags, |
||
6732 | u16 token) |
||
6733 | { |
||
6734 | - struct mc_command cmd = { 0 }; |
||
6735 | + struct fsl_mc_command cmd = { 0 }; |
||
6736 | |||
6737 | /* prepare command */ |
||
6738 | cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, |
||
6739 | @@ -188,7 +188,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_ |
||
6740 | u16 token, |
||
6741 | const struct dpni_pools_cfg *cfg) |
||
6742 | { |
||
6743 | - struct mc_command cmd = { 0 }; |
||
6744 | + struct fsl_mc_command cmd = { 0 }; |
||
6745 | struct dpni_cmd_set_pools *cmd_params; |
||
6746 | int i; |
||
6747 | |||
6748 | @@ -199,7 +199,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_ |
||
6749 | cmd_params = (struct dpni_cmd_set_pools *)cmd.params; |
||
6750 | cmd_params->num_dpbp = cfg->num_dpbp; |
||
6751 | for (i = 0; i < DPNI_MAX_DPBP; i++) { |
||
6752 | - cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); |
||
6753 | + cmd_params->pool[i].dpbp_id = |
||
6754 | + cpu_to_le16(cfg->pools[i].dpbp_id); |
||
6755 | + cmd_params->pool[i].priority_mask = |
||
6756 | + cfg->pools[i].priority_mask; |
||
6757 | cmd_params->buffer_size[i] = |
||
6758 | cpu_to_le16(cfg->pools[i].buffer_size); |
||
6759 | cmd_params->backup_pool_mask |= |
||
6760 | @@ -222,7 +225,7 @@ int dpni_enable(struct fsl_mc_io *mc_io, |
||
6761 | u32 cmd_flags, |
||
6762 | u16 token) |
||
6763 | { |
||
6764 | - struct mc_command cmd = { 0 }; |
||
6765 | + struct fsl_mc_command cmd = { 0 }; |
||
6766 | |||
6767 | /* prepare command */ |
||
6768 | cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, |
||
6769 | @@ -245,7 +248,7 @@ int dpni_disable(struct fsl_mc_io *mc_io |
||
6770 | u32 cmd_flags, |
||
6771 | u16 token) |
||
6772 | { |
||
6773 | - struct mc_command cmd = { 0 }; |
||
6774 | + struct fsl_mc_command cmd = { 0 }; |
||
6775 | |||
6776 | /* prepare command */ |
||
6777 | cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, |
||
6778 | @@ -270,7 +273,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc |
||
6779 | u16 token, |
||
6780 | int *en) |
||
6781 | { |
||
6782 | - struct mc_command cmd = { 0 }; |
||
6783 | + struct fsl_mc_command cmd = { 0 }; |
||
6784 | struct dpni_rsp_is_enabled *rsp_params; |
||
6785 | int err; |
||
6786 | |||
6787 | @@ -303,7 +306,7 @@ int dpni_reset(struct fsl_mc_io *mc_io, |
||
6788 | u32 cmd_flags, |
||
6789 | u16 token) |
||
6790 | { |
||
6791 | - struct mc_command cmd = { 0 }; |
||
6792 | + struct fsl_mc_command cmd = { 0 }; |
||
6793 | |||
6794 | /* prepare command */ |
||
6795 | cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, |
||
6796 | @@ -335,7 +338,7 @@ int dpni_set_irq_enable(struct fsl_mc_io |
||
6797 | u8 irq_index, |
||
6798 | u8 en) |
||
6799 | { |
||
6800 | - struct mc_command cmd = { 0 }; |
||
6801 | + struct fsl_mc_command cmd = { 0 }; |
||
6802 | struct dpni_cmd_set_irq_enable *cmd_params; |
||
6803 | |||
6804 | /* prepare command */ |
||
6805 | @@ -366,7 +369,7 @@ int dpni_get_irq_enable(struct fsl_mc_io |
||
6806 | u8 irq_index, |
||
6807 | u8 *en) |
||
6808 | { |
||
6809 | - struct mc_command cmd = { 0 }; |
||
6810 | + struct fsl_mc_command cmd = { 0 }; |
||
6811 | struct dpni_cmd_get_irq_enable *cmd_params; |
||
6812 | struct dpni_rsp_get_irq_enable *rsp_params; |
||
6813 | |||
6814 | @@ -413,7 +416,7 @@ int dpni_set_irq_mask(struct fsl_mc_io * |
||
6815 | u8 irq_index, |
||
6816 | u32 mask) |
||
6817 | { |
||
6818 | - struct mc_command cmd = { 0 }; |
||
6819 | + struct fsl_mc_command cmd = { 0 }; |
||
6820 | struct dpni_cmd_set_irq_mask *cmd_params; |
||
6821 | |||
6822 | /* prepare command */ |
||
6823 | @@ -447,7 +450,7 @@ int dpni_get_irq_mask(struct fsl_mc_io * |
||
6824 | u8 irq_index, |
||
6825 | u32 *mask) |
||
6826 | { |
||
6827 | - struct mc_command cmd = { 0 }; |
||
6828 | + struct fsl_mc_command cmd = { 0 }; |
||
6829 | struct dpni_cmd_get_irq_mask *cmd_params; |
||
6830 | struct dpni_rsp_get_irq_mask *rsp_params; |
||
6831 | int err; |
||
6832 | @@ -489,7 +492,7 @@ int dpni_get_irq_status(struct fsl_mc_io |
||
6833 | u8 irq_index, |
||
6834 | u32 *status) |
||
6835 | { |
||
6836 | - struct mc_command cmd = { 0 }; |
||
6837 | + struct fsl_mc_command cmd = { 0 }; |
||
6838 | struct dpni_cmd_get_irq_status *cmd_params; |
||
6839 | struct dpni_rsp_get_irq_status *rsp_params; |
||
6840 | int err; |
||
6841 | @@ -532,7 +535,7 @@ int dpni_clear_irq_status(struct fsl_mc_ |
||
6842 | u8 irq_index, |
||
6843 | u32 status) |
||
6844 | { |
||
6845 | - struct mc_command cmd = { 0 }; |
||
6846 | + struct fsl_mc_command cmd = { 0 }; |
||
6847 | struct dpni_cmd_clear_irq_status *cmd_params; |
||
6848 | |||
6849 | /* prepare command */ |
||
6850 | @@ -561,7 +564,7 @@ int dpni_get_attributes(struct fsl_mc_io |
||
6851 | u16 token, |
||
6852 | struct dpni_attr *attr) |
||
6853 | { |
||
6854 | - struct mc_command cmd = { 0 }; |
||
6855 | + struct fsl_mc_command cmd = { 0 }; |
||
6856 | struct dpni_rsp_get_attr *rsp_params; |
||
6857 | |||
6858 | int err; |
||
6859 | @@ -609,7 +612,7 @@ int dpni_set_errors_behavior(struct fsl_ |
||
6860 | u16 token, |
||
6861 | struct dpni_error_cfg *cfg) |
||
6862 | { |
||
6863 | - struct mc_command cmd = { 0 }; |
||
6864 | + struct fsl_mc_command cmd = { 0 }; |
||
6865 | struct dpni_cmd_set_errors_behavior *cmd_params; |
||
6866 | |||
6867 | /* prepare command */ |
||
6868 | @@ -641,7 +644,7 @@ int dpni_get_buffer_layout(struct fsl_mc |
||
6869 | enum dpni_queue_type qtype, |
||
6870 | struct dpni_buffer_layout *layout) |
||
6871 | { |
||
6872 | - struct mc_command cmd = { 0 }; |
||
6873 | + struct fsl_mc_command cmd = { 0 }; |
||
6874 | struct dpni_cmd_get_buffer_layout *cmd_params; |
||
6875 | struct dpni_rsp_get_buffer_layout *rsp_params; |
||
6876 | int err; |
||
6877 | @@ -689,7 +692,7 @@ int dpni_set_buffer_layout(struct fsl_mc |
||
6878 | enum dpni_queue_type qtype, |
||
6879 | const struct dpni_buffer_layout *layout) |
||
6880 | { |
||
6881 | - struct mc_command cmd = { 0 }; |
||
6882 | + struct fsl_mc_command cmd = { 0 }; |
||
6883 | struct dpni_cmd_set_buffer_layout *cmd_params; |
||
6884 | |||
6885 | /* prepare command */ |
||
6886 | @@ -731,7 +734,7 @@ int dpni_set_offload(struct fsl_mc_io *m |
||
6887 | enum dpni_offload type, |
||
6888 | u32 config) |
||
6889 | { |
||
6890 | - struct mc_command cmd = { 0 }; |
||
6891 | + struct fsl_mc_command cmd = { 0 }; |
||
6892 | struct dpni_cmd_set_offload *cmd_params; |
||
6893 | |||
6894 | cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, |
||
6895 | @@ -750,7 +753,7 @@ int dpni_get_offload(struct fsl_mc_io *m |
||
6896 | enum dpni_offload type, |
||
6897 | u32 *config) |
||
6898 | { |
||
6899 | - struct mc_command cmd = { 0 }; |
||
6900 | + struct fsl_mc_command cmd = { 0 }; |
||
6901 | struct dpni_cmd_get_offload *cmd_params; |
||
6902 | struct dpni_rsp_get_offload *rsp_params; |
||
6903 | int err; |
||
6904 | @@ -792,7 +795,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i |
||
6905 | enum dpni_queue_type qtype, |
||
6906 | u16 *qdid) |
||
6907 | { |
||
6908 | - struct mc_command cmd = { 0 }; |
||
6909 | + struct fsl_mc_command cmd = { 0 }; |
||
6910 | struct dpni_cmd_get_qdid *cmd_params; |
||
6911 | struct dpni_rsp_get_qdid *rsp_params; |
||
6912 | int err; |
||
6913 | @@ -830,7 +833,7 @@ int dpni_get_tx_data_offset(struct fsl_m |
||
6914 | u16 token, |
||
6915 | u16 *data_offset) |
||
6916 | { |
||
6917 | - struct mc_command cmd = { 0 }; |
||
6918 | + struct fsl_mc_command cmd = { 0 }; |
||
6919 | struct dpni_rsp_get_tx_data_offset *rsp_params; |
||
6920 | int err; |
||
6921 | |||
6922 | @@ -865,7 +868,7 @@ int dpni_set_link_cfg(struct fsl_mc_io * |
||
6923 | u16 token, |
||
6924 | const struct dpni_link_cfg *cfg) |
||
6925 | { |
||
6926 | - struct mc_command cmd = { 0 }; |
||
6927 | + struct fsl_mc_command cmd = { 0 }; |
||
6928 | struct dpni_cmd_set_link_cfg *cmd_params; |
||
6929 | |||
6930 | /* prepare command */ |
||
6931 | @@ -894,7 +897,7 @@ int dpni_get_link_state(struct fsl_mc_io |
||
6932 | u16 token, |
||
6933 | struct dpni_link_state *state) |
||
6934 | { |
||
6935 | - struct mc_command cmd = { 0 }; |
||
6936 | + struct fsl_mc_command cmd = { 0 }; |
||
6937 | struct dpni_rsp_get_link_state *rsp_params; |
||
6938 | int err; |
||
6939 | |||
6940 | @@ -918,6 +921,44 @@ int dpni_get_link_state(struct fsl_mc_io |
||
6941 | } |
||
6942 | |||
6943 | /** |
||
6944 | + * dpni_set_tx_shaping() - Set the transmit shaping |
||
6945 | + * @mc_io: Pointer to MC portal's I/O object |
||
6946 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
6947 | + * @token: Token of DPNI object |
||
6948 | + * @tx_cr_shaper: TX committed rate shaping configuration |
||
6949 | + * @tx_er_shaper: TX excess rate shaping configuration |
||
6950 | + * @coupled: Committed and excess rate shapers are coupled |
||
6951 | + * |
||
6952 | + * Return: '0' on Success; Error code otherwise. |
||
6953 | + */ |
||
6954 | +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, |
||
6955 | + u32 cmd_flags, |
||
6956 | + u16 token, |
||
6957 | + const struct dpni_tx_shaping_cfg *tx_cr_shaper, |
||
6958 | + const struct dpni_tx_shaping_cfg *tx_er_shaper, |
||
6959 | + int coupled) |
||
6960 | +{ |
||
6961 | + struct fsl_mc_command cmd = { 0 }; |
||
6962 | + struct dpni_cmd_set_tx_shaping *cmd_params; |
||
6963 | + |
||
6964 | + /* prepare command */ |
||
6965 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, |
||
6966 | + cmd_flags, |
||
6967 | + token); |
||
6968 | + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params; |
||
6969 | + cmd_params->tx_cr_max_burst_size = |
||
6970 | + cpu_to_le16(tx_cr_shaper->max_burst_size); |
||
6971 | + cmd_params->tx_er_max_burst_size = |
||
6972 | + cpu_to_le16(tx_er_shaper->max_burst_size); |
||
6973 | + cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit); |
||
6974 | + cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit); |
||
6975 | + dpni_set_field(cmd_params->coupled, COUPLED, coupled); |
||
6976 | + |
||
6977 | + /* send command to mc*/ |
||
6978 | + return mc_send_command(mc_io, &cmd); |
||
6979 | +} |
||
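A minimal kernel-side sketch of calling the new API, assuming a valid mc_io/token pair obtained from dpni_open(); the rate values are illustrative and the unit (Mbit/s) is an assumption, so check the MC firmware documentation for your release.

    static int example_shape_tx(struct fsl_mc_io *mc_io, u16 token)
    {
            struct dpni_tx_shaping_cfg cr_shaper = {
                    .rate_limit = 100,        /* assumed Mbit/s */
                    .max_burst_size = 0x1000, /* bytes */
            };
            struct dpni_tx_shaping_cfg er_shaper = {
                    .rate_limit = 50,
                    .max_burst_size = 0x800,
            };

            /* coupled = 0: committed and excess shapers run independently */
            return dpni_set_tx_shaping(mc_io, 0, token, &cr_shaper,
                                       &er_shaper, 0);
    }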
6980 | + |
||
6981 | +/** |
||
6982 | * dpni_set_max_frame_length() - Set the maximum received frame length. |
||
6983 | * @mc_io: Pointer to MC portal's I/O object |
||
6984 | * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
6985 | @@ -933,7 +974,7 @@ int dpni_set_max_frame_length(struct fsl |
||
6986 | u16 token, |
||
6987 | u16 max_frame_length) |
||
6988 | { |
||
6989 | - struct mc_command cmd = { 0 }; |
||
6990 | + struct fsl_mc_command cmd = { 0 }; |
||
6991 | struct dpni_cmd_set_max_frame_length *cmd_params; |
||
6992 | |||
6993 | /* prepare command */ |
||
6994 | @@ -963,7 +1004,7 @@ int dpni_get_max_frame_length(struct fsl |
||
6995 | u16 token, |
||
6996 | u16 *max_frame_length) |
||
6997 | { |
||
6998 | - struct mc_command cmd = { 0 }; |
||
6999 | + struct fsl_mc_command cmd = { 0 }; |
||
7000 | struct dpni_rsp_get_max_frame_length *rsp_params; |
||
7001 | int err; |
||
7002 | |||
7003 | @@ -998,7 +1039,7 @@ int dpni_set_multicast_promisc(struct fs |
||
7004 | u16 token, |
||
7005 | int en) |
||
7006 | { |
||
7007 | - struct mc_command cmd = { 0 }; |
||
7008 | + struct fsl_mc_command cmd = { 0 }; |
||
7009 | struct dpni_cmd_set_multicast_promisc *cmd_params; |
||
7010 | |||
7011 | /* prepare command */ |
||
7012 | @@ -1026,7 +1067,7 @@ int dpni_get_multicast_promisc(struct fs |
||
7013 | u16 token, |
||
7014 | int *en) |
||
7015 | { |
||
7016 | - struct mc_command cmd = { 0 }; |
||
7017 | + struct fsl_mc_command cmd = { 0 }; |
||
7018 | struct dpni_rsp_get_multicast_promisc *rsp_params; |
||
7019 | int err; |
||
7020 | |||
7021 | @@ -1061,7 +1102,7 @@ int dpni_set_unicast_promisc(struct fsl_ |
||
7022 | u16 token, |
||
7023 | int en) |
||
7024 | { |
||
7025 | - struct mc_command cmd = { 0 }; |
||
7026 | + struct fsl_mc_command cmd = { 0 }; |
||
7027 | struct dpni_cmd_set_unicast_promisc *cmd_params; |
||
7028 | |||
7029 | /* prepare command */ |
||
7030 | @@ -1089,7 +1130,7 @@ int dpni_get_unicast_promisc(struct fsl_ |
||
7031 | u16 token, |
||
7032 | int *en) |
||
7033 | { |
||
7034 | - struct mc_command cmd = { 0 }; |
||
7035 | + struct fsl_mc_command cmd = { 0 }; |
||
7036 | struct dpni_rsp_get_unicast_promisc *rsp_params; |
||
7037 | int err; |
||
7038 | |||
7039 | @@ -1124,7 +1165,7 @@ int dpni_set_primary_mac_addr(struct fsl |
||
7040 | u16 token, |
||
7041 | const u8 mac_addr[6]) |
||
7042 | { |
||
7043 | - struct mc_command cmd = { 0 }; |
||
7044 | + struct fsl_mc_command cmd = { 0 }; |
||
7045 | struct dpni_cmd_set_primary_mac_addr *cmd_params; |
||
7046 | int i; |
||
7047 | |||
7048 | @@ -1154,7 +1195,7 @@ int dpni_get_primary_mac_addr(struct fsl |
||
7049 | u16 token, |
||
7050 | u8 mac_addr[6]) |
||
7051 | { |
||
7052 | - struct mc_command cmd = { 0 }; |
||
7053 | + struct fsl_mc_command cmd = { 0 }; |
||
7054 | struct dpni_rsp_get_primary_mac_addr *rsp_params; |
||
7055 | int i, err; |
||
7056 | |||
7057 | @@ -1193,7 +1234,7 @@ int dpni_get_port_mac_addr(struct fsl_mc |
||
7058 | u16 token, |
||
7059 | u8 mac_addr[6]) |
||
7060 | { |
||
7061 | - struct mc_command cmd = { 0 }; |
||
7062 | + struct fsl_mc_command cmd = { 0 }; |
||
7063 | struct dpni_rsp_get_port_mac_addr *rsp_params; |
||
7064 | int i, err; |
||
7065 | |||
7066 | @@ -1229,7 +1270,7 @@ int dpni_add_mac_addr(struct fsl_mc_io * |
||
7067 | u16 token, |
||
7068 | const u8 mac_addr[6]) |
||
7069 | { |
||
7070 | - struct mc_command cmd = { 0 }; |
||
7071 | + struct fsl_mc_command cmd = { 0 }; |
||
7072 | struct dpni_cmd_add_mac_addr *cmd_params; |
||
7073 | int i; |
||
7074 | |||
7075 | @@ -1259,7 +1300,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i |
||
7076 | u16 token, |
||
7077 | const u8 mac_addr[6]) |
||
7078 | { |
||
7079 | - struct mc_command cmd = { 0 }; |
||
7080 | + struct fsl_mc_command cmd = { 0 }; |
||
7081 | struct dpni_cmd_remove_mac_addr *cmd_params; |
||
7082 | int i; |
||
7083 | |||
7084 | @@ -1293,7 +1334,7 @@ int dpni_clear_mac_filters(struct fsl_mc |
||
7085 | int unicast, |
||
7086 | int multicast) |
||
7087 | { |
||
7088 | - struct mc_command cmd = { 0 }; |
||
7089 | + struct fsl_mc_command cmd = { 0 }; |
||
7090 | struct dpni_cmd_clear_mac_filters *cmd_params; |
||
7091 | |||
7092 | /* prepare command */ |
||
7093 | @@ -1309,6 +1350,55 @@ int dpni_clear_mac_filters(struct fsl_mc |
||
7094 | } |
||
7095 | |||
7096 | /** |
||
7097 | + * dpni_set_tx_priorities() - Set transmission TC priority configuration |
||
7098 | + * @mc_io: Pointer to MC portal's I/O object |
||
7099 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7100 | + * @token: Token of DPNI object |
||
7101 | + * @cfg: Transmission selection configuration |
||
7102 | + * |
||
7103 | + * warning: Allowed only when DPNI is disabled |
||
7104 | + * |
||
7105 | + * Return: '0' on Success; Error code otherwise. |
||
7106 | + */ |
||
7107 | +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io, |
||
7108 | + u32 cmd_flags, |
||
7109 | + u16 token, |
||
7110 | + const struct dpni_tx_priorities_cfg *cfg) |
||
7111 | +{ |
||
7112 | + struct dpni_cmd_set_tx_priorities *cmd_params; |
||
7113 | + struct fsl_mc_command cmd = { 0 }; |
||
7114 | + int i; |
||
7115 | + |
||
7116 | + /* prepare command */ |
||
7117 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES, |
||
7118 | + cmd_flags, |
||
7119 | + token); |
||
7120 | + cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params; |
||
7121 | + dpni_set_field(cmd_params->flags, |
||
7122 | + SEPARATE_GRP, |
||
7123 | + cfg->separate_groups); |
||
7124 | + cmd_params->prio_group_A = cfg->prio_group_A; |
||
7125 | + cmd_params->prio_group_B = cfg->prio_group_B; |
||
7126 | + |
||
7127 | + for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) { |
||
7128 | + dpni_set_field(cmd_params->modes[i / 2], |
||
7129 | + MODE_1, |
||
7130 | + cfg->tc_sched[i].mode); |
||
7131 | + dpni_set_field(cmd_params->modes[i / 2], |
||
7132 | + MODE_2, |
||
7133 | + cfg->tc_sched[i + 1].mode); |
||
7134 | + } |
||
7135 | + |
||
7136 | + for (i = 0; i < DPNI_MAX_TC; i++) { |
||
7137 | + cmd_params->delta_bandwidth[i] = |
||
7138 | + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth); |
||
7139 | + } |
||
7140 | + |
||
7141 | + /* send command to mc*/ |
||
7142 | + return mc_send_command(mc_io, &cmd); |
||
7143 | +} |
||
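A hedged sketch of building the configuration, assuming the dpni_tx_priorities_cfg field layout implied by the encoder above; the scheduling mode is left at its zero default and the bandwidth values are illustrative only.

    static int example_tx_priorities(struct fsl_mc_io *mc_io, u16 token)
    {
            struct dpni_tx_priorities_cfg cfg = { 0 };
            int i;

            cfg.separate_groups = 0; /* single priority group */
            for (i = 0; i < DPNI_MAX_TC; i++)
                    cfg.tc_sched[i].delta_bandwidth = 100; /* illustrative */

            /* Must be called while the DPNI is disabled, per the warning */
            return dpni_set_tx_priorities(mc_io, 0, token, &cfg);
    }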
7144 | + |
||
7145 | +/** |
||
7146 | * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration |
||
7147 | * @mc_io: Pointer to MC portal's I/O object |
||
7148 | * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7149 | @@ -1327,7 +1417,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io |
||
7150 | u8 tc_id, |
||
7151 | const struct dpni_rx_tc_dist_cfg *cfg) |
||
7152 | { |
||
7153 | - struct mc_command cmd = { 0 }; |
||
7154 | + struct fsl_mc_command cmd = { 0 }; |
||
7155 | struct dpni_cmd_set_rx_tc_dist *cmd_params; |
||
7156 | |||
7157 | /* prepare command */ |
||
7158 | @@ -1346,6 +1436,293 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io |
||
7159 | return mc_send_command(mc_io, &cmd); |
||
7160 | } |
||
7161 | |||
7162 | +/**
||
7163 | + * dpni_set_qos_table() - Set QoS mapping table |
||
7164 | + * @mc_io: Pointer to MC portal's I/O object |
||
7165 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7166 | + * @token: Token of DPNI object |
||
7167 | + * @cfg: QoS table configuration |
||
7168 | + * |
||
7169 | + * This function and all QoS-related functions require that |
||
7170 | + *'max_tcs > 1' was set at DPNI creation. |
||
7171 | + * |
||
7172 | + * warning: Before calling this function, call dpkg_prepare_key_cfg() to |
||
7173 | + * prepare the key_cfg_iova parameter |
||
7174 | + * |
||
7175 | + * Return: '0' on Success; Error code otherwise. |
||
7176 | + */ |
||
7177 | +int dpni_set_qos_table(struct fsl_mc_io *mc_io, |
||
7178 | + u32 cmd_flags, |
||
7179 | + u16 token, |
||
7180 | + const struct dpni_qos_tbl_cfg *cfg) |
||
7181 | +{ |
||
7182 | + struct dpni_cmd_set_qos_table *cmd_params; |
||
7183 | + struct fsl_mc_command cmd = { 0 }; |
||
7184 | + |
||
7185 | + /* prepare command */ |
||
7186 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, |
||
7187 | + cmd_flags, |
||
7188 | + token); |
||
7189 | + cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params; |
||
7190 | + cmd_params->default_tc = cfg->default_tc; |
||
7191 | + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); |
||
7192 | + dpni_set_field(cmd_params->discard_on_miss, |
||
7193 | + ENABLE, |
||
7194 | + cfg->discard_on_miss); |
||
7195 | + |
||
7196 | + /* send command to mc*/ |
||
7197 | + return mc_send_command(mc_io, &cmd); |
||
7198 | +} |
||
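A sketch of programming the table, assuming key_cfg_iova already holds the DMA address of a buffer prepared by dpkg_prepare_key_cfg(), as the warning above requires.

    static int example_qos_table(struct fsl_mc_io *mc_io, u16 token,
                                 u64 key_cfg_iova)
    {
            struct dpni_qos_tbl_cfg cfg = {
                    .key_cfg_iova = key_cfg_iova,
                    .default_tc = 0,
                    .discard_on_miss = 0, /* unclassified frames go to TC 0 */
            };

            return dpni_set_qos_table(mc_io, 0, token, &cfg);
    }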
7199 | + |
||
7200 | +/** |
||
7201 | + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) |
||
7202 | + * @mc_io: Pointer to MC portal's I/O object |
||
7203 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7204 | + * @token: Token of DPNI object |
||
7205 | + * @cfg: QoS rule to add |
||
7206 | + * @tc_id: Traffic class selection (0-7) |
||
7207 | + * @index: Location in the QoS table where to insert the entry. |
||
7208 | + * Only relevant if MASKING is enabled for QoS classification on |
||
7209 | + * this DPNI, it is ignored for exact match. |
||
7210 | + * |
||
7211 | + * Return: '0' on Success; Error code otherwise. |
||
7212 | + */ |
||
7213 | +int dpni_add_qos_entry(struct fsl_mc_io *mc_io, |
||
7214 | + u32 cmd_flags, |
||
7215 | + u16 token, |
||
7216 | + const struct dpni_rule_cfg *cfg, |
||
7217 | + u8 tc_id, |
||
7218 | + u16 index) |
||
7219 | +{ |
||
7220 | + struct dpni_cmd_add_qos_entry *cmd_params; |
||
7221 | + struct fsl_mc_command cmd = { 0 }; |
||
7222 | + |
||
7223 | + /* prepare command */ |
||
7224 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, |
||
7225 | + cmd_flags, |
||
7226 | + token); |
||
7227 | + cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params; |
||
7228 | + cmd_params->tc_id = tc_id; |
||
7229 | + cmd_params->key_size = cfg->key_size; |
||
7230 | + cmd_params->index = cpu_to_le16(index); |
||
7231 | + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); |
||
7232 | + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); |
||
7233 | + |
||
7234 | + /* send command to mc*/ |
||
7235 | + return mc_send_command(mc_io, &cmd); |
||
7236 | +} |
||
7237 | + |
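An illustrative sketch of adding a rule (not part of the patch): steer frames matching a prepared key/mask pair to TC 1. The key and mask buffers are assumed to be DMA-mapped by the caller; index 0 only matters when masking is enabled.

static int example_add_qos_rule(struct fsl_mc_io *mc_io, u16 token,
				u64 key_iova, u64 mask_iova, u8 key_size)
{
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};

	return dpni_add_qos_entry(mc_io, 0, token, &rule,
				  1 /* tc_id */, 0 /* index */);
}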
||
7238 | +/** |
||
7239 | + * dpni_remove_qos_entry() - Remove QoS mapping entry |
||
7240 | + * @mc_io: Pointer to MC portal's I/O object |
||
7241 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7242 | + * @token: Token of DPNI object |
||
7243 | + * @cfg: QoS rule to remove |
||
7244 | + * |
||
7245 | + * Return: '0' on Success; Error code otherwise. |
||
7246 | + */ |
||
7247 | +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, |
||
7248 | + u32 cmd_flags, |
||
7249 | + u16 token, |
||
7250 | + const struct dpni_rule_cfg *cfg) |
||
7251 | +{ |
||
7252 | + struct dpni_cmd_remove_qos_entry *cmd_params; |
||
7253 | + struct fsl_mc_command cmd = { 0 }; |
||
7254 | + |
||
7255 | + /* prepare command */ |
||
7256 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, |
||
7257 | + cmd_flags, |
||
7258 | + token); |
||
7259 | + cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params; |
||
7260 | + cmd_params->key_size = cfg->key_size; |
||
7261 | + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); |
||
7262 | + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); |
||
7263 | + |
||
7264 | + /* send command to mc */ |
||
7265 | + return mc_send_command(mc_io, &cmd); |
||
7266 | +} |
||
7267 | + |
||
7268 | +/** |
||
7269 | + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class |
||
7270 | + * (to select a flow ID) |
||
7271 | + * @mc_io: Pointer to MC portal's I/O object |
||
7272 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7273 | + * @token: Token of DPNI object |
||
7274 | + * @tc_id: Traffic class selection (0-7) |
||
7275 | + * @index: Location in the FS table at which to insert the entry. |
||
7276 | + * Only relevant if MASKING is enabled for QoS |
||
7277 | + * classification on this DPNI; it is ignored for exact match. |
||
7278 | + * @cfg: Flow steering rule to add |
||
7279 | + * @action: Action to be taken as result of a classification hit |
||
7280 | + * |
||
7281 | + * Return: '0' on Success; Error code otherwise. |
||
7282 | + */ |
||
7283 | +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, |
||
7284 | + u32 cmd_flags, |
||
7285 | + u16 token, |
||
7286 | + u8 tc_id, |
||
7287 | + u16 index, |
||
7288 | + const struct dpni_rule_cfg *cfg, |
||
7289 | + const struct dpni_fs_action_cfg *action) |
||
7290 | +{ |
||
7291 | + struct dpni_cmd_add_fs_entry *cmd_params; |
||
7292 | + struct fsl_mc_command cmd = { 0 }; |
||
7293 | + |
||
7294 | + /* prepare command */ |
||
7295 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, |
||
7296 | + cmd_flags, |
||
7297 | + token); |
||
7298 | + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params; |
||
7299 | + cmd_params->tc_id = tc_id; |
||
7300 | + cmd_params->key_size = cfg->key_size; |
||
7301 | + cmd_params->index = cpu_to_le16(index); |
||
7302 | + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); |
||
7303 | + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); |
||
7304 | + cmd_params->options = cpu_to_le16(action->options); |
||
7305 | + cmd_params->flow_id = cpu_to_le16(action->flow_id); |
||
7306 | + cmd_params->flc = cpu_to_le64(action->flc); |
||
7307 | + |
||
7308 | + /* send command to mc */ |
||
7309 | + return mc_send_command(mc_io, &cmd); |
||
7310 | +} |
||
7311 | + |
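A hedged sketch of the flow steering side (not part of the patch): within TC 0, send matching frames to Rx queue 2 with no FLC override. The rule is assumed to be prepared as in the QoS example above.

static int example_add_fs_rule(struct fsl_mc_io *mc_io, u16 token,
			       const struct dpni_rule_cfg *rule)
{
	struct dpni_fs_action_cfg action = {
		.flow_id = 2,	/* target Rx queue within the TC */
		.options = 0,	/* no discard, no FLC override */
	};

	return dpni_add_fs_entry(mc_io, 0, token, 0 /* tc_id */,
				 0 /* index */, rule, &action);
}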
||
7312 | +/** |
||
7313 | + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific |
||
7314 | + * traffic class |
||
7315 | + * @mc_io: Pointer to MC portal's I/O object |
||
7316 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7317 | + * @token: Token of DPNI object |
||
7318 | + * @tc_id: Traffic class selection (0-7) |
||
7319 | + * @cfg: Flow steering rule to remove |
||
7320 | + * |
||
7321 | + * Return: '0' on Success; Error code otherwise. |
||
7322 | + */ |
||
7323 | +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, |
||
7324 | + u32 cmd_flags, |
||
7325 | + u16 token, |
||
7326 | + u8 tc_id, |
||
7327 | + const struct dpni_rule_cfg *cfg) |
||
7328 | +{ |
||
7329 | + struct dpni_cmd_remove_fs_entry *cmd_params; |
||
7330 | + struct fsl_mc_command cmd = { 0 }; |
||
7331 | + |
||
7332 | + /* prepare command */ |
||
7333 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, |
||
7334 | + cmd_flags, |
||
7335 | + token); |
||
7336 | + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; |
||
7337 | + cmd_params->tc_id = tc_id; |
||
7338 | + cmd_params->key_size = cfg->key_size; |
||
7339 | + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); |
||
7340 | + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); |
||
7341 | + |
||
7342 | + /* send command to mc */ |
||
7343 | + return mc_send_command(mc_io, &cmd); |
||
7344 | +} |
||
7345 | + |
||
7346 | +/** |
||
7347 | + * dpni_set_congestion_notification() - Set traffic class congestion |
||
7348 | + * notification configuration |
||
7349 | + * @mc_io: Pointer to MC portal's I/O object |
||
7350 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7351 | + * @token: Token of DPNI object |
||
7352 | + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported |
||
7353 | + * @tc_id: Traffic class selection (0-7) |
||
7354 | + * @cfg: Congestion notification configuration |
||
7355 | + * |
||
7356 | + * Return: '0' on Success; error code otherwise. |
||
7357 | + */ |
||
7358 | +int dpni_set_congestion_notification( |
||
7359 | + struct fsl_mc_io *mc_io, |
||
7360 | + u32 cmd_flags, |
||
7361 | + u16 token, |
||
7362 | + enum dpni_queue_type qtype, |
||
7363 | + u8 tc_id, |
||
7364 | + const struct dpni_congestion_notification_cfg *cfg) |
||
7365 | +{ |
||
7366 | + struct dpni_cmd_set_congestion_notification *cmd_params; |
||
7367 | + struct fsl_mc_command cmd = { 0 }; |
||
7368 | + |
||
7369 | + /* prepare command */ |
||
7370 | + cmd.header = mc_encode_cmd_header( |
||
7371 | + DPNI_CMDID_SET_CONGESTION_NOTIFICATION, |
||
7372 | + cmd_flags, |
||
7373 | + token); |
||
7374 | + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params; |
||
7375 | + cmd_params->qtype = qtype; |
||
7376 | + cmd_params->tc = tc_id; |
||
7377 | + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); |
||
7378 | + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); |
||
7379 | + cmd_params->dest_priority = cfg->dest_cfg.priority; |
||
7380 | + dpni_set_field(cmd_params->type_units, DEST_TYPE, |
||
7381 | + cfg->dest_cfg.dest_type); |
||
7382 | + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units); |
||
7383 | + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); |
||
7384 | + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); |
||
7385 | + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); |
||
7386 | + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); |
||
7387 | + |
||
7388 | + /* send command to mc */ |
||
7389 | + return mc_send_command(mc_io, &cmd); |
||
7390 | +} |
||
7391 | + |
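An illustrative configuration (not part of the patch): write a CSCN record to a caller-allocated, 16-byte-aligned DMA buffer when TC 0's Rx backlog crosses 64 frames, and again when it drains below 32. The enum values (DPNI_CONGESTION_UNIT_FRAMES, DPNI_DEST_NONE, DPNI_QUEUE_RX) come from the surrounding driver headers.

static int example_set_cong(struct fsl_mc_io *mc_io, u16 token, u64 cscn_iova)
{
	struct dpni_congestion_notification_cfg cfg = {
		.units = DPNI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = 64,
		.threshold_exit = 32,
		.message_iova = cscn_iova,	/* 16 B aligned, DMA-able */
		.dest_cfg = { .dest_type = DPNI_DEST_NONE },
		.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
				     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
	};

	return dpni_set_congestion_notification(mc_io, 0, token,
						DPNI_QUEUE_RX, 0, &cfg);
}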
||
7392 | +/** |
||
7393 | + * dpni_get_congestion_notification() - Get traffic class congestion |
||
7394 | + * notification configuration |
||
7395 | + * @mc_io: Pointer to MC portal's I/O object |
||
7396 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7397 | + * @token: Token of DPNI object |
||
7398 | + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported |
||
7399 | + * @tc_id: bits 7-4 contain ceetm channel index (valid only for TX); |
||
7400 | + * bits 3-0 contain traffic class. |
||
7401 | + * Use macro DPNI_BUILD_CH_TC() to build correct value for |
||
7402 | + * tc_id parameter. |
||
7403 | + * @cfg: congestion notification configuration |
||
7404 | + * |
||
7405 | + * Return: '0' on Success; error code otherwise. |
||
7406 | + */ |
||
7407 | +int dpni_get_congestion_notification( |
||
7408 | + struct fsl_mc_io *mc_io, |
||
7409 | + u32 cmd_flags, |
||
7410 | + u16 token, |
||
7411 | + enum dpni_queue_type qtype, |
||
7412 | + u8 tc_id, |
||
7413 | + struct dpni_congestion_notification_cfg *cfg) |
||
7414 | +{ |
||
7415 | + struct dpni_rsp_get_congestion_notification *rsp_params; |
||
7416 | + struct dpni_cmd_get_congestion_notification *cmd_params; |
||
7417 | + struct fsl_mc_command cmd = { 0 }; |
||
7418 | + int err; |
||
7419 | + |
||
7420 | + /* prepare command */ |
||
7421 | + cmd.header = mc_encode_cmd_header( |
||
7422 | + DPNI_CMDID_GET_CONGESTION_NOTIFICATION, |
||
7423 | + cmd_flags, |
||
7424 | + token); |
||
7425 | + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params; |
||
7426 | + cmd_params->qtype = qtype; |
||
7427 | + cmd_params->tc = tc_id; |
||
7428 | + |
||
7429 | + /* send command to mc */ |
||
7430 | + err = mc_send_command(mc_io, &cmd); |
||
7431 | + if (err) |
||
7432 | + return err; |
||
7433 | + |
||
7434 | + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params; |
||
7435 | + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS); |
||
7436 | + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); |
||
7437 | + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); |
||
7438 | + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); |
||
7439 | + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); |
||
7440 | + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); |
||
7441 | + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); |
||
7442 | + cfg->dest_cfg.priority = rsp_params->dest_priority; |
||
7443 | + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units, |
||
7444 | + DEST_TYPE); |
||
7445 | + |
||
7446 | + return 0; |
||
7447 | +} |
||
7448 | + |
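A sketch of the intended read-modify-write pattern (illustrative, not part of the patch): fetch the current Tx configuration for channel 0 / TC 0, double the exit threshold, and write it back. DPNI_BUILD_CH_TC() composes the channel/TC encoding described above.

static int example_bump_exit_threshold(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_congestion_notification_cfg cfg;
	u8 tc = DPNI_BUILD_CH_TC(0 /* ceetm ch */, 0 /* tc */);
	int err;

	err = dpni_get_congestion_notification(mc_io, 0, token,
					       DPNI_QUEUE_TX, tc, &cfg);
	if (err)
		return err;

	cfg.threshold_exit *= 2;
	return dpni_set_congestion_notification(mc_io, 0, token,
						DPNI_QUEUE_TX, tc, &cfg);
}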
||
7449 | /** |
||
7450 | * dpni_set_queue() - Set queue parameters |
||
7451 | * @mc_io: Pointer to MC portal's I/O object |
||
7452 | @@ -1371,7 +1748,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_ |
||
7453 | u8 options, |
||
7454 | const struct dpni_queue *queue) |
||
7455 | { |
||
7456 | - struct mc_command cmd = { 0 }; |
||
7457 | + struct fsl_mc_command cmd = { 0 }; |
||
7458 | struct dpni_cmd_set_queue *cmd_params; |
||
7459 | |||
7460 | /* prepare command */ |
||
7461 | @@ -1419,7 +1796,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_ |
||
7462 | struct dpni_queue *queue, |
||
7463 | struct dpni_queue_id *qid) |
||
7464 | { |
||
7465 | - struct mc_command cmd = { 0 }; |
||
7466 | + struct fsl_mc_command cmd = { 0 }; |
||
7467 | struct dpni_cmd_get_queue *cmd_params; |
||
7468 | struct dpni_rsp_get_queue *rsp_params; |
||
7469 | int err; |
||
7470 | @@ -1463,6 +1840,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_ |
||
7471 | * @token: Token of DPNI object |
||
7472 | * @page: Selects the statistics page to retrieve, see |
||
7473 | * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. |
||
7474 | + * @param: Custom parameter for some pages, used to select a certain |
||
7475 | + * statistic source, for example the TC. |
||
7476 | * @stat: Structure containing the statistics |
||
7477 | * |
||
7478 | * Return: '0' on Success; Error code otherwise. |
||
7479 | @@ -1471,9 +1850,10 @@ int dpni_get_statistics(struct fsl_mc_io |
||
7480 | u32 cmd_flags, |
||
7481 | u16 token, |
||
7482 | u8 page, |
||
7483 | + u8 param, |
||
7484 | union dpni_statistics *stat) |
||
7485 | { |
||
7486 | - struct mc_command cmd = { 0 }; |
||
7487 | + struct fsl_mc_command cmd = { 0 }; |
||
7488 | struct dpni_cmd_get_statistics *cmd_params; |
||
7489 | struct dpni_rsp_get_statistics *rsp_params; |
||
7490 | int i, err; |
||
7491 | @@ -1484,6 +1864,7 @@ int dpni_get_statistics(struct fsl_mc_io |
||
7492 | token); |
||
7493 | cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; |
||
7494 | cmd_params->page_number = page; |
||
7495 | + cmd_params->param = param; |
||
7496 | |||
7497 | /* send command to mc */ |
||
7498 | err = mc_send_command(mc_io, &cmd); |
||
7499 | @@ -1499,6 +1880,29 @@ int dpni_get_statistics(struct fsl_mc_io |
||
7500 | } |
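An illustrative use of the new 'param' argument (not part of the patch): page 3, added by this patch, carries per-TC CEETM counters, so 'param' selects the traffic class being queried.

static int example_read_tc_stats(struct fsl_mc_io *mc_io, u16 token,
				 u8 tc, u64 *dequeued_frames)
{
	union dpni_statistics stats;
	int err;

	err = dpni_get_statistics(mc_io, 0, token,
				  3 /* page */, tc /* param */, &stats);
	if (err)
		return err;

	*dequeued_frames = stats.page_3.ceetm_dequeue_frames;
	return 0;
}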
||
7501 | |||
7502 | /** |
||
7503 | + * dpni_reset_statistics() - Clears DPNI statistics |
||
7504 | + * @mc_io: Pointer to MC portal's I/O object |
||
7505 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7506 | + * @token: Token of DPNI object |
||
7507 | + * |
||
7508 | + * Return: '0' on Success; Error code otherwise. |
||
7509 | + */ |
||
7510 | +int dpni_reset_statistics(struct fsl_mc_io *mc_io, |
||
7511 | + u32 cmd_flags, |
||
7512 | + u16 token) |
||
7513 | +{ |
||
7514 | + struct fsl_mc_command cmd = { 0 }; |
||
7515 | + |
||
7516 | + /* prepare command */ |
||
7517 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS, |
||
7518 | + cmd_flags, |
||
7519 | + token); |
||
7520 | + |
||
7521 | + /* send command to mc */ |
||
7522 | + return mc_send_command(mc_io, &cmd); |
||
7523 | +} |
||
7524 | + |
||
7525 | +/** |
||
7526 | * dpni_set_taildrop() - Set taildrop per queue or TC |
||
7527 | * @mc_io: Pointer to MC portal's I/O object |
||
7528 | * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7529 | @@ -1506,7 +1910,10 @@ int dpni_get_statistics(struct fsl_mc_io |
||
7530 | * @cg_point: Congestion point |
||
7531 | * @q_type: Queue type on which the taildrop is configured. |
||
7532 | * Only Rx queues are supported for now |
||
7533 | - * @tc: Traffic class to apply this taildrop to |
||
7534 | + * @tc: bits 7-4 contain ceetm channel index (valid only for TX); |
||
7535 | + * bits 3-0 contain traffic class. |
||
7536 | + * Use macro DPNI_BUILD_CH_TC() to build correct value for |
||
7537 | + * tc parameter. |
||
7538 | * @q_index: Index of the queue if the DPNI supports multiple queues for |
||
7539 | * traffic distribution. Ignored if CONGESTION_POINT is not 0. |
||
7540 | * @taildrop: Taildrop structure |
||
7541 | @@ -1522,7 +1929,7 @@ int dpni_set_taildrop(struct fsl_mc_io * |
||
7542 | u8 index, |
||
7543 | struct dpni_taildrop *taildrop) |
||
7544 | { |
||
7545 | - struct mc_command cmd = { 0 }; |
||
7546 | + struct fsl_mc_command cmd = { 0 }; |
||
7547 | struct dpni_cmd_set_taildrop *cmd_params; |
||
7548 | |||
7549 | /* prepare command */ |
||
7550 | @@ -1550,7 +1957,10 @@ int dpni_set_taildrop(struct fsl_mc_io * |
||
7551 | * @cg_point: Congestion point |
||
7552 | * @q_type: Queue type on which the taildrop is configured. |
||
7553 | * Only Rx queues are supported for now |
||
7554 | - * @tc: Traffic class to apply this taildrop to |
||
7555 | + * @tc: bits 7-4 contain ceetm channel index (valid only for TX); |
||
7556 | + * bits 3-0 contain traffic class. |
||
7557 | + * Use macro DPNI_BUILD_CH_TC() to build correct value for |
||
7558 | + * tc parameter. |
||
7559 | * @q_index: Index of the queue if the DPNI supports multiple queues for |
||
7560 | * traffic distribution. Ignored if CONGESTION_POINT is not 0. |
||
7561 | * @taildrop: Taildrop structure |
||
7562 | @@ -1566,7 +1976,7 @@ int dpni_get_taildrop(struct fsl_mc_io * |
||
7563 | u8 index, |
||
7564 | struct dpni_taildrop *taildrop) |
||
7565 | { |
||
7566 | - struct mc_command cmd = { 0 }; |
||
7567 | + struct fsl_mc_command cmd = { 0 }; |
||
7568 | struct dpni_cmd_get_taildrop *cmd_params; |
||
7569 | struct dpni_rsp_get_taildrop *rsp_params; |
||
7570 | int err; |
||
7571 | @@ -1594,3 +2004,109 @@ int dpni_get_taildrop(struct fsl_mc_io * |
||
7572 | |||
7573 | return 0; |
||
7574 | } |
||
7575 | + |
||
7576 | +/** |
||
7577 | + * dpni_get_api_version() - Get Data Path Network Interface API version |
||
7578 | + * @mc_io: Pointer to MC portal's I/O object |
||
7579 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7580 | + * @major_ver: Major version of data path network interface API |
||
7581 | + * @minor_ver: Minor version of data path network interface API |
||
7582 | + * |
||
7583 | + * Return: '0' on Success; Error code otherwise. |
||
7584 | + */ |
||
7585 | +int dpni_get_api_version(struct fsl_mc_io *mc_io, |
||
7586 | + u32 cmd_flags, |
||
7587 | + u16 *major_ver, |
||
7588 | + u16 *minor_ver) |
||
7589 | +{ |
||
7590 | + struct dpni_rsp_get_api_version *rsp_params; |
||
7591 | + struct fsl_mc_command cmd = { 0 }; |
||
7592 | + int err; |
||
7593 | + |
||
7594 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION, |
||
7595 | + cmd_flags, 0); |
||
7596 | + |
||
7597 | + err = mc_send_command(mc_io, &cmd); |
||
7598 | + if (err) |
||
7599 | + return err; |
||
7600 | + |
||
7601 | + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params; |
||
7602 | + *major_ver = le16_to_cpu(rsp_params->major); |
||
7603 | + *minor_ver = le16_to_cpu(rsp_params->minor); |
||
7604 | + |
||
7605 | + return 0; |
||
7606 | +} |
||
7607 | + |
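A typical probe-time gate (illustrative, not part of the patch; the 7.3 cut-off is a placeholder, not taken from the patch):

static int example_check_dpni_api(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpni_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	/* refuse firmware older than the API level we were built for */
	if (major < 7 || (major == 7 && minor < 3))
		return -ENOTSUPP;
	return 0;
}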
||
7608 | +/** |
||
7609 | + * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution |
||
7610 | + * @mc_io: Pointer to MC portal's I/O object |
||
7611 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7612 | + * @token: Token of DPNI object |
||
7613 | + * @cfg: Distribution configuration |
||
7614 | + * If FS distribution is already enabled from a previous call, the |
||
7615 | + * classification key will be changed but all the table rules are kept. |
||
7616 | + * If the existing rules do not match the new key, the results will not |
||
7617 | + * be predictable; it is the user's responsibility to keep the key valid. |
||
7618 | + * If cfg.enable is set to 1, the command creates a flow steering table |
||
7619 | + * and classifies packets according to this table. Packets that miss |
||
7620 | + * all the table rules are classified according to the settings made in |
||
7621 | + * dpni_set_rx_hash_dist(). |
||
7622 | + * If cfg.enable is set to 0, the command clears the flow steering table |
||
7623 | + * and packets are classified according to the settings made in |
||
7624 | + * dpni_set_rx_hash_dist(). |
||
7625 | + */ |
||
7626 | +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, |
||
7627 | + u32 cmd_flags, |
||
7628 | + u16 token, |
||
7629 | + const struct dpni_rx_dist_cfg *cfg) |
||
7630 | +{ |
||
7631 | + struct dpni_cmd_set_rx_fs_dist *cmd_params; |
||
7632 | + struct fsl_mc_command cmd = { 0 }; |
||
7633 | + |
||
7634 | + /* prepare command */ |
||
7635 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST, |
||
7636 | + cmd_flags, |
||
7637 | + token); |
||
7638 | + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params; |
||
7639 | + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); |
||
7640 | + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); |
||
7641 | + cmd_params->tc = cfg->tc; |
||
7642 | + cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id); |
||
7643 | + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); |
||
7644 | + |
||
7645 | + /* send command to mc */ |
||
7646 | + return mc_send_command(mc_io, &cmd); |
||
7647 | +} |
||
7648 | + |
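An illustrative call (not part of the patch): enable flow steering on TC 0 with a 64-entry table, sending rule misses to queue 0. 'key_iova' is assumed to point at a prepared 256-byte key-extraction buffer.

static int example_enable_fs(struct fsl_mc_io *mc_io, u16 token, u64 key_iova)
{
	struct dpni_rx_dist_cfg cfg = {
		.dist_size = 64,
		.key_cfg_iova = key_iova,
		.enable = 1,
		.tc = 0,
		.fs_miss_flow_id = 0,	/* or DPNI_FS_MISS_DROP */
	};

	return dpni_set_rx_fs_dist(mc_io, 0, token, &cfg);
}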
||
7649 | +/** |
||
7650 | + * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution |
||
7651 | + * @mc_io: Pointer to MC portal's I/O object |
||
7652 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
7653 | + * @token: Token of DPNI object |
||
7654 | + * @cfg: Distribution configuration |
||
7655 | + * If cfg.enable is set to 1, packets will be classified using a hash |
||
7656 | + * function based on the key received in the cfg.key_cfg_iova parameter. |
||
7657 | + * If cfg.enable is set to 0, packets will be sent to the queue configured |
||
7658 | + * by the dpni_set_rx_dist_default_queue() call. |
||
7659 | + */ |
||
7660 | +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, |
||
7661 | + u32 cmd_flags, |
||
7662 | + u16 token, |
||
7663 | + const struct dpni_rx_dist_cfg *cfg) |
||
7664 | +{ |
||
7665 | + struct dpni_cmd_set_rx_hash_dist *cmd_params; |
||
7666 | + struct fsl_mc_command cmd = { 0 }; |
||
7667 | + |
||
7668 | + /* prepare command */ |
||
7669 | + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST, |
||
7670 | + cmd_flags, |
||
7671 | + token); |
||
7672 | + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params; |
||
7673 | + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); |
||
7674 | + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); |
||
7675 | + cmd_params->tc = cfg->tc; |
||
7676 | + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); |
||
7677 | + |
||
7678 | + /* send command to mc */ |
||
7679 | + return mc_send_command(mc_io, &cmd); |
||
7680 | +} |
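A matching hash-distribution sketch (illustrative, not part of the patch): spread TC 0 across 8 queues; passing enable = 0 instead would send everything to the default queue.

static int example_enable_hash(struct fsl_mc_io *mc_io, u16 token,
			       u64 key_iova)
{
	struct dpni_rx_dist_cfg cfg = {
		.dist_size = 8,
		.key_cfg_iova = key_iova,
		.enable = 1,
		.tc = 0,
	};

	return dpni_set_rx_hash_dist(mc_io, 0, token, &cfg);
}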
||
7681 | --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h |
||
7682 | +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h |
||
7683 | @@ -52,6 +52,14 @@ struct fsl_mc_io; |
||
7684 | * Maximum number of buffer pools per DPNI |
||
7685 | */ |
||
7686 | #define DPNI_MAX_DPBP 8 |
||
7687 | +/** |
||
7688 | + * Maximum number of senders |
||
7689 | + */ |
||
7690 | +#define DPNI_MAX_SENDERS 16 |
||
7691 | +/** |
||
7692 | + * Maximum distribution size |
||
7693 | + */ |
||
7694 | +#define DPNI_MAX_DIST_SIZE 16 |
||
7695 | |||
7696 | /** |
||
7697 | * All traffic classes considered; see dpni_set_queue() |
||
7698 | @@ -123,13 +131,15 @@ struct dpni_pools_cfg { |
||
7699 | /** |
||
7700 | * struct pools - Buffer pools parameters |
||
7701 | * @dpbp_id: DPBP object ID |
||
7702 | + * @priority_mask: priorities served by DPBP |
||
7703 | * @buffer_size: Buffer size |
||
7704 | * @backup_pool: Backup pool |
||
7705 | */ |
||
7706 | struct { |
||
7707 | - int dpbp_id; |
||
7708 | + u16 dpbp_id; |
||
7709 | + u8 priority_mask; |
||
7710 | u16 buffer_size; |
||
7711 | - int backup_pool; |
||
7712 | + u8 backup_pool; |
||
7713 | } pools[DPNI_MAX_DPBP]; |
||
7714 | }; |
||
7715 | |||
7716 | @@ -476,6 +486,24 @@ union dpni_statistics { |
||
7717 | u64 egress_confirmed_frames; |
||
7718 | } page_2; |
||
7719 | /** |
||
7720 | + * struct page_3 - Page_3 statistics structure with values for the |
||
7721 | + * selected TC |
||
7722 | + * @ceetm_dequeue_bytes: Cumulative count of the number of bytes |
||
7723 | + * dequeued |
||
7724 | + * @ceetm_dequeue_frames: Cumulative count of the number of frames |
||
7725 | + * dequeued |
||
7726 | + * @ceetm_reject_bytes: Cumulative count of the number of bytes in all |
||
7727 | + * frames whose enqueue was rejected |
||
7728 | + * @ceetm_reject_frames: Cumulative count of all frame enqueues |
||
7729 | + * rejected |
||
7730 | + */ |
||
7731 | + struct { |
||
7732 | + u64 ceetm_dequeue_bytes; |
||
7733 | + u64 ceetm_dequeue_frames; |
||
7734 | + u64 ceetm_reject_bytes; |
||
7735 | + u64 ceetm_reject_frames; |
||
7736 | + } page_3; |
||
7737 | + /** |
||
7738 | * struct raw - raw statistics structure |
||
7739 | */ |
||
7740 | struct { |
||
7741 | @@ -487,8 +515,13 @@ int dpni_get_statistics(struct fsl_mc_io |
||
7742 | u32 cmd_flags, |
||
7743 | u16 token, |
||
7744 | u8 page, |
||
7745 | + u8 param, |
||
7746 | union dpni_statistics *stat); |
||
7747 | |||
7748 | +int dpni_reset_statistics(struct fsl_mc_io *mc_io, |
||
7749 | + u32 cmd_flags, |
||
7750 | + u16 token); |
||
7751 | + |
||
7752 | /** |
||
7753 | * Enable auto-negotiation |
||
7754 | */ |
||
7755 | @@ -505,6 +538,10 @@ int dpni_get_statistics(struct fsl_mc_io |
||
7756 | * Enable a-symmetric pause frames |
||
7757 | */ |
||
7758 | #define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL |
||
7759 | +/** |
||
7760 | + * Enable priority flow control pause frames |
||
7761 | + */ |
||
7762 | +#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL |
||
7763 | |||
7764 | /** |
||
7765 | * struct - Structure representing DPNI link configuration |
||
7766 | @@ -538,6 +575,23 @@ int dpni_get_link_state(struct fsl_mc_io |
||
7767 | u16 token, |
||
7768 | struct dpni_link_state *state); |
||
7769 | |||
7770 | +/** |
||
7771 | + * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration |
||
7772 | + * @rate_limit: rate in Mbps |
||
7773 | + * @max_burst_size: burst size in bytes (up to 64KB) |
||
7774 | + */ |
||
7775 | +struct dpni_tx_shaping_cfg { |
||
7776 | + u32 rate_limit; |
||
7777 | + u16 max_burst_size; |
||
7778 | +}; |
||
7779 | + |
||
7780 | +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, |
||
7781 | + u32 cmd_flags, |
||
7782 | + u16 token, |
||
7783 | + const struct dpni_tx_shaping_cfg *tx_cr_shaper, |
||
7784 | + const struct dpni_tx_shaping_cfg *tx_er_shaper, |
||
7785 | + int coupled); |
||
7786 | + |
||
7787 | int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, |
||
7788 | u32 cmd_flags, |
||
7789 | u16 token, |
||
7790 | @@ -639,6 +693,70 @@ int dpni_prepare_key_cfg(const struct dp |
||
7791 | u8 *key_cfg_buf); |
||
7792 | |||
7793 | /** |
||
7794 | + * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration |
||
7795 | + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with |
||
7796 | + * key extractions to be used as the QoS criteria by calling |
||
7797 | + * dpkg_prepare_key_cfg() |
||
7798 | + * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); |
||
7799 | + * '0' to use the 'default_tc' in such cases |
||
7800 | + * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 |
||
7801 | + */ |
||
7802 | +struct dpni_qos_tbl_cfg { |
||
7803 | + u64 key_cfg_iova; |
||
7804 | + int discard_on_miss; |
||
7805 | + u8 default_tc; |
||
7806 | +}; |
||
7807 | + |
||
7808 | +int dpni_set_qos_table(struct fsl_mc_io *mc_io, |
||
7809 | + u32 cmd_flags, |
||
7810 | + u16 token, |
||
7811 | + const struct dpni_qos_tbl_cfg *cfg); |
||
7812 | + |
||
7813 | +/** |
||
7814 | + * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode |
||
7815 | + * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority |
||
7816 | + * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A |
||
7817 | + * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B |
||
7818 | + */ |
||
7819 | +enum dpni_tx_schedule_mode { |
||
7820 | + DPNI_TX_SCHED_STRICT_PRIORITY = 0, |
||
7821 | + DPNI_TX_SCHED_WEIGHTED_A, |
||
7822 | + DPNI_TX_SCHED_WEIGHTED_B, |
||
7823 | +}; |
||
7824 | + |
||
7825 | +/** |
||
7826 | + * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf |
||
7827 | + * @mode: Scheduling mode |
||
7828 | + * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; |
||
7829 | + * not applicable for 'strict-priority' mode; |
||
7830 | + */ |
||
7831 | +struct dpni_tx_schedule_cfg { |
||
7832 | + enum dpni_tx_schedule_mode mode; |
||
7833 | + u16 delta_bandwidth; |
||
7834 | +}; |
||
7835 | + |
||
7836 | +/** |
||
7837 | + * struct dpni_tx_priorities_cfg - Structure representing transmission |
||
7838 | + * priorities for DPNI TCs |
||
7839 | + * @tc_sched: An array of traffic-classes |
||
7840 | + * @prio_group_A: Priority of group A |
||
7841 | + * @prio_group_B: Priority of group B |
||
7842 | + * @separate_groups: Treat A and B groups as separate |
||
7843 | + * @ceetm_ch_idx: ceetm channel index to apply the changes |
||
7844 | + */ |
||
7845 | +struct dpni_tx_priorities_cfg { |
||
7846 | + struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; |
||
7847 | + u8 prio_group_A; |
||
7848 | + u8 prio_group_B; |
||
7849 | + u8 separate_groups; |
||
7850 | +}; |
||
7851 | + |
||
7852 | +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io, |
||
7853 | + u32 cmd_flags, |
||
7854 | + u16 token, |
||
7855 | + const struct dpni_tx_priorities_cfg *cfg); |
||
7856 | + |
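An illustrative scheduling setup for a two-TC DPNI (not part of the patch): TC 0 strict priority, TC 1 in weighted group A at the minimum weight.

static int example_set_tx_prio(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_tx_priorities_cfg cfg = { 0 };

	cfg.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
	cfg.tc_sched[1].mode = DPNI_TX_SCHED_WEIGHTED_A;
	cfg.tc_sched[1].delta_bandwidth = 100;	/* weights: 100..10000 */
	cfg.prio_group_A = 1;
	cfg.separate_groups = 0;

	return dpni_set_tx_priorities(mc_io, 0, token, &cfg);
}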
||
7857 | +/** |
||
7858 | * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration |
||
7859 | * @dist_size: Set the distribution size; |
||
7860 | * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, |
||
7861 | @@ -784,6 +902,108 @@ enum dpni_congestion_point { |
||
7862 | }; |
||
7863 | |||
7864 | /** |
||
7865 | + * struct dpni_dest_cfg - Structure representing DPNI destination parameters |
||
7866 | + * @dest_type: Destination type |
||
7867 | + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type |
||
7868 | + * @priority: Priority selection within the DPIO or DPCON channel; valid |
||
7869 | + * values are 0-1 or 0-7, depending on the number of priorities |
||
7870 | + * in that channel; not relevant for 'DPNI_DEST_NONE' option |
||
7871 | + */ |
||
7872 | +struct dpni_dest_cfg { |
||
7873 | + enum dpni_dest dest_type; |
||
7874 | + int dest_id; |
||
7875 | + u8 priority; |
||
7876 | +}; |
||
7877 | + |
||
7878 | +/* DPNI congestion options */ |
||
7879 | + |
||
7880 | +/** |
||
7881 | + * CSCN message is written to message_iova once entering a |
||
7882 | + * congestion state (see 'threshold_entry') |
||
7883 | + */ |
||
7884 | +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 |
||
7885 | +/** |
||
7886 | + * CSCN message is written to message_iova once exiting a |
||
7887 | + * congestion state (see 'threshold_exit') |
||
7888 | + */ |
||
7889 | +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 |
||
7890 | +/** |
||
7891 | + * CSCN write will attempt to allocate into a cache (coherent write); |
||
7892 | + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected |
||
7893 | + */ |
||
7894 | +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 |
||
7895 | +/** |
||
7896 | + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to |
||
7897 | + * DPIO/DPCON's WQ channel once entering a congestion state |
||
7898 | + * (see 'threshold_entry') |
||
7899 | + */ |
||
7900 | +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 |
||
7901 | +/** |
||
7902 | + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to |
||
7903 | + * DPIO/DPCON's WQ channel once exiting a congestion state |
||
7904 | + * (see 'threshold_exit') |
||
7905 | + */ |
||
7906 | +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 |
||
7907 | +/** |
||
7908 | + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the |
||
7909 | + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) |
||
7910 | + */ |
||
7911 | +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 |
||
7912 | +/** |
||
7913 | + * This congestion will trigger flow control or priority flow control. |
||
7914 | + * This will have effect only if flow control is enabled with |
||
7915 | + * dpni_set_link_cfg(). |
||
7916 | + */ |
||
7917 | +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 |
||
7918 | + |
||
7919 | +/** |
||
7920 | + * struct dpni_congestion_notification_cfg - congestion notification |
||
7921 | + * configuration |
||
7922 | + * @units: Units type |
||
7923 | + * @threshold_entry: Above this threshold we enter a congestion state. |
||
7924 | + * Set it to '0' to disable it. |
||
7925 | + * @threshold_exit: Below this threshold we exit the congestion state. |
||
7926 | + * @message_ctx: The context that will be part of the CSCN message |
||
7927 | + * @message_iova: I/O virtual address (must be in DMA-able memory), |
||
7928 | + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' |
||
7929 | + * is contained in 'options' |
||
7930 | + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel |
||
7931 | + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values |
||
7932 | + */ |
||
7933 | + |
||
7934 | +struct dpni_congestion_notification_cfg { |
||
7935 | + enum dpni_congestion_unit units; |
||
7936 | + u32 threshold_entry; |
||
7937 | + u32 threshold_exit; |
||
7938 | + u64 message_ctx; |
||
7939 | + u64 message_iova; |
||
7940 | + struct dpni_dest_cfg dest_cfg; |
||
7941 | + u16 notification_mode; |
||
7942 | +}; |
||
7943 | + |
||
7944 | +/** Compose the TC parameter for dpni_set_congestion_notification() |
||
7945 | + * and dpni_get_congestion_notification(). |
||
7946 | + */ |
||
7947 | +#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \ |
||
7948 | + ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F)) |
||
7949 | + |
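The macro packs the CEETM channel index into the high nibble and the TC into the low nibble; a worked example:

	u8 tc_param = DPNI_BUILD_CH_TC(2, 5);	/* (2 << 4) | 5 == 0x25 */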
||
7950 | +int dpni_set_congestion_notification( |
||
7951 | + struct fsl_mc_io *mc_io, |
||
7952 | + u32 cmd_flags, |
||
7953 | + u16 token, |
||
7954 | + enum dpni_queue_type qtype, |
||
7955 | + u8 tc_id, |
||
7956 | + const struct dpni_congestion_notification_cfg *cfg); |
||
7957 | + |
||
7958 | +int dpni_get_congestion_notification( |
||
7959 | + struct fsl_mc_io *mc_io, |
||
7960 | + u32 cmd_flags, |
||
7961 | + u16 token, |
||
7962 | + enum dpni_queue_type qtype, |
||
7963 | + u8 tc_id, |
||
7964 | + struct dpni_congestion_notification_cfg *cfg); |
||
7965 | + |
||
7966 | +/** |
||
7967 | * struct dpni_taildrop - Structure representing the taildrop |
||
7968 | * @enable: Indicates whether the taildrop is active or not. |
||
7969 | * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports |
||
7970 | @@ -829,4 +1049,124 @@ struct dpni_rule_cfg { |
||
7971 | u8 key_size; |
||
7972 | }; |
||
7973 | |||
7974 | +int dpni_get_api_version(struct fsl_mc_io *mc_io, |
||
7975 | + u32 cmd_flags, |
||
7976 | + u16 *major_ver, |
||
7977 | + u16 *minor_ver); |
||
7978 | + |
||
7979 | +int dpni_add_qos_entry(struct fsl_mc_io *mc_io, |
||
7980 | + u32 cmd_flags, |
||
7981 | + u16 token, |
||
7982 | + const struct dpni_rule_cfg *cfg, |
||
7983 | + u8 tc_id, |
||
7984 | + u16 index); |
||
7985 | + |
||
7986 | +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, |
||
7987 | + u32 cmd_flags, |
||
7988 | + u16 token, |
||
7989 | + const struct dpni_rule_cfg *cfg); |
||
7990 | + |
||
7991 | +int dpni_clear_qos_table(struct fsl_mc_io *mc_io, |
||
7992 | + u32 cmd_flags, |
||
7993 | + u16 token); |
||
7994 | + |
||
7995 | +/** |
||
7996 | + * Discard matching traffic. If set, this takes precedence over any other |
||
7997 | + * configuration and matching traffic is always discarded. |
||
7998 | + */ |
||
7999 | +#define DPNI_FS_OPT_DISCARD 0x1 |
||
8000 | + |
||
8001 | +/** |
||
8002 | + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to |
||
8003 | + * override the FLC value set per queue. |
||
8004 | + * For more details check the Frame Descriptor section in the hardware |
||
8005 | + * documentation. |
||
8006 | + */ |
||
8007 | +#define DPNI_FS_OPT_SET_FLC 0x2 |
||
8008 | + |
||
8009 | +/** |
||
8010 | + * Indicates whether the 6 least significant bits of FLC are used for stash |
||
8011 | + * control. If set, the 6 least significant bits in value are interpreted as |
||
8012 | + * follows: |
||
8013 | + * - bits 0-1: indicates the number of 64 byte units of context that are |
||
8014 | + * stashed. FLC value is interpreted as a memory address in this case, |
||
8015 | + * excluding the 6 LS bits. |
||
8016 | + * - bits 2-3: indicates the number of 64 byte units of frame annotation |
||
8017 | + * to be stashed. Annotation is placed at FD[ADDR]. |
||
8018 | + * - bits 4-5: indicates the number of 64 byte units of frame data to be |
||
8019 | + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET]. |
||
8020 | + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified. |
||
8021 | + */ |
||
8022 | +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4 |
||
8023 | + |
||
8024 | +/** |
||
8025 | + * struct dpni_fs_action_cfg - Action configuration for table look-up |
||
8026 | + * @flc: FLC value for traffic matching this rule. Please check the |
||
8027 | + * Frame Descriptor section in the hardware documentation for |
||
8028 | + * more information. |
||
8029 | + * @flow_id: Identifies the Rx queue used for matching traffic. Supported |
||
8030 | + * values are in range 0 to num_queue-1. |
||
8031 | + * @options: Any combination of DPNI_FS_OPT_ values. |
||
8032 | + */ |
||
8033 | +struct dpni_fs_action_cfg { |
||
8034 | + u64 flc; |
||
8035 | + u16 flow_id; |
||
8036 | + u16 options; |
||
8037 | +}; |
||
8038 | + |
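An illustrative composition of FLC with stash control (not part of the patch): because the low 6 bits of 'flc' become stash sizes, the flow-context address must be 64-byte aligned. Here one 64 B unit each of context and frame data is stashed.

static void example_fill_action(struct dpni_fs_action_cfg *action,
				u64 ctx_addr /* 64 B aligned */)
{
	u64 stash = (1 << 0) |	/* bits 0-1: flow context units */
		    (0 << 2) |	/* bits 2-3: annotation units */
		    (1 << 4);	/* bits 4-5: frame data units */

	action->flc = ctx_addr | stash;
	action->flow_id = 0;
	action->options = DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL;
}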
||
8039 | +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, |
||
8040 | + u32 cmd_flags, |
||
8041 | + u16 token, |
||
8042 | + u8 tc_id, |
||
8043 | + u16 index, |
||
8044 | + const struct dpni_rule_cfg *cfg, |
||
8045 | + const struct dpni_fs_action_cfg *action); |
||
8046 | + |
||
8047 | +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, |
||
8048 | + u32 cmd_flags, |
||
8049 | + u16 token, |
||
8050 | + u8 tc_id, |
||
8051 | + const struct dpni_rule_cfg *cfg); |
||
8052 | + |
||
8053 | +/** |
||
8054 | + * When used as queue_idx in function dpni_set_rx_dist_default_queue() |
||
8055 | + * it will signal the DPNI to drop all unclassified frames |
||
8056 | + */ |
||
8057 | +#define DPNI_FS_MISS_DROP ((uint16_t)-1) |
||
8058 | + |
||
8059 | +/** |
||
8060 | + * struct dpni_rx_dist_cfg - distribution configuration |
||
8061 | + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8, |
||
8062 | + * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448, |
||
8063 | + * 512,768,896,1024 |
||
8064 | + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with |
||
8065 | + * the extractions to be used for the distribution key by calling |
||
8066 | + * dpkg_prepare_key_cfg(); relevant only when enable != 0, |
||
8067 | + * otherwise it can be '0' |
||
8068 | + * @enable: enable/disable the distribution. |
||
8069 | + * @tc: TC id for which distribution is set |
||
8070 | + * @fs_miss_flow_id: when a packet misses all rules from the flow steering |
||
8071 | + * table and hash is disabled, it will be put into this queue id; use |
||
8072 | + * DPNI_FS_MISS_DROP to drop frames. The value of this field is |
||
8073 | + * used only when flow steering distribution is enabled and hash |
||
8074 | + * distribution is disabled |
||
8075 | + */ |
||
8076 | +struct dpni_rx_dist_cfg { |
||
8077 | + u16 dist_size; |
||
8078 | + u64 key_cfg_iova; |
||
8079 | + u8 enable; |
||
8080 | + u8 tc; |
||
8081 | + u16 fs_miss_flow_id; |
||
8082 | +}; |
||
8083 | + |
||
8084 | +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, |
||
8085 | + u32 cmd_flags, |
||
8086 | + u16 token, |
||
8087 | + const struct dpni_rx_dist_cfg *cfg); |
||
8088 | + |
||
8089 | +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, |
||
8090 | + u32 cmd_flags, |
||
8091 | + u16 token, |
||
8092 | + const struct dpni_rx_dist_cfg *cfg); |
||
8093 | + |
||
8094 | #endif /* __FSL_DPNI_H */ |