OpenWrt – Blame information for rev 4
Rev | Author | Line No. | Line |
---|---|---|---|
4 | office | 1 | --- a/Documentation/devicetree/bindings/net/stmmac.txt |
2 | +++ b/Documentation/devicetree/bindings/net/stmmac.txt |
||
3 | @@ -7,9 +7,12 @@ Required properties: |
||
4 | - interrupt-parent: Should be the phandle for the interrupt controller |
||
5 | that services interrupts for this device |
||
6 | - interrupts: Should contain the STMMAC interrupts |
||
7 | -- interrupt-names: Should contain the interrupt names "macirq" |
||
8 | - "eth_wake_irq" if this interrupt is supported in the "interrupts" |
||
9 | - property |
||
10 | +- interrupt-names: Should contain a list of interrupt names corresponding to |
||
11 | + the interrupts in the interrupts property, if available. |
||
12 | + Valid interrupt names are: |
||
13 | + - "macirq" (combined signal for various interrupt events) |
||
14 | + - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection) |
||
15 | + - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state) |
||
16 | - phy-mode: See ethernet.txt file in the same directory. |
||
17 | - snps,reset-gpio gpio number for phy reset. |
||
18 | - snps,reset-active-low boolean flag to indicate if phy reset is active low. |
||
19 | @@ -28,9 +31,9 @@ Optional properties: |
||
20 | clocks may be specified in derived bindings. |
||
21 | - clock-names: One name for each entry in the clocks property, the |
||
22 | first one should be "stmmaceth" and the second one should be "pclk". |
||
23 | -- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is |
||
24 | - available this clock is used for programming the Timestamp Addend Register. |
||
25 | - If not passed then the system clock will be used and this is fine on some |
||
26 | +- ptp_ref: this is the PTP reference clock; in case the PTP is available |
||
27 | + this clock is used for programming the Timestamp Addend Register. If not |
||
28 | + passed then the system clock will be used and this is fine on some |
||
29 | platforms. |
||
30 | - tx-fifo-depth: See ethernet.txt file in the same directory |
||
31 | - rx-fifo-depth: See ethernet.txt file in the same directory |
||
32 | @@ -72,7 +75,45 @@ Optional properties: |
||
33 | - snps,mb: mixed-burst |
||
34 | - snps,rb: rebuild INCRx Burst |
||
35 | - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. |
||
36 | - |
||
37 | +- Multiple RX Queues parameters: below the list of all the parameters to |
||
38 | + configure the multiple RX queues: |
||
39 | + - snps,rx-queues-to-use: number of RX queues to be used in the driver |
||
40 | + - Choose one of these RX scheduling algorithms: |
||
41 | + - snps,rx-sched-sp: Strict priority |
||
42 | + - snps,rx-sched-wsp: Weighted Strict priority |
||
43 | + - For each RX queue |
||
44 | + - Choose one of these modes: |
||
45 | + - snps,dcb-algorithm: Queue to be enabled as DCB |
||
46 | + - snps,avb-algorithm: Queue to be enabled as AVB |
||
47 | + - snps,map-to-dma-channel: Channel to map |
||
48 | + - Specify specific packet routing: |
||
49 | + - snps,route-avcp: AV Untagged Control packets |
||
50 | + - snps,route-ptp: PTP Packets |
||
51 | + - snps,route-dcbcp: DCB Control Packets |
||
52 | + - snps,route-up: Untagged Packets |
||
53 | + - snps,route-multi-broad: Multicast & Broadcast Packets |
||
54 | + - snps,priority: RX queue priority (Range: 0x0 to 0xF) |
||
55 | +- Multiple TX Queues parameters: below the list of all the parameters to |
||
56 | + configure the multiple TX queues: |
||
57 | + - snps,tx-queues-to-use: number of TX queues to be used in the driver |
||
58 | + - Choose one of these TX scheduling algorithms: |
||
59 | + - snps,tx-sched-wrr: Weighted Round Robin |
||
60 | + - snps,tx-sched-wfq: Weighted Fair Queuing |
||
61 | + - snps,tx-sched-dwrr: Deficit Weighted Round Robin |
||
62 | + - snps,tx-sched-sp: Strict priority |
||
63 | + - For each TX queue |
||
64 | + - snps,weight: TX queue weight (if using a DCB weight algorithm) |
||
65 | + - Choose one of these modes: |
||
66 | + - snps,dcb-algorithm: TX queue will be working in DCB |
||
67 | + - snps,avb-algorithm: TX queue will be working in AVB |
||
68 | + [Attention] Queue 0 is reserved for legacy traffic |
||
69 | + and so no AVB is available in this queue. |
||
70 | + - Configure Credit Base Shaper (if AVB Mode selected): |
||
71 | + - snps,send_slope: CBS send slope credit value |
||
72 | + - snps,idle_slope: CBS idle slope credit value |
||
73 | + - snps,high_credit: CBS hiCredit (maximum positive credit) value |
||
74 | + - snps,low_credit: CBS loCredit (maximum negative credit) value |
||
75 | + - snps,priority: TX queue priority (Range: 0x0 to 0xF) |
||
76 | Examples: |
||
77 | |||
78 | stmmac_axi_setup: stmmac-axi-config { |
||
79 | @@ -81,12 +122,41 @@ Examples: |
||
80 | snps,blen = <256 128 64 32 0 0 0>; |
||
81 | }; |
||
82 | |||
83 | + mtl_rx_setup: rx-queues-config { |
||
84 | + snps,rx-queues-to-use = <1>; |
||
85 | + snps,rx-sched-sp; |
||
86 | + queue0 { |
||
87 | + snps,dcb-algorithm; |
||
88 | + snps,map-to-dma-channel = <0x0>; |
||
89 | + snps,priority = <0x0>; |
||
90 | + }; |
||
91 | + }; |
||
92 | + |
||
93 | + mtl_tx_setup: tx-queues-config { |
||
94 | + snps,tx-queues-to-use = <2>; |
||
95 | + snps,tx-sched-wrr; |
||
96 | + queue0 { |
||
97 | + snps,weight = <0x10>; |
||
98 | + snps,dcb-algorithm; |
||
99 | + snps,priority = <0x0>; |
||
100 | + }; |
||
101 | + |
||
102 | + queue1 { |
||
103 | + snps,avb-algorithm; |
||
104 | + snps,send_slope = <0x1000>; |
||
105 | + snps,idle_slope = <0x1000>; |
||
106 | + snps,high_credit = <0x3E800>; |
||
107 | + snps,low_credit = <0xFFC18000>; |
||
108 | + snps,priority = <0x1>; |
||
109 | + }; |
||
110 | + }; |
||
111 | + |
||
112 | gmac0: ethernet@e0800000 { |
||
113 | compatible = "st,spear600-gmac"; |
||
114 | reg = <0xe0800000 0x8000>; |
||
115 | interrupt-parent = <&vic1>; |
||
116 | - interrupts = <24 23>; |
||
117 | - interrupt-names = "macirq", "eth_wake_irq"; |
||
118 | + interrupts = <24 23 22>; |
||
119 | + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi"; |
||
120 | mac-address = [000000000000]; /* Filled in by U-Boot */ |
||
121 | max-frame-size = <3800>; |
||
122 | phy-mode = "gmii"; |
||
123 | @@ -104,4 +174,6 @@ Examples: |
||
124 | phy1: ethernet-phy@0 { |
||
125 | }; |
||
126 | }; |
||
127 | + snps,mtl-rx-config = <&mtl_rx_setup>; |
||
128 | + snps,mtl-tx-config = <&mtl_tx_setup>; |
||
129 | }; |
||
130 | --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c |
||
131 | +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c |
||
132 | @@ -37,6 +37,7 @@ |
||
133 | #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) |
||
134 | #define TSE_PCS_CONTROL_REG 0x00 |
||
135 | #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) |
||
136 | +#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140 |
||
137 | #define TSE_PCS_IF_MODE_REG 0x28 |
||
138 | #define TSE_PCS_LINK_TIMER_0_REG 0x24 |
||
139 | #define TSE_PCS_LINK_TIMER_1_REG 0x26 |
||
140 | @@ -65,6 +66,7 @@ |
||
141 | #define TSE_PCS_SW_RESET_TIMEOUT 100 |
||
142 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) |
||
143 | #define TSE_PCS_USE_SGMII_ENA BIT(0) |
||
144 | +#define TSE_PCS_IF_USE_SGMII 0x03 |
||
145 | |||
146 | #define SGMII_ADAPTER_CTRL_REG 0x00 |
||
147 | #define SGMII_ADAPTER_DISABLE 0x0001 |
||
148 | @@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, str |
||
149 | { |
||
150 | int ret = 0; |
||
151 | |||
152 | - writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); |
||
153 | + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG); |
||
154 | + |
||
155 | + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG); |
||
156 | |||
157 | writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); |
||
158 | writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); |
||
159 | --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c |
||
160 | +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c |
||
161 | @@ -26,12 +26,15 @@ |
||
162 | |||
163 | static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) |
||
164 | { |
||
165 | - struct stmmac_priv *priv = (struct stmmac_priv *)p; |
||
166 | - unsigned int entry = priv->cur_tx; |
||
167 | - struct dma_desc *desc = priv->dma_tx + entry; |
||
168 | + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; |
||
169 | unsigned int nopaged_len = skb_headlen(skb); |
||
170 | + struct stmmac_priv *priv = tx_q->priv_data; |
||
171 | + unsigned int entry = tx_q->cur_tx; |
||
172 | unsigned int bmax, des2; |
||
173 | unsigned int i = 1, len; |
||
174 | + struct dma_desc *desc; |
||
175 | + |
||
176 | + desc = tx_q->dma_tx + entry; |
||
177 | |||
178 | if (priv->plat->enh_desc) |
||
179 | bmax = BUF_SIZE_8KiB; |
||
180 | @@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, str |
||
181 | desc->des2 = cpu_to_le32(des2); |
||
182 | if (dma_mapping_error(priv->device, des2)) |
||
183 | return -1; |
||
184 | - priv->tx_skbuff_dma[entry].buf = des2; |
||
185 | - priv->tx_skbuff_dma[entry].len = bmax; |
||
186 | + tx_q->tx_skbuff_dma[entry].buf = des2; |
||
187 | + tx_q->tx_skbuff_dma[entry].len = bmax; |
||
188 | /* do not close the descriptor and do not set own bit */ |
||
189 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, |
||
190 | - 0, false); |
||
191 | + 0, false, skb->len); |
||
192 | |||
193 | while (len != 0) { |
||
194 | - priv->tx_skbuff[entry] = NULL; |
||
195 | + tx_q->tx_skbuff[entry] = NULL; |
||
196 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
||
197 | - desc = priv->dma_tx + entry; |
||
198 | + desc = tx_q->dma_tx + entry; |
||
199 | |||
200 | if (len > bmax) { |
||
201 | des2 = dma_map_single(priv->device, |
||
202 | @@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, str |
||
203 | desc->des2 = cpu_to_le32(des2); |
||
204 | if (dma_mapping_error(priv->device, des2)) |
||
205 | return -1; |
||
206 | - priv->tx_skbuff_dma[entry].buf = des2; |
||
207 | - priv->tx_skbuff_dma[entry].len = bmax; |
||
208 | + tx_q->tx_skbuff_dma[entry].buf = des2; |
||
209 | + tx_q->tx_skbuff_dma[entry].len = bmax; |
||
210 | priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, |
||
211 | STMMAC_CHAIN_MODE, 1, |
||
212 | - false); |
||
213 | + false, skb->len); |
||
214 | len -= bmax; |
||
215 | i++; |
||
216 | } else { |
||
217 | @@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, str |
||
218 | desc->des2 = cpu_to_le32(des2); |
||
219 | if (dma_mapping_error(priv->device, des2)) |
||
220 | return -1; |
||
221 | - priv->tx_skbuff_dma[entry].buf = des2; |
||
222 | - priv->tx_skbuff_dma[entry].len = len; |
||
223 | + tx_q->tx_skbuff_dma[entry].buf = des2; |
||
224 | + tx_q->tx_skbuff_dma[entry].len = len; |
||
225 | /* last descriptor can be set now */ |
||
226 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, |
||
227 | STMMAC_CHAIN_MODE, 1, |
||
228 | - true); |
||
229 | + true, skb->len); |
||
230 | len = 0; |
||
231 | } |
||
232 | } |
||
233 | |||
234 | - priv->cur_tx = entry; |
||
235 | + tx_q->cur_tx = entry; |
||
236 | |||
237 | return entry; |
||
238 | } |
||
239 | @@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void * |
||
240 | |||
241 | static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) |
||
242 | { |
||
243 | - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; |
||
244 | + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; |
||
245 | + struct stmmac_priv *priv = rx_q->priv_data; |
||
246 | |||
247 | if (priv->hwts_rx_en && !priv->extend_desc) |
||
248 | /* NOTE: Device will overwrite des3 with timestamp value if |
||
249 | * 1588-2002 time stamping is enabled, hence reinitialize it |
||
250 | * to keep explicit chaining in the descriptor. |
||
251 | */ |
||
252 | - p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy + |
||
253 | - (((priv->dirty_rx) + 1) % |
||
254 | + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + |
||
255 | + (((rx_q->dirty_rx) + 1) % |
||
256 | DMA_RX_SIZE) * |
||
257 | sizeof(struct dma_desc))); |
||
258 | } |
||
259 | |||
260 | static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) |
||
261 | { |
||
262 | - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; |
||
263 | - unsigned int entry = priv->dirty_tx; |
||
264 | + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; |
||
265 | + struct stmmac_priv *priv = tx_q->priv_data; |
||
266 | + unsigned int entry = tx_q->dirty_tx; |
||
267 | |||
268 | - if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && |
||
269 | + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && |
||
270 | priv->hwts_tx_en) |
||
271 | /* NOTE: Device will overwrite des3 with timestamp value if |
||
272 | * 1588-2002 time stamping is enabled, hence reinitialize it |
||
273 | * to keep explicit chaining in the descriptor. |
||
274 | */ |
||
275 | - p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy + |
||
276 | - ((priv->dirty_tx + 1) % DMA_TX_SIZE)) |
||
277 | + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + |
||
278 | + ((tx_q->dirty_tx + 1) % DMA_TX_SIZE)) |
||
279 | * sizeof(struct dma_desc))); |
||
280 | } |
||
281 | |||
282 | --- a/drivers/net/ethernet/stmicro/stmmac/common.h |
||
283 | +++ b/drivers/net/ethernet/stmicro/stmmac/common.h |
||
284 | @@ -246,6 +246,15 @@ struct stmmac_extra_stats { |
||
285 | #define STMMAC_TX_MAX_FRAMES 256 |
||
286 | #define STMMAC_TX_FRAMES 64 |
||
287 | |||
288 | +/* Packets types */ |
||
289 | +enum packets_types { |
||
290 | + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */ |
||
291 | + PACKET_PTPQ = 0x2, /* PTP Packets */ |
||
292 | + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */ |
||
293 | + PACKET_UPQ = 0x4, /* Untagged Packets */ |
||
294 | + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */ |
||
295 | +}; |
||
296 | + |
||
297 | /* Rx IPC status */ |
||
298 | enum rx_frame_status { |
||
299 | good_frame = 0x0, |
||
300 | @@ -324,6 +333,9 @@ struct dma_features { |
||
301 | unsigned int number_tx_queues; |
||
302 | /* Alternate (enhanced) DESC mode */ |
||
303 | unsigned int enh_desc; |
||
304 | + /* TX and RX FIFO sizes */ |
||
305 | + unsigned int tx_fifo_size; |
||
306 | + unsigned int rx_fifo_size; |
||
307 | }; |
||
308 | |||
309 | /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ |
||
310 | @@ -361,7 +373,7 @@ struct stmmac_desc_ops { |
||
311 | /* Invoked by the xmit function to prepare the tx descriptor */ |
||
312 | void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, |
||
313 | bool csum_flag, int mode, bool tx_own, |
||
314 | - bool ls); |
||
315 | + bool ls, unsigned int tot_pkt_len); |
||
316 | void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, |
||
317 | int len2, bool tx_own, bool ls, |
||
318 | unsigned int tcphdrlen, |
||
319 | @@ -413,6 +425,14 @@ struct stmmac_dma_ops { |
||
320 | int (*reset)(void __iomem *ioaddr); |
||
321 | void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, |
||
322 | u32 dma_tx, u32 dma_rx, int atds); |
||
323 | + void (*init_chan)(void __iomem *ioaddr, |
||
324 | + struct stmmac_dma_cfg *dma_cfg, u32 chan); |
||
325 | + void (*init_rx_chan)(void __iomem *ioaddr, |
||
326 | + struct stmmac_dma_cfg *dma_cfg, |
||
327 | + u32 dma_rx_phy, u32 chan); |
||
328 | + void (*init_tx_chan)(void __iomem *ioaddr, |
||
329 | + struct stmmac_dma_cfg *dma_cfg, |
||
330 | + u32 dma_tx_phy, u32 chan); |
||
331 | /* Configure the AXI Bus Mode Register */ |
||
332 | void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); |
||
333 | /* Dump DMA registers */ |
||
334 | @@ -421,25 +441,28 @@ struct stmmac_dma_ops { |
||
335 | * An invalid value enables the store-and-forward mode */ |
||
336 | void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, |
||
337 | int rxfifosz); |
||
338 | + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, |
||
339 | + int fifosz); |
||
340 | + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel); |
||
341 | /* To track extra statistic (if supported) */ |
||
342 | void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, |
||
343 | void __iomem *ioaddr); |
||
344 | void (*enable_dma_transmission) (void __iomem *ioaddr); |
||
345 | - void (*enable_dma_irq) (void __iomem *ioaddr); |
||
346 | - void (*disable_dma_irq) (void __iomem *ioaddr); |
||
347 | - void (*start_tx) (void __iomem *ioaddr); |
||
348 | - void (*stop_tx) (void __iomem *ioaddr); |
||
349 | - void (*start_rx) (void __iomem *ioaddr); |
||
350 | - void (*stop_rx) (void __iomem *ioaddr); |
||
351 | + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); |
||
352 | + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); |
||
353 | + void (*start_tx)(void __iomem *ioaddr, u32 chan); |
||
354 | + void (*stop_tx)(void __iomem *ioaddr, u32 chan); |
||
355 | + void (*start_rx)(void __iomem *ioaddr, u32 chan); |
||
356 | + void (*stop_rx)(void __iomem *ioaddr, u32 chan); |
||
357 | int (*dma_interrupt) (void __iomem *ioaddr, |
||
358 | - struct stmmac_extra_stats *x); |
||
359 | + struct stmmac_extra_stats *x, u32 chan); |
||
360 | /* If supported then get the optional core features */ |
||
361 | void (*get_hw_feature)(void __iomem *ioaddr, |
||
362 | struct dma_features *dma_cap); |
||
363 | /* Program the HW RX Watchdog */ |
||
364 | - void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); |
||
365 | - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len); |
||
366 | - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len); |
||
367 | + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); |
||
368 | + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); |
||
369 | + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); |
||
370 | void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
||
371 | void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
||
372 | void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); |
||
373 | @@ -451,20 +474,44 @@ struct mac_device_info; |
||
374 | struct stmmac_ops { |
||
375 | /* MAC core initialization */ |
||
376 | void (*core_init)(struct mac_device_info *hw, int mtu); |
||
377 | + /* Enable the MAC RX/TX */ |
||
378 | + void (*set_mac)(void __iomem *ioaddr, bool enable); |
||
379 | /* Enable and verify that the IPC module is supported */ |
||
380 | int (*rx_ipc)(struct mac_device_info *hw); |
||
381 | /* Enable RX Queues */ |
||
382 | - void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); |
||
383 | + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); |
||
384 | + /* RX Queues Priority */ |
||
385 | + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); |
||
386 | + /* TX Queues Priority */ |
||
387 | + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); |
||
388 | + /* RX Queues Routing */ |
||
389 | + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, |
||
390 | + u32 queue); |
||
391 | + /* Program RX Algorithms */ |
||
392 | + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); |
||
393 | + /* Program TX Algorithms */ |
||
394 | + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); |
||
395 | + /* Set MTL TX queues weight */ |
||
396 | + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, |
||
397 | + u32 weight, u32 queue); |
||
398 | + /* RX MTL queue to RX dma mapping */ |
||
399 | + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); |
||
400 | + /* Configure AV Algorithm */ |
||
401 | + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, |
||
402 | + u32 idle_slope, u32 high_credit, u32 low_credit, |
||
403 | + u32 queue); |
||
404 | /* Dump MAC registers */ |
||
405 | void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); |
||
406 | /* Handle extra events on specific interrupts hw dependent */ |
||
407 | int (*host_irq_status)(struct mac_device_info *hw, |
||
408 | struct stmmac_extra_stats *x); |
||
409 | + /* Handle MTL interrupts */ |
||
410 | + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); |
||
411 | /* Multicast filter setting */ |
||
412 | void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); |
||
413 | /* Flow control setting */ |
||
414 | void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, |
||
415 | - unsigned int fc, unsigned int pause_time); |
||
416 | + unsigned int fc, unsigned int pause_time, u32 tx_cnt); |
||
417 | /* Set power management mode (e.g. magic frame) */ |
||
418 | void (*pmt)(struct mac_device_info *hw, unsigned long mode); |
||
419 | /* Set/Get Unicast MAC addresses */ |
||
420 | @@ -477,7 +524,8 @@ struct stmmac_ops { |
||
421 | void (*reset_eee_mode)(struct mac_device_info *hw); |
||
422 | void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); |
||
423 | void (*set_eee_pls)(struct mac_device_info *hw, int link); |
||
424 | - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); |
||
425 | + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, |
||
426 | + u32 rx_queues, u32 tx_queues); |
||
427 | /* PCS calls */ |
||
428 | void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, |
||
429 | bool loopback); |
||
430 | @@ -547,6 +595,11 @@ struct mac_device_info { |
||
431 | unsigned int ps; |
||
432 | }; |
||
433 | |||
434 | +struct stmmac_rx_routing { |
||
435 | + u32 reg_mask; |
||
436 | + u32 reg_shift; |
||
437 | +}; |
||
438 | + |
||
439 | struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, |
||
440 | int perfect_uc_entries, |
||
441 | int *synopsys_id); |
||
442 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c |
||
443 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c |
||
444 | @@ -14,16 +14,34 @@ |
||
445 | #include <linux/clk.h> |
||
446 | #include <linux/clk-provider.h> |
||
447 | #include <linux/device.h> |
||
448 | +#include <linux/gpio/consumer.h> |
||
449 | #include <linux/ethtool.h> |
||
450 | #include <linux/io.h> |
||
451 | +#include <linux/iopoll.h> |
||
452 | #include <linux/ioport.h> |
||
453 | #include <linux/module.h> |
||
454 | +#include <linux/of_device.h> |
||
455 | #include <linux/of_net.h> |
||
456 | #include <linux/mfd/syscon.h> |
||
457 | #include <linux/platform_device.h> |
||
458 | +#include <linux/reset.h> |
||
459 | #include <linux/stmmac.h> |
||
460 | |||
461 | #include "stmmac_platform.h" |
||
462 | +#include "dwmac4.h" |
||
463 | + |
||
464 | +struct tegra_eqos { |
||
465 | + struct device *dev; |
||
466 | + void __iomem *regs; |
||
467 | + |
||
468 | + struct reset_control *rst; |
||
469 | + struct clk *clk_master; |
||
470 | + struct clk *clk_slave; |
||
471 | + struct clk *clk_tx; |
||
472 | + struct clk *clk_rx; |
||
473 | + |
||
474 | + struct gpio_desc *reset; |
||
475 | +}; |
||
476 | |||
477 | static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, |
||
478 | struct plat_stmmacenet_data *plat_dat) |
||
479 | @@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struc |
||
480 | return 0; |
||
481 | } |
||
482 | |||
483 | +static void *dwc_qos_probe(struct platform_device *pdev, |
||
484 | + struct plat_stmmacenet_data *plat_dat, |
||
485 | + struct stmmac_resources *stmmac_res) |
||
486 | +{ |
||
487 | + int err; |
||
488 | + |
||
489 | + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); |
||
490 | + if (IS_ERR(plat_dat->stmmac_clk)) { |
||
491 | + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); |
||
492 | + return ERR_CAST(plat_dat->stmmac_clk); |
||
493 | + } |
||
494 | + |
||
495 | + err = clk_prepare_enable(plat_dat->stmmac_clk); |
||
496 | + if (err < 0) { |
||
497 | + dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n", |
||
498 | + err); |
||
499 | + return ERR_PTR(err); |
||
500 | + } |
||
501 | + |
||
502 | + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); |
||
503 | + if (IS_ERR(plat_dat->pclk)) { |
||
504 | + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); |
||
505 | + err = PTR_ERR(plat_dat->pclk); |
||
506 | + goto disable; |
||
507 | + } |
||
508 | + |
||
509 | + err = clk_prepare_enable(plat_dat->pclk); |
||
510 | + if (err < 0) { |
||
511 | + dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", |
||
512 | + err); |
||
513 | + goto disable; |
||
514 | + } |
||
515 | + |
||
516 | + return NULL; |
||
517 | + |
||
518 | +disable: |
||
519 | + clk_disable_unprepare(plat_dat->stmmac_clk); |
||
520 | + return ERR_PTR(err); |
||
521 | +} |
||
522 | + |
||
523 | +static int dwc_qos_remove(struct platform_device *pdev) |
||
524 | +{ |
||
525 | + struct net_device *ndev = platform_get_drvdata(pdev); |
||
526 | + struct stmmac_priv *priv = netdev_priv(ndev); |
||
527 | + |
||
528 | + clk_disable_unprepare(priv->plat->pclk); |
||
529 | + clk_disable_unprepare(priv->plat->stmmac_clk); |
||
530 | + |
||
531 | + return 0; |
||
532 | +} |
||
533 | + |
||
534 | +#define SDMEMCOMPPADCTRL 0x8800 |
||
535 | +#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) |
||
536 | + |
||
537 | +#define AUTO_CAL_CONFIG 0x8804 |
||
538 | +#define AUTO_CAL_CONFIG_START BIT(31) |
||
539 | +#define AUTO_CAL_CONFIG_ENABLE BIT(29) |
||
540 | + |
||
541 | +#define AUTO_CAL_STATUS 0x880c |
||
542 | +#define AUTO_CAL_STATUS_ACTIVE BIT(31) |
||
543 | + |
||
544 | +static void tegra_eqos_fix_speed(void *priv, unsigned int speed) |
||
545 | +{ |
||
546 | + struct tegra_eqos *eqos = priv; |
||
547 | + unsigned long rate = 125000000; |
||
548 | + bool needs_calibration = false; |
||
549 | + u32 value; |
||
550 | + int err; |
||
551 | + |
||
552 | + switch (speed) { |
||
553 | + case SPEED_1000: |
||
554 | + needs_calibration = true; |
||
555 | + rate = 125000000; |
||
556 | + break; |
||
557 | + |
||
558 | + case SPEED_100: |
||
559 | + needs_calibration = true; |
||
560 | + rate = 25000000; |
||
561 | + break; |
||
562 | + |
||
563 | + case SPEED_10: |
||
564 | + rate = 2500000; |
||
565 | + break; |
||
566 | + |
||
567 | + default: |
||
568 | + dev_err(eqos->dev, "invalid speed %u\n", speed); |
||
569 | + break; |
||
570 | + } |
||
571 | + |
||
572 | + if (needs_calibration) { |
||
573 | + /* calibrate */ |
||
574 | + value = readl(eqos->regs + SDMEMCOMPPADCTRL); |
||
575 | + value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; |
||
576 | + writel(value, eqos->regs + SDMEMCOMPPADCTRL); |
||
577 | + |
||
578 | + udelay(1); |
||
579 | + |
||
580 | + value = readl(eqos->regs + AUTO_CAL_CONFIG); |
||
581 | + value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE; |
||
582 | + writel(value, eqos->regs + AUTO_CAL_CONFIG); |
||
583 | + |
||
584 | + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, |
||
585 | + value, |
||
586 | + value & AUTO_CAL_STATUS_ACTIVE, |
||
587 | + 1, 10); |
||
588 | + if (err < 0) { |
||
589 | + dev_err(eqos->dev, "calibration did not start\n"); |
||
590 | + goto failed; |
||
591 | + } |
||
592 | + |
||
593 | + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, |
||
594 | + value, |
||
595 | + (value & AUTO_CAL_STATUS_ACTIVE) == 0, |
||
596 | + 20, 200); |
||
597 | + if (err < 0) { |
||
598 | + dev_err(eqos->dev, "calibration didn't finish\n"); |
||
599 | + goto failed; |
||
600 | + } |
||
601 | + |
||
602 | + failed: |
||
603 | + value = readl(eqos->regs + SDMEMCOMPPADCTRL); |
||
604 | + value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; |
||
605 | + writel(value, eqos->regs + SDMEMCOMPPADCTRL); |
||
606 | + } else { |
||
607 | + value = readl(eqos->regs + AUTO_CAL_CONFIG); |
||
608 | + value &= ~AUTO_CAL_CONFIG_ENABLE; |
||
609 | + writel(value, eqos->regs + AUTO_CAL_CONFIG); |
||
610 | + } |
||
611 | + |
||
612 | + err = clk_set_rate(eqos->clk_tx, rate); |
||
613 | + if (err < 0) |
||
614 | + dev_err(eqos->dev, "failed to set TX rate: %d\n", err); |
||
615 | +} |
||
616 | + |
||
617 | +static int tegra_eqos_init(struct platform_device *pdev, void *priv) |
||
618 | +{ |
||
619 | + struct tegra_eqos *eqos = priv; |
||
620 | + unsigned long rate; |
||
621 | + u32 value; |
||
622 | + |
||
623 | + rate = clk_get_rate(eqos->clk_slave); |
||
624 | + |
||
625 | + value = (rate / 1000000) - 1; |
||
626 | + writel(value, eqos->regs + GMAC_1US_TIC_COUNTER); |
||
627 | + |
||
628 | + return 0; |
||
629 | +} |
||
630 | + |
||
631 | +static void *tegra_eqos_probe(struct platform_device *pdev, |
||
632 | + struct plat_stmmacenet_data *data, |
||
633 | + struct stmmac_resources *res) |
||
634 | +{ |
||
635 | + struct tegra_eqos *eqos; |
||
636 | + int err; |
||
637 | + |
||
638 | + eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL); |
||
639 | + if (!eqos) { |
||
640 | + err = -ENOMEM; |
||
641 | + goto error; |
||
642 | + } |
||
643 | + |
||
644 | + eqos->dev = &pdev->dev; |
||
645 | + eqos->regs = res->addr; |
||
646 | + |
||
647 | + eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus"); |
||
648 | + if (IS_ERR(eqos->clk_master)) { |
||
649 | + err = PTR_ERR(eqos->clk_master); |
||
650 | + goto error; |
||
651 | + } |
||
652 | + |
||
653 | + err = clk_prepare_enable(eqos->clk_master); |
||
654 | + if (err < 0) |
||
655 | + goto error; |
||
656 | + |
||
657 | + eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus"); |
||
658 | + if (IS_ERR(eqos->clk_slave)) { |
||
659 | + err = PTR_ERR(eqos->clk_slave); |
||
660 | + goto disable_master; |
||
661 | + } |
||
662 | + |
||
663 | + data->stmmac_clk = eqos->clk_slave; |
||
664 | + |
||
665 | + err = clk_prepare_enable(eqos->clk_slave); |
||
666 | + if (err < 0) |
||
667 | + goto disable_master; |
||
668 | + |
||
669 | + eqos->clk_rx = devm_clk_get(&pdev->dev, "rx"); |
||
670 | + if (IS_ERR(eqos->clk_rx)) { |
||
671 | + err = PTR_ERR(eqos->clk_rx); |
||
672 | + goto disable_slave; |
||
673 | + } |
||
674 | + |
||
675 | + err = clk_prepare_enable(eqos->clk_rx); |
||
676 | + if (err < 0) |
||
677 | + goto disable_slave; |
||
678 | + |
||
679 | + eqos->clk_tx = devm_clk_get(&pdev->dev, "tx"); |
||
680 | + if (IS_ERR(eqos->clk_tx)) { |
||
681 | + err = PTR_ERR(eqos->clk_tx); |
||
682 | + goto disable_rx; |
||
683 | + } |
||
684 | + |
||
685 | + err = clk_prepare_enable(eqos->clk_tx); |
||
686 | + if (err < 0) |
||
687 | + goto disable_rx; |
||
688 | + |
||
689 | + eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH); |
||
690 | + if (IS_ERR(eqos->reset)) { |
||
691 | + err = PTR_ERR(eqos->reset); |
||
692 | + goto disable_tx; |
||
693 | + } |
||
694 | + |
||
695 | + usleep_range(2000, 4000); |
||
696 | + gpiod_set_value(eqos->reset, 0); |
||
697 | + |
||
698 | + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos"); |
||
699 | + if (IS_ERR(eqos->rst)) { |
||
700 | + err = PTR_ERR(eqos->rst); |
||
701 | + goto reset_phy; |
||
702 | + } |
||
703 | + |
||
704 | + err = reset_control_assert(eqos->rst); |
||
705 | + if (err < 0) |
||
706 | + goto reset_phy; |
||
707 | + |
||
708 | + usleep_range(2000, 4000); |
||
709 | + |
||
710 | + err = reset_control_deassert(eqos->rst); |
||
711 | + if (err < 0) |
||
712 | + goto reset_phy; |
||
713 | + |
||
714 | + usleep_range(2000, 4000); |
||
715 | + |
||
716 | + data->fix_mac_speed = tegra_eqos_fix_speed; |
||
717 | + data->init = tegra_eqos_init; |
||
718 | + data->bsp_priv = eqos; |
||
719 | + |
||
720 | + err = tegra_eqos_init(pdev, eqos); |
||
721 | + if (err < 0) |
||
722 | + goto reset; |
||
723 | + |
||
724 | +out: |
||
725 | + return eqos; |
||
726 | + |
||
727 | +reset: |
||
728 | + reset_control_assert(eqos->rst); |
||
729 | +reset_phy: |
||
730 | + gpiod_set_value(eqos->reset, 1); |
||
731 | +disable_tx: |
||
732 | + clk_disable_unprepare(eqos->clk_tx); |
||
733 | +disable_rx: |
||
734 | + clk_disable_unprepare(eqos->clk_rx); |
||
735 | +disable_slave: |
||
736 | + clk_disable_unprepare(eqos->clk_slave); |
||
737 | +disable_master: |
||
738 | + clk_disable_unprepare(eqos->clk_master); |
||
739 | +error: |
||
740 | + eqos = ERR_PTR(err); |
||
741 | + goto out; |
||
742 | +} |
||
743 | + |
||
744 | +static int tegra_eqos_remove(struct platform_device *pdev) |
||
745 | +{ |
||
746 | + struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev); |
||
747 | + |
||
748 | + reset_control_assert(eqos->rst); |
||
749 | + gpiod_set_value(eqos->reset, 1); |
||
750 | + clk_disable_unprepare(eqos->clk_tx); |
||
751 | + clk_disable_unprepare(eqos->clk_rx); |
||
752 | + clk_disable_unprepare(eqos->clk_slave); |
||
753 | + clk_disable_unprepare(eqos->clk_master); |
||
754 | + |
||
755 | + return 0; |
||
756 | +} |
||
757 | + |
||
758 | +struct dwc_eth_dwmac_data { |
||
759 | + void *(*probe)(struct platform_device *pdev, |
||
760 | + struct plat_stmmacenet_data *data, |
||
761 | + struct stmmac_resources *res); |
||
762 | + int (*remove)(struct platform_device *pdev); |
||
763 | +}; |
||
764 | + |
||
765 | +static const struct dwc_eth_dwmac_data dwc_qos_data = { |
||
766 | + .probe = dwc_qos_probe, |
||
767 | + .remove = dwc_qos_remove, |
||
768 | +}; |
||
769 | + |
||
770 | +static const struct dwc_eth_dwmac_data tegra_eqos_data = { |
||
771 | + .probe = tegra_eqos_probe, |
||
772 | + .remove = tegra_eqos_remove, |
||
773 | +}; |
||
774 | + |
||
775 | static int dwc_eth_dwmac_probe(struct platform_device *pdev) |
||
776 | { |
||
777 | + const struct dwc_eth_dwmac_data *data; |
||
778 | struct plat_stmmacenet_data *plat_dat; |
||
779 | struct stmmac_resources stmmac_res; |
||
780 | struct resource *res; |
||
781 | + void *priv; |
||
782 | int ret; |
||
783 | |||
784 | + data = of_device_get_match_data(&pdev->dev); |
||
785 | + |
||
786 | memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); |
||
787 | |||
788 | /** |
||
789 | @@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct pl |
||
790 | if (IS_ERR(plat_dat)) |
||
791 | return PTR_ERR(plat_dat); |
||
792 | |||
793 | - plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); |
||
794 | - if (IS_ERR(plat_dat->stmmac_clk)) { |
||
795 | - dev_err(&pdev->dev, "apb_pclk clock not found.\n"); |
||
796 | - ret = PTR_ERR(plat_dat->stmmac_clk); |
||
797 | - plat_dat->stmmac_clk = NULL; |
||
798 | - goto err_remove_config_dt; |
||
799 | + priv = data->probe(pdev, plat_dat, &stmmac_res); |
||
800 | + if (IS_ERR(priv)) { |
||
801 | + ret = PTR_ERR(priv); |
||
802 | + dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret); |
||
803 | + goto remove_config; |
||
804 | } |
||
805 | - clk_prepare_enable(plat_dat->stmmac_clk); |
||
806 | - |
||
807 | - plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); |
||
808 | - if (IS_ERR(plat_dat->pclk)) { |
||
809 | - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); |
||
810 | - ret = PTR_ERR(plat_dat->pclk); |
||
811 | - plat_dat->pclk = NULL; |
||
812 | - goto err_out_clk_dis_phy; |
||
813 | - } |
||
814 | - clk_prepare_enable(plat_dat->pclk); |
||
815 | |||
816 | ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); |
||
817 | if (ret) |
||
818 | - goto err_out_clk_dis_aper; |
||
819 | + goto remove; |
||
820 | |||
821 | ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); |
||
822 | if (ret) |
||
823 | - goto err_out_clk_dis_aper; |
||
824 | + goto remove; |
||
825 | |||
826 | - return 0; |
||
827 | + return ret; |
||
828 | |||
829 | -err_out_clk_dis_aper: |
||
830 | - clk_disable_unprepare(plat_dat->pclk); |
||
831 | -err_out_clk_dis_phy: |
||
832 | - clk_disable_unprepare(plat_dat->stmmac_clk); |
||
833 | -err_remove_config_dt: |
||
834 | +remove: |
||
835 | + data->remove(pdev); |
||
836 | +remove_config: |
||
837 | stmmac_remove_config_dt(pdev, plat_dat); |
||
838 | |||
839 | return ret; |
||
840 | @@ -178,11 +479,29 @@ err_remove_config_dt: |
||
841 | |||
842 | static int dwc_eth_dwmac_remove(struct platform_device *pdev) |
||
843 | { |
||
844 | - return stmmac_pltfr_remove(pdev); |
||
845 | + struct net_device *ndev = platform_get_drvdata(pdev); |
||
846 | + struct stmmac_priv *priv = netdev_priv(ndev); |
||
847 | + const struct dwc_eth_dwmac_data *data; |
||
848 | + int err; |
||
849 | + |
||
850 | + data = of_device_get_match_data(&pdev->dev); |
||
851 | + |
||
852 | + err = stmmac_dvr_remove(&pdev->dev); |
||
853 | + if (err < 0) |
||
854 | + dev_err(&pdev->dev, "failed to remove platform: %d\n", err); |
||
855 | + |
||
856 | + err = data->remove(pdev); |
||
857 | + if (err < 0) |
||
858 | + dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err); |
||
859 | + |
||
860 | + stmmac_remove_config_dt(pdev, priv->plat); |
||
861 | + |
||
862 | + return err; |
||
863 | } |
||
864 | |||
865 | static const struct of_device_id dwc_eth_dwmac_match[] = { |
||
866 | - { .compatible = "snps,dwc-qos-ethernet-4.10", }, |
||
867 | + { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data }, |
||
868 | + { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data }, |
||
869 | { } |
||
870 | }; |
||
871 | MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); |
||
872 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c |
||
873 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c |
||
874 | @@ -74,6 +74,10 @@ struct rk_priv_data { |
||
875 | #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) |
||
876 | #define GRF_CLR_BIT(nr) (BIT(nr+16)) |
||
877 | |||
878 | +#define DELAY_ENABLE(soc, tx, rx) \ |
||
879 | + (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ |
||
880 | + ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) |
||
881 | + |
||
882 | #define RK3228_GRF_MAC_CON0 0x0900 |
||
883 | #define RK3228_GRF_MAC_CON1 0x0904 |
||
884 | |||
885 | @@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct r |
||
886 | regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, |
||
887 | RK3228_GMAC_PHY_INTF_SEL_RGMII | |
||
888 | RK3228_GMAC_RMII_MODE_CLR | |
||
889 | - RK3228_GMAC_RXCLK_DLY_ENABLE | |
||
890 | - RK3228_GMAC_TXCLK_DLY_ENABLE); |
||
891 | + DELAY_ENABLE(RK3228, tx_delay, rx_delay)); |
||
892 | |||
893 | regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0, |
||
894 | RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) | |
||
895 | @@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct r |
||
896 | RK3288_GMAC_PHY_INTF_SEL_RGMII | |
||
897 | RK3288_GMAC_RMII_MODE_CLR); |
||
898 | regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3, |
||
899 | - RK3288_GMAC_RXCLK_DLY_ENABLE | |
||
900 | - RK3288_GMAC_TXCLK_DLY_ENABLE | |
||
901 | + DELAY_ENABLE(RK3288, tx_delay, rx_delay) | |
||
902 | RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) | |
||
903 | RK3288_GMAC_CLK_TX_DL_CFG(tx_delay)); |
||
904 | } |
||
905 | @@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct r |
||
906 | RK3366_GMAC_PHY_INTF_SEL_RGMII | |
||
907 | RK3366_GMAC_RMII_MODE_CLR); |
||
908 | regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7, |
||
909 | - RK3366_GMAC_RXCLK_DLY_ENABLE | |
||
910 | - RK3366_GMAC_TXCLK_DLY_ENABLE | |
||
911 | + DELAY_ENABLE(RK3366, tx_delay, rx_delay) | |
||
912 | RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) | |
||
913 | RK3366_GMAC_CLK_TX_DL_CFG(tx_delay)); |
||
914 | } |
||
915 | @@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct r |
||
916 | RK3368_GMAC_PHY_INTF_SEL_RGMII | |
||
917 | RK3368_GMAC_RMII_MODE_CLR); |
||
918 | regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16, |
||
919 | - RK3368_GMAC_RXCLK_DLY_ENABLE | |
||
920 | - RK3368_GMAC_TXCLK_DLY_ENABLE | |
||
921 | + DELAY_ENABLE(RK3368, tx_delay, rx_delay) | |
||
922 | RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) | |
||
923 | RK3368_GMAC_CLK_TX_DL_CFG(tx_delay)); |
||
924 | } |
||
925 | @@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct r |
||
926 | RK3399_GMAC_PHY_INTF_SEL_RGMII | |
||
927 | RK3399_GMAC_RMII_MODE_CLR); |
||
928 | regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6, |
||
929 | - RK3399_GMAC_RXCLK_DLY_ENABLE | |
||
930 | - RK3399_GMAC_TXCLK_DLY_ENABLE | |
||
931 | + DELAY_ENABLE(RK3399, tx_delay, rx_delay) | |
||
932 | RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) | |
||
933 | RK3399_GMAC_CLK_TX_DL_CFG(tx_delay)); |
||
934 | } |
||
935 | @@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_pri |
||
936 | return ret; |
||
937 | |||
938 | /*rmii or rgmii*/ |
||
939 | - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) { |
||
940 | + switch (bsp_priv->phy_iface) { |
||
941 | + case PHY_INTERFACE_MODE_RGMII: |
||
942 | dev_info(dev, "init for RGMII\n"); |
||
943 | bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, |
||
944 | bsp_priv->rx_delay); |
||
945 | - } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) { |
||
946 | + break; |
||
947 | + case PHY_INTERFACE_MODE_RGMII_ID: |
||
948 | + dev_info(dev, "init for RGMII_ID\n"); |
||
949 | + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0); |
||
950 | + break; |
||
951 | + case PHY_INTERFACE_MODE_RGMII_RXID: |
||
952 | + dev_info(dev, "init for RGMII_RXID\n"); |
||
953 | + bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0); |
||
954 | + break; |
||
955 | + case PHY_INTERFACE_MODE_RGMII_TXID: |
||
956 | + dev_info(dev, "init for RGMII_TXID\n"); |
||
957 | + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay); |
||
958 | + break; |
||
959 | + case PHY_INTERFACE_MODE_RMII: |
||
960 | dev_info(dev, "init for RMII\n"); |
||
961 | bsp_priv->ops->set_to_rmii(bsp_priv); |
||
962 | - } else { |
||
963 | + break; |
||
964 | + default: |
||
965 | dev_err(dev, "NO interface defined!\n"); |
||
966 | } |
||
967 | |||
968 | @@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, uns |
||
969 | struct rk_priv_data *bsp_priv = priv; |
||
970 | struct device *dev = &bsp_priv->pdev->dev; |
||
971 | |||
972 | - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) |
||
973 | + switch (bsp_priv->phy_iface) { |
||
974 | + case PHY_INTERFACE_MODE_RGMII: |
||
975 | + case PHY_INTERFACE_MODE_RGMII_ID: |
||
976 | + case PHY_INTERFACE_MODE_RGMII_RXID: |
||
977 | + case PHY_INTERFACE_MODE_RGMII_TXID: |
||
978 | bsp_priv->ops->set_rgmii_speed(bsp_priv, speed); |
||
979 | - else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) |
||
980 | + break; |
||
981 | + case PHY_INTERFACE_MODE_RMII: |
||
982 | bsp_priv->ops->set_rmii_speed(bsp_priv, speed); |
||
983 | - else |
||
984 | + break; |
||
985 | + default: |
||
986 | dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); |
||
987 | + } |
||
988 | } |
||
989 | |||
990 | static int rk_gmac_probe(struct platform_device *pdev) |
||
991 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c |
||
992 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c |
||
993 | @@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct |
||
994 | |||
995 | |||
996 | static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, |
||
997 | - unsigned int fc, unsigned int pause_time) |
||
998 | + unsigned int fc, unsigned int pause_time, |
||
999 | + u32 tx_cnt) |
||
1000 | { |
||
1001 | void __iomem *ioaddr = hw->pcsr; |
||
1002 | /* Set flow such that DZPQ in Mac Register 6 is 0, |
||
1003 | @@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __ |
||
1004 | dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); |
||
1005 | } |
||
1006 | |||
1007 | -static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) |
||
1008 | +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, |
||
1009 | + u32 rx_queues, u32 tx_queues) |
||
1010 | { |
||
1011 | u32 value = readl(ioaddr + GMAC_DEBUG); |
||
1012 | |||
1013 | @@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem |
||
1014 | |||
1015 | static const struct stmmac_ops dwmac1000_ops = { |
||
1016 | .core_init = dwmac1000_core_init, |
||
1017 | + .set_mac = stmmac_set_mac, |
||
1018 | .rx_ipc = dwmac1000_rx_ipc_enable, |
||
1019 | .dump_regs = dwmac1000_dump_regs, |
||
1020 | .host_irq_status = dwmac1000_irq_status, |
||
1021 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c |
||
1022 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c |
||
1023 | @@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(voi |
||
1024 | dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; |
||
1025 | } |
||
1026 | |||
1027 | -static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) |
||
1028 | +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, |
||
1029 | + u32 number_chan) |
||
1030 | { |
||
1031 | writel(riwt, ioaddr + DMA_RX_WATCHDOG); |
||
1032 | } |
||
1033 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c |
||
1034 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c |
||
1035 | @@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct m |
||
1036 | } |
||
1037 | |||
1038 | static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, |
||
1039 | - unsigned int fc, unsigned int pause_time) |
||
1040 | + unsigned int fc, unsigned int pause_time, |
||
1041 | + u32 tx_cnt) |
||
1042 | { |
||
1043 | void __iomem *ioaddr = hw->pcsr; |
||
1044 | unsigned int flow = MAC_FLOW_CTRL_ENABLE; |
||
1045 | @@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_devi |
||
1046 | |||
1047 | static const struct stmmac_ops dwmac100_ops = { |
||
1048 | .core_init = dwmac100_core_init, |
||
1049 | + .set_mac = stmmac_set_mac, |
||
1050 | .rx_ipc = dwmac100_rx_ipc_enable, |
||
1051 | .dump_regs = dwmac100_dump_mac_regs, |
||
1052 | .host_irq_status = dwmac100_irq_status, |
||
1053 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h |
||
1054 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h |
||
1055 | @@ -22,9 +22,15 @@ |
||
1056 | #define GMAC_HASH_TAB_32_63 0x00000014 |
||
1057 | #define GMAC_RX_FLOW_CTRL 0x00000090 |
||
1058 | #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) |
||
1059 | +#define GMAC_TXQ_PRTY_MAP0 0x98 |
||
1060 | +#define GMAC_TXQ_PRTY_MAP1 0x9C |
||
1061 | #define GMAC_RXQ_CTRL0 0x000000a0 |
||
1062 | +#define GMAC_RXQ_CTRL1 0x000000a4 |
||
1063 | +#define GMAC_RXQ_CTRL2 0x000000a8 |
||
1064 | +#define GMAC_RXQ_CTRL3 0x000000ac |
||
1065 | #define GMAC_INT_STATUS 0x000000b0 |
||
1066 | #define GMAC_INT_EN 0x000000b4 |
||
1067 | +#define GMAC_1US_TIC_COUNTER 0x000000dc |
||
1068 | #define GMAC_PCS_BASE 0x000000e0 |
||
1069 | #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 |
||
1070 | #define GMAC_PMT 0x000000c0 |
||
1071 | @@ -38,6 +44,22 @@ |
||
1072 | #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) |
||
1073 | #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) |
||
1074 | |||
1075 | +/* RX Queues Routing */ |
||
1076 | +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) |
||
1077 | +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0 |
||
1078 | +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4) |
||
1079 | +#define GMAC_RXQCTRL_PTPQ_SHIFT 4 |
||
1080 | +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8) |
||
1081 | +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8 |
||
1082 | +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12) |
||
1083 | +#define GMAC_RXQCTRL_UPQ_SHIFT 12 |
||
1084 | +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16) |
||
1085 | +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16 |
||
1086 | +#define GMAC_RXQCTRL_MCBCQEN BIT(20) |
||
1087 | +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20 |
||
1088 | +#define GMAC_RXQCTRL_TACPQE BIT(21) |
||
1089 | +#define GMAC_RXQCTRL_TACPQE_SHIFT 21 |
||
1090 | + |
||
1091 | /* MAC Packet Filtering */ |
||
1092 | #define GMAC_PACKET_FILTER_PR BIT(0) |
||
1093 | #define GMAC_PACKET_FILTER_HMC BIT(2) |
||
1094 | @@ -53,6 +75,14 @@ |
||
1095 | /* MAC Flow Control RX */ |
||
1096 | #define GMAC_RX_FLOW_CTRL_RFE BIT(0) |
||
1097 | |||
1098 | +/* RX Queues Priorities */ |
||
1099 | +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) |
||
1100 | +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8) |
||
1101 | + |
||
1102 | +/* TX Queues Priorities */ |
||
1103 | +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) |
||
1104 | +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8) |
||
1105 | + |
||
1106 | /* MAC Flow Control TX */ |
||
1107 | #define GMAC_TX_FLOW_CTRL_TFE BIT(1) |
||
1108 | #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 |
||
1109 | @@ -148,6 +178,8 @@ enum power_event { |
||
1110 | /* MAC HW features1 bitmap */ |
||
1111 | #define GMAC_HW_FEAT_AVSEL BIT(20) |
||
1112 | #define GMAC_HW_TSOEN BIT(18) |
||
1113 | +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) |
||
1114 | +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) |
||
1115 | |||
1116 | /* MAC HW features2 bitmap */ |
||
1117 | #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) |
||
1118 | @@ -161,8 +193,25 @@ enum power_event { |
||
1119 | #define GMAC_HI_REG_AE BIT(31) |
||
1120 | |||
1121 | /* MTL registers */ |
||
1122 | +#define MTL_OPERATION_MODE 0x00000c00 |
||
1123 | +#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) |
||
1124 | +#define MTL_OPERATION_SCHALG_WRR (0x0 << 5) |
||
1125 | +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) |
||
1126 | +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5) |
||
1127 | +#define MTL_OPERATION_SCHALG_SP (0x3 << 5) |
||
1128 | +#define MTL_OPERATION_RAA BIT(2) |
||
1129 | +#define MTL_OPERATION_RAA_SP (0x0 << 2) |
||
1130 | +#define MTL_OPERATION_RAA_WSP (0x1 << 2) |
||
1131 | + |
||
1132 | #define MTL_INT_STATUS 0x00000c20 |
||
1133 | -#define MTL_INT_Q0 BIT(0) |
||
1134 | +#define MTL_INT_QX(x) BIT(x) |
||
1135 | + |
||
1136 | +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */ |
||
1137 | +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */ |
||
1138 | +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0) |
||
1139 | +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0) |
||
1140 | +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x)) |
||
1141 | +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q))) |
||
1142 | |||
1143 | #define MTL_CHAN_BASE_ADDR 0x00000d00 |
||
1144 | #define MTL_CHAN_BASE_OFFSET 0x40 |
||
1145 | @@ -180,6 +229,7 @@ enum power_event { |
||
1146 | #define MTL_OP_MODE_TSF BIT(1) |
||
1147 | |||
1148 | #define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) |
||
1149 | +#define MTL_OP_MODE_TQS_SHIFT 16 |
||
1150 | |||
1151 | #define MTL_OP_MODE_TTC_MASK 0x70 |
||
1152 | #define MTL_OP_MODE_TTC_SHIFT 4 |
||
1153 | @@ -193,6 +243,17 @@ enum power_event { |
||
1154 | #define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) |
||
1155 | #define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) |
||
1156 | |||
1157 | +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20) |
||
1158 | +#define MTL_OP_MODE_RQS_SHIFT 20 |
||
1159 | + |
||
1160 | +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14) |
||
1161 | +#define MTL_OP_MODE_RFD_SHIFT 14 |
||
1162 | + |
||
1163 | +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8) |
||
1164 | +#define MTL_OP_MODE_RFA_SHIFT 8 |
||
1165 | + |
||
1166 | +#define MTL_OP_MODE_EHFC BIT(7) |
||
1167 | + |
||
1168 | #define MTL_OP_MODE_RTC_MASK 0x18 |
||
1169 | #define MTL_OP_MODE_RTC_SHIFT 3 |
||
1170 | |||
1171 | @@ -201,6 +262,46 @@ enum power_event { |
||
1172 | #define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) |
||
1173 | #define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) |
||
1174 | |||
1175 | +/* MTL ETS Control register */ |
||
1176 | +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10 |
||
1177 | +#define MTL_ETS_CTRL_BASE_OFFSET 0x40 |
||
1178 | +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \ |
||
1179 | + ((x) * MTL_ETS_CTRL_BASE_OFFSET)) |
||
1180 | + |
||
1181 | +#define MTL_ETS_CTRL_CC BIT(3) |
||
1182 | +#define MTL_ETS_CTRL_AVALG BIT(2) |
||
1183 | + |
||
1184 | +/* MTL Queue Quantum Weight */ |
||
1185 | +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18 |
||
1186 | +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40 |
||
1187 | +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \ |
||
1188 | + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET)) |
||
1189 | +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0) |
||
1190 | + |
||
1191 | +/* MTL sendSlopeCredit register */ |
||
1192 | +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c |
||
1193 | +#define MTL_SEND_SLP_CRED_OFFSET 0x40 |
||
1194 | +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \ |
||
1195 | + ((x) * MTL_SEND_SLP_CRED_OFFSET)) |
||
1196 | + |
||
1197 | +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0) |
||
1198 | + |
||
1199 | +/* MTL hiCredit register */ |
||
1200 | +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20 |
||
1201 | +#define MTL_HIGH_CRED_OFFSET 0x40 |
||
1202 | +#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \ |
||
1203 | + ((x) * MTL_HIGH_CRED_OFFSET)) |
||
1204 | + |
||
1205 | +#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0) |
||
1206 | + |
||
1207 | +/* MTL loCredit register */ |
||
1208 | +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24 |
||
1209 | +#define MTL_LOW_CRED_OFFSET 0x40 |
||
1210 | +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \ |
||
1211 | + ((x) * MTL_LOW_CRED_OFFSET)) |
||
1212 | + |
||
1213 | +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0) |
||
1214 | + |
||
1215 | /* MTL debug */ |
||
1216 | #define MTL_DEBUG_TXSTSFSTS BIT(5) |
||
1217 | #define MTL_DEBUG_TXFSTS BIT(4) |
||
1218 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
||
1219 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
||
1220 | @@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_ |
||
1221 | writel(value, ioaddr + GMAC_INT_EN); |
||
1222 | } |
||
1223 | |||
1224 | -static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) |
||
1225 | +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, |
||
1226 | + u8 mode, u32 queue) |
||
1227 | { |
||
1228 | void __iomem *ioaddr = hw->pcsr; |
||
1229 | u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); |
||
1230 | |||
1231 | value &= GMAC_RX_QUEUE_CLEAR(queue); |
||
1232 | - value |= GMAC_RX_AV_QUEUE_ENABLE(queue); |
||
1233 | + if (mode == MTL_QUEUE_AVB) |
||
1234 | + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); |
||
1235 | + else if (mode == MTL_QUEUE_DCB) |
||
1236 | + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); |
||
1237 | |||
1238 | writel(value, ioaddr + GMAC_RXQ_CTRL0); |
||
1239 | } |
||
1240 | |||
1241 | +static void dwmac4_rx_queue_priority(struct mac_device_info *hw, |
||
1242 | + u32 prio, u32 queue) |
||
1243 | +{ |
||
1244 | + void __iomem *ioaddr = hw->pcsr; |
||
1245 | + u32 base_register; |
||
1246 | + u32 value; |
||
1247 | + |
||
1248 | + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; |
||
1249 | + |
||
1250 | + value = readl(ioaddr + base_register); |
||
1251 | + |
||
1252 | + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue); |
||
1253 | + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & |
||
1254 | + GMAC_RXQCTRL_PSRQX_MASK(queue); |
||
1255 | + writel(value, ioaddr + base_register); |
||
1256 | +} |
||
1257 | + |
||
1258 | +static void dwmac4_tx_queue_priority(struct mac_device_info *hw, |
||
1259 | + u32 prio, u32 queue) |
||
1260 | +{ |
||
1261 | + void __iomem *ioaddr = hw->pcsr; |
||
1262 | + u32 base_register; |
||
1263 | + u32 value; |
||
1264 | + |
||
1265 | + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; |
||
1266 | + |
||
1267 | + value = readl(ioaddr + base_register); |
||
1268 | + |
||
1269 | + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue); |
||
1270 | + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) & |
||
1271 | + GMAC_TXQCTRL_PSTQX_MASK(queue); |
||
1272 | + |
||
1273 | + writel(value, ioaddr + base_register); |
||
1274 | +} |
||
1275 | + |
||
1276 | +static void dwmac4_tx_queue_routing(struct mac_device_info *hw, |
||
1277 | + u8 packet, u32 queue) |
||
1278 | +{ |
||
1279 | + void __iomem *ioaddr = hw->pcsr; |
||
1280 | + u32 value; |
||
1281 | + |
||
1282 | + const struct stmmac_rx_routing route_possibilities[] = { |
||
1283 | + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, |
||
1284 | + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, |
||
1285 | + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, |
||
1286 | + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT }, |
||
1287 | + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT }, |
||
1288 | + }; |
||
1289 | + |
||
1290 | + value = readl(ioaddr + GMAC_RXQ_CTRL1); |
||
1291 | + |
||
1292 | + /* routing configuration */ |
||
1293 | + value &= ~route_possibilities[packet - 1].reg_mask; |
||
1294 | + value |= (queue << route_possibilities[packet-1].reg_shift) & |
||
1295 | + route_possibilities[packet - 1].reg_mask; |
||
1296 | + |
||
1297 | + /* some packets require extra ops */ |
||
1298 | + if (packet == PACKET_AVCPQ) { |
||
1299 | + value &= ~GMAC_RXQCTRL_TACPQE; |
||
1300 | + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT; |
||
1301 | + } else if (packet == PACKET_MCBCQ) { |
||
1302 | + value &= ~GMAC_RXQCTRL_MCBCQEN; |
||
1303 | + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT; |
||
1304 | + } |
||
1305 | + |
||
1306 | + writel(value, ioaddr + GMAC_RXQ_CTRL1); |
||
1307 | +} |
||
1308 | + |
||
1309 | +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw, |
||
1310 | + u32 rx_alg) |
||
1311 | +{ |
||
1312 | + void __iomem *ioaddr = hw->pcsr; |
||
1313 | + u32 value = readl(ioaddr + MTL_OPERATION_MODE); |
||
1314 | + |
||
1315 | + value &= ~MTL_OPERATION_RAA; |
||
1316 | + switch (rx_alg) { |
||
1317 | + case MTL_RX_ALGORITHM_SP: |
||
1318 | + value |= MTL_OPERATION_RAA_SP; |
||
1319 | + break; |
||
1320 | + case MTL_RX_ALGORITHM_WSP: |
||
1321 | + value |= MTL_OPERATION_RAA_WSP; |
||
1322 | + break; |
||
1323 | + default: |
||
1324 | + break; |
||
1325 | + } |
||
1326 | + |
||
1327 | + writel(value, ioaddr + MTL_OPERATION_MODE); |
||
1328 | +} |
||
1329 | + |
||
1330 | +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw, |
||
1331 | + u32 tx_alg) |
||
1332 | +{ |
||
1333 | + void __iomem *ioaddr = hw->pcsr; |
||
1334 | + u32 value = readl(ioaddr + MTL_OPERATION_MODE); |
||
1335 | + |
||
1336 | + value &= ~MTL_OPERATION_SCHALG_MASK; |
||
1337 | + switch (tx_alg) { |
||
1338 | + case MTL_TX_ALGORITHM_WRR: |
||
1339 | + value |= MTL_OPERATION_SCHALG_WRR; |
||
1340 | + break; |
||
1341 | + case MTL_TX_ALGORITHM_WFQ: |
||
1342 | + value |= MTL_OPERATION_SCHALG_WFQ; |
||
1343 | + break; |
||
1344 | + case MTL_TX_ALGORITHM_DWRR: |
||
1345 | + value |= MTL_OPERATION_SCHALG_DWRR; |
||
1346 | + break; |
||
1347 | + case MTL_TX_ALGORITHM_SP: |
||
1348 | + value |= MTL_OPERATION_SCHALG_SP; |
||
1349 | + break; |
||
1350 | + default: |
||
1351 | + break; |
||
1352 | + } |
||
| + |
||
| + writel(value, ioaddr + MTL_OPERATION_MODE); |
||
1353 | +} |
||
1354 | + |
||
1355 | +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw, |
||
1356 | + u32 weight, u32 queue) |
||
1357 | +{ |
||
1358 | + void __iomem *ioaddr = hw->pcsr; |
||
1359 | + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); |
||
1360 | + |
||
1361 | + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK; |
||
1362 | + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK; |
||
1363 | + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); |
||
1364 | +} |
||
1365 | + |
||
1366 | +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) |
||
1367 | +{ |
||
1368 | + void __iomem *ioaddr = hw->pcsr; |
||
1369 | + u32 value; |
||
1370 | + |
||
1371 | + if (queue < 4) |
||
1372 | + value = readl(ioaddr + MTL_RXQ_DMA_MAP0); |
||
1373 | + else |
||
1374 | + value = readl(ioaddr + MTL_RXQ_DMA_MAP1); |
||
1375 | + |
||
1376 | + if (queue == 0 || queue == 4) { |
||
1377 | + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; |
||
1378 | + value |= MTL_RXQ_DMA_Q04MDMACH(chan); |
||
1379 | + } else { |
||
1380 | + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); |
||
1381 | + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); |
||
1382 | + } |
||
1383 | + |
||
1384 | + if (queue < 4) |
||
1385 | + writel(value, ioaddr + MTL_RXQ_DMA_MAP0); |
||
1386 | + else |
||
1387 | + writel(value, ioaddr + MTL_RXQ_DMA_MAP1); |
||
1388 | +} |
||
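dwmac4_map_mtl_dma() is exported through the .map_mtl_to_dma hook added to the ops tables further down. A hedged sketch of the most common call pattern, a 1:1 queue-to-channel mapping driven by the per-queue channel selection from the device tree, is shown below; the loop and its placement in the core driver are assumptions, only the hook name comes from this patch.

/* Sketch of a 1:1 RX queue to DMA channel mapping (illustrative only). */
u32 queue;

for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++)
	priv->hw->mac->map_mtl_to_dma(priv->hw, queue, queue);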
1389 | + |
||
1390 | +static void dwmac4_config_cbs(struct mac_device_info *hw, |
||
1391 | + u32 send_slope, u32 idle_slope, |
||
1392 | + u32 high_credit, u32 low_credit, u32 queue) |
||
1393 | +{ |
||
1394 | + void __iomem *ioaddr = hw->pcsr; |
||
1395 | + u32 value; |
||
1396 | + |
||
1397 | + pr_debug("Queue %d configured as AVB. Parameters:\n", queue); |
||
1398 | + pr_debug("\tsend_slope: 0x%08x\n", send_slope); |
||
1399 | + pr_debug("\tidle_slope: 0x%08x\n", idle_slope); |
||
1400 | + pr_debug("\thigh_credit: 0x%08x\n", high_credit); |
||
1401 | + pr_debug("\tlow_credit: 0x%08x\n", low_credit); |
||
1402 | + |
||
1403 | + /* enable AV algorithm */ |
||
1404 | + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); |
||
1405 | + value |= MTL_ETS_CTRL_AVALG; |
||
1406 | + value |= MTL_ETS_CTRL_CC; |
||
1407 | + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); |
||
1408 | + |
||
1409 | + /* configure send slope */ |
||
1410 | + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); |
||
1411 | + value &= ~MTL_SEND_SLP_CRED_SSC_MASK; |
||
1412 | + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK; |
||
1413 | + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); |
||
1414 | + |
||
1415 | + /* configure idle slope (same register as tx weight) */ |
||
1416 | + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue); |
||
1417 | + |
||
1418 | + /* configure high credit */ |
||
1419 | + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); |
||
1420 | + value &= ~MTL_HIGH_CRED_HC_MASK; |
||
1421 | + value |= high_credit & MTL_HIGH_CRED_HC_MASK; |
||
1422 | + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); |
||
1423 | + |
||
1424 | + /* configure low credit */ |
||
1425 | + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); |
||
1426 | + value &= ~MTL_HIGH_CRED_LC_MASK; |
||
1427 | + value |= low_credit & MTL_HIGH_CRED_LC_MASK; |
||
1428 | + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); |
||
1429 | +} |
||
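The CBS helper above is wired up as the .config_cbs hook in the ops tables below. A hedged sketch of a call site follows: the slope and credit values would normally be the per-queue CBS parameters parsed from the device tree, and the literal numbers here are placeholders, not recommended settings.

/* Illustrative only: enable CBS on TX queue 1 with placeholder values. */
priv->hw->mac->config_cbs(priv->hw,
			  0x1000 /* send_slope */, 0x1000 /* idle_slope */,
			  0x3000 /* high_credit */, 0xfff0 /* low_credit */,
			  1 /* queue */);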
1430 | + |
||
1431 | static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) |
||
1432 | { |
||
1433 | void __iomem *ioaddr = hw->pcsr; |
||
1434 | @@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac |
||
1435 | } |
||
1436 | |||
1437 | static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, |
||
1438 | - unsigned int fc, unsigned int pause_time) |
||
1439 | + unsigned int fc, unsigned int pause_time, |
||
1440 | + u32 tx_cnt) |
||
1441 | { |
||
1442 | void __iomem *ioaddr = hw->pcsr; |
||
1443 | - u32 channel = STMMAC_CHAN0; /* FIXME */ |
||
1444 | unsigned int flow = 0; |
||
1445 | + u32 queue = 0; |
||
1446 | |||
1447 | pr_debug("GMAC Flow-Control:\n"); |
||
1448 | if (fc & FLOW_RX) { |
||
1449 | @@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_ |
||
1450 | } |
||
1451 | if (fc & FLOW_TX) { |
||
1452 | pr_debug("\tTransmit Flow-Control ON\n"); |
||
1453 | - flow |= GMAC_TX_FLOW_CTRL_TFE; |
||
1454 | - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); |
||
1455 | |||
1456 | - if (duplex) { |
||
1457 | + if (duplex) |
||
1458 | pr_debug("\tduplex mode: PAUSE %d\n", pause_time); |
||
1459 | - flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); |
||
1460 | - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); |
||
1461 | + |
||
1462 | + for (queue = 0; queue < tx_cnt; queue++) { |
||
1463 | + flow |= GMAC_TX_FLOW_CTRL_TFE; |
||
1464 | + |
||
1465 | + if (duplex) |
||
1466 | + flow |= |
||
1467 | + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); |
||
1468 | + |
||
1469 | + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); |
||
1470 | } |
||
1471 | } |
||
1472 | } |
||
1473 | @@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iome |
||
1474 | } |
||
1475 | } |
||
1476 | |||
1477 | +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan) |
||
1478 | +{ |
||
1479 | + void __iomem *ioaddr = hw->pcsr; |
||
1480 | + u32 mtl_int_qx_status; |
||
1481 | + int ret = 0; |
||
1482 | + |
||
1483 | + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); |
||
1484 | + |
||
1485 | + /* Check MTL Interrupt */ |
||
1486 | + if (mtl_int_qx_status & MTL_INT_QX(chan)) { |
||
1487 | + /* read Queue x Interrupt status */ |
||
1488 | + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan)); |
||
1489 | + |
||
1490 | + if (status & MTL_RX_OVERFLOW_INT) { |
||
1491 | + /* clear Interrupt */ |
||
1492 | + writel(status | MTL_RX_OVERFLOW_INT, |
||
1493 | + ioaddr + MTL_CHAN_INT_CTRL(chan)); |
||
1494 | + ret = CORE_IRQ_MTL_RX_OVERFLOW; |
||
1495 | + } |
||
1496 | + } |
||
1497 | + |
||
1498 | + return ret; |
||
1499 | +} |
||
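dwmac4_irq_mtl_status() is the per-channel replacement for the single-queue check removed from dwmac4_irq_status() below, and it is exposed as .host_mtl_irq_status. A sketch of how an interrupt path could walk the channels follows; the loop itself is an assumption, only the hook name and the CORE_IRQ_MTL_RX_OVERFLOW return value come from this patch.

/* Illustrative dispatch loop (not part of this patch). */
bool overflow_seen = false;
u32 chan;

for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) {
	int mtl_status = priv->hw->mac->host_mtl_irq_status(priv->hw, chan);

	if (mtl_status & CORE_IRQ_MTL_RX_OVERFLOW)
		overflow_seen = true;	/* the real handler refreshes the RX ring */
}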
1500 | + |
||
1501 | static int dwmac4_irq_status(struct mac_device_info *hw, |
||
1502 | struct stmmac_extra_stats *x) |
||
1503 | { |
||
1504 | void __iomem *ioaddr = hw->pcsr; |
||
1505 | - u32 mtl_int_qx_status; |
||
1506 | u32 intr_status; |
||
1507 | int ret = 0; |
||
1508 | |||
1509 | @@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_ |
||
1510 | x->irq_receive_pmt_irq_n++; |
||
1511 | } |
||
1512 | |||
1513 | - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); |
||
1514 | - /* Check MTL Interrupt: Currently only one queue is used: Q0. */ |
||
1515 | - if (mtl_int_qx_status & MTL_INT_Q0) { |
||
1516 | - /* read Queue 0 Interrupt status */ |
||
1517 | - u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); |
||
1518 | - |
||
1519 | - if (status & MTL_RX_OVERFLOW_INT) { |
||
1520 | - /* clear Interrupt */ |
||
1521 | - writel(status | MTL_RX_OVERFLOW_INT, |
||
1522 | - ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); |
||
1523 | - ret = CORE_IRQ_MTL_RX_OVERFLOW; |
||
1524 | - } |
||
1525 | - } |
||
1526 | - |
||
1527 | dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); |
||
1528 | if (intr_status & PCS_RGSMIIIS_IRQ) |
||
1529 | dwmac4_phystatus(ioaddr, x); |
||
1530 | @@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_ |
||
1531 | return ret; |
||
1532 | } |
||
1533 | |||
1534 | -static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) |
||
1535 | +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, |
||
1536 | + u32 rx_queues, u32 tx_queues) |
||
1537 | { |
||
1538 | u32 value; |
||
1539 | + u32 queue; |
||
1540 | |||
1541 | - /* Currently only channel 0 is supported */ |
||
1542 | - value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0)); |
||
1543 | + for (queue = 0; queue < tx_queues; queue++) { |
||
1544 | + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue)); |
||
1545 | |||
1546 | - if (value & MTL_DEBUG_TXSTSFSTS) |
||
1547 | - x->mtl_tx_status_fifo_full++; |
||
1548 | - if (value & MTL_DEBUG_TXFSTS) |
||
1549 | - x->mtl_tx_fifo_not_empty++; |
||
1550 | - if (value & MTL_DEBUG_TWCSTS) |
||
1551 | - x->mmtl_fifo_ctrl++; |
||
1552 | - if (value & MTL_DEBUG_TRCSTS_MASK) { |
||
1553 | - u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) |
||
1554 | - >> MTL_DEBUG_TRCSTS_SHIFT; |
||
1555 | - if (trcsts == MTL_DEBUG_TRCSTS_WRITE) |
||
1556 | - x->mtl_tx_fifo_read_ctrl_write++; |
||
1557 | - else if (trcsts == MTL_DEBUG_TRCSTS_TXW) |
||
1558 | - x->mtl_tx_fifo_read_ctrl_wait++; |
||
1559 | - else if (trcsts == MTL_DEBUG_TRCSTS_READ) |
||
1560 | - x->mtl_tx_fifo_read_ctrl_read++; |
||
1561 | - else |
||
1562 | - x->mtl_tx_fifo_read_ctrl_idle++; |
||
1563 | + if (value & MTL_DEBUG_TXSTSFSTS) |
||
1564 | + x->mtl_tx_status_fifo_full++; |
||
1565 | + if (value & MTL_DEBUG_TXFSTS) |
||
1566 | + x->mtl_tx_fifo_not_empty++; |
||
1567 | + if (value & MTL_DEBUG_TWCSTS) |
||
1568 | + x->mmtl_fifo_ctrl++; |
||
1569 | + if (value & MTL_DEBUG_TRCSTS_MASK) { |
||
1570 | + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) |
||
1571 | + >> MTL_DEBUG_TRCSTS_SHIFT; |
||
1572 | + if (trcsts == MTL_DEBUG_TRCSTS_WRITE) |
||
1573 | + x->mtl_tx_fifo_read_ctrl_write++; |
||
1574 | + else if (trcsts == MTL_DEBUG_TRCSTS_TXW) |
||
1575 | + x->mtl_tx_fifo_read_ctrl_wait++; |
||
1576 | + else if (trcsts == MTL_DEBUG_TRCSTS_READ) |
||
1577 | + x->mtl_tx_fifo_read_ctrl_read++; |
||
1578 | + else |
||
1579 | + x->mtl_tx_fifo_read_ctrl_idle++; |
||
1580 | + } |
||
1581 | + if (value & MTL_DEBUG_TXPAUSED) |
||
1582 | + x->mac_tx_in_pause++; |
||
1583 | } |
||
1584 | - if (value & MTL_DEBUG_TXPAUSED) |
||
1585 | - x->mac_tx_in_pause++; |
||
1586 | |||
1587 | - value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0)); |
||
1588 | + for (queue = 0; queue < rx_queues; queue++) { |
||
1589 | + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue)); |
||
1590 | |||
1591 | - if (value & MTL_DEBUG_RXFSTS_MASK) { |
||
1592 | - u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) |
||
1593 | - >> MTL_DEBUG_RRCSTS_SHIFT; |
||
1594 | - |
||
1595 | - if (rxfsts == MTL_DEBUG_RXFSTS_FULL) |
||
1596 | - x->mtl_rx_fifo_fill_level_full++; |
||
1597 | - else if (rxfsts == MTL_DEBUG_RXFSTS_AT) |
||
1598 | - x->mtl_rx_fifo_fill_above_thresh++; |
||
1599 | - else if (rxfsts == MTL_DEBUG_RXFSTS_BT) |
||
1600 | - x->mtl_rx_fifo_fill_below_thresh++; |
||
1601 | - else |
||
1602 | - x->mtl_rx_fifo_fill_level_empty++; |
||
1603 | - } |
||
1604 | - if (value & MTL_DEBUG_RRCSTS_MASK) { |
||
1605 | - u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> |
||
1606 | - MTL_DEBUG_RRCSTS_SHIFT; |
||
1607 | - |
||
1608 | - if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) |
||
1609 | - x->mtl_rx_fifo_read_ctrl_flush++; |
||
1610 | - else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) |
||
1611 | - x->mtl_rx_fifo_read_ctrl_read_data++; |
||
1612 | - else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) |
||
1613 | - x->mtl_rx_fifo_read_ctrl_status++; |
||
1614 | - else |
||
1615 | - x->mtl_rx_fifo_read_ctrl_idle++; |
||
1616 | + if (value & MTL_DEBUG_RXFSTS_MASK) { |
||
1617 | + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) |
||
1618 | + >> MTL_DEBUG_RRCSTS_SHIFT; |
||
1619 | + |
||
1620 | + if (rxfsts == MTL_DEBUG_RXFSTS_FULL) |
||
1621 | + x->mtl_rx_fifo_fill_level_full++; |
||
1622 | + else if (rxfsts == MTL_DEBUG_RXFSTS_AT) |
||
1623 | + x->mtl_rx_fifo_fill_above_thresh++; |
||
1624 | + else if (rxfsts == MTL_DEBUG_RXFSTS_BT) |
||
1625 | + x->mtl_rx_fifo_fill_below_thresh++; |
||
1626 | + else |
||
1627 | + x->mtl_rx_fifo_fill_level_empty++; |
||
1628 | + } |
||
1629 | + if (value & MTL_DEBUG_RRCSTS_MASK) { |
||
1630 | + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> |
||
1631 | + MTL_DEBUG_RRCSTS_SHIFT; |
||
1632 | + |
||
1633 | + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) |
||
1634 | + x->mtl_rx_fifo_read_ctrl_flush++; |
||
1635 | + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) |
||
1636 | + x->mtl_rx_fifo_read_ctrl_read_data++; |
||
1637 | + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) |
||
1638 | + x->mtl_rx_fifo_read_ctrl_status++; |
||
1639 | + else |
||
1640 | + x->mtl_rx_fifo_read_ctrl_idle++; |
||
1641 | + } |
||
1642 | + if (value & MTL_DEBUG_RWCSTS) |
||
1643 | + x->mtl_rx_fifo_ctrl_active++; |
||
1644 | } |
||
1645 | - if (value & MTL_DEBUG_RWCSTS) |
||
1646 | - x->mtl_rx_fifo_ctrl_active++; |
||
1647 | |||
1648 | /* GMAC debug */ |
||
1649 | value = readl(ioaddr + GMAC_DEBUG); |
||
1650 | @@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *i |
||
1651 | |||
1652 | static const struct stmmac_ops dwmac4_ops = { |
||
1653 | .core_init = dwmac4_core_init, |
||
1654 | + .set_mac = stmmac_set_mac, |
||
1655 | .rx_ipc = dwmac4_rx_ipc_enable, |
||
1656 | .rx_queue_enable = dwmac4_rx_queue_enable, |
||
1657 | + .rx_queue_prio = dwmac4_rx_queue_priority, |
||
1658 | + .tx_queue_prio = dwmac4_tx_queue_priority, |
||
1659 | + .rx_queue_routing = dwmac4_tx_queue_routing, |
||
1660 | + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, |
||
1661 | + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, |
||
1662 | + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, |
||
1663 | + .map_mtl_to_dma = dwmac4_map_mtl_dma, |
||
1664 | + .config_cbs = dwmac4_config_cbs, |
||
1665 | .dump_regs = dwmac4_dump_regs, |
||
1666 | .host_irq_status = dwmac4_irq_status, |
||
1667 | + .host_mtl_irq_status = dwmac4_irq_mtl_status, |
||
1668 | + .flow_ctrl = dwmac4_flow_ctrl, |
||
1669 | + .pmt = dwmac4_pmt, |
||
1670 | + .set_umac_addr = dwmac4_set_umac_addr, |
||
1671 | + .get_umac_addr = dwmac4_get_umac_addr, |
||
1672 | + .set_eee_mode = dwmac4_set_eee_mode, |
||
1673 | + .reset_eee_mode = dwmac4_reset_eee_mode, |
||
1674 | + .set_eee_timer = dwmac4_set_eee_timer, |
||
1675 | + .set_eee_pls = dwmac4_set_eee_pls, |
||
1676 | + .pcs_ctrl_ane = dwmac4_ctrl_ane, |
||
1677 | + .pcs_rane = dwmac4_rane, |
||
1678 | + .pcs_get_adv_lp = dwmac4_get_adv_lp, |
||
1679 | + .debug = dwmac4_debug, |
||
1680 | + .set_filter = dwmac4_set_filter, |
||
1681 | +}; |
||
1682 | + |
||
1683 | +static const struct stmmac_ops dwmac410_ops = { |
||
1684 | + .core_init = dwmac4_core_init, |
||
1685 | + .set_mac = stmmac_dwmac4_set_mac, |
||
1686 | + .rx_ipc = dwmac4_rx_ipc_enable, |
||
1687 | + .rx_queue_enable = dwmac4_rx_queue_enable, |
||
1688 | + .rx_queue_prio = dwmac4_rx_queue_priority, |
||
1689 | + .tx_queue_prio = dwmac4_tx_queue_priority, |
||
1690 | + .rx_queue_routing = dwmac4_tx_queue_routing, |
||
1691 | + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, |
||
1692 | + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, |
||
1693 | + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, |
||
1694 | + .map_mtl_to_dma = dwmac4_map_mtl_dma, |
||
1695 | + .config_cbs = dwmac4_config_cbs, |
||
1696 | + .dump_regs = dwmac4_dump_regs, |
||
1697 | + .host_irq_status = dwmac4_irq_status, |
||
1698 | + .host_mtl_irq_status = dwmac4_irq_mtl_status, |
||
1699 | .flow_ctrl = dwmac4_flow_ctrl, |
||
1700 | .pmt = dwmac4_pmt, |
||
1701 | .set_umac_addr = dwmac4_set_umac_addr, |
||
1702 | @@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(voi |
||
1703 | if (mac->multicast_filter_bins) |
||
1704 | mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); |
||
1705 | |||
1706 | - mac->mac = &dwmac4_ops; |
||
1707 | - |
||
1708 | mac->link.port = GMAC_CONFIG_PS; |
||
1709 | mac->link.duplex = GMAC_CONFIG_DM; |
||
1710 | mac->link.speed = GMAC_CONFIG_FES; |
||
1711 | @@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(voi |
||
1712 | else |
||
1713 | mac->dma = &dwmac4_dma_ops; |
||
1714 | |||
1715 | + if (*synopsys_id >= DWMAC_CORE_4_00) |
||
1716 | + mac->mac = &dwmac410_ops; |
||
1717 | + else |
||
1718 | + mac->mac = &dwmac4_ops; |
||
1719 | + |
||
1720 | return mac; |
||
1721 | } |
||
1722 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
||
1723 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
||
1724 | @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestam |
||
1725 | { |
||
1726 | /* Context type from W/B descriptor must be zero */ |
||
1727 | if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) |
||
1728 | - return -EINVAL; |
||
1729 | + return 0; |
||
1730 | |||
1731 | /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ |
||
1732 | if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) |
||
1733 | - return 0; |
||
1734 | + return 1; |
||
1735 | |||
1736 | - return 1; |
||
1737 | + return 0; |
||
1738 | } |
||
1739 | |||
1740 | static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) |
||
1741 | @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestam |
||
1742 | } |
||
1743 | } |
||
1744 | exit: |
||
1745 | - return ret; |
||
1746 | + if (likely(ret == 0)) |
||
1747 | + return 1; |
||
1748 | + |
||
1749 | + return 0; |
||
1750 | } |
||
1751 | |||
1752 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
||
1753 | @@ -304,12 +307,13 @@ static void dwmac4_rd_init_tx_desc(struc |
||
1754 | |||
1755 | static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, |
||
1756 | bool csum_flag, int mode, bool tx_own, |
||
1757 | - bool ls) |
||
1758 | + bool ls, unsigned int tot_pkt_len) |
||
1759 | { |
||
1760 | unsigned int tdes3 = le32_to_cpu(p->des3); |
||
1761 | |||
1762 | p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK); |
||
1763 | |||
1764 | + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK; |
||
1765 | if (is_fs) |
||
1766 | tdes3 |= TDES3_FIRST_DESCRIPTOR; |
||
1767 | else |
||
1768 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c |
||
1769 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c |
||
1770 | @@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem |
||
1771 | writel(value, ioaddr + DMA_SYS_BUS_MODE); |
||
1772 | } |
||
1773 | |||
1774 | -static void dwmac4_dma_init_channel(void __iomem *ioaddr, |
||
1775 | - struct stmmac_dma_cfg *dma_cfg, |
||
1776 | - u32 dma_tx_phy, u32 dma_rx_phy, |
||
1777 | - u32 channel) |
||
1778 | +void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, |
||
1779 | + struct stmmac_dma_cfg *dma_cfg, |
||
1780 | + u32 dma_rx_phy, u32 chan) |
||
1781 | { |
||
1782 | u32 value; |
||
1783 | - int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; |
||
1784 | - int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; |
||
1785 | + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; |
||
1786 | |||
1787 | - /* set PBL for each channels. Currently we affect same configuration |
||
1788 | - * on each channel |
||
1789 | - */ |
||
1790 | - value = readl(ioaddr + DMA_CHAN_CONTROL(channel)); |
||
1791 | - if (dma_cfg->pblx8) |
||
1792 | - value = value | DMA_BUS_MODE_PBL; |
||
1793 | - writel(value, ioaddr + DMA_CHAN_CONTROL(channel)); |
||
1794 | + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); |
||
1795 | + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); |
||
1796 | + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); |
||
1797 | + |
||
1798 | + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); |
||
1799 | +} |
||
1800 | |||
1801 | - value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); |
||
1802 | +void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, |
||
1803 | + struct stmmac_dma_cfg *dma_cfg, |
||
1804 | + u32 dma_tx_phy, u32 chan) |
||
1805 | +{ |
||
1806 | + u32 value; |
||
1807 | + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; |
||
1808 | + |
||
1809 | + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); |
||
1810 | value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); |
||
1811 | - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel)); |
||
1812 | + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); |
||
1813 | |||
1814 | - value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); |
||
1815 | - value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); |
||
1816 | - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel)); |
||
1817 | + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); |
||
1818 | +} |
||
1819 | |||
1820 | - /* Mask interrupts by writing to CSR7 */ |
||
1821 | - writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel)); |
||
1822 | +void dwmac4_dma_init_channel(void __iomem *ioaddr, |
||
1823 | + struct stmmac_dma_cfg *dma_cfg, u32 chan) |
||
1824 | +{ |
||
1825 | + u32 value; |
||
1826 | + |
||
1827 | + /* common channel control register config */ |
||
1828 | + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); |
||
1829 | + if (dma_cfg->pblx8) |
||
1830 | + value = value | DMA_BUS_MODE_PBL; |
||
1831 | + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); |
||
1832 | |||
1833 | - writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); |
||
1834 | - writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); |
||
1835 | + /* Mask interrupts by writing to CSR7 */ |
||
1836 | + writel(DMA_CHAN_INTR_DEFAULT_MASK, |
||
1837 | + ioaddr + DMA_CHAN_INTR_ENA(chan)); |
||
1838 | } |
||
1839 | |||
1840 | static void dwmac4_dma_init(void __iomem *ioaddr, |
||
1841 | @@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem |
||
1842 | u32 dma_tx, u32 dma_rx, int atds) |
||
1843 | { |
||
1844 | u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); |
||
1845 | - int i; |
||
1846 | |||
1847 | /* Set the Fixed burst mode */ |
||
1848 | if (dma_cfg->fixed_burst) |
||
1849 | @@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem |
||
1850 | value |= DMA_SYS_BUS_AAL; |
||
1851 | |||
1852 | writel(value, ioaddr + DMA_SYS_BUS_MODE); |
||
1853 | - |
||
1854 | - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) |
||
1855 | - dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); |
||
1856 | } |
||
1857 | |||
1858 | static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, |
||
1859 | @@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __ |
||
1860 | _dwmac4_dump_dma_regs(ioaddr, i, reg_space); |
||
1861 | } |
||
1862 | |||
1863 | -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) |
||
1864 | +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) |
||
1865 | { |
||
1866 | - int i; |
||
1867 | + u32 chan; |
||
1868 | |||
1869 | - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) |
||
1870 | - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i)); |
||
1871 | + for (chan = 0; chan < number_chan; chan++) |
||
1872 | + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan)); |
||
1873 | } |
||
1874 | |||
1875 | -static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode, |
||
1876 | - int rxmode, u32 channel) |
||
1877 | +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, |
||
1878 | + u32 channel, int fifosz) |
||
1879 | { |
||
1880 | - u32 mtl_tx_op, mtl_rx_op, mtl_rx_int; |
||
1881 | + unsigned int rqs = fifosz / 256 - 1; |
||
1882 | + u32 mtl_rx_op, mtl_rx_int; |
||
1883 | |||
1884 | - /* Following code only done for channel 0, other channels not yet |
||
1885 | - * supported. |
||
1886 | - */ |
||
1887 | - mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); |
||
1888 | + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); |
||
1889 | + |
||
1890 | + if (mode == SF_DMA_MODE) { |
||
1891 | + pr_debug("GMAC: enable RX store and forward mode\n"); |
||
1892 | + mtl_rx_op |= MTL_OP_MODE_RSF; |
||
1893 | + } else { |
||
1894 | + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); |
||
1895 | + mtl_rx_op &= ~MTL_OP_MODE_RSF; |
||
1896 | + mtl_rx_op &= MTL_OP_MODE_RTC_MASK; |
||
1897 | + if (mode <= 32) |
||
1898 | + mtl_rx_op |= MTL_OP_MODE_RTC_32; |
||
1899 | + else if (mode <= 64) |
||
1900 | + mtl_rx_op |= MTL_OP_MODE_RTC_64; |
||
1901 | + else if (mode <= 96) |
||
1902 | + mtl_rx_op |= MTL_OP_MODE_RTC_96; |
||
1903 | + else |
||
1904 | + mtl_rx_op |= MTL_OP_MODE_RTC_128; |
||
1905 | + } |
||
1906 | + |
||
1907 | + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; |
||
1908 | + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; |
||
1909 | + |
||
1910 | + /* enable flow control only if each channel gets 4 KiB or more FIFO */ |
||
1911 | + if (fifosz >= 4096) { |
||
1912 | + unsigned int rfd, rfa; |
||
1913 | + |
||
1914 | + mtl_rx_op |= MTL_OP_MODE_EHFC; |
||
1915 | + |
||
1916 | + /* Set Threshold for Activating Flow Control to min 2 frames, |
||
1917 | + * i.e. 1500 * 2 = 3000 bytes. |
||
1918 | + * |
||
1919 | + * Set Threshold for Deactivating Flow Control to min 1 frame, |
||
1920 | + * i.e. 1500 bytes. |
||
1921 | + */ |
||
1922 | + switch (fifosz) { |
||
1923 | + case 4096: |
||
1924 | + /* This violates the above formula because of FIFO size |
||
1925 | + * limit, therefore overflow may occur in spite of this. |
||
1926 | + */ |
||
1927 | + rfd = 0x03; /* Full-2.5K */ |
||
1928 | + rfa = 0x01; /* Full-1.5K */ |
||
1929 | + break; |
||
1930 | + |
||
1931 | + case 8192: |
||
1932 | + rfd = 0x06; /* Full-4K */ |
||
1933 | + rfa = 0x0a; /* Full-6K */ |
||
1934 | + break; |
||
1935 | + |
||
1936 | + case 16384: |
||
1937 | + rfd = 0x06; /* Full-4K */ |
||
1938 | + rfa = 0x12; /* Full-10K */ |
||
1939 | + break; |
||
1940 | + |
||
1941 | + default: |
||
1942 | + rfd = 0x06; /* Full-4K */ |
||
1943 | + rfa = 0x1e; /* Full-16K */ |
||
1944 | + break; |
||
1945 | + } |
||
1946 | |||
1947 | - if (txmode == SF_DMA_MODE) { |
||
1948 | + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK; |
||
1949 | + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT; |
||
1950 | + |
||
1951 | + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK; |
||
1952 | + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT; |
||
1953 | + } |
||
1954 | + |
||
1955 | + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); |
||
1956 | + |
||
1957 | + /* Enable MTL RX overflow */ |
||
1958 | + mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); |
||
1959 | + writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, |
||
1960 | + ioaddr + MTL_CHAN_INT_CTRL(channel)); |
||
1961 | +} |
||
1962 | + |
||
1963 | +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, |
||
1964 | + u32 channel) |
||
1965 | +{ |
||
1966 | + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); |
||
1967 | + |
||
1968 | + if (mode == SF_DMA_MODE) { |
||
1969 | pr_debug("GMAC: enable TX store and forward mode\n"); |
||
1970 | /* Transmit COE type 2 cannot be done in cut-through mode. */ |
||
1971 | mtl_tx_op |= MTL_OP_MODE_TSF; |
||
1972 | } else { |
||
1973 | - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); |
||
1974 | + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); |
||
1975 | mtl_tx_op &= ~MTL_OP_MODE_TSF; |
||
1976 | mtl_tx_op &= MTL_OP_MODE_TTC_MASK; |
||
1977 | /* Set the transmit threshold */ |
||
1978 | - if (txmode <= 32) |
||
1979 | + if (mode <= 32) |
||
1980 | mtl_tx_op |= MTL_OP_MODE_TTC_32; |
||
1981 | - else if (txmode <= 64) |
||
1982 | + else if (mode <= 64) |
||
1983 | mtl_tx_op |= MTL_OP_MODE_TTC_64; |
||
1984 | - else if (txmode <= 96) |
||
1985 | + else if (mode <= 96) |
||
1986 | mtl_tx_op |= MTL_OP_MODE_TTC_96; |
||
1987 | - else if (txmode <= 128) |
||
1988 | + else if (mode <= 128) |
||
1989 | mtl_tx_op |= MTL_OP_MODE_TTC_128; |
||
1990 | - else if (txmode <= 192) |
||
1991 | + else if (mode <= 192) |
||
1992 | mtl_tx_op |= MTL_OP_MODE_TTC_192; |
||
1993 | - else if (txmode <= 256) |
||
1994 | + else if (mode <= 256) |
||
1995 | mtl_tx_op |= MTL_OP_MODE_TTC_256; |
||
1996 | - else if (txmode <= 384) |
||
1997 | + else if (mode <= 384) |
||
1998 | mtl_tx_op |= MTL_OP_MODE_TTC_384; |
||
1999 | else |
||
2000 | mtl_tx_op |= MTL_OP_MODE_TTC_512; |
||
2001 | @@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void |
||
2002 | */ |
||
2003 | mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; |
||
2004 | writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); |
||
2005 | - |
||
2006 | - mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); |
||
2007 | - |
||
2008 | - if (rxmode == SF_DMA_MODE) { |
||
2009 | - pr_debug("GMAC: enable RX store and forward mode\n"); |
||
2010 | - mtl_rx_op |= MTL_OP_MODE_RSF; |
||
2011 | - } else { |
||
2012 | - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); |
||
2013 | - mtl_rx_op &= ~MTL_OP_MODE_RSF; |
||
2014 | - mtl_rx_op &= MTL_OP_MODE_RTC_MASK; |
||
2015 | - if (rxmode <= 32) |
||
2016 | - mtl_rx_op |= MTL_OP_MODE_RTC_32; |
||
2017 | - else if (rxmode <= 64) |
||
2018 | - mtl_rx_op |= MTL_OP_MODE_RTC_64; |
||
2019 | - else if (rxmode <= 96) |
||
2020 | - mtl_rx_op |= MTL_OP_MODE_RTC_96; |
||
2021 | - else |
||
2022 | - mtl_rx_op |= MTL_OP_MODE_RTC_128; |
||
2023 | - } |
||
2024 | - |
||
2025 | - writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); |
||
2026 | - |
||
2027 | - /* Enable MTL RX overflow */ |
||
2028 | - mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); |
||
2029 | - writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, |
||
2030 | - ioaddr + MTL_CHAN_INT_CTRL(channel)); |
||
2031 | -} |
||
2032 | - |
||
2033 | -static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode, |
||
2034 | - int rxmode, int rxfifosz) |
||
2035 | -{ |
||
2036 | - /* Only Channel 0 is actually configured and used */ |
||
2037 | - dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0); |
||
2038 | } |
||
2039 | |||
2040 | static void dwmac4_get_hw_feature(void __iomem *ioaddr, |
||
2041 | @@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void _ |
||
2042 | hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); |
||
2043 | dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; |
||
2044 | dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; |
||
2045 | + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by |
||
2046 | + * shifting and store the sizes in bytes. |
||
2047 | + */ |
||
2048 | + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6); |
||
2049 | + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0); |
||
2050 | /* MAC HW feature2 */ |
||
2051 | hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); |
||
2052 | /* TX and RX number of channels */ |
||
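For the FIFO-size decoding a few lines above: the hardware feature field stores log2(size / 128), so shifting 128 left by the field value recovers the size in bytes. A quick worked example, with the field value picked arbitrarily:

/* Example: an encoded value of 7 means 128 << 7 = 16384 bytes (16 KiB). */
u32 encoded = 7;			/* arbitrary example value */
u32 fifo_bytes = 128 << encoded;	/* 16384 */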
2053 | @@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iom |
||
2054 | const struct stmmac_dma_ops dwmac4_dma_ops = { |
||
2055 | .reset = dwmac4_dma_reset, |
||
2056 | .init = dwmac4_dma_init, |
||
2057 | + .init_chan = dwmac4_dma_init_channel, |
||
2058 | + .init_rx_chan = dwmac4_dma_init_rx_chan, |
||
2059 | + .init_tx_chan = dwmac4_dma_init_tx_chan, |
||
2060 | .axi = dwmac4_dma_axi, |
||
2061 | .dump_regs = dwmac4_dump_dma_regs, |
||
2062 | - .dma_mode = dwmac4_dma_operation_mode, |
||
2063 | + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, |
||
2064 | + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, |
||
2065 | .enable_dma_irq = dwmac4_enable_dma_irq, |
||
2066 | .disable_dma_irq = dwmac4_disable_dma_irq, |
||
2067 | .start_tx = dwmac4_dma_start_tx, |
||
2068 | @@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_o |
||
2069 | const struct stmmac_dma_ops dwmac410_dma_ops = { |
||
2070 | .reset = dwmac4_dma_reset, |
||
2071 | .init = dwmac4_dma_init, |
||
2072 | + .init_chan = dwmac4_dma_init_channel, |
||
2073 | + .init_rx_chan = dwmac4_dma_init_rx_chan, |
||
2074 | + .init_tx_chan = dwmac4_dma_init_tx_chan, |
||
2075 | .axi = dwmac4_dma_axi, |
||
2076 | .dump_regs = dwmac4_dump_dma_regs, |
||
2077 | - .dma_mode = dwmac4_dma_operation_mode, |
||
2078 | + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, |
||
2079 | + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, |
||
2080 | .enable_dma_irq = dwmac410_enable_dma_irq, |
||
2081 | .disable_dma_irq = dwmac4_disable_dma_irq, |
||
2082 | .start_tx = dwmac4_dma_start_tx, |
||
2083 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h |
||
2084 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h |
||
2085 | @@ -185,17 +185,17 @@ |
||
2086 | |||
2087 | int dwmac4_dma_reset(void __iomem *ioaddr); |
||
2088 | void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); |
||
2089 | -void dwmac4_enable_dma_irq(void __iomem *ioaddr); |
||
2090 | -void dwmac410_enable_dma_irq(void __iomem *ioaddr); |
||
2091 | -void dwmac4_disable_dma_irq(void __iomem *ioaddr); |
||
2092 | -void dwmac4_dma_start_tx(void __iomem *ioaddr); |
||
2093 | -void dwmac4_dma_stop_tx(void __iomem *ioaddr); |
||
2094 | -void dwmac4_dma_start_rx(void __iomem *ioaddr); |
||
2095 | -void dwmac4_dma_stop_rx(void __iomem *ioaddr); |
||
2096 | +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan); |
||
2097 | +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan); |
||
2098 | +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan); |
||
2099 | +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan); |
||
2100 | +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); |
||
2101 | +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); |
||
2102 | +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); |
||
2103 | int dwmac4_dma_interrupt(void __iomem *ioaddr, |
||
2104 | - struct stmmac_extra_stats *x); |
||
2105 | -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len); |
||
2106 | -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len); |
||
2107 | + struct stmmac_extra_stats *x, u32 chan); |
||
2108 | +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); |
||
2109 | +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); |
||
2110 | void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
||
2111 | void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
||
2112 | |||
2113 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c |
||
2114 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c |
||
2115 | @@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioadd |
||
2116 | |||
2117 | void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) |
||
2118 | { |
||
2119 | - writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0)); |
||
2120 | + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan)); |
||
2121 | } |
||
2122 | |||
2123 | void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) |
||
2124 | { |
||
2125 | - writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0)); |
||
2126 | + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan)); |
||
2127 | } |
||
2128 | |||
2129 | -void dwmac4_dma_start_tx(void __iomem *ioaddr) |
||
2130 | +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan) |
||
2131 | { |
||
2132 | - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); |
||
2133 | + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); |
||
2134 | |||
2135 | value |= DMA_CONTROL_ST; |
||
2136 | - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); |
||
2137 | + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); |
||
2138 | |||
2139 | value = readl(ioaddr + GMAC_CONFIG); |
||
2140 | value |= GMAC_CONFIG_TE; |
||
2141 | writel(value, ioaddr + GMAC_CONFIG); |
||
2142 | } |
||
2143 | |||
2144 | -void dwmac4_dma_stop_tx(void __iomem *ioaddr) |
||
2145 | +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) |
||
2146 | { |
||
2147 | - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); |
||
2148 | + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); |
||
2149 | |||
2150 | value &= ~DMA_CONTROL_ST; |
||
2151 | - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); |
||
2152 | + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); |
||
2153 | |||
2154 | value = readl(ioaddr + GMAC_CONFIG); |
||
2155 | value &= ~GMAC_CONFIG_TE; |
||
2156 | writel(value, ioaddr + GMAC_CONFIG); |
||
2157 | } |
||
2158 | |||
2159 | -void dwmac4_dma_start_rx(void __iomem *ioaddr) |
||
2160 | +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) |
||
2161 | { |
||
2162 | - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); |
||
2163 | + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); |
||
2164 | |||
2165 | value |= DMA_CONTROL_SR; |
||
2166 | |||
2167 | - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); |
||
2168 | + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); |
||
2169 | |||
2170 | value = readl(ioaddr + GMAC_CONFIG); |
||
2171 | value |= GMAC_CONFIG_RE; |
||
2172 | writel(value, ioaddr + GMAC_CONFIG); |
||
2173 | } |
||
2174 | |||
2175 | -void dwmac4_dma_stop_rx(void __iomem *ioaddr) |
||
2176 | +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan) |
||
2177 | { |
||
2178 | - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); |
||
2179 | + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); |
||
2180 | |||
2181 | value &= ~DMA_CONTROL_SR; |
||
2182 | - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); |
||
2183 | + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); |
||
2184 | |||
2185 | value = readl(ioaddr + GMAC_CONFIG); |
||
2186 | value &= ~GMAC_CONFIG_RE; |
||
2187 | writel(value, ioaddr + GMAC_CONFIG); |
||
2188 | } |
||
2189 | |||
2190 | -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len) |
||
2191 | +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) |
||
2192 | { |
||
2193 | - writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0)); |
||
2194 | + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan)); |
||
2195 | } |
||
2196 | |||
2197 | -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len) |
||
2198 | +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) |
||
2199 | { |
||
2200 | - writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0)); |
||
2201 | + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan)); |
||
2202 | } |
||
2203 | |||
2204 | -void dwmac4_enable_dma_irq(void __iomem *ioaddr) |
||
2205 | +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan) |
||
2206 | { |
||
2207 | writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + |
||
2208 | - DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); |
||
2209 | + DMA_CHAN_INTR_ENA(chan)); |
||
2210 | } |
||
2211 | |||
2212 | -void dwmac410_enable_dma_irq(void __iomem *ioaddr) |
||
2213 | +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan) |
||
2214 | { |
||
2215 | writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, |
||
2216 | - ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); |
||
2217 | + ioaddr + DMA_CHAN_INTR_ENA(chan)); |
||
2218 | } |
||
2219 | |||
2220 | -void dwmac4_disable_dma_irq(void __iomem *ioaddr) |
||
2221 | +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan) |
||
2222 | { |
||
2223 | - writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); |
||
2224 | + writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan)); |
||
2225 | } |
||
2226 | |||
2227 | int dwmac4_dma_interrupt(void __iomem *ioaddr, |
||
2228 | - struct stmmac_extra_stats *x) |
||
2229 | + struct stmmac_extra_stats *x, u32 chan) |
||
2230 | { |
||
2231 | int ret = 0; |
||
2232 | |||
2233 | - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0)); |
||
2234 | + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); |
||
2235 | |||
2236 | /* ABNORMAL interrupts */ |
||
2237 | if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { |
||
2238 | @@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *i |
||
2239 | if (likely(intr_status & DMA_CHAN_STATUS_RI)) { |
||
2240 | u32 value; |
||
2241 | |||
2242 | - value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); |
||
2243 | + value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); |
||
2244 | /* to schedule NAPI on real RIE event. */ |
||
2245 | if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { |
||
2246 | x->rx_normal_irq_n++; |
||
2247 | @@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *i |
||
2248 | * status [21-0] expect reserved bits [5-3] |
||
2249 | */ |
||
2250 | writel((intr_status & 0x3fffc7), |
||
2251 | - ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0)); |
||
2252 | + ioaddr + DMA_CHAN_STATUS(chan)); |
||
2253 | |||
2254 | return ret; |
||
2255 | } |
||
2256 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h |
||
2257 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h |
||
2258 | @@ -137,13 +137,14 @@ |
||
2259 | #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ |
||
2260 | |||
2261 | void dwmac_enable_dma_transmission(void __iomem *ioaddr); |
||
2262 | -void dwmac_enable_dma_irq(void __iomem *ioaddr); |
||
2263 | -void dwmac_disable_dma_irq(void __iomem *ioaddr); |
||
2264 | -void dwmac_dma_start_tx(void __iomem *ioaddr); |
||
2265 | -void dwmac_dma_stop_tx(void __iomem *ioaddr); |
||
2266 | -void dwmac_dma_start_rx(void __iomem *ioaddr); |
||
2267 | -void dwmac_dma_stop_rx(void __iomem *ioaddr); |
||
2268 | -int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); |
||
2269 | +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); |
||
2270 | +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); |
||
2271 | +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan); |
||
2272 | +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); |
||
2273 | +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); |
||
2274 | +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); |
||
2275 | +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, |
||
2276 | + u32 chan); |
||
2277 | int dwmac_dma_reset(void __iomem *ioaddr); |
||
2278 | |||
2279 | #endif /* __DWMAC_DMA_H__ */ |
||
2280 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c |
||
2281 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c |
||
2282 | @@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void |
||
2283 | writel(1, ioaddr + DMA_XMT_POLL_DEMAND); |
||
2284 | } |
||
2285 | |||
2286 | -void dwmac_enable_dma_irq(void __iomem *ioaddr) |
||
2287 | +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan) |
||
2288 | { |
||
2289 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); |
||
2290 | } |
||
2291 | |||
2292 | -void dwmac_disable_dma_irq(void __iomem *ioaddr) |
||
2293 | +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan) |
||
2294 | { |
||
2295 | writel(0, ioaddr + DMA_INTR_ENA); |
||
2296 | } |
||
2297 | |||
2298 | -void dwmac_dma_start_tx(void __iomem *ioaddr) |
||
2299 | +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) |
||
2300 | { |
||
2301 | u32 value = readl(ioaddr + DMA_CONTROL); |
||
2302 | value |= DMA_CONTROL_ST; |
||
2303 | writel(value, ioaddr + DMA_CONTROL); |
||
2304 | } |
||
2305 | |||
2306 | -void dwmac_dma_stop_tx(void __iomem *ioaddr) |
||
2307 | +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) |
||
2308 | { |
||
2309 | u32 value = readl(ioaddr + DMA_CONTROL); |
||
2310 | value &= ~DMA_CONTROL_ST; |
||
2311 | writel(value, ioaddr + DMA_CONTROL); |
||
2312 | } |
||
2313 | |||
2314 | -void dwmac_dma_start_rx(void __iomem *ioaddr) |
||
2315 | +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) |
||
2316 | { |
||
2317 | u32 value = readl(ioaddr + DMA_CONTROL); |
||
2318 | value |= DMA_CONTROL_SR; |
||
2319 | writel(value, ioaddr + DMA_CONTROL); |
||
2320 | } |
||
2321 | |||
2322 | -void dwmac_dma_stop_rx(void __iomem *ioaddr) |
||
2323 | +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) |
||
2324 | { |
||
2325 | u32 value = readl(ioaddr + DMA_CONTROL); |
||
2326 | value &= ~DMA_CONTROL_SR; |
||
2327 | @@ -156,7 +156,7 @@ static void show_rx_process_state(unsign |
||
2328 | #endif |
||
2329 | |||
2330 | int dwmac_dma_interrupt(void __iomem *ioaddr, |
||
2331 | - struct stmmac_extra_stats *x) |
||
2332 | + struct stmmac_extra_stats *x, u32 chan) |
||
2333 | { |
||
2334 | int ret = 0; |
||
2335 | /* read the status register (CSR5) */ |
||
2336 | --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c |
||
2337 | +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c |
||
2338 | @@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(str |
||
2339 | |||
2340 | static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, |
||
2341 | bool csum_flag, int mode, bool tx_own, |
||
2342 | - bool ls) |
||
2343 | + bool ls, unsigned int tot_pkt_len) |
||
2344 | { |
||
2345 | unsigned int tdes0 = le32_to_cpu(p->des0); |
||
2346 | |||
2347 | --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c |
||
2348 | +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c |
||
2349 | @@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct |
||
2350 | |||
2351 | static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, |
||
2352 | bool csum_flag, int mode, bool tx_own, |
||
2353 | - bool ls) |
||
2354 | + bool ls, unsigned int tot_pkt_len) |
||
2355 | { |
||
2356 | unsigned int tdes1 = le32_to_cpu(p->des1); |
||
2357 | |||
2358 | --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c |
||
2359 | +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c |
||
2360 | @@ -26,16 +26,17 @@ |
||
2361 | |||
2362 | static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) |
||
2363 | { |
||
2364 | - struct stmmac_priv *priv = (struct stmmac_priv *)p; |
||
2365 | - unsigned int entry = priv->cur_tx; |
||
2366 | - struct dma_desc *desc; |
||
2367 | + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; |
||
2368 | unsigned int nopaged_len = skb_headlen(skb); |
||
2369 | + struct stmmac_priv *priv = tx_q->priv_data; |
||
2370 | + unsigned int entry = tx_q->cur_tx; |
||
2371 | unsigned int bmax, len, des2; |
||
2372 | + struct dma_desc *desc; |
||
2373 | |||
2374 | if (priv->extend_desc) |
||
2375 | - desc = (struct dma_desc *)(priv->dma_etx + entry); |
||
2376 | + desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
||
2377 | else |
||
2378 | - desc = priv->dma_tx + entry; |
||
2379 | + desc = tx_q->dma_tx + entry; |
||
2380 | |||
2381 | if (priv->plat->enh_desc) |
||
2382 | bmax = BUF_SIZE_8KiB; |
||
2383 | @@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, str |
||
2384 | if (dma_mapping_error(priv->device, des2)) |
||
2385 | return -1; |
||
2386 | |||
2387 | - priv->tx_skbuff_dma[entry].buf = des2; |
||
2388 | - priv->tx_skbuff_dma[entry].len = bmax; |
||
2389 | - priv->tx_skbuff_dma[entry].is_jumbo = true; |
||
2390 | + tx_q->tx_skbuff_dma[entry].buf = des2; |
||
2391 | + tx_q->tx_skbuff_dma[entry].len = bmax; |
||
2392 | + tx_q->tx_skbuff_dma[entry].is_jumbo = true; |
||
2393 | |||
2394 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); |
||
2395 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, |
||
2396 | - STMMAC_RING_MODE, 0, false); |
||
2397 | - priv->tx_skbuff[entry] = NULL; |
||
2398 | + STMMAC_RING_MODE, 0, |
||
2399 | + false, skb->len); |
||
2400 | + tx_q->tx_skbuff[entry] = NULL; |
||
2401 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
||
2402 | |||
2403 | if (priv->extend_desc) |
||
2404 | - desc = (struct dma_desc *)(priv->dma_etx + entry); |
||
2405 | + desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
||
2406 | else |
||
2407 | - desc = priv->dma_tx + entry; |
||
2408 | + desc = tx_q->dma_tx + entry; |
||
2409 | |||
2410 | des2 = dma_map_single(priv->device, skb->data + bmax, len, |
||
2411 | DMA_TO_DEVICE); |
||
2412 | desc->des2 = cpu_to_le32(des2); |
||
2413 | if (dma_mapping_error(priv->device, des2)) |
||
2414 | return -1; |
||
2415 | - priv->tx_skbuff_dma[entry].buf = des2; |
||
2416 | - priv->tx_skbuff_dma[entry].len = len; |
||
2417 | - priv->tx_skbuff_dma[entry].is_jumbo = true; |
||
2418 | + tx_q->tx_skbuff_dma[entry].buf = des2; |
||
2419 | + tx_q->tx_skbuff_dma[entry].len = len; |
||
2420 | + tx_q->tx_skbuff_dma[entry].is_jumbo = true; |
||
2421 | |||
2422 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); |
||
2423 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, |
||
2424 | - STMMAC_RING_MODE, 1, true); |
||
2425 | + STMMAC_RING_MODE, 1, |
||
2426 | + true, skb->len); |
||
2427 | } else { |
||
2428 | des2 = dma_map_single(priv->device, skb->data, |
||
2429 | nopaged_len, DMA_TO_DEVICE); |
||
2430 | desc->des2 = cpu_to_le32(des2); |
||
2431 | if (dma_mapping_error(priv->device, des2)) |
||
2432 | return -1; |
||
2433 | - priv->tx_skbuff_dma[entry].buf = des2; |
||
2434 | - priv->tx_skbuff_dma[entry].len = nopaged_len; |
||
2435 | - priv->tx_skbuff_dma[entry].is_jumbo = true; |
||
2436 | + tx_q->tx_skbuff_dma[entry].buf = des2; |
||
2437 | + tx_q->tx_skbuff_dma[entry].len = nopaged_len; |
||
2438 | + tx_q->tx_skbuff_dma[entry].is_jumbo = true; |
||
2439 | desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); |
||
2440 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, |
||
2441 | - STMMAC_RING_MODE, 0, true); |
||
2442 | + STMMAC_RING_MODE, 0, |
||
2443 | + true, skb->len); |
||
2444 | } |
||
2445 | |||
2446 | - priv->cur_tx = entry; |
||
2447 | + tx_q->cur_tx = entry; |
||
2448 | |||
2449 | return entry; |
||
2450 | } |
||
2451 | @@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma |
||
2452 | |||
2453 | static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) |
||
2454 | { |
||
2455 | - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; |
||
2456 | - unsigned int entry = priv->dirty_tx; |
||
2457 | + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; |
||
2458 | + struct stmmac_priv *priv = tx_q->priv_data; |
||
2459 | + unsigned int entry = tx_q->dirty_tx; |
||
2460 | |||
2461 | /* des3 is only used for jumbo frames tx or time stamping */ |
||
2462 | - if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo || |
||
2463 | - (priv->tx_skbuff_dma[entry].last_segment && |
||
2464 | + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo || |
||
2465 | + (tx_q->tx_skbuff_dma[entry].last_segment && |
||
2466 | !priv->extend_desc && priv->hwts_tx_en))) |
||
2467 | p->des3 = 0; |
||
2468 | } |
||
2469 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h |
||
2470 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h |
||
2471 | @@ -46,38 +46,51 @@ struct stmmac_tx_info { |
||
2472 | bool is_jumbo; |
||
2473 | }; |
||
2474 | |||
2475 | -struct stmmac_priv { |
||
2476 | - /* Frequently used values are kept adjacent for cache effect */ |
||
2477 | +/* Frequently used values are kept adjacent for cache effect */ |
||
2478 | +struct stmmac_tx_queue { |
||
2479 | + u32 queue_index; |
||
2480 | + struct stmmac_priv *priv_data; |
||
2481 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; |
||
2482 | struct dma_desc *dma_tx; |
||
2483 | struct sk_buff **tx_skbuff; |
||
2484 | + struct stmmac_tx_info *tx_skbuff_dma; |
||
2485 | unsigned int cur_tx; |
||
2486 | unsigned int dirty_tx; |
||
2487 | + dma_addr_t dma_tx_phy; |
||
2488 | + u32 tx_tail_addr; |
||
2489 | +}; |
||
2490 | + |
||
2491 | +struct stmmac_rx_queue { |
||
2492 | + u32 queue_index; |
||
2493 | + struct stmmac_priv *priv_data; |
||
2494 | + struct dma_extended_desc *dma_erx; |
||
2495 | + struct dma_desc *dma_rx ____cacheline_aligned_in_smp; |
||
2496 | + struct sk_buff **rx_skbuff; |
||
2497 | + dma_addr_t *rx_skbuff_dma; |
||
2498 | + unsigned int cur_rx; |
||
2499 | + unsigned int dirty_rx; |
||
2500 | + u32 rx_zeroc_thresh; |
||
2501 | + dma_addr_t dma_rx_phy; |
||
2502 | + u32 rx_tail_addr; |
||
2503 | + struct napi_struct napi ____cacheline_aligned_in_smp; |
||
2504 | +}; |
||
2505 | + |
||
2506 | +struct stmmac_priv { |
||
2507 | + /* Frequently used values are kept adjacent for cache effect */ |
||
2508 | u32 tx_count_frames; |
||
2509 | u32 tx_coal_frames; |
||
2510 | u32 tx_coal_timer; |
||
2511 | - struct stmmac_tx_info *tx_skbuff_dma; |
||
2512 | - dma_addr_t dma_tx_phy; |
||
2513 | + |
||
2514 | int tx_coalesce; |
||
2515 | int hwts_tx_en; |
||
2516 | bool tx_path_in_lpi_mode; |
||
2517 | struct timer_list txtimer; |
||
2518 | bool tso; |
||
2519 | |||
2520 | - struct dma_desc *dma_rx ____cacheline_aligned_in_smp; |
||
2521 | - struct dma_extended_desc *dma_erx; |
||
2522 | - struct sk_buff **rx_skbuff; |
||
2523 | - unsigned int cur_rx; |
||
2524 | - unsigned int dirty_rx; |
||
2525 | unsigned int dma_buf_sz; |
||
2526 | unsigned int rx_copybreak; |
||
2527 | - unsigned int rx_zeroc_thresh; |
||
2528 | u32 rx_riwt; |
||
2529 | int hwts_rx_en; |
||
2530 | - dma_addr_t *rx_skbuff_dma; |
||
2531 | - dma_addr_t dma_rx_phy; |
||
2532 | - |
||
2533 | - struct napi_struct napi ____cacheline_aligned_in_smp; |
||
2534 | |||
2535 | void __iomem *ioaddr; |
||
2536 | struct net_device *dev; |
||
2537 | @@ -85,6 +98,12 @@ struct stmmac_priv { |
||
2538 | struct mac_device_info *hw; |
||
2539 | spinlock_t lock; |
||
2540 | |||
2541 | + /* RX Queue */ |
||
2542 | + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; |
||
2543 | + |
||
2544 | + /* TX Queue */ |
||
2545 | + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; |
||
2546 | + |
||
2547 | int oldlink; |
||
2548 | int speed; |
||
2549 | int oldduplex; |
||
2550 | @@ -119,8 +138,6 @@ struct stmmac_priv { |
||
2551 | spinlock_t ptp_lock; |
||
2552 | void __iomem *mmcaddr; |
||
2553 | void __iomem *ptpaddr; |
||
2554 | - u32 rx_tail_addr; |
||
2555 | - u32 tx_tail_addr; |
||
2556 | u32 mss; |
||
2557 | |||
2558 | #ifdef CONFIG_DEBUG_FS |
||
2559 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
||
2560 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
||
2561 | @@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device |
||
2562 | struct ethtool_pauseparam *pause) |
||
2563 | { |
||
2564 | struct stmmac_priv *priv = netdev_priv(netdev); |
||
2565 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
2566 | struct phy_device *phy = netdev->phydev; |
||
2567 | int new_pause = FLOW_OFF; |
||
2568 | |||
2569 | @@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device |
||
2570 | } |
||
2571 | |||
2572 | priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, |
||
2573 | - priv->pause); |
||
2574 | + priv->pause, tx_cnt); |
||
2575 | return 0; |
||
2576 | } |
||
2577 | |||
2578 | @@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(str |
||
2579 | struct ethtool_stats *dummy, u64 *data) |
||
2580 | { |
||
2581 | struct stmmac_priv *priv = netdev_priv(dev); |
||
2582 | + u32 rx_queues_count = priv->plat->rx_queues_to_use; |
||
2583 | + u32 tx_queues_count = priv->plat->tx_queues_to_use; |
||
2584 | int i, j = 0; |
||
2585 | |||
2586 | /* Update the DMA HW counters for dwmac10/100 */ |
||
2587 | @@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(str |
||
2588 | if ((priv->hw->mac->debug) && |
||
2589 | (priv->synopsys_id >= DWMAC_CORE_3_50)) |
||
2590 | priv->hw->mac->debug(priv->ioaddr, |
||
2591 | - (void *)&priv->xstats); |
||
2592 | + (void *)&priv->xstats, |
||
2593 | + rx_queues_count, tx_queues_count); |
||
2594 | } |
||
2595 | for (i = 0; i < STMMAC_STATS_LEN; i++) { |
||
2596 | char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; |
||
2597 | @@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct ne |
||
2598 | struct ethtool_coalesce *ec) |
||
2599 | { |
||
2600 | struct stmmac_priv *priv = netdev_priv(dev); |
||
2601 | + u32 rx_cnt = priv->plat->rx_queues_to_use; |
||
2602 | unsigned int rx_riwt; |
||
2603 | |||
2604 | /* Check not supported parameters */ |
||
2605 | @@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct ne |
||
2606 | priv->tx_coal_frames = ec->tx_max_coalesced_frames; |
||
2607 | priv->tx_coal_timer = ec->tx_coalesce_usecs; |
||
2608 | priv->rx_riwt = rx_riwt; |
||
2609 | - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); |
||
2610 | + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt); |
||
2611 | |||
2612 | return 0; |
||
2613 | } |
||
2614 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
||
2615 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
||
2616 | @@ -139,6 +139,64 @@ static void stmmac_verify_args(void) |
||
2617 | } |
||
2618 | |||
2619 | /** |
||
2620 | + * stmmac_disable_all_queues - Disable all queues |
||
2621 | + * @priv: driver private structure |
||
2622 | + */ |
||
2623 | +static void stmmac_disable_all_queues(struct stmmac_priv *priv) |
||
2624 | +{ |
||
2625 | + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
||
2626 | + u32 queue; |
||
2627 | + |
||
2628 | + for (queue = 0; queue < rx_queues_cnt; queue++) { |
||
2629 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
2630 | + |
||
2631 | + napi_disable(&rx_q->napi); |
||
2632 | + } |
||
2633 | +} |
||
2634 | + |
||
2635 | +/** |
||
2636 | + * stmmac_enable_all_queues - Enable all queues |
||
2637 | + * @priv: driver private structure |
||
2638 | + */ |
||
2639 | +static void stmmac_enable_all_queues(struct stmmac_priv *priv) |
||
2640 | +{ |
||
2641 | + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
||
2642 | + u32 queue; |
||
2643 | + |
||
2644 | + for (queue = 0; queue < rx_queues_cnt; queue++) { |
||
2645 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
2646 | + |
||
2647 | + napi_enable(&rx_q->napi); |
||
2648 | + } |
||
2649 | +} |
||
2650 | + |
||
2651 | +/** |
||
2652 | + * stmmac_stop_all_queues - Stop all queues |
||
2653 | + * @priv: driver private structure |
||
2654 | + */ |
||
2655 | +static void stmmac_stop_all_queues(struct stmmac_priv *priv) |
||
2656 | +{ |
||
2657 | + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; |
||
2658 | + u32 queue; |
||
2659 | + |
||
2660 | + for (queue = 0; queue < tx_queues_cnt; queue++) |
||
2661 | + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
||
2662 | +} |
||
2663 | + |
||
2664 | +/** |
||
2665 | + * stmmac_start_all_queues - Start all queues |
||
2666 | + * @priv: driver private structure |
||
2667 | + */ |
||
2668 | +static void stmmac_start_all_queues(struct stmmac_priv *priv) |
||
2669 | +{ |
||
2670 | + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; |
||
2671 | + u32 queue; |
||
2672 | + |
||
2673 | + for (queue = 0; queue < tx_queues_cnt; queue++) |
||
2674 | + netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); |
||
2675 | +} |
||
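The four helpers above split queue life-cycle control by direction: the enable/disable pair drives NAPI on the RX queues, the start/stop pair drives the netif TX queues. A hypothetical usage sketch of the intended bracketing (the demo_* wrappers below are illustrative and not part of this hunk; the assumed ordering is to bring NAPI up before the stack may queue packets, and to quiesce TX before NAPI goes down):

/* Illustrative only; demo_* functions are not driver symbols. */
static void demo_datapath_up(struct stmmac_priv *priv)
{
	stmmac_enable_all_queues(priv);		/* napi_enable() on each RX queue */
	stmmac_start_all_queues(priv);		/* netif_tx_start_queue() on each TX queue */
}

static void demo_datapath_down(struct stmmac_priv *priv)
{
	stmmac_stop_all_queues(priv);		/* netif_tx_stop_queue() on each TX queue */
	stmmac_disable_all_queues(priv);	/* napi_disable() on each RX queue */
}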
2676 | + |
||
2677 | +/** |
||
2678 | * stmmac_clk_csr_set - dynamically set the MDC clock |
||
2679 | * @priv: driver private structure |
||
2680 | * Description: this is to dynamically set the MDC clock according to the csr |
||
2681 | @@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf |
||
2682 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); |
||
2683 | } |
||
2684 | |||
2685 | -static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) |
||
2686 | +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) |
||
2687 | { |
||
2688 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
2689 | u32 avail; |
||
2690 | |||
2691 | - if (priv->dirty_tx > priv->cur_tx) |
||
2692 | - avail = priv->dirty_tx - priv->cur_tx - 1; |
||
2693 | + if (tx_q->dirty_tx > tx_q->cur_tx) |
||
2694 | + avail = tx_q->dirty_tx - tx_q->cur_tx - 1; |
||
2695 | else |
||
2696 | - avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; |
||
2697 | + avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; |
||
2698 | |||
2699 | return avail; |
||
2700 | } |
||
2701 | |||
2702 | -static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) |
||
2703 | +/** |
||
2704 | + * stmmac_rx_dirty - Get RX queue dirty |
||
2705 | + * @priv: driver private structure |
||
2706 | + * @queue: RX queue index |
||
2707 | + */ |
||
2708 | +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) |
||
2709 | { |
||
2710 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
2711 | u32 dirty; |
||
2712 | |||
2713 | - if (priv->dirty_rx <= priv->cur_rx) |
||
2714 | - dirty = priv->cur_rx - priv->dirty_rx; |
||
2715 | + if (rx_q->dirty_rx <= rx_q->cur_rx) |
||
2716 | + dirty = rx_q->cur_rx - rx_q->dirty_rx; |
||
2717 | else |
||
2718 | - dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; |
||
2719 | + dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; |
||
2720 | |||
2721 | return dirty; |
||
2722 | } |
||
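Both helpers compute distances on a circular descriptor ring, now one ring per queue: stmmac_tx_avail() counts free slots between the cleanup index (dirty_tx) and the producer index (cur_tx), keeping one slot unused so a full ring is distinguishable from an empty one, while stmmac_rx_dirty() counts descriptors the hardware has used but the driver has not yet refilled. A standalone sketch of the same arithmetic (the ring size and sample indices are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 512u	/* stands in for DMA_TX_SIZE / DMA_RX_SIZE */

/* Free TX descriptors between the cleanup index and the producer index;
 * one slot stays unused so full and empty rings look different. */
static uint32_t demo_tx_avail(uint32_t dirty, uint32_t cur)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return DEMO_RING_SIZE - cur + dirty - 1;
}

/* RX descriptors consumed by the hardware and not yet refilled. */
static uint32_t demo_rx_dirty(uint32_t dirty, uint32_t cur)
{
	if (dirty <= cur)
		return cur - dirty;
	return DEMO_RING_SIZE - dirty + cur;
}

int main(void)
{
	printf("tx avail: %u\n", demo_tx_avail(10, 500));	/* wrapped case: 21 */
	printf("rx dirty: %u\n", demo_rx_dirty(5, 20));		/* linear case: 15 */
	return 0;
}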
2723 | @@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_spe |
||
2724 | */ |
||
2725 | static void stmmac_enable_eee_mode(struct stmmac_priv *priv) |
||
2726 | { |
||
2727 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
2728 | + u32 queue; |
||
2729 | + |
||
2730 | + /* check if all TX queues have the work finished */ |
||
2731 | + for (queue = 0; queue < tx_cnt; queue++) { |
||
2732 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
2733 | + |
||
2734 | + if (tx_q->dirty_tx != tx_q->cur_tx) |
||
2735 | + return; /* still unfinished work */ |
||
2736 | + } |
||
2737 | + |
||
2738 | /* Check and enter in LPI mode */ |
||
2739 | - if ((priv->dirty_tx == priv->cur_tx) && |
||
2740 | - (priv->tx_path_in_lpi_mode == false)) |
||
2741 | + if (!priv->tx_path_in_lpi_mode) |
||
2742 | priv->hw->mac->set_eee_mode(priv->hw, |
||
2743 | priv->plat->en_tx_lpi_clockgating); |
||
2744 | } |
||
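With several TX queues, LPI entry is now gated on every ring being fully reclaimed rather than on a single dirty/cur pair: one busy queue is enough to keep the MAC out of low-power mode. A standalone sketch of that check, using stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct demo_txq {
	uint32_t dirty_tx;	/* cleanup index */
	uint32_t cur_tx;	/* producer index */
};

/* A ring is idle once the cleanup index has caught up with the producer;
 * LPI may be entered only if that holds for every TX queue. */
static bool demo_all_tx_idle(const struct demo_txq *txq, uint32_t tx_cnt)
{
	uint32_t q;

	for (q = 0; q < tx_cnt; q++)
		if (txq[q].dirty_tx != txq[q].cur_tx)
			return false;
	return true;
}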
2745 | @@ -365,14 +440,14 @@ static void stmmac_get_tx_hwtstamp(struc |
||
2746 | return; |
||
2747 | |||
2748 | /* check tx tstamp status */ |
||
2749 | - if (!priv->hw->desc->get_tx_timestamp_status(p)) { |
||
2750 | + if (priv->hw->desc->get_tx_timestamp_status(p)) { |
||
2751 | /* get the valid tstamp */ |
||
2752 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
||
2753 | |||
2754 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
||
2755 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
||
2756 | |||
2757 | - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); |
||
2758 | + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); |
||
2759 | /* pass tstamp to stack */ |
||
2760 | skb_tstamp_tx(skb, &shhwtstamp); |
||
2761 | } |
||
2762 | @@ -399,19 +474,19 @@ static void stmmac_get_rx_hwtstamp(struc |
||
2763 | return; |
||
2764 | |||
2765 | /* Check if timestamp is available */ |
||
2766 | - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { |
||
2767 | + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { |
||
2768 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
||
2769 | if (priv->plat->has_gmac4) |
||
2770 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); |
||
2771 | else |
||
2772 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
||
2773 | |||
2774 | - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
||
2775 | + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
||
2776 | shhwtstamp = skb_hwtstamps(skb); |
||
2777 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
||
2778 | shhwtstamp->hwtstamp = ns_to_ktime(ns); |
||
2779 | } else { |
||
2780 | - netdev_err(priv->dev, "cannot get RX hw timestamp\n"); |
||
2781 | + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); |
||
2782 | } |
||
2783 | } |
||
2784 | |||
2785 | @@ -688,6 +763,19 @@ static void stmmac_release_ptp(struct st |
||
2786 | } |
||
2787 | |||
2788 | /** |
||
2789 | + * stmmac_mac_flow_ctrl - Configure flow control in all queues |
||
2790 | + * @priv: driver private structure |
||
2791 | + * Description: It is used for configuring the flow control in all queues |
||
2792 | + */ |
||
2793 | +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) |
||
2794 | +{ |
||
2795 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
2796 | + |
||
2797 | + priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, |
||
2798 | + priv->pause, tx_cnt); |
||
2799 | +} |
||
2800 | + |
||
2801 | +/** |
||
2802 | * stmmac_adjust_link - adjusts the link parameters |
||
2803 | * @dev: net device structure |
||
2804 | * Description: this is the helper called by the physical abstraction layer |
||
2805 | @@ -702,7 +790,6 @@ static void stmmac_adjust_link(struct ne |
||
2806 | struct phy_device *phydev = dev->phydev; |
||
2807 | unsigned long flags; |
||
2808 | int new_state = 0; |
||
2809 | - unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; |
||
2810 | |||
2811 | if (!phydev) |
||
2812 | return; |
||
2813 | @@ -724,8 +811,7 @@ static void stmmac_adjust_link(struct ne |
||
2814 | } |
||
2815 | /* Flow Control operation */ |
||
2816 | if (phydev->pause) |
||
2817 | - priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex, |
||
2818 | - fc, pause_time); |
||
2819 | + stmmac_mac_flow_ctrl(priv, phydev->duplex); |
||
2820 | |||
2821 | if (phydev->speed != priv->speed) { |
||
2822 | new_state = 1; |
||
2823 | @@ -893,22 +979,56 @@ static int stmmac_init_phy(struct net_de |
||
2824 | return 0; |
||
2825 | } |
||
2826 | |||
2827 | -static void stmmac_display_rings(struct stmmac_priv *priv) |
||
2828 | +static void stmmac_display_rx_rings(struct stmmac_priv *priv) |
||
2829 | { |
||
2830 | - void *head_rx, *head_tx; |
||
2831 | + u32 rx_cnt = priv->plat->rx_queues_to_use; |
||
2832 | + void *head_rx; |
||
2833 | + u32 queue; |
||
2834 | |||
2835 | - if (priv->extend_desc) { |
||
2836 | - head_rx = (void *)priv->dma_erx; |
||
2837 | - head_tx = (void *)priv->dma_etx; |
||
2838 | - } else { |
||
2839 | - head_rx = (void *)priv->dma_rx; |
||
2840 | - head_tx = (void *)priv->dma_tx; |
||
2841 | + /* Display RX rings */ |
||
2842 | + for (queue = 0; queue < rx_cnt; queue++) { |
||
2843 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
2844 | + |
||
2845 | + pr_info("\tRX Queue %u rings\n", queue); |
||
2846 | + |
||
2847 | + if (priv->extend_desc) |
||
2848 | + head_rx = (void *)rx_q->dma_erx; |
||
2849 | + else |
||
2850 | + head_rx = (void *)rx_q->dma_rx; |
||
2851 | + |
||
2852 | + /* Display RX ring */ |
||
2853 | + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); |
||
2854 | + } |
||
2855 | +} |
||
2856 | + |
||
2857 | +static void stmmac_display_tx_rings(struct stmmac_priv *priv) |
||
2858 | +{ |
||
2859 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
2860 | + void *head_tx; |
||
2861 | + u32 queue; |
||
2862 | + |
||
2863 | + /* Display TX rings */ |
||
2864 | + for (queue = 0; queue < tx_cnt; queue++) { |
||
2865 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
2866 | + |
||
2867 | + pr_info("\tTX Queue %u rings\n", queue); |
||
2868 | + |
||
2869 | + if (priv->extend_desc) |
||
2870 | + head_tx = (void *)tx_q->dma_etx; |
||
2871 | + else |
||
2872 | + head_tx = (void *)tx_q->dma_tx; |
||
2873 | + |
||
2874 | + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); |
||
2875 | } |
||
2876 | +} |
||
2877 | + |
||
2878 | +static void stmmac_display_rings(struct stmmac_priv *priv) |
||
2879 | +{ |
||
2880 | + /* Display RX ring */ |
||
2881 | + stmmac_display_rx_rings(priv); |
||
2882 | |||
2883 | - /* Display Rx ring */ |
||
2884 | - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); |
||
2885 | - /* Display Tx ring */ |
||
2886 | - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); |
||
2887 | + /* Display TX ring */ |
||
2888 | + stmmac_display_tx_rings(priv); |
||
2889 | } |
||
2890 | |||
2891 | static int stmmac_set_bfsize(int mtu, int bufsize) |
||
2892 | @@ -928,48 +1048,88 @@ static int stmmac_set_bfsize(int mtu, in |
||
2893 | } |
||
2894 | |||
2895 | /** |
||
2896 | - * stmmac_clear_descriptors - clear descriptors |
||
2897 | + * stmmac_clear_rx_descriptors - clear RX descriptors |
||
2898 | * @priv: driver private structure |
||
2899 | - * Description: this function is called to clear the tx and rx descriptors |
||
2900 | + * @queue: RX queue index |
||
2901 | + * Description: this function is called to clear the RX descriptors |
||
2902 | * in case of both basic and extended descriptors are used. |
||
2903 | */ |
||
2904 | -static void stmmac_clear_descriptors(struct stmmac_priv *priv) |
||
2905 | +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) |
||
2906 | { |
||
2907 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
2908 | int i; |
||
2909 | |||
2910 | - /* Clear the Rx/Tx descriptors */ |
||
2911 | + /* Clear the RX descriptors */ |
||
2912 | for (i = 0; i < DMA_RX_SIZE; i++) |
||
2913 | if (priv->extend_desc) |
||
2914 | - priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, |
||
2915 | + priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, |
||
2916 | priv->use_riwt, priv->mode, |
||
2917 | (i == DMA_RX_SIZE - 1)); |
||
2918 | else |
||
2919 | - priv->hw->desc->init_rx_desc(&priv->dma_rx[i], |
||
2920 | + priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], |
||
2921 | priv->use_riwt, priv->mode, |
||
2922 | (i == DMA_RX_SIZE - 1)); |
||
2923 | +} |
||
2924 | + |
||
2925 | +/** |
||
2926 | + * stmmac_clear_tx_descriptors - clear tx descriptors |
||
2927 | + * @priv: driver private structure |
||
2928 | + * @queue: TX queue index. |
||
2929 | + * Description: this function is called to clear the TX descriptors |
||
2930 | + * whether basic or extended descriptors are in use. |
||
2931 | + */ |
||
2932 | +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) |
||
2933 | +{ |
||
2934 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
2935 | + int i; |
||
2936 | + |
||
2937 | + /* Clear the TX descriptors */ |
||
2938 | for (i = 0; i < DMA_TX_SIZE; i++) |
||
2939 | if (priv->extend_desc) |
||
2940 | - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, |
||
2941 | + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, |
||
2942 | priv->mode, |
||
2943 | (i == DMA_TX_SIZE - 1)); |
||
2944 | else |
||
2945 | - priv->hw->desc->init_tx_desc(&priv->dma_tx[i], |
||
2946 | + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], |
||
2947 | priv->mode, |
||
2948 | (i == DMA_TX_SIZE - 1)); |
||
2949 | } |
||
2950 | |||
2951 | /** |
||
2952 | + * stmmac_clear_descriptors - clear descriptors |
||
2953 | + * @priv: driver private structure |
||
2954 | + * Description: this function is called to clear the TX and RX descriptors |
||
2955 | + * whether basic or extended descriptors are in use. |
||
2956 | + */ |
||
2957 | +static void stmmac_clear_descriptors(struct stmmac_priv *priv) |
||
2958 | +{ |
||
2959 | + u32 rx_queue_cnt = priv->plat->rx_queues_to_use; |
||
2960 | + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; |
||
2961 | + u32 queue; |
||
2962 | + |
||
2963 | + /* Clear the RX descriptors */ |
||
2964 | + for (queue = 0; queue < rx_queue_cnt; queue++) |
||
2965 | + stmmac_clear_rx_descriptors(priv, queue); |
||
2966 | + |
||
2967 | + /* Clear the TX descriptors */ |
||
2968 | + for (queue = 0; queue < tx_queue_cnt; queue++) |
||
2969 | + stmmac_clear_tx_descriptors(priv, queue); |
||
2970 | +} |
||
2971 | + |
||
2972 | +/** |
||
2973 | * stmmac_init_rx_buffers - init the RX descriptor buffer. |
||
2974 | * @priv: driver private structure |
||
2975 | * @p: descriptor pointer |
||
2976 | * @i: descriptor index |
||
2977 | - * @flags: gfp flag. |
||
2978 | + * @flags: gfp flag |
||
2979 | + * @queue: RX queue index |
||
2980 | * Description: this function is called to allocate a receive buffer, perform |
||
2981 | * the DMA mapping and init the descriptor. |
||
2982 | */ |
||
2983 | static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, |
||
2984 | - int i, gfp_t flags) |
||
2985 | + int i, gfp_t flags, u32 queue) |
||
2986 | { |
||
2987 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
2988 | struct sk_buff *skb; |
||
2989 | |||
2990 | skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); |
||
2991 | @@ -978,20 +1138,20 @@ static int stmmac_init_rx_buffers(struct |
||
2992 | "%s: Rx init fails; skb is NULL\n", __func__); |
||
2993 | return -ENOMEM; |
||
2994 | } |
||
2995 | - priv->rx_skbuff[i] = skb; |
||
2996 | - priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
||
2997 | + rx_q->rx_skbuff[i] = skb; |
||
2998 | + rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
||
2999 | priv->dma_buf_sz, |
||
3000 | DMA_FROM_DEVICE); |
||
3001 | - if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { |
||
3002 | + if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) { |
||
3003 | netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); |
||
3004 | dev_kfree_skb_any(skb); |
||
3005 | return -EINVAL; |
||
3006 | } |
||
3007 | |||
3008 | if (priv->synopsys_id >= DWMAC_CORE_4_00) |
||
3009 | - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]); |
||
3010 | + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); |
||
3011 | else |
||
3012 | - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]); |
||
3013 | + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); |
||
3014 | |||
3015 | if ((priv->hw->mode->init_desc3) && |
||
3016 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) |
||
3017 | @@ -1000,30 +1160,71 @@ static int stmmac_init_rx_buffers(struct |
||
3018 | return 0; |
||
3019 | } |
||
3020 | |||
3021 | -static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) |
||
3022 | +/** |
||
3023 | + * stmmac_free_rx_buffer - free RX dma buffers |
||
3024 | + * @priv: private structure |
||
3025 | + * @queue: RX queue index |
||
3026 | + * @i: buffer index. |
||
3027 | + */ |
||
3028 | +static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) |
||
3029 | { |
||
3030 | - if (priv->rx_skbuff[i]) { |
||
3031 | - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], |
||
3032 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
3033 | + |
||
3034 | + if (rx_q->rx_skbuff[i]) { |
||
3035 | + dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i], |
||
3036 | priv->dma_buf_sz, DMA_FROM_DEVICE); |
||
3037 | - dev_kfree_skb_any(priv->rx_skbuff[i]); |
||
3038 | + dev_kfree_skb_any(rx_q->rx_skbuff[i]); |
||
3039 | } |
||
3040 | - priv->rx_skbuff[i] = NULL; |
||
3041 | + rx_q->rx_skbuff[i] = NULL; |
||
3042 | } |
||
3043 | |||
3044 | /** |
||
3045 | - * init_dma_desc_rings - init the RX/TX descriptor rings |
||
3046 | + * stmmac_free_tx_buffer - free TX dma buffers |
||
3047 | + * @priv: private structure |
||
3048 | + * @queue: TX queue index |
||
3049 | + * @i: buffer index. |
||
3050 | + */ |
||
3051 | +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) |
||
3052 | +{ |
||
3053 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
3054 | + |
||
3055 | + if (tx_q->tx_skbuff_dma[i].buf) { |
||
3056 | + if (tx_q->tx_skbuff_dma[i].map_as_page) |
||
3057 | + dma_unmap_page(priv->device, |
||
3058 | + tx_q->tx_skbuff_dma[i].buf, |
||
3059 | + tx_q->tx_skbuff_dma[i].len, |
||
3060 | + DMA_TO_DEVICE); |
||
3061 | + else |
||
3062 | + dma_unmap_single(priv->device, |
||
3063 | + tx_q->tx_skbuff_dma[i].buf, |
||
3064 | + tx_q->tx_skbuff_dma[i].len, |
||
3065 | + DMA_TO_DEVICE); |
||
3066 | + } |
||
3067 | + |
||
3068 | + if (tx_q->tx_skbuff[i]) { |
||
3069 | + dev_kfree_skb_any(tx_q->tx_skbuff[i]); |
||
3070 | + tx_q->tx_skbuff[i] = NULL; |
||
3071 | + tx_q->tx_skbuff_dma[i].buf = 0; |
||
3072 | + tx_q->tx_skbuff_dma[i].map_as_page = false; |
||
3073 | + } |
||
3074 | +} |
||
3075 | + |
||
3076 | +/** |
||
3077 | + * init_dma_rx_desc_rings - init the RX descriptor rings |
||
3078 | * @dev: net device structure |
||
3079 | * @flags: gfp flag. |
||
3080 | - * Description: this function initializes the DMA RX/TX descriptors |
||
3081 | + * Description: this function initializes the DMA RX descriptors |
||
3082 | * and allocates the socket buffers. It supports the chained and ring |
||
3083 | * modes. |
||
3084 | */ |
||
3085 | -static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) |
||
3086 | +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) |
||
3087 | { |
||
3088 | - int i; |
||
3089 | struct stmmac_priv *priv = netdev_priv(dev); |
||
3090 | + u32 rx_count = priv->plat->rx_queues_to_use; |
||
3091 | unsigned int bfsize = 0; |
||
3092 | int ret = -ENOMEM; |
||
3093 | + int queue; |
||
3094 | + int i; |
||
3095 | |||
3096 | if (priv->hw->mode->set_16kib_bfsize) |
||
3097 | bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); |
||
3098 | @@ -1033,235 +1234,409 @@ static int init_dma_desc_rings(struct ne |
||
3099 | |||
3100 | priv->dma_buf_sz = bfsize; |
||
3101 | |||
3102 | - netif_dbg(priv, probe, priv->dev, |
||
3103 | - "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", |
||
3104 | - __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy); |
||
3105 | - |
||
3106 | /* RX INITIALIZATION */ |
||
3107 | netif_dbg(priv, probe, priv->dev, |
||
3108 | "SKB addresses:\nskb\t\tskb data\tdma data\n"); |
||
3109 | |||
3110 | - for (i = 0; i < DMA_RX_SIZE; i++) { |
||
3111 | - struct dma_desc *p; |
||
3112 | - if (priv->extend_desc) |
||
3113 | - p = &((priv->dma_erx + i)->basic); |
||
3114 | - else |
||
3115 | - p = priv->dma_rx + i; |
||
3116 | + for (queue = 0; queue < rx_count; queue++) { |
||
3117 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
3118 | |||
3119 | - ret = stmmac_init_rx_buffers(priv, p, i, flags); |
||
3120 | - if (ret) |
||
3121 | - goto err_init_rx_buffers; |
||
3122 | + netif_dbg(priv, probe, priv->dev, |
||
3123 | + "(%s) dma_rx_phy=0x%08x\n", __func__, |
||
3124 | + (u32)rx_q->dma_rx_phy); |
||
3125 | |||
3126 | - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", |
||
3127 | - priv->rx_skbuff[i], priv->rx_skbuff[i]->data, |
||
3128 | - (unsigned int)priv->rx_skbuff_dma[i]); |
||
3129 | + for (i = 0; i < DMA_RX_SIZE; i++) { |
||
3130 | + struct dma_desc *p; |
||
3131 | + |
||
3132 | + if (priv->extend_desc) |
||
3133 | + p = &((rx_q->dma_erx + i)->basic); |
||
3134 | + else |
||
3135 | + p = rx_q->dma_rx + i; |
||
3136 | + |
||
3137 | + ret = stmmac_init_rx_buffers(priv, p, i, flags, |
||
3138 | + queue); |
||
3139 | + if (ret) |
||
3140 | + goto err_init_rx_buffers; |
||
3141 | + |
||
3142 | + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", |
||
3143 | + rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, |
||
3144 | + (unsigned int)rx_q->rx_skbuff_dma[i]); |
||
3145 | + } |
||
3146 | + |
||
3147 | + rx_q->cur_rx = 0; |
||
3148 | + rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); |
||
3149 | + |
||
3150 | + stmmac_clear_rx_descriptors(priv, queue); |
||
3151 | + |
||
3152 | + /* Setup the chained descriptor addresses */ |
||
3153 | + if (priv->mode == STMMAC_CHAIN_MODE) { |
||
3154 | + if (priv->extend_desc) |
||
3155 | + priv->hw->mode->init(rx_q->dma_erx, |
||
3156 | + rx_q->dma_rx_phy, |
||
3157 | + DMA_RX_SIZE, 1); |
||
3158 | + else |
||
3159 | + priv->hw->mode->init(rx_q->dma_rx, |
||
3160 | + rx_q->dma_rx_phy, |
||
3161 | + DMA_RX_SIZE, 0); |
||
3162 | + } |
||
3163 | } |
||
3164 | - priv->cur_rx = 0; |
||
3165 | - priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); |
||
3166 | + |
||
3167 | buf_sz = bfsize; |
||
3168 | |||
3169 | - /* Setup the chained descriptor addresses */ |
||
3170 | - if (priv->mode == STMMAC_CHAIN_MODE) { |
||
3171 | - if (priv->extend_desc) { |
||
3172 | - priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, |
||
3173 | - DMA_RX_SIZE, 1); |
||
3174 | - priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, |
||
3175 | - DMA_TX_SIZE, 1); |
||
3176 | - } else { |
||
3177 | - priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, |
||
3178 | - DMA_RX_SIZE, 0); |
||
3179 | - priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, |
||
3180 | - DMA_TX_SIZE, 0); |
||
3181 | - } |
||
3182 | + return 0; |
||
3183 | + |
||
3184 | +err_init_rx_buffers: |
||
3185 | + while (queue >= 0) { |
||
3186 | + while (--i >= 0) |
||
3187 | + stmmac_free_rx_buffer(priv, queue, i); |
||
3188 | + |
||
3189 | + if (queue == 0) |
||
3190 | + break; |
||
3191 | + |
||
3192 | + i = DMA_RX_SIZE; |
||
3193 | + queue--; |
||
3194 | } |
||
3195 | |||
3196 | - /* TX INITIALIZATION */ |
||
3197 | - for (i = 0; i < DMA_TX_SIZE; i++) { |
||
3198 | - struct dma_desc *p; |
||
3199 | - if (priv->extend_desc) |
||
3200 | - p = &((priv->dma_etx + i)->basic); |
||
3201 | - else |
||
3202 | - p = priv->dma_tx + i; |
||
3203 | + return ret; |
||
3204 | +} |
||
3205 | |||
3206 | - if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
||
3207 | - p->des0 = 0; |
||
3208 | - p->des1 = 0; |
||
3209 | - p->des2 = 0; |
||
3210 | - p->des3 = 0; |
||
3211 | - } else { |
||
3212 | - p->des2 = 0; |
||
3213 | +/** |
||
3214 | + * init_dma_tx_desc_rings - init the TX descriptor rings |
||
3215 | + * @dev: net device structure. |
||
3216 | + * Description: this function initializes the DMA TX descriptors |
||
3217 | + * and allocates the socket buffers. It supports the chained and ring |
||
3218 | + * modes. |
||
3219 | + */ |
||
3220 | +static int init_dma_tx_desc_rings(struct net_device *dev) |
||
3221 | +{ |
||
3222 | + struct stmmac_priv *priv = netdev_priv(dev); |
||
3223 | + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; |
||
3224 | + u32 queue; |
||
3225 | + int i; |
||
3226 | + |
||
3227 | + for (queue = 0; queue < tx_queue_cnt; queue++) { |
||
3228 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
3229 | + |
||
3230 | + netif_dbg(priv, probe, priv->dev, |
||
3231 | + "(%s) dma_tx_phy=0x%08x\n", __func__, |
||
3232 | + (u32)tx_q->dma_tx_phy); |
||
3233 | + |
||
3234 | + /* Setup the chained descriptor addresses */ |
||
3235 | + if (priv->mode == STMMAC_CHAIN_MODE) { |
||
3236 | + if (priv->extend_desc) |
||
3237 | + priv->hw->mode->init(tx_q->dma_etx, |
||
3238 | + tx_q->dma_tx_phy, |
||
3239 | + DMA_TX_SIZE, 1); |
||
3240 | + else |
||
3241 | + priv->hw->mode->init(tx_q->dma_tx, |
||
3242 | + tx_q->dma_tx_phy, |
||
3243 | + DMA_TX_SIZE, 0); |
||
3244 | + } |
||
3245 | + |
||
3246 | + for (i = 0; i < DMA_TX_SIZE; i++) { |
||
3247 | + struct dma_desc *p; |
||
3248 | + if (priv->extend_desc) |
||
3249 | + p = &((tx_q->dma_etx + i)->basic); |
||
3250 | + else |
||
3251 | + p = tx_q->dma_tx + i; |
||
3252 | + |
||
3253 | + if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
||
3254 | + p->des0 = 0; |
||
3255 | + p->des1 = 0; |
||
3256 | + p->des2 = 0; |
||
3257 | + p->des3 = 0; |
||
3258 | + } else { |
||
3259 | + p->des2 = 0; |
||
3260 | + } |
||
3261 | + |
||
3262 | + tx_q->tx_skbuff_dma[i].buf = 0; |
||
3263 | + tx_q->tx_skbuff_dma[i].map_as_page = false; |
||
3264 | + tx_q->tx_skbuff_dma[i].len = 0; |
||
3265 | + tx_q->tx_skbuff_dma[i].last_segment = false; |
||
3266 | + tx_q->tx_skbuff[i] = NULL; |
||
3267 | } |
||
3268 | |||
3269 | - priv->tx_skbuff_dma[i].buf = 0; |
||
3270 | - priv->tx_skbuff_dma[i].map_as_page = false; |
||
3271 | - priv->tx_skbuff_dma[i].len = 0; |
||
3272 | - priv->tx_skbuff_dma[i].last_segment = false; |
||
3273 | - priv->tx_skbuff[i] = NULL; |
||
3274 | + tx_q->dirty_tx = 0; |
||
3275 | + tx_q->cur_tx = 0; |
||
3276 | + |
||
3277 | + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); |
||
3278 | } |
||
3279 | |||
3280 | - priv->dirty_tx = 0; |
||
3281 | - priv->cur_tx = 0; |
||
3282 | - netdev_reset_queue(priv->dev); |
||
3283 | + return 0; |
||
3284 | +} |
||
3285 | + |
||
3286 | +/** |
||
3287 | + * init_dma_desc_rings - init the RX/TX descriptor rings |
||
3288 | + * @dev: net device structure |
||
3289 | + * @flags: gfp flag. |
||
3290 | + * Description: this function initializes the DMA RX/TX descriptors |
||
3291 | + * and allocates the socket buffers. It supports the chained and ring |
||
3292 | + * modes. |
||
3293 | + */ |
||
3294 | +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) |
||
3295 | +{ |
||
3296 | + struct stmmac_priv *priv = netdev_priv(dev); |
||
3297 | + int ret; |
||
3298 | + |
||
3299 | + ret = init_dma_rx_desc_rings(dev, flags); |
||
3300 | + if (ret) |
||
3301 | + return ret; |
||
3302 | + |
||
3303 | + ret = init_dma_tx_desc_rings(dev); |
||
3304 | |||
3305 | stmmac_clear_descriptors(priv); |
||
3306 | |||
3307 | if (netif_msg_hw(priv)) |
||
3308 | stmmac_display_rings(priv); |
||
3309 | |||
3310 | - return 0; |
||
3311 | -err_init_rx_buffers: |
||
3312 | - while (--i >= 0) |
||
3313 | - stmmac_free_rx_buffers(priv, i); |
||
3314 | return ret; |
||
3315 | } |
||
3316 | |||
3317 | -static void dma_free_rx_skbufs(struct stmmac_priv *priv) |
||
3318 | +/** |
||
3319 | + * dma_free_rx_skbufs - free RX dma buffers |
||
3320 | + * @priv: private structure |
||
3321 | + * @queue: RX queue index |
||
3322 | + */ |
||
3323 | +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) |
||
3324 | { |
||
3325 | int i; |
||
3326 | |||
3327 | for (i = 0; i < DMA_RX_SIZE; i++) |
||
3328 | - stmmac_free_rx_buffers(priv, i); |
||
3329 | + stmmac_free_rx_buffer(priv, queue, i); |
||
3330 | } |
||
3331 | |||
3332 | -static void dma_free_tx_skbufs(struct stmmac_priv *priv) |
||
3333 | +/** |
||
3334 | + * dma_free_tx_skbufs - free TX dma buffers |
||
3335 | + * @priv: private structure |
||
3336 | + * @queue: TX queue index |
||
3337 | + */ |
||
3338 | +static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) |
||
3339 | { |
||
3340 | int i; |
||
3341 | |||
3342 | - for (i = 0; i < DMA_TX_SIZE; i++) { |
||
3343 | - if (priv->tx_skbuff_dma[i].buf) { |
||
3344 | - if (priv->tx_skbuff_dma[i].map_as_page) |
||
3345 | - dma_unmap_page(priv->device, |
||
3346 | - priv->tx_skbuff_dma[i].buf, |
||
3347 | - priv->tx_skbuff_dma[i].len, |
||
3348 | - DMA_TO_DEVICE); |
||
3349 | - else |
||
3350 | - dma_unmap_single(priv->device, |
||
3351 | - priv->tx_skbuff_dma[i].buf, |
||
3352 | - priv->tx_skbuff_dma[i].len, |
||
3353 | - DMA_TO_DEVICE); |
||
3354 | - } |
||
3355 | + for (i = 0; i < DMA_TX_SIZE; i++) |
||
3356 | + stmmac_free_tx_buffer(priv, queue, i); |
||
3357 | +} |
||
3358 | |||
3359 | - if (priv->tx_skbuff[i]) { |
||
3360 | - dev_kfree_skb_any(priv->tx_skbuff[i]); |
||
3361 | - priv->tx_skbuff[i] = NULL; |
||
3362 | - priv->tx_skbuff_dma[i].buf = 0; |
||
3363 | - priv->tx_skbuff_dma[i].map_as_page = false; |
||
3364 | - } |
||
3365 | +/** |
||
3366 | + * free_dma_rx_desc_resources - free RX dma desc resources |
||
3367 | + * @priv: private structure |
||
3368 | + */ |
||
3369 | +static void free_dma_rx_desc_resources(struct stmmac_priv *priv) |
||
3370 | +{ |
||
3371 | + u32 rx_count = priv->plat->rx_queues_to_use; |
||
3372 | + u32 queue; |
||
3373 | + |
||
3374 | + /* Free RX queue resources */ |
||
3375 | + for (queue = 0; queue < rx_count; queue++) { |
||
3376 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
3377 | + |
||
3378 | + /* Release the DMA RX socket buffers */ |
||
3379 | + dma_free_rx_skbufs(priv, queue); |
||
3380 | + |
||
3381 | + /* Free DMA regions of consistent memory previously allocated */ |
||
3382 | + if (!priv->extend_desc) |
||
3383 | + dma_free_coherent(priv->device, |
||
3384 | + DMA_RX_SIZE * sizeof(struct dma_desc), |
||
3385 | + rx_q->dma_rx, rx_q->dma_rx_phy); |
||
3386 | + else |
||
3387 | + dma_free_coherent(priv->device, DMA_RX_SIZE * |
||
3388 | + sizeof(struct dma_extended_desc), |
||
3389 | + rx_q->dma_erx, rx_q->dma_rx_phy); |
||
3390 | + |
||
3391 | + kfree(rx_q->rx_skbuff_dma); |
||
3392 | + kfree(rx_q->rx_skbuff); |
||
3393 | } |
||
3394 | } |
||
3395 | |||
3396 | /** |
||
3397 | - * alloc_dma_desc_resources - alloc TX/RX resources. |
||
3398 | + * free_dma_tx_desc_resources - free TX dma desc resources |
||
3399 | + * @priv: private structure |
||
3400 | + */ |
||
3401 | +static void free_dma_tx_desc_resources(struct stmmac_priv *priv) |
||
3402 | +{ |
||
3403 | + u32 tx_count = priv->plat->tx_queues_to_use; |
||
3404 | + u32 queue = 0; |
||
3405 | + |
||
3406 | + /* Free TX queue resources */ |
||
3407 | + for (queue = 0; queue < tx_count; queue++) { |
||
3408 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
3409 | + |
||
3410 | + /* Release the DMA TX socket buffers */ |
||
3411 | + dma_free_tx_skbufs(priv, queue); |
||
3412 | + |
||
3413 | + /* Free DMA regions of consistent memory previously allocated */ |
||
3414 | + if (!priv->extend_desc) |
||
3415 | + dma_free_coherent(priv->device, |
||
3416 | + DMA_TX_SIZE * sizeof(struct dma_desc), |
||
3417 | + tx_q->dma_tx, tx_q->dma_tx_phy); |
||
3418 | + else |
||
3419 | + dma_free_coherent(priv->device, DMA_TX_SIZE * |
||
3420 | + sizeof(struct dma_extended_desc), |
||
3421 | + tx_q->dma_etx, tx_q->dma_tx_phy); |
||
3422 | + |
||
3423 | + kfree(tx_q->tx_skbuff_dma); |
||
3424 | + kfree(tx_q->tx_skbuff); |
||
3425 | + } |
||
3426 | +} |
||
3427 | + |
||
3428 | +/** |
||
3429 | + * alloc_dma_rx_desc_resources - alloc RX resources. |
||
3430 | * @priv: private structure |
||
3431 | * Description: according to which descriptor can be used (extend or basic) |
||
3432 | * this function allocates the resources for TX and RX paths. In case of |
||
3433 | * reception, for example, it pre-allocated the RX socket buffer in order to |
||
3434 | * allow zero-copy mechanism. |
||
3435 | */ |
||
3436 | -static int alloc_dma_desc_resources(struct stmmac_priv *priv) |
||
3437 | +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) |
||
3438 | { |
||
3439 | + u32 rx_count = priv->plat->rx_queues_to_use; |
||
3440 | int ret = -ENOMEM; |
||
3441 | + u32 queue; |
||
3442 | |||
3443 | - priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), |
||
3444 | - GFP_KERNEL); |
||
3445 | - if (!priv->rx_skbuff_dma) |
||
3446 | - return -ENOMEM; |
||
3447 | + /* RX queues buffers and DMA */ |
||
3448 | + for (queue = 0; queue < rx_count; queue++) { |
||
3449 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
3450 | |||
3451 | - priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), |
||
3452 | - GFP_KERNEL); |
||
3453 | - if (!priv->rx_skbuff) |
||
3454 | - goto err_rx_skbuff; |
||
3455 | - |
||
3456 | - priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, |
||
3457 | - sizeof(*priv->tx_skbuff_dma), |
||
3458 | - GFP_KERNEL); |
||
3459 | - if (!priv->tx_skbuff_dma) |
||
3460 | - goto err_tx_skbuff_dma; |
||
3461 | - |
||
3462 | - priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), |
||
3463 | - GFP_KERNEL); |
||
3464 | - if (!priv->tx_skbuff) |
||
3465 | - goto err_tx_skbuff; |
||
3466 | - |
||
3467 | - if (priv->extend_desc) { |
||
3468 | - priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * |
||
3469 | - sizeof(struct |
||
3470 | - dma_extended_desc), |
||
3471 | - &priv->dma_rx_phy, |
||
3472 | - GFP_KERNEL); |
||
3473 | - if (!priv->dma_erx) |
||
3474 | - goto err_dma; |
||
3475 | + rx_q->queue_index = queue; |
||
3476 | + rx_q->priv_data = priv; |
||
3477 | |||
3478 | - priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * |
||
3479 | - sizeof(struct |
||
3480 | - dma_extended_desc), |
||
3481 | - &priv->dma_tx_phy, |
||
3482 | + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, |
||
3483 | + sizeof(dma_addr_t), |
||
3484 | GFP_KERNEL); |
||
3485 | - if (!priv->dma_etx) { |
||
3486 | - dma_free_coherent(priv->device, DMA_RX_SIZE * |
||
3487 | - sizeof(struct dma_extended_desc), |
||
3488 | - priv->dma_erx, priv->dma_rx_phy); |
||
3489 | - goto err_dma; |
||
3490 | - } |
||
3491 | - } else { |
||
3492 | - priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * |
||
3493 | - sizeof(struct dma_desc), |
||
3494 | - &priv->dma_rx_phy, |
||
3495 | - GFP_KERNEL); |
||
3496 | - if (!priv->dma_rx) |
||
3497 | - goto err_dma; |
||
3498 | + if (!rx_q->rx_skbuff_dma) |
||
3499 | + return -ENOMEM; |
||
3500 | |||
3501 | - priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * |
||
3502 | - sizeof(struct dma_desc), |
||
3503 | - &priv->dma_tx_phy, |
||
3504 | - GFP_KERNEL); |
||
3505 | - if (!priv->dma_tx) { |
||
3506 | - dma_free_coherent(priv->device, DMA_RX_SIZE * |
||
3507 | - sizeof(struct dma_desc), |
||
3508 | - priv->dma_rx, priv->dma_rx_phy); |
||
3509 | + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, |
||
3510 | + sizeof(struct sk_buff *), |
||
3511 | + GFP_KERNEL); |
||
3512 | + if (!rx_q->rx_skbuff) |
||
3513 | goto err_dma; |
||
3514 | + |
||
3515 | + if (priv->extend_desc) { |
||
3516 | + rx_q->dma_erx = dma_zalloc_coherent(priv->device, |
||
3517 | + DMA_RX_SIZE * |
||
3518 | + sizeof(struct |
||
3519 | + dma_extended_desc), |
||
3520 | + &rx_q->dma_rx_phy, |
||
3521 | + GFP_KERNEL); |
||
3522 | + if (!rx_q->dma_erx) |
||
3523 | + goto err_dma; |
||
3524 | + |
||
3525 | + } else { |
||
3526 | + rx_q->dma_rx = dma_zalloc_coherent(priv->device, |
||
3527 | + DMA_RX_SIZE * |
||
3528 | + sizeof(struct |
||
3529 | + dma_desc), |
||
3530 | + &rx_q->dma_rx_phy, |
||
3531 | + GFP_KERNEL); |
||
3532 | + if (!rx_q->dma_rx) |
||
3533 | + goto err_dma; |
||
3534 | } |
||
3535 | } |
||
3536 | |||
3537 | return 0; |
||
3538 | |||
3539 | err_dma: |
||
3540 | - kfree(priv->tx_skbuff); |
||
3541 | -err_tx_skbuff: |
||
3542 | - kfree(priv->tx_skbuff_dma); |
||
3543 | -err_tx_skbuff_dma: |
||
3544 | - kfree(priv->rx_skbuff); |
||
3545 | -err_rx_skbuff: |
||
3546 | - kfree(priv->rx_skbuff_dma); |
||
3547 | + free_dma_rx_desc_resources(priv); |
||
3548 | + |
||
3549 | + return ret; |
||
3550 | +} |
||
3551 | + |
||
3552 | +/** |
||
3553 | + * alloc_dma_tx_desc_resources - alloc TX resources. |
||
3554 | + * @priv: private structure |
||
3555 | + * Description: according to which descriptor can be used (extend or basic) |
||
3556 | + * this function allocates the resources for the TX path: the descriptor |
||
3557 | + * rings and the per-entry bookkeeping (skb pointers and DMA mapping state) |
||
3558 | + * used while transmitting and reclaiming buffers. |
||
3559 | + */ |
||
3560 | +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) |
||
3561 | +{ |
||
3562 | + u32 tx_count = priv->plat->tx_queues_to_use; |
||
3563 | + int ret = -ENOMEM; |
||
3564 | + u32 queue; |
||
3565 | + |
||
3566 | + /* TX queues buffers and DMA */ |
||
3567 | + for (queue = 0; queue < tx_count; queue++) { |
||
3568 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
3569 | + |
||
3570 | + tx_q->queue_index = queue; |
||
3571 | + tx_q->priv_data = priv; |
||
3572 | + |
||
3573 | + tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, |
||
3574 | + sizeof(*tx_q->tx_skbuff_dma), |
||
3575 | + GFP_KERNEL); |
||
3576 | + if (!tx_q->tx_skbuff_dma) |
||
3577 | + return -ENOMEM; |
||
3578 | + |
||
3579 | + tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, |
||
3580 | + sizeof(struct sk_buff *), |
||
3581 | + GFP_KERNEL); |
||
3582 | + if (!tx_q->tx_skbuff) |
||
3583 | + goto err_dma_buffers; |
||
3584 | + |
||
3585 | + if (priv->extend_desc) { |
||
3586 | + tx_q->dma_etx = dma_zalloc_coherent(priv->device, |
||
3587 | + DMA_TX_SIZE * |
||
3588 | + sizeof(struct |
||
3589 | + dma_extended_desc), |
||
3590 | + &tx_q->dma_tx_phy, |
||
3591 | + GFP_KERNEL); |
||
3592 | + if (!tx_q->dma_etx) |
||
3593 | + goto err_dma_buffers; |
||
3594 | + } else { |
||
3595 | + tx_q->dma_tx = dma_zalloc_coherent(priv->device, |
||
3596 | + DMA_TX_SIZE * |
||
3597 | + sizeof(struct |
||
3598 | + dma_desc), |
||
3599 | + &tx_q->dma_tx_phy, |
||
3600 | + GFP_KERNEL); |
||
3601 | + if (!tx_q->dma_tx) |
||
3602 | + goto err_dma_buffers; |
||
3603 | + } |
||
3604 | + } |
||
3605 | + |
||
3606 | + return 0; |
||
3607 | + |
||
3608 | +err_dma_buffers: |
||
3609 | + free_dma_tx_desc_resources(priv); |
||
3610 | + |
||
3611 | + return ret; |
||
3612 | +} |
||
3613 | + |
||
3614 | +/** |
||
3615 | + * alloc_dma_desc_resources - alloc TX/RX resources. |
||
3616 | + * @priv: private structure |
||
3617 | + * Description: according to which descriptor can be used (extend or basic) |
||
3618 | + * this function allocates the resources for TX and RX paths. In case of |
||
3619 | + * reception, for example, it pre-allocated the RX socket buffer in order to |
||
3620 | + * allow zero-copy mechanism. |
||
3621 | + */ |
||
3622 | +static int alloc_dma_desc_resources(struct stmmac_priv *priv) |
||
3623 | +{ |
||
3624 | + /* RX Allocation */ |
||
3625 | + int ret = alloc_dma_rx_desc_resources(priv); |
||
3626 | + |
||
3627 | + if (ret) |
||
3628 | + return ret; |
||
3629 | + |
||
3630 | + ret = alloc_dma_tx_desc_resources(priv); |
||
3631 | + |
||
3632 | return ret; |
||
3633 | } |
||
3634 | |||
3635 | +/** |
||
3636 | + * free_dma_desc_resources - free dma desc resources |
||
3637 | + * @priv: private structure |
||
3638 | + */ |
||
3639 | static void free_dma_desc_resources(struct stmmac_priv *priv) |
||
3640 | { |
||
3641 | - /* Release the DMA TX/RX socket buffers */ |
||
3642 | - dma_free_rx_skbufs(priv); |
||
3643 | - dma_free_tx_skbufs(priv); |
||
3644 | - |
||
3645 | - /* Free DMA regions of consistent memory previously allocated */ |
||
3646 | - if (!priv->extend_desc) { |
||
3647 | - dma_free_coherent(priv->device, |
||
3648 | - DMA_TX_SIZE * sizeof(struct dma_desc), |
||
3649 | - priv->dma_tx, priv->dma_tx_phy); |
||
3650 | - dma_free_coherent(priv->device, |
||
3651 | - DMA_RX_SIZE * sizeof(struct dma_desc), |
||
3652 | - priv->dma_rx, priv->dma_rx_phy); |
||
3653 | - } else { |
||
3654 | - dma_free_coherent(priv->device, DMA_TX_SIZE * |
||
3655 | - sizeof(struct dma_extended_desc), |
||
3656 | - priv->dma_etx, priv->dma_tx_phy); |
||
3657 | - dma_free_coherent(priv->device, DMA_RX_SIZE * |
||
3658 | - sizeof(struct dma_extended_desc), |
||
3659 | - priv->dma_erx, priv->dma_rx_phy); |
||
3660 | - } |
||
3661 | - kfree(priv->rx_skbuff_dma); |
||
3662 | - kfree(priv->rx_skbuff); |
||
3663 | - kfree(priv->tx_skbuff_dma); |
||
3664 | - kfree(priv->tx_skbuff); |
||
3665 | + /* Release the DMA RX socket buffers */ |
||
3666 | + free_dma_rx_desc_resources(priv); |
||
3667 | + |
||
3668 | + /* Release the DMA TX socket buffers */ |
||
3669 | + free_dma_tx_desc_resources(priv); |
||
3670 | } |
||
3671 | |||
3672 | /** |
||
3673 | @@ -1271,19 +1646,104 @@ static void free_dma_desc_resources(stru |
||
3674 | */ |
||
3675 | static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) |
||
3676 | { |
||
3677 | - int rx_count = priv->dma_cap.number_rx_queues; |
||
3678 | - int queue = 0; |
||
3679 | + u32 rx_queues_count = priv->plat->rx_queues_to_use; |
||
3680 | + int queue; |
||
3681 | + u8 mode; |
||
3682 | |||
3683 | - /* If GMAC does not have multiple queues, then this is not necessary*/ |
||
3684 | - if (rx_count == 1) |
||
3685 | - return; |
||
3686 | + for (queue = 0; queue < rx_queues_count; queue++) { |
||
3687 | + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; |
||
3688 | + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); |
||
3689 | + } |
||
3690 | +} |
||
3691 | |||
3692 | - /** |
||
3693 | - * If the core is synthesized with multiple rx queues / multiple |
||
3694 | - * dma channels, then rx queues will be disabled by default. |
||
3695 | - * For now only rx queue 0 is enabled. |
||
3696 | - */ |
||
3697 | - priv->hw->mac->rx_queue_enable(priv->hw, queue); |
||
3698 | +/** |
||
3699 | + * stmmac_start_rx_dma - start RX DMA channel |
||
3700 | + * @priv: driver private structure |
||
3701 | + * @chan: RX channel index |
||
3702 | + * Description: |
||
3703 | + * This starts a RX DMA channel |
||
3704 | + */ |
||
3705 | +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) |
||
3706 | +{ |
||
3707 | + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); |
||
3708 | + priv->hw->dma->start_rx(priv->ioaddr, chan); |
||
3709 | +} |
||
3710 | + |
||
3711 | +/** |
||
3712 | + * stmmac_start_tx_dma - start TX DMA channel |
||
3713 | + * @priv: driver private structure |
||
3714 | + * @chan: TX channel index |
||
3715 | + * Description: |
||
3716 | + * This starts a TX DMA channel |
||
3717 | + */ |
||
3718 | +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) |
||
3719 | +{ |
||
3720 | + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); |
||
3721 | + priv->hw->dma->start_tx(priv->ioaddr, chan); |
||
3722 | +} |
||
3723 | + |
||
3724 | +/** |
||
3725 | + * stmmac_stop_rx_dma - stop RX DMA channel |
||
3726 | + * @priv: driver private structure |
||
3727 | + * @chan: RX channel index |
||
3728 | + * Description: |
||
3729 | + * This stops a RX DMA channel |
||
3730 | + */ |
||
3731 | +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) |
||
3732 | +{ |
||
3733 | + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); |
||
3734 | + priv->hw->dma->stop_rx(priv->ioaddr, chan); |
||
3735 | +} |
||
3736 | + |
||
3737 | +/** |
||
3738 | + * stmmac_stop_tx_dma - stop TX DMA channel |
||
3739 | + * @priv: driver private structure |
||
3740 | + * @chan: TX channel index |
||
3741 | + * Description: |
||
3742 | + * This stops a TX DMA channel |
||
3743 | + */ |
||
3744 | +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) |
||
3745 | +{ |
||
3746 | + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); |
||
3747 | + priv->hw->dma->stop_tx(priv->ioaddr, chan); |
||
3748 | +} |
||
3749 | + |
||
3750 | +/** |
||
3751 | + * stmmac_start_all_dma - start all RX and TX DMA channels |
||
3752 | + * @priv: driver private structure |
||
3753 | + * Description: |
||
3754 | + * This starts all the RX and TX DMA channels |
||
3755 | + */ |
||
3756 | +static void stmmac_start_all_dma(struct stmmac_priv *priv) |
||
3757 | +{ |
||
3758 | + u32 rx_channels_count = priv->plat->rx_queues_to_use; |
||
3759 | + u32 tx_channels_count = priv->plat->tx_queues_to_use; |
||
3760 | + u32 chan = 0; |
||
3761 | + |
||
3762 | + for (chan = 0; chan < rx_channels_count; chan++) |
||
3763 | + stmmac_start_rx_dma(priv, chan); |
||
3764 | + |
||
3765 | + for (chan = 0; chan < tx_channels_count; chan++) |
||
3766 | + stmmac_start_tx_dma(priv, chan); |
||
3767 | +} |
||
3768 | + |
||
3769 | +/** |
||
3770 | + * stmmac_stop_all_dma - stop all RX and TX DMA channels |
||
3771 | + * @priv: driver private structure |
||
3772 | + * Description: |
||
3773 | + * This stops the RX and TX DMA channels |
||
3774 | + */ |
||
3775 | +static void stmmac_stop_all_dma(struct stmmac_priv *priv) |
||
3776 | +{ |
||
3777 | + u32 rx_channels_count = priv->plat->rx_queues_to_use; |
||
3778 | + u32 tx_channels_count = priv->plat->tx_queues_to_use; |
||
3779 | + u32 chan = 0; |
||
3780 | + |
||
3781 | + for (chan = 0; chan < rx_channels_count; chan++) |
||
3782 | + stmmac_stop_rx_dma(priv, chan); |
||
3783 | + |
||
3784 | + for (chan = 0; chan < tx_channels_count; chan++) |
||
3785 | + stmmac_stop_tx_dma(priv, chan); |
||
3786 | } |
||
3787 | |||
3788 | /** |
||
3789 | @@ -1294,11 +1754,20 @@ static void stmmac_mac_enable_rx_queues( |
||
3790 | */ |
||
3791 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) |
||
3792 | { |
||
3793 | + u32 rx_channels_count = priv->plat->rx_queues_to_use; |
||
3794 | + u32 tx_channels_count = priv->plat->tx_queues_to_use; |
||
3795 | int rxfifosz = priv->plat->rx_fifo_size; |
||
3796 | - |
||
3797 | - if (priv->plat->force_thresh_dma_mode) |
||
3798 | - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); |
||
3799 | - else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { |
||
3800 | + u32 txmode = 0; |
||
3801 | + u32 rxmode = 0; |
||
3802 | + u32 chan = 0; |
||
3803 | + |
||
3804 | + if (rxfifosz == 0) |
||
3805 | + rxfifosz = priv->dma_cap.rx_fifo_size; |
||
3806 | + |
||
3807 | + if (priv->plat->force_thresh_dma_mode) { |
||
3808 | + txmode = tc; |
||
3809 | + rxmode = tc; |
||
3810 | + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { |
||
3811 | /* |
||
3812 | * In case of GMAC, SF mode can be enabled |
||
3813 | * to perform the TX COE in HW. This depends on: |
||
3814 | @@ -1306,37 +1775,53 @@ static void stmmac_dma_operation_mode(st |
||
3815 | * 2) There is no bugged Jumbo frame support |
||
3816 | * that needs to not insert csum in the TDES. |
||
3817 | */ |
||
3818 | - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, |
||
3819 | - rxfifosz); |
||
3820 | + txmode = SF_DMA_MODE; |
||
3821 | + rxmode = SF_DMA_MODE; |
||
3822 | priv->xstats.threshold = SF_DMA_MODE; |
||
3823 | - } else |
||
3824 | - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, |
||
3825 | + } else { |
||
3826 | + txmode = tc; |
||
3827 | + rxmode = SF_DMA_MODE; |
||
3828 | + } |
||
3829 | + |
||
3830 | + /* configure all channels */ |
||
3831 | + if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
||
3832 | + for (chan = 0; chan < rx_channels_count; chan++) |
||
3833 | + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, |
||
3834 | + rxfifosz); |
||
3835 | + |
||
3836 | + for (chan = 0; chan < tx_channels_count; chan++) |
||
3837 | + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); |
||
3838 | + } else { |
||
3839 | + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, |
||
3840 | rxfifosz); |
||
3841 | + } |
||
3842 | } |
||
3843 | |||
3844 | /** |
||
3845 | * stmmac_tx_clean - to manage the transmission completion |
||
3846 | * @priv: driver private structure |
||
3847 | + * @queue: TX queue index |
||
3848 | * Description: it reclaims the transmit resources after transmission completes. |
||
3849 | */ |
||
3850 | -static void stmmac_tx_clean(struct stmmac_priv *priv) |
||
3851 | +static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) |
||
3852 | { |
||
3853 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
3854 | unsigned int bytes_compl = 0, pkts_compl = 0; |
||
3855 | - unsigned int entry = priv->dirty_tx; |
||
3856 | + unsigned int entry = tx_q->dirty_tx; |
||
3857 | |||
3858 | netif_tx_lock(priv->dev); |
||
3859 | |||
3860 | priv->xstats.tx_clean++; |
||
3861 | |||
3862 | - while (entry != priv->cur_tx) { |
||
3863 | - struct sk_buff *skb = priv->tx_skbuff[entry]; |
||
3864 | + while (entry != tx_q->cur_tx) { |
||
3865 | + struct sk_buff *skb = tx_q->tx_skbuff[entry]; |
||
3866 | struct dma_desc *p; |
||
3867 | int status; |
||
3868 | |||
3869 | if (priv->extend_desc) |
||
3870 | - p = (struct dma_desc *)(priv->dma_etx + entry); |
||
3871 | + p = (struct dma_desc *)(tx_q->dma_etx + entry); |
||
3872 | else |
||
3873 | - p = priv->dma_tx + entry; |
||
3874 | + p = tx_q->dma_tx + entry; |
||
3875 | |||
3876 | status = priv->hw->desc->tx_status(&priv->dev->stats, |
||
3877 | &priv->xstats, p, |
||
3878 | @@ -1362,48 +1847,51 @@ static void stmmac_tx_clean(struct stmma |
||
3879 | stmmac_get_tx_hwtstamp(priv, p, skb); |
||
3880 | } |
||
3881 | |||
3882 | - if (likely(priv->tx_skbuff_dma[entry].buf)) { |
||
3883 | - if (priv->tx_skbuff_dma[entry].map_as_page) |
||
3884 | + if (likely(tx_q->tx_skbuff_dma[entry].buf)) { |
||
3885 | + if (tx_q->tx_skbuff_dma[entry].map_as_page) |
||
3886 | dma_unmap_page(priv->device, |
||
3887 | - priv->tx_skbuff_dma[entry].buf, |
||
3888 | - priv->tx_skbuff_dma[entry].len, |
||
3889 | + tx_q->tx_skbuff_dma[entry].buf, |
||
3890 | + tx_q->tx_skbuff_dma[entry].len, |
||
3891 | DMA_TO_DEVICE); |
||
3892 | else |
||
3893 | dma_unmap_single(priv->device, |
||
3894 | - priv->tx_skbuff_dma[entry].buf, |
||
3895 | - priv->tx_skbuff_dma[entry].len, |
||
3896 | + tx_q->tx_skbuff_dma[entry].buf, |
||
3897 | + tx_q->tx_skbuff_dma[entry].len, |
||
3898 | DMA_TO_DEVICE); |
||
3899 | - priv->tx_skbuff_dma[entry].buf = 0; |
||
3900 | - priv->tx_skbuff_dma[entry].len = 0; |
||
3901 | - priv->tx_skbuff_dma[entry].map_as_page = false; |
||
3902 | + tx_q->tx_skbuff_dma[entry].buf = 0; |
||
3903 | + tx_q->tx_skbuff_dma[entry].len = 0; |
||
3904 | + tx_q->tx_skbuff_dma[entry].map_as_page = false; |
||
3905 | } |
||
3906 | |||
3907 | if (priv->hw->mode->clean_desc3) |
||
3908 | - priv->hw->mode->clean_desc3(priv, p); |
||
3909 | + priv->hw->mode->clean_desc3(tx_q, p); |
||
3910 | |||
3911 | - priv->tx_skbuff_dma[entry].last_segment = false; |
||
3912 | - priv->tx_skbuff_dma[entry].is_jumbo = false; |
||
3913 | + tx_q->tx_skbuff_dma[entry].last_segment = false; |
||
3914 | + tx_q->tx_skbuff_dma[entry].is_jumbo = false; |
||
3915 | |||
3916 | if (likely(skb != NULL)) { |
||
3917 | pkts_compl++; |
||
3918 | bytes_compl += skb->len; |
||
3919 | dev_consume_skb_any(skb); |
||
3920 | - priv->tx_skbuff[entry] = NULL; |
||
3921 | + tx_q->tx_skbuff[entry] = NULL; |
||
3922 | } |
||
3923 | |||
3924 | priv->hw->desc->release_tx_desc(p, priv->mode); |
||
3925 | |||
3926 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
||
3927 | } |
||
3928 | - priv->dirty_tx = entry; |
||
3929 | + tx_q->dirty_tx = entry; |
||
3930 | + |
||
3931 | + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), |
||
3932 | + pkts_compl, bytes_compl); |
||
3933 | |||
3934 | - netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); |
||
3935 | + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, |
||
3936 | + queue))) && |
||
3937 | + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { |
||
3938 | |||
3939 | - if (unlikely(netif_queue_stopped(priv->dev) && |
||
3940 | - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { |
||
3941 | netif_dbg(priv, tx_done, priv->dev, |
||
3942 | "%s: restart transmit\n", __func__); |
||
3943 | - netif_wake_queue(priv->dev); |
||
3944 | + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); |
||
3945 | } |
||
3946 | |||
3947 | if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { |
||
3948 | @@ -1413,45 +1901,76 @@ static void stmmac_tx_clean(struct stmma |
||
3949 | netif_tx_unlock(priv->dev); |
||
3950 | } |
||
3951 | |||
3952 | -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) |
||
3953 | +static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) |
||
3954 | { |
||
3955 | - priv->hw->dma->enable_dma_irq(priv->ioaddr); |
||
3956 | + priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); |
||
3957 | } |
||
3958 | |||
3959 | -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) |
||
3960 | +static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) |
||
3961 | { |
||
3962 | - priv->hw->dma->disable_dma_irq(priv->ioaddr); |
||
3963 | + priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); |
||
3964 | } |
||
3965 | |||
3966 | /** |
||
3967 | * stmmac_tx_err - to manage the tx error |
||
3968 | * @priv: driver private structure |
||
3969 | + * @chan: channel index |
||
3970 | * Description: it cleans the descriptors and restarts the transmission |
||
3971 | * in case of transmission errors. |
||
3972 | */ |
||
3973 | -static void stmmac_tx_err(struct stmmac_priv *priv) |
||
3974 | +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) |
||
3975 | { |
||
3976 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
||
3977 | int i; |
||
3978 | - netif_stop_queue(priv->dev); |
||
3979 | |||
3980 | - priv->hw->dma->stop_tx(priv->ioaddr); |
||
3981 | - dma_free_tx_skbufs(priv); |
||
3982 | + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); |
||
3983 | + |
||
3984 | + stmmac_stop_tx_dma(priv, chan); |
||
3985 | + dma_free_tx_skbufs(priv, chan); |
||
3986 | for (i = 0; i < DMA_TX_SIZE; i++) |
||
3987 | if (priv->extend_desc) |
||
3988 | - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, |
||
3989 | + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, |
||
3990 | priv->mode, |
||
3991 | (i == DMA_TX_SIZE - 1)); |
||
3992 | else |
||
3993 | - priv->hw->desc->init_tx_desc(&priv->dma_tx[i], |
||
3994 | + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], |
||
3995 | priv->mode, |
||
3996 | (i == DMA_TX_SIZE - 1)); |
||
3997 | - priv->dirty_tx = 0; |
||
3998 | - priv->cur_tx = 0; |
||
3999 | - netdev_reset_queue(priv->dev); |
||
4000 | - priv->hw->dma->start_tx(priv->ioaddr); |
||
4001 | + tx_q->dirty_tx = 0; |
||
4002 | + tx_q->cur_tx = 0; |
||
4003 | + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); |
||
4004 | + stmmac_start_tx_dma(priv, chan); |
||
4005 | |||
4006 | priv->dev->stats.tx_errors++; |
||
4007 | - netif_wake_queue(priv->dev); |
||
4008 | + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); |
||
4009 | +} |
||
4010 | + |
||
4011 | +/** |
||
4012 | + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel |
||
4013 | + * @priv: driver private structure |
||
4014 | + * @txmode: TX operating mode |
||
4015 | + * @rxmode: RX operating mode |
||
4016 | + * @chan: channel index |
||
4017 | + * Description: it is used for configuring of the DMA operation mode in |
||
4018 | + * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward |
||
4019 | + * mode. |
||
4020 | + */ |
||
4021 | +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, |
||
4022 | + u32 rxmode, u32 chan) |
||
4023 | +{ |
||
4024 | + int rxfifosz = priv->plat->rx_fifo_size; |
||
4025 | + |
||
4026 | + if (rxfifosz == 0) |
||
4027 | + rxfifosz = priv->dma_cap.rx_fifo_size; |
||
4028 | + |
||
4029 | + if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
||
4030 | + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, |
||
4031 | + rxfifosz); |
||
4032 | + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); |
||
4033 | + } else { |
||
4034 | + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, |
||
4035 | + rxfifosz); |
||
4036 | + } |
||
4037 | } |
||
4038 | |||
4039 | /** |
||
4040 | @@ -1463,31 +1982,43 @@ static void stmmac_tx_err(struct stmmac_ |
||
4041 | */ |
||
4042 | static void stmmac_dma_interrupt(struct stmmac_priv *priv) |
||
4043 | { |
||
4044 | + u32 tx_channel_count = priv->plat->tx_queues_to_use; |
||
4045 | int status; |
||
4046 | - int rxfifosz = priv->plat->rx_fifo_size; |
||
4047 | + u32 chan; |
||
4048 | + |
||
4049 | + for (chan = 0; chan < tx_channel_count; chan++) { |
||
4050 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; |
||
4051 | |||
4052 | - status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); |
||
4053 | - if (likely((status & handle_rx)) || (status & handle_tx)) { |
||
4054 | - if (likely(napi_schedule_prep(&priv->napi))) { |
||
4055 | - stmmac_disable_dma_irq(priv); |
||
4056 | - __napi_schedule(&priv->napi); |
||
4057 | + status = priv->hw->dma->dma_interrupt(priv->ioaddr, |
||
4058 | + &priv->xstats, chan); |
||
4059 | + if (likely((status & handle_rx)) || (status & handle_tx)) { |
||
4060 | + if (likely(napi_schedule_prep(&rx_q->napi))) { |
||
4061 | + stmmac_disable_dma_irq(priv, chan); |
||
4062 | + __napi_schedule(&rx_q->napi); |
||
4063 | + } |
||
4064 | } |
||
4065 | - } |
||
4066 | - if (unlikely(status & tx_hard_error_bump_tc)) { |
||
4067 | - /* Try to bump up the dma threshold on this failure */ |
||
4068 | - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && |
||
4069 | - (tc <= 256)) { |
||
4070 | - tc += 64; |
||
4071 | - if (priv->plat->force_thresh_dma_mode) |
||
4072 | - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, |
||
4073 | - rxfifosz); |
||
4074 | - else |
||
4075 | - priv->hw->dma->dma_mode(priv->ioaddr, tc, |
||
4076 | - SF_DMA_MODE, rxfifosz); |
||
4077 | - priv->xstats.threshold = tc; |
||
4078 | + |
||
4079 | + if (unlikely(status & tx_hard_error_bump_tc)) { |
||
4080 | + /* Try to bump up the dma threshold on this failure */ |
||
4081 | + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && |
||
4082 | + (tc <= 256)) { |
||
4083 | + tc += 64; |
||
4084 | + if (priv->plat->force_thresh_dma_mode) |
||
4085 | + stmmac_set_dma_operation_mode(priv, |
||
4086 | + tc, |
||
4087 | + tc, |
||
4088 | + chan); |
||
4089 | + else |
||
4090 | + stmmac_set_dma_operation_mode(priv, |
||
4091 | + tc, |
||
4092 | + SF_DMA_MODE, |
||
4093 | + chan); |
||
4094 | + priv->xstats.threshold = tc; |
||
4095 | + } |
||
4096 | + } else if (unlikely(status == tx_hard_error)) { |
||
4097 | + stmmac_tx_err(priv, chan); |
||
4098 | } |
||
4099 | - } else if (unlikely(status == tx_hard_error)) |
||
4100 | - stmmac_tx_err(priv); |
||
4101 | + } |
||
4102 | } |
||
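The tx_hard_error_bump_tc branch above keeps the old back-off policy but now applies it per channel. A worked model of the bump sequence, assuming the threshold starts at 64 and treating SF_DMA_MODE as a simple marker value for this sketch:

    #include <stdio.h>

    #define SF_DMA_MODE 1    /* placeholder marker for store-and-forward mode */

    int main(void)
    {
        int threshold = 64;    /* anything other than SF_DMA_MODE allows bumping */
        int tc = 64;
        int event;

        /* Each tx_hard_error_bump_tc event adds 64 until tc passes 256. */
        for (event = 1; event <= 6; event++) {
            if (threshold != SF_DMA_MODE && tc <= 256) {
                tc += 64;
                threshold = tc;
                printf("event %d: threshold bumped to %d\n", event, tc);
            } else {
                printf("event %d: threshold stays at %d\n", event, tc);
            }
        }
        return 0;
    }

So the threshold climbs 128, 192, 256, 320 and then stops being bumped.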
4103 | |||
4104 | /** |
||
4105 | @@ -1594,6 +2125,13 @@ static void stmmac_check_ether_addr(stru |
||
4106 | */ |
||
4107 | static int stmmac_init_dma_engine(struct stmmac_priv *priv) |
||
4108 | { |
||
4109 | + u32 rx_channels_count = priv->plat->rx_queues_to_use; |
||
4110 | + u32 tx_channels_count = priv->plat->tx_queues_to_use; |
||
4111 | + struct stmmac_rx_queue *rx_q; |
||
4112 | + struct stmmac_tx_queue *tx_q; |
||
4113 | + u32 dummy_dma_rx_phy = 0; |
||
4114 | + u32 dummy_dma_tx_phy = 0; |
||
4115 | + u32 chan = 0; |
||
4116 | int atds = 0; |
||
4117 | int ret = 0; |
||
4118 | |||
4119 | @@ -1611,19 +2149,49 @@ static int stmmac_init_dma_engine(struct |
||
4120 | return ret; |
||
4121 | } |
||
4122 | |||
4123 | - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, |
||
4124 | - priv->dma_tx_phy, priv->dma_rx_phy, atds); |
||
4125 | - |
||
4126 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
||
4127 | - priv->rx_tail_addr = priv->dma_rx_phy + |
||
4128 | - (DMA_RX_SIZE * sizeof(struct dma_desc)); |
||
4129 | - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, |
||
4130 | - STMMAC_CHAN0); |
||
4131 | - |
||
4132 | - priv->tx_tail_addr = priv->dma_tx_phy + |
||
4133 | - (DMA_TX_SIZE * sizeof(struct dma_desc)); |
||
4134 | - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, |
||
4135 | - STMMAC_CHAN0); |
||
4136 | + /* DMA Configuration */ |
||
4137 | + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, |
||
4138 | + dummy_dma_tx_phy, dummy_dma_rx_phy, atds); |
||
4139 | + |
||
4140 | + /* DMA RX Channel Configuration */ |
||
4141 | + for (chan = 0; chan < rx_channels_count; chan++) { |
||
4142 | + rx_q = &priv->rx_queue[chan]; |
||
4143 | + |
||
4144 | + priv->hw->dma->init_rx_chan(priv->ioaddr, |
||
4145 | + priv->plat->dma_cfg, |
||
4146 | + rx_q->dma_rx_phy, chan); |
||
4147 | + |
||
4148 | + rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
||
4149 | + (DMA_RX_SIZE * sizeof(struct dma_desc)); |
||
4150 | + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, |
||
4151 | + rx_q->rx_tail_addr, |
||
4152 | + chan); |
||
4153 | + } |
||
4154 | + |
||
4155 | + /* DMA TX Channel Configuration */ |
||
4156 | + for (chan = 0; chan < tx_channels_count; chan++) { |
||
4157 | + tx_q = &priv->tx_queue[chan]; |
||
4158 | + |
||
4159 | + priv->hw->dma->init_chan(priv->ioaddr, |
||
4160 | + priv->plat->dma_cfg, |
||
4161 | + chan); |
||
4162 | + |
||
4163 | + priv->hw->dma->init_tx_chan(priv->ioaddr, |
||
4164 | + priv->plat->dma_cfg, |
||
4165 | + tx_q->dma_tx_phy, chan); |
||
4166 | + |
||
4167 | + tx_q->tx_tail_addr = tx_q->dma_tx_phy + |
||
4168 | + (DMA_TX_SIZE * sizeof(struct dma_desc)); |
||
4169 | + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, |
||
4170 | + tx_q->tx_tail_addr, |
||
4171 | + chan); |
||
4172 | + } |
||
4173 | + } else { |
||
4174 | + rx_q = &priv->rx_queue[chan]; |
||
4175 | + tx_q = &priv->tx_queue[chan]; |
||
4176 | + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, |
||
4177 | + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); |
||
4178 | } |
||
4179 | |||
4180 | if (priv->plat->axi && priv->hw->dma->axi) |
||
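In the GMAC4 path above each channel owns its own ring, and the tail pointer written to the hardware is simply the bus address just past the last descriptor of that ring. A sketch of the arithmetic, with a placeholder ring size and a minimal four-word descriptor layout (the real driver takes both from its own headers):

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_RX_SIZE 512            /* placeholder ring size */

    struct dma_desc {                  /* minimal four-word descriptor layout */
        uint32_t des0, des1, des2, des3;
    };

    int main(void)
    {
        uint32_t dma_rx_phy = 0x80000000u;   /* example bus address of the ring */
        uint32_t rx_tail_addr = dma_rx_phy +
                                DMA_RX_SIZE * sizeof(struct dma_desc);

        printf("ring base %#x -> tail pointer %#x\n",
               (unsigned)dma_rx_phy, (unsigned)rx_tail_addr);
        return 0;
    }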
4181 | @@ -1641,8 +2209,12 @@ static int stmmac_init_dma_engine(struct |
||
4182 | static void stmmac_tx_timer(unsigned long data) |
||
4183 | { |
||
4184 | struct stmmac_priv *priv = (struct stmmac_priv *)data; |
||
4185 | + u32 tx_queues_count = priv->plat->tx_queues_to_use; |
||
4186 | + u32 queue; |
||
4187 | |||
4188 | - stmmac_tx_clean(priv); |
||
4189 | + /* let's scan all the tx queues */ |
||
4190 | + for (queue = 0; queue < tx_queues_count; queue++) |
||
4191 | + stmmac_tx_clean(priv, queue); |
||
4192 | } |
||
4193 | |||
4194 | /** |
||
4195 | @@ -1664,6 +2236,196 @@ static void stmmac_init_tx_coalesce(stru |
||
4196 | add_timer(&priv->txtimer); |
||
4197 | } |
||
4198 | |||
4199 | +static void stmmac_set_rings_length(struct stmmac_priv *priv) |
||
4200 | +{ |
||
4201 | + u32 rx_channels_count = priv->plat->rx_queues_to_use; |
||
4202 | + u32 tx_channels_count = priv->plat->tx_queues_to_use; |
||
4203 | + u32 chan; |
||
4204 | + |
||
4205 | + /* set TX ring length */ |
||
4206 | + if (priv->hw->dma->set_tx_ring_len) { |
||
4207 | + for (chan = 0; chan < tx_channels_count; chan++) |
||
4208 | + priv->hw->dma->set_tx_ring_len(priv->ioaddr, |
||
4209 | + (DMA_TX_SIZE - 1), chan); |
||
4210 | + } |
||
4211 | + |
||
4212 | + /* set RX ring length */ |
||
4213 | + if (priv->hw->dma->set_rx_ring_len) { |
||
4214 | + for (chan = 0; chan < rx_channels_count; chan++) |
||
4215 | + priv->hw->dma->set_rx_ring_len(priv->ioaddr, |
||
4216 | + (DMA_RX_SIZE - 1), chan); |
||
4217 | + } |
||
4218 | +} |
||
4219 | + |
||
4220 | +/** |
||
4221 | + * stmmac_set_tx_queue_weight - Set TX queue weight |
||
4222 | + * @priv: driver private structure |
||
4223 | + * Description: It is used for setting the TX queue weights |
||
4224 | + */ |
||
4225 | +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) |
||
4226 | +{ |
||
4227 | + u32 tx_queues_count = priv->plat->tx_queues_to_use; |
||
4228 | + u32 weight; |
||
4229 | + u32 queue; |
||
4230 | + |
||
4231 | + for (queue = 0; queue < tx_queues_count; queue++) { |
||
4232 | + weight = priv->plat->tx_queues_cfg[queue].weight; |
||
4233 | + priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); |
||
4234 | + } |
||
4235 | +} |
||
4236 | + |
||
4237 | +/** |
||
4238 | + * stmmac_configure_cbs - Configure CBS in TX queue |
||
4239 | + * @priv: driver private structure |
||
4240 | + * Description: It is used for configuring CBS in AVB TX queues |
||
4241 | + */ |
||
4242 | +static void stmmac_configure_cbs(struct stmmac_priv *priv) |
||
4243 | +{ |
||
4244 | + u32 tx_queues_count = priv->plat->tx_queues_to_use; |
||
4245 | + u32 mode_to_use; |
||
4246 | + u32 queue; |
||
4247 | + |
||
4248 | + /* queue 0 is reserved for legacy traffic */ |
||
4249 | + for (queue = 1; queue < tx_queues_count; queue++) { |
||
4250 | + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; |
||
4251 | + if (mode_to_use == MTL_QUEUE_DCB) |
||
4252 | + continue; |
||
4253 | + |
||
4254 | + priv->hw->mac->config_cbs(priv->hw, |
||
4255 | + priv->plat->tx_queues_cfg[queue].send_slope, |
||
4256 | + priv->plat->tx_queues_cfg[queue].idle_slope, |
||
4257 | + priv->plat->tx_queues_cfg[queue].high_credit, |
||
4258 | + priv->plat->tx_queues_cfg[queue].low_credit, |
||
4259 | + queue); |
||
4260 | + } |
||
4261 | +} |
||
4262 | + |
||
4263 | +/** |
||
4264 | + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel |
||
4265 | + * @priv: driver private structure |
||
4266 | + * Description: It is used for mapping RX queues to RX dma channels |
||
4267 | + */ |
||
4268 | +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) |
||
4269 | +{ |
||
4270 | + u32 rx_queues_count = priv->plat->rx_queues_to_use; |
||
4271 | + u32 queue; |
||
4272 | + u32 chan; |
||
4273 | + |
||
4274 | + for (queue = 0; queue < rx_queues_count; queue++) { |
||
4275 | + chan = priv->plat->rx_queues_cfg[queue].chan; |
||
4276 | + priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); |
||
4277 | + } |
||
4278 | +} |
||
4279 | + |
||
4280 | +/** |
||
4281 | + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority |
||
4282 | + * @priv: driver private structure |
||
4283 | + * Description: It is used for configuring the RX Queue Priority |
||
4284 | + */ |
||
4285 | +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) |
||
4286 | +{ |
||
4287 | + u32 rx_queues_count = priv->plat->rx_queues_to_use; |
||
4288 | + u32 queue; |
||
4289 | + u32 prio; |
||
4290 | + |
||
4291 | + for (queue = 0; queue < rx_queues_count; queue++) { |
||
4292 | + if (!priv->plat->rx_queues_cfg[queue].use_prio) |
||
4293 | + continue; |
||
4294 | + |
||
4295 | + prio = priv->plat->rx_queues_cfg[queue].prio; |
||
4296 | + priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); |
||
4297 | + } |
||
4298 | +} |
||
4299 | + |
||
4300 | +/** |
||
4301 | + * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority |
||
4302 | + * @priv: driver private structure |
||
4303 | + * Description: It is used for configuring the TX Queue Priority |
||
4304 | + */ |
||
4305 | +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) |
||
4306 | +{ |
||
4307 | + u32 tx_queues_count = priv->plat->tx_queues_to_use; |
||
4308 | + u32 queue; |
||
4309 | + u32 prio; |
||
4310 | + |
||
4311 | + for (queue = 0; queue < tx_queues_count; queue++) { |
||
4312 | + if (!priv->plat->tx_queues_cfg[queue].use_prio) |
||
4313 | + continue; |
||
4314 | + |
||
4315 | + prio = priv->plat->tx_queues_cfg[queue].prio; |
||
4316 | + priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); |
||
4317 | + } |
||
4318 | +} |
||
4319 | + |
||
4320 | +/** |
||
4321 | + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing |
||
4322 | + * @priv: driver private structure |
||
4323 | + * Description: It is used for configuring the RX queue routing |
||
4324 | + */ |
||
4325 | +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) |
||
4326 | +{ |
||
4327 | + u32 rx_queues_count = priv->plat->rx_queues_to_use; |
||
4328 | + u32 queue; |
||
4329 | + u8 packet; |
||
4330 | + |
||
4331 | + for (queue = 0; queue < rx_queues_count; queue++) { |
||
4332 | + /* no specific packet type routing specified for the queue */ |
||
4333 | + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) |
||
4334 | + continue; |
||
4335 | + |
||
4336 | + packet = priv->plat->rx_queues_cfg[queue].pkt_route; |
||
4337 | + priv->hw->mac->rx_queue_prio(priv->hw, packet, queue); |
||
4338 | + } |
||
4339 | +} |
||
4340 | + |
||
4341 | +/** |
||
4342 | + * stmmac_mtl_configuration - Configure MTL |
||
4343 | + * @priv: driver private structure |
||
4344 | + * Description: It is used for configuring MTL |
||
4345 | + */ |
||
4346 | +static void stmmac_mtl_configuration(struct stmmac_priv *priv) |
||
4347 | +{ |
||
4348 | + u32 rx_queues_count = priv->plat->rx_queues_to_use; |
||
4349 | + u32 tx_queues_count = priv->plat->tx_queues_to_use; |
||
4350 | + |
||
4351 | + if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) |
||
4352 | + stmmac_set_tx_queue_weight(priv); |
||
4353 | + |
||
4354 | + /* Configure MTL RX algorithms */ |
||
4355 | + if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) |
||
4356 | + priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, |
||
4357 | + priv->plat->rx_sched_algorithm); |
||
4358 | + |
||
4359 | + /* Configure MTL TX algorithms */ |
||
4360 | + if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) |
||
4361 | + priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, |
||
4362 | + priv->plat->tx_sched_algorithm); |
||
4363 | + |
||
4364 | + /* Configure CBS in AVB TX queues */ |
||
4365 | + if (tx_queues_count > 1 && priv->hw->mac->config_cbs) |
||
4366 | + stmmac_configure_cbs(priv); |
||
4367 | + |
||
4368 | + /* Map RX MTL to DMA channels */ |
||
4369 | + if (priv->hw->mac->map_mtl_to_dma) |
||
4370 | + stmmac_rx_queue_dma_chan_map(priv); |
||
4371 | + |
||
4372 | + /* Enable MAC RX Queues */ |
||
4373 | + if (priv->hw->mac->rx_queue_enable) |
||
4374 | + stmmac_mac_enable_rx_queues(priv); |
||
4375 | + |
||
4376 | + /* Set RX priorities */ |
||
4377 | + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) |
||
4378 | + stmmac_mac_config_rx_queues_prio(priv); |
||
4379 | + |
||
4380 | + /* Set TX priorities */ |
||
4381 | + if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) |
||
4382 | + stmmac_mac_config_tx_queues_prio(priv); |
||
4383 | + |
||
4384 | + /* Set RX routing */ |
||
4385 | + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) |
||
4386 | + stmmac_mac_config_rx_queues_routing(priv); |
||
4387 | +} |
||
4388 | + |
||
4389 | /** |
||
4390 | * stmmac_hw_setup - setup mac in a usable state. |
||
4391 | * @dev : pointer to the device structure. |
||
4392 | @@ -1679,6 +2441,9 @@ static void stmmac_init_tx_coalesce(stru |
||
4393 | static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) |
||
4394 | { |
||
4395 | struct stmmac_priv *priv = netdev_priv(dev); |
||
4396 | + u32 rx_cnt = priv->plat->rx_queues_to_use; |
||
4397 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
4398 | + u32 chan; |
||
4399 | int ret; |
||
4400 | |||
4401 | /* DMA initialization and SW reset */ |
||
4402 | @@ -1708,9 +2473,9 @@ static int stmmac_hw_setup(struct net_de |
||
4403 | /* Initialize the MAC Core */ |
||
4404 | priv->hw->mac->core_init(priv->hw, dev->mtu); |
||
4405 | |||
4406 | - /* Initialize MAC RX Queues */ |
||
4407 | - if (priv->hw->mac->rx_queue_enable) |
||
4408 | - stmmac_mac_enable_rx_queues(priv); |
||
4409 | + /* Initialize MTL */ |
||
4410 | + if (priv->synopsys_id >= DWMAC_CORE_4_00) |
||
4411 | + stmmac_mtl_configuration(priv); |
||
4412 | |||
4413 | ret = priv->hw->mac->rx_ipc(priv->hw); |
||
4414 | if (!ret) { |
||
4415 | @@ -1720,10 +2485,7 @@ static int stmmac_hw_setup(struct net_de |
||
4416 | } |
||
4417 | |||
4418 | /* Enable the MAC Rx/Tx */ |
||
4419 | - if (priv->synopsys_id >= DWMAC_CORE_4_00) |
||
4420 | - stmmac_dwmac4_set_mac(priv->ioaddr, true); |
||
4421 | - else |
||
4422 | - stmmac_set_mac(priv->ioaddr, true); |
||
4423 | + priv->hw->mac->set_mac(priv->ioaddr, true); |
||
4424 | |||
4425 | /* Set the HW DMA mode and the COE */ |
||
4426 | stmmac_dma_operation_mode(priv); |
||
4427 | @@ -1731,6 +2493,10 @@ static int stmmac_hw_setup(struct net_de |
||
4428 | stmmac_mmc_setup(priv); |
||
4429 | |||
4430 | if (init_ptp) { |
||
4431 | + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
||
4432 | + if (ret < 0) |
||
4433 | + netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); |
||
4434 | + |
||
4435 | ret = stmmac_init_ptp(priv); |
||
4436 | if (ret == -EOPNOTSUPP) |
||
4437 | netdev_warn(priv->dev, "PTP not supported by HW\n"); |
||
4438 | @@ -1745,35 +2511,37 @@ static int stmmac_hw_setup(struct net_de |
||
4439 | __func__); |
||
4440 | #endif |
||
4441 | /* Start the ball rolling... */ |
||
4442 | - netdev_dbg(priv->dev, "DMA RX/TX processes started...\n"); |
||
4443 | - priv->hw->dma->start_tx(priv->ioaddr); |
||
4444 | - priv->hw->dma->start_rx(priv->ioaddr); |
||
4445 | + stmmac_start_all_dma(priv); |
||
4446 | |||
4447 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
||
4448 | |||
4449 | if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { |
||
4450 | priv->rx_riwt = MAX_DMA_RIWT; |
||
4451 | - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); |
||
4452 | + priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); |
||
4453 | } |
||
4454 | |||
4455 | if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) |
||
4456 | priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); |
||
4457 | |||
4458 | - /* set TX ring length */ |
||
4459 | - if (priv->hw->dma->set_tx_ring_len) |
||
4460 | - priv->hw->dma->set_tx_ring_len(priv->ioaddr, |
||
4461 | - (DMA_TX_SIZE - 1)); |
||
4462 | - /* set RX ring length */ |
||
4463 | - if (priv->hw->dma->set_rx_ring_len) |
||
4464 | - priv->hw->dma->set_rx_ring_len(priv->ioaddr, |
||
4465 | - (DMA_RX_SIZE - 1)); |
||
4466 | + /* set TX and RX rings length */ |
||
4467 | + stmmac_set_rings_length(priv); |
||
4468 | + |
||
4469 | /* Enable TSO */ |
||
4470 | - if (priv->tso) |
||
4471 | - priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0); |
||
4472 | + if (priv->tso) { |
||
4473 | + for (chan = 0; chan < tx_cnt; chan++) |
||
4474 | + priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); |
||
4475 | + } |
||
4476 | |||
4477 | return 0; |
||
4478 | } |
||
4479 | |||
4480 | +static void stmmac_hw_teardown(struct net_device *dev) |
||
4481 | +{ |
||
4482 | + struct stmmac_priv *priv = netdev_priv(dev); |
||
4483 | + |
||
4484 | + clk_disable_unprepare(priv->plat->clk_ptp_ref); |
||
4485 | +} |
||
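stmmac_hw_teardown() exists to balance the clk_prepare_enable() that stmmac_hw_setup() now performs for the PTP reference clock, so the open() error path can unwind it. A toy illustration of that pairing, with a fake refcount standing in for the clk framework:

    #include <stdio.h>

    struct clk { int enable_count; };    /* fake stand-in for the clk framework */

    static int clk_prepare_enable(struct clk *c)     { c->enable_count++; return 0; }
    static void clk_disable_unprepare(struct clk *c) { c->enable_count--; }

    int main(void)
    {
        struct clk ptp_ref = { 0 };

        clk_prepare_enable(&ptp_ref);      /* done in stmmac_hw_setup() when init_ptp */
        /* ... request_irq() fails ... */
        clk_disable_unprepare(&ptp_ref);   /* stmmac_hw_teardown() on the error path */

        printf("ptp_ref enable count back to %d\n", ptp_ref.enable_count);
        return 0;
    }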
4486 | + |
||
4487 | /** |
||
4488 | * stmmac_open - open entry point of the driver |
||
4489 | * @dev : pointer to the device structure. |
||
4490 | @@ -1842,7 +2610,7 @@ static int stmmac_open(struct net_device |
||
4491 | netdev_err(priv->dev, |
||
4492 | "%s: ERROR: allocating the IRQ %d (error: %d)\n", |
||
4493 | __func__, dev->irq, ret); |
||
4494 | - goto init_error; |
||
4495 | + goto irq_error; |
||
4496 | } |
||
4497 | |||
4498 | /* Request the Wake IRQ in case of another line is used for WoL */ |
||
4499 | @@ -1869,8 +2637,8 @@ static int stmmac_open(struct net_device |
||
4500 | } |
||
4501 | } |
||
4502 | |||
4503 | - napi_enable(&priv->napi); |
||
4504 | - netif_start_queue(dev); |
||
4505 | + stmmac_enable_all_queues(priv); |
||
4506 | + stmmac_start_all_queues(priv); |
||
4507 | |||
4508 | return 0; |
||
4509 | |||
4510 | @@ -1879,7 +2647,12 @@ lpiirq_error: |
||
4511 | free_irq(priv->wol_irq, dev); |
||
4512 | wolirq_error: |
||
4513 | free_irq(dev->irq, dev); |
||
4514 | +irq_error: |
||
4515 | + if (dev->phydev) |
||
4516 | + phy_stop(dev->phydev); |
||
4517 | |||
4518 | + del_timer_sync(&priv->txtimer); |
||
4519 | + stmmac_hw_teardown(dev); |
||
4520 | init_error: |
||
4521 | free_dma_desc_resources(priv); |
||
4522 | dma_desc_error: |
||
4523 | @@ -1908,9 +2681,9 @@ static int stmmac_release(struct net_dev |
||
4524 | phy_disconnect(dev->phydev); |
||
4525 | } |
||
4526 | |||
4527 | - netif_stop_queue(dev); |
||
4528 | + stmmac_stop_all_queues(priv); |
||
4529 | |||
4530 | - napi_disable(&priv->napi); |
||
4531 | + stmmac_disable_all_queues(priv); |
||
4532 | |||
4533 | del_timer_sync(&priv->txtimer); |
||
4534 | |||
4535 | @@ -1922,14 +2695,13 @@ static int stmmac_release(struct net_dev |
||
4536 | free_irq(priv->lpi_irq, dev); |
||
4537 | |||
4538 | /* Stop TX/RX DMA and clear the descriptors */ |
||
4539 | - priv->hw->dma->stop_tx(priv->ioaddr); |
||
4540 | - priv->hw->dma->stop_rx(priv->ioaddr); |
||
4541 | + stmmac_stop_all_dma(priv); |
||
4542 | |||
4543 | /* Release and free the Rx/Tx resources */ |
||
4544 | free_dma_desc_resources(priv); |
||
4545 | |||
4546 | /* Disable the MAC Rx/Tx */ |
||
4547 | - stmmac_set_mac(priv->ioaddr, false); |
||
4548 | + priv->hw->mac->set_mac(priv->ioaddr, false); |
||
4549 | |||
4550 | netif_carrier_off(dev); |
||
4551 | |||
4552 | @@ -1948,22 +2720,24 @@ static int stmmac_release(struct net_dev |
||
4553 | * @des: buffer start address |
||
4554 | * @total_len: total length to fill in descriptors |
||
4555 | * @last_segmant: condition for the last descriptor |
||
4556 | + * @queue: TX queue index |
||
4557 | * Description: |
||
4558 | * This function fills descriptor and request new descriptors according to |
||
4559 | * buffer length to fill |
||
4560 | */ |
||
4561 | static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, |
||
4562 | - int total_len, bool last_segment) |
||
4563 | + int total_len, bool last_segment, u32 queue) |
||
4564 | { |
||
4565 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
4566 | struct dma_desc *desc; |
||
4567 | - int tmp_len; |
||
4568 | u32 buff_size; |
||
4569 | + int tmp_len; |
||
4570 | |||
4571 | tmp_len = total_len; |
||
4572 | |||
4573 | while (tmp_len > 0) { |
||
4574 | - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); |
||
4575 | - desc = priv->dma_tx + priv->cur_tx; |
||
4576 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
||
4577 | + desc = tx_q->dma_tx + tx_q->cur_tx; |
||
4578 | |||
4579 | desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); |
||
4580 | buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? |
||
4581 | @@ -1971,7 +2745,7 @@ static void stmmac_tso_allocator(struct |
||
4582 | |||
4583 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, |
||
4584 | 0, 1, |
||
4585 | - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), |
||
4586 | + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
||
4587 | 0, 0); |
||
4588 | |||
4589 | tmp_len -= TSO_MAX_BUFF_SIZE; |
||
4590 | @@ -2007,23 +2781,28 @@ static void stmmac_tso_allocator(struct |
||
4591 | */ |
||
4592 | static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) |
||
4593 | { |
||
4594 | - u32 pay_len, mss; |
||
4595 | - int tmp_pay_len = 0; |
||
4596 | + struct dma_desc *desc, *first, *mss_desc = NULL; |
||
4597 | struct stmmac_priv *priv = netdev_priv(dev); |
||
4598 | int nfrags = skb_shinfo(skb)->nr_frags; |
||
4599 | + u32 queue = skb_get_queue_mapping(skb); |
||
4600 | unsigned int first_entry, des; |
||
4601 | - struct dma_desc *desc, *first, *mss_desc = NULL; |
||
4602 | + struct stmmac_tx_queue *tx_q; |
||
4603 | + int tmp_pay_len = 0; |
||
4604 | + u32 pay_len, mss; |
||
4605 | u8 proto_hdr_len; |
||
4606 | int i; |
||
4607 | |||
4608 | + tx_q = &priv->tx_queue[queue]; |
||
4609 | + |
||
4610 | /* Compute header lengths */ |
||
4611 | proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
||
4612 | |||
4613 | /* Desc availability based on threshold should be enough safe */ |
||
4614 | - if (unlikely(stmmac_tx_avail(priv) < |
||
4615 | + if (unlikely(stmmac_tx_avail(priv, queue) < |
||
4616 | (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { |
||
4617 | - if (!netif_queue_stopped(dev)) { |
||
4618 | - netif_stop_queue(dev); |
||
4619 | + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { |
||
4620 | + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, |
||
4621 | + queue)); |
||
4622 | /* This is a hard error, log it. */ |
||
4623 | netdev_err(priv->dev, |
||
4624 | "%s: Tx Ring full when queue awake\n", |
||
4625 | @@ -2038,10 +2817,10 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4626 | |||
4627 | /* set new MSS value if needed */ |
||
4628 | if (mss != priv->mss) { |
||
4629 | - mss_desc = priv->dma_tx + priv->cur_tx; |
||
4630 | + mss_desc = tx_q->dma_tx + tx_q->cur_tx; |
||
4631 | priv->hw->desc->set_mss(mss_desc, mss); |
||
4632 | priv->mss = mss; |
||
4633 | - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); |
||
4634 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
||
4635 | } |
||
4636 | |||
4637 | if (netif_msg_tx_queued(priv)) { |
||
4638 | @@ -2051,9 +2830,9 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4639 | skb->data_len); |
||
4640 | } |
||
4641 | |||
4642 | - first_entry = priv->cur_tx; |
||
4643 | + first_entry = tx_q->cur_tx; |
||
4644 | |||
4645 | - desc = priv->dma_tx + first_entry; |
||
4646 | + desc = tx_q->dma_tx + first_entry; |
||
4647 | first = desc; |
||
4648 | |||
4649 | /* first descriptor: fill Headers on Buf1 */ |
||
4650 | @@ -2062,9 +2841,8 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4651 | if (dma_mapping_error(priv->device, des)) |
||
4652 | goto dma_map_err; |
||
4653 | |||
4654 | - priv->tx_skbuff_dma[first_entry].buf = des; |
||
4655 | - priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
||
4656 | - priv->tx_skbuff[first_entry] = skb; |
||
4657 | + tx_q->tx_skbuff_dma[first_entry].buf = des; |
||
4658 | + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
||
4659 | |||
4660 | first->des0 = cpu_to_le32(des); |
||
4661 | |||
4662 | @@ -2075,7 +2853,7 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4663 | /* If needed take extra descriptors to fill the remaining payload */ |
||
4664 | tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; |
||
4665 | |||
4666 | - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0)); |
||
4667 | + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); |
||
4668 | |||
4669 | /* Prepare fragments */ |
||
4670 | for (i = 0; i < nfrags; i++) { |
||
4671 | @@ -2084,24 +2862,34 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4672 | des = skb_frag_dma_map(priv->device, frag, 0, |
||
4673 | skb_frag_size(frag), |
||
4674 | DMA_TO_DEVICE); |
||
4675 | + if (dma_mapping_error(priv->device, des)) |
||
4676 | + goto dma_map_err; |
||
4677 | |||
4678 | stmmac_tso_allocator(priv, des, skb_frag_size(frag), |
||
4679 | - (i == nfrags - 1)); |
||
4680 | + (i == nfrags - 1), queue); |
||
4681 | |||
4682 | - priv->tx_skbuff_dma[priv->cur_tx].buf = des; |
||
4683 | - priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag); |
||
4684 | - priv->tx_skbuff[priv->cur_tx] = NULL; |
||
4685 | - priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true; |
||
4686 | + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; |
||
4687 | + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); |
||
4688 | + tx_q->tx_skbuff[tx_q->cur_tx] = NULL; |
||
4689 | + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; |
||
4690 | } |
||
4691 | |||
4692 | - priv->tx_skbuff_dma[priv->cur_tx].last_segment = true; |
||
4693 | + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; |
||
4694 | + |
||
4695 | + /* Only the last descriptor gets to point to the skb. */ |
||
4696 | + tx_q->tx_skbuff[tx_q->cur_tx] = skb; |
||
4697 | |||
4698 | - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); |
||
4699 | + /* We've used all descriptors we need for this skb, however, |
||
4700 | + * advance cur_tx so that it references a fresh descriptor. |
||
4701 | + * ndo_start_xmit will fill this descriptor the next time it's |
||
4702 | + * called and stmmac_tx_clean may clean up to this descriptor. |
||
4703 | + */ |
||
4704 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
||
4705 | |||
4706 | - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { |
||
4707 | + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
||
4708 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
||
4709 | __func__); |
||
4710 | - netif_stop_queue(dev); |
||
4711 | + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
||
4712 | } |
||
4713 | |||
4714 | dev->stats.tx_bytes += skb->len; |
||
4715 | @@ -2133,7 +2921,7 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4716 | priv->hw->desc->prepare_tso_tx_desc(first, 1, |
||
4717 | proto_hdr_len, |
||
4718 | pay_len, |
||
4719 | - 1, priv->tx_skbuff_dma[first_entry].last_segment, |
||
4720 | + 1, tx_q->tx_skbuff_dma[first_entry].last_segment, |
||
4721 | tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); |
||
4722 | |||
4723 | /* If context desc is used to change MSS */ |
||
4724 | @@ -2155,20 +2943,20 @@ static netdev_tx_t stmmac_tso_xmit(struc |
||
4725 | |||
4726 | if (netif_msg_pktdata(priv)) { |
||
4727 | pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", |
||
4728 | - __func__, priv->cur_tx, priv->dirty_tx, first_entry, |
||
4729 | - priv->cur_tx, first, nfrags); |
||
4730 | + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
||
4731 | + tx_q->cur_tx, first, nfrags); |
||
4732 | |||
4733 | - priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE, |
||
4734 | + priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, |
||
4735 | 0); |
||
4736 | |||
4737 | pr_info(">>> frame to be transmitted: "); |
||
4738 | print_pkt(skb->data, skb_headlen(skb)); |
||
4739 | } |
||
4740 | |||
4741 | - netdev_sent_queue(dev, skb->len); |
||
4742 | + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
||
4743 | |||
4744 | - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, |
||
4745 | - STMMAC_CHAN0); |
||
4746 | + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, |
||
4747 | + queue); |
||
4748 | |||
4749 | return NETDEV_TX_OK; |
||
4750 | |||
4751 | @@ -2192,21 +2980,27 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4752 | struct stmmac_priv *priv = netdev_priv(dev); |
||
4753 | unsigned int nopaged_len = skb_headlen(skb); |
||
4754 | int i, csum_insertion = 0, is_jumbo = 0; |
||
4755 | + u32 queue = skb_get_queue_mapping(skb); |
||
4756 | int nfrags = skb_shinfo(skb)->nr_frags; |
||
4757 | - unsigned int entry, first_entry; |
||
4758 | + int entry; |
||
4759 | + unsigned int first_entry; |
||
4760 | struct dma_desc *desc, *first; |
||
4761 | + struct stmmac_tx_queue *tx_q; |
||
4762 | unsigned int enh_desc; |
||
4763 | unsigned int des; |
||
4764 | |||
4765 | + tx_q = &priv->tx_queue[queue]; |
||
4766 | + |
||
4767 | /* Manage oversized TCP frames for GMAC4 device */ |
||
4768 | if (skb_is_gso(skb) && priv->tso) { |
||
4769 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
||
4770 | return stmmac_tso_xmit(skb, dev); |
||
4771 | } |
||
4772 | |||
4773 | - if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { |
||
4774 | - if (!netif_queue_stopped(dev)) { |
||
4775 | - netif_stop_queue(dev); |
||
4776 | + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
||
4777 | + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { |
||
4778 | + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, |
||
4779 | + queue)); |
||
4780 | /* This is a hard error, log it. */ |
||
4781 | netdev_err(priv->dev, |
||
4782 | "%s: Tx Ring full when queue awake\n", |
||
4783 | @@ -2218,20 +3012,18 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4784 | if (priv->tx_path_in_lpi_mode) |
||
4785 | stmmac_disable_eee_mode(priv); |
||
4786 | |||
4787 | - entry = priv->cur_tx; |
||
4788 | + entry = tx_q->cur_tx; |
||
4789 | first_entry = entry; |
||
4790 | |||
4791 | csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); |
||
4792 | |||
4793 | if (likely(priv->extend_desc)) |
||
4794 | - desc = (struct dma_desc *)(priv->dma_etx + entry); |
||
4795 | + desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
||
4796 | else |
||
4797 | - desc = priv->dma_tx + entry; |
||
4798 | + desc = tx_q->dma_tx + entry; |
||
4799 | |||
4800 | first = desc; |
||
4801 | |||
4802 | - priv->tx_skbuff[first_entry] = skb; |
||
4803 | - |
||
4804 | enh_desc = priv->plat->enh_desc; |
||
4805 | /* To program the descriptors according to the size of the frame */ |
||
4806 | if (enh_desc) |
||
4807 | @@ -2239,7 +3031,7 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4808 | |||
4809 | if (unlikely(is_jumbo) && likely(priv->synopsys_id < |
||
4810 | DWMAC_CORE_4_00)) { |
||
4811 | - entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); |
||
4812 | + entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); |
||
4813 | if (unlikely(entry < 0)) |
||
4814 | goto dma_map_err; |
||
4815 | } |
||
4816 | @@ -2252,48 +3044,56 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4817 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
||
4818 | |||
4819 | if (likely(priv->extend_desc)) |
||
4820 | - desc = (struct dma_desc *)(priv->dma_etx + entry); |
||
4821 | + desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
||
4822 | else |
||
4823 | - desc = priv->dma_tx + entry; |
||
4824 | + desc = tx_q->dma_tx + entry; |
||
4825 | |||
4826 | des = skb_frag_dma_map(priv->device, frag, 0, len, |
||
4827 | DMA_TO_DEVICE); |
||
4828 | if (dma_mapping_error(priv->device, des)) |
||
4829 | goto dma_map_err; /* should reuse desc w/o issues */ |
||
4830 | |||
4831 | - priv->tx_skbuff[entry] = NULL; |
||
4832 | + tx_q->tx_skbuff[entry] = NULL; |
||
4833 | |||
4834 | - priv->tx_skbuff_dma[entry].buf = des; |
||
4835 | + tx_q->tx_skbuff_dma[entry].buf = des; |
||
4836 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) |
||
4837 | desc->des0 = cpu_to_le32(des); |
||
4838 | else |
||
4839 | desc->des2 = cpu_to_le32(des); |
||
4840 | |||
4841 | - priv->tx_skbuff_dma[entry].map_as_page = true; |
||
4842 | - priv->tx_skbuff_dma[entry].len = len; |
||
4843 | - priv->tx_skbuff_dma[entry].last_segment = last_segment; |
||
4844 | + tx_q->tx_skbuff_dma[entry].map_as_page = true; |
||
4845 | + tx_q->tx_skbuff_dma[entry].len = len; |
||
4846 | + tx_q->tx_skbuff_dma[entry].last_segment = last_segment; |
||
4847 | |||
4848 | /* Prepare the descriptor and set the own bit too */ |
||
4849 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, |
||
4850 | - priv->mode, 1, last_segment); |
||
4851 | + priv->mode, 1, last_segment, |
||
4852 | + skb->len); |
||
4853 | } |
||
4854 | |||
4855 | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
||
4856 | + /* Only the last descriptor gets to point to the skb. */ |
||
4857 | + tx_q->tx_skbuff[entry] = skb; |
||
4858 | |||
4859 | - priv->cur_tx = entry; |
||
4860 | + /* We've used all descriptors we need for this skb, however, |
||
4861 | + * advance cur_tx so that it references a fresh descriptor. |
||
4862 | + * ndo_start_xmit will fill this descriptor the next time it's |
||
4863 | + * called and stmmac_tx_clean may clean up to this descriptor. |
||
4864 | + */ |
||
4865 | + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
||
4866 | + tx_q->cur_tx = entry; |
||
4867 | |||
4868 | if (netif_msg_pktdata(priv)) { |
||
4869 | void *tx_head; |
||
4870 | |||
4871 | netdev_dbg(priv->dev, |
||
4872 | "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", |
||
4873 | - __func__, priv->cur_tx, priv->dirty_tx, first_entry, |
||
4874 | + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
||
4875 | entry, first, nfrags); |
||
4876 | |||
4877 | if (priv->extend_desc) |
||
4878 | - tx_head = (void *)priv->dma_etx; |
||
4879 | + tx_head = (void *)tx_q->dma_etx; |
||
4880 | else |
||
4881 | - tx_head = (void *)priv->dma_tx; |
||
4882 | + tx_head = (void *)tx_q->dma_tx; |
||
4883 | |||
4884 | priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); |
||
4885 | |||
4886 | @@ -2301,10 +3101,10 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4887 | print_pkt(skb->data, skb->len); |
||
4888 | } |
||
4889 | |||
4890 | - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { |
||
4891 | + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
||
4892 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
||
4893 | __func__); |
||
4894 | - netif_stop_queue(dev); |
||
4895 | + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
||
4896 | } |
||
4897 | |||
4898 | dev->stats.tx_bytes += skb->len; |
||
4899 | @@ -2339,14 +3139,14 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4900 | if (dma_mapping_error(priv->device, des)) |
||
4901 | goto dma_map_err; |
||
4902 | |||
4903 | - priv->tx_skbuff_dma[first_entry].buf = des; |
||
4904 | + tx_q->tx_skbuff_dma[first_entry].buf = des; |
||
4905 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) |
||
4906 | first->des0 = cpu_to_le32(des); |
||
4907 | else |
||
4908 | first->des2 = cpu_to_le32(des); |
||
4909 | |||
4910 | - priv->tx_skbuff_dma[first_entry].len = nopaged_len; |
||
4911 | - priv->tx_skbuff_dma[first_entry].last_segment = last_segment; |
||
4912 | + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; |
||
4913 | + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; |
||
4914 | |||
4915 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
||
4916 | priv->hwts_tx_en)) { |
||
4917 | @@ -2358,7 +3158,7 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4918 | /* Prepare the first descriptor setting the OWN bit too */ |
||
4919 | priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, |
||
4920 | csum_insertion, priv->mode, 1, |
||
4921 | - last_segment); |
||
4922 | + last_segment, skb->len); |
||
4923 | |||
4924 | /* The own bit must be the latest setting done when prepare the |
||
4925 | * descriptor and then barrier is needed to make sure that |
||
4926 | @@ -2367,13 +3167,13 @@ static netdev_tx_t stmmac_xmit(struct sk |
||
4927 | dma_wmb(); |
||
4928 | } |
||
4929 | |||
4930 | - netdev_sent_queue(dev, skb->len); |
||
4931 | + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
||
4932 | |||
4933 | if (priv->synopsys_id < DWMAC_CORE_4_00) |
||
4934 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); |
||
4935 | else |
||
4936 | - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, |
||
4937 | - STMMAC_CHAN0); |
||
4938 | + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, |
||
4939 | + queue); |
||
4940 | |||
4941 | return NETDEV_TX_OK; |
||
4942 | |||
4943 | @@ -2401,9 +3201,9 @@ static void stmmac_rx_vlan(struct net_de |
||
4944 | } |
||
4945 | |||
4946 | |||
4947 | -static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) |
||
4948 | +static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) |
||
4949 | { |
||
4950 | - if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) |
||
4951 | + if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) |
||
4952 | return 0; |
||
4953 | |||
4954 | return 1; |
||
4955 | @@ -2412,30 +3212,33 @@ static inline int stmmac_rx_threshold_co |
||
4956 | /** |
||
4957 | * stmmac_rx_refill - refill used skb preallocated buffers |
||
4958 | * @priv: driver private structure |
||
4959 | + * @queue: RX queue index |
||
4960 | * Description : this is to reallocate the skb for the reception process |
||
4961 | * that is based on zero-copy. |
||
4962 | */ |
||
4963 | -static inline void stmmac_rx_refill(struct stmmac_priv *priv) |
||
4964 | +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
||
4965 | { |
||
4966 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
4967 | + int dirty = stmmac_rx_dirty(priv, queue); |
||
4968 | + unsigned int entry = rx_q->dirty_rx; |
||
4969 | + |
||
4970 | int bfsize = priv->dma_buf_sz; |
||
4971 | - unsigned int entry = priv->dirty_rx; |
||
4972 | - int dirty = stmmac_rx_dirty(priv); |
||
4973 | |||
4974 | while (dirty-- > 0) { |
||
4975 | struct dma_desc *p; |
||
4976 | |||
4977 | if (priv->extend_desc) |
||
4978 | - p = (struct dma_desc *)(priv->dma_erx + entry); |
||
4979 | + p = (struct dma_desc *)(rx_q->dma_erx + entry); |
||
4980 | else |
||
4981 | - p = priv->dma_rx + entry; |
||
4982 | + p = rx_q->dma_rx + entry; |
||
4983 | |||
4984 | - if (likely(priv->rx_skbuff[entry] == NULL)) { |
||
4985 | + if (likely(!rx_q->rx_skbuff[entry])) { |
||
4986 | struct sk_buff *skb; |
||
4987 | |||
4988 | skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); |
||
4989 | if (unlikely(!skb)) { |
||
4990 | /* so for a while no zero-copy! */ |
||
4991 | - priv->rx_zeroc_thresh = STMMAC_RX_THRESH; |
||
4992 | + rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; |
||
4993 | if (unlikely(net_ratelimit())) |
||
4994 | dev_err(priv->device, |
||
4995 | "fail to alloc skb entry %d\n", |
||
4996 | @@ -2443,28 +3246,28 @@ static inline void stmmac_rx_refill(stru |
||
4997 | break; |
||
4998 | } |
||
4999 | |||
5000 | - priv->rx_skbuff[entry] = skb; |
||
5001 | - priv->rx_skbuff_dma[entry] = |
||
5002 | + rx_q->rx_skbuff[entry] = skb; |
||
5003 | + rx_q->rx_skbuff_dma[entry] = |
||
5004 | dma_map_single(priv->device, skb->data, bfsize, |
||
5005 | DMA_FROM_DEVICE); |
||
5006 | if (dma_mapping_error(priv->device, |
||
5007 | - priv->rx_skbuff_dma[entry])) { |
||
5008 | + rx_q->rx_skbuff_dma[entry])) { |
||
5009 | netdev_err(priv->dev, "Rx DMA map failed\n"); |
||
5010 | dev_kfree_skb(skb); |
||
5011 | break; |
||
5012 | } |
||
5013 | |||
5014 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { |
||
5015 | - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]); |
||
5016 | + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); |
||
5017 | p->des1 = 0; |
||
5018 | } else { |
||
5019 | - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]); |
||
5020 | + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); |
||
5021 | } |
||
5022 | if (priv->hw->mode->refill_desc3) |
||
5023 | - priv->hw->mode->refill_desc3(priv, p); |
||
5024 | + priv->hw->mode->refill_desc3(rx_q, p); |
||
5025 | |||
5026 | - if (priv->rx_zeroc_thresh > 0) |
||
5027 | - priv->rx_zeroc_thresh--; |
||
5028 | + if (rx_q->rx_zeroc_thresh > 0) |
||
5029 | + rx_q->rx_zeroc_thresh--; |
||
5030 | |||
5031 | netif_dbg(priv, rx_status, priv->dev, |
||
5032 | "refill entry #%d\n", entry); |
||
5033 | @@ -2480,31 +3283,33 @@ static inline void stmmac_rx_refill(stru |
||
5034 | |||
5035 | entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); |
||
5036 | } |
||
5037 | - priv->dirty_rx = entry; |
||
5038 | + rx_q->dirty_rx = entry; |
||
5039 | } |
||
5040 | |||
5041 | /** |
||
5042 | * stmmac_rx - manage the receive process |
||
5043 | * @priv: driver private structure |
||
5044 | - * @limit: napi bugget. |
||
5045 | + * @limit: napi bugget |
||
5046 | + * @queue: RX queue index. |
||
5047 | * Description : this the function called by the napi poll method. |
||
5048 | * It gets all the frames inside the ring. |
||
5049 | */ |
||
5050 | -static int stmmac_rx(struct stmmac_priv *priv, int limit) |
||
5051 | +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
||
5052 | { |
||
5053 | - unsigned int entry = priv->cur_rx; |
||
5054 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
5055 | + unsigned int entry = rx_q->cur_rx; |
||
5056 | + int coe = priv->hw->rx_csum; |
||
5057 | unsigned int next_entry; |
||
5058 | unsigned int count = 0; |
||
5059 | - int coe = priv->hw->rx_csum; |
||
5060 | |||
5061 | if (netif_msg_rx_status(priv)) { |
||
5062 | void *rx_head; |
||
5063 | |||
5064 | netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); |
||
5065 | if (priv->extend_desc) |
||
5066 | - rx_head = (void *)priv->dma_erx; |
||
5067 | + rx_head = (void *)rx_q->dma_erx; |
||
5068 | else |
||
5069 | - rx_head = (void *)priv->dma_rx; |
||
5070 | + rx_head = (void *)rx_q->dma_rx; |
||
5071 | |||
5072 | priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); |
||
5073 | } |
||
5074 | @@ -2514,9 +3319,9 @@ static int stmmac_rx(struct stmmac_priv |
||
5075 | struct dma_desc *np; |
||
5076 | |||
5077 | if (priv->extend_desc) |
||
5078 | - p = (struct dma_desc *)(priv->dma_erx + entry); |
||
5079 | + p = (struct dma_desc *)(rx_q->dma_erx + entry); |
||
5080 | else |
||
5081 | - p = priv->dma_rx + entry; |
||
5082 | + p = rx_q->dma_rx + entry; |
||
5083 | |||
5084 | /* read the status of the incoming frame */ |
||
5085 | status = priv->hw->desc->rx_status(&priv->dev->stats, |
||
5086 | @@ -2527,20 +3332,20 @@ static int stmmac_rx(struct stmmac_priv |
||
5087 | |||
5088 | count++; |
||
5089 | |||
5090 | - priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); |
||
5091 | - next_entry = priv->cur_rx; |
||
5092 | + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); |
||
5093 | + next_entry = rx_q->cur_rx; |
||
5094 | |||
5095 | if (priv->extend_desc) |
||
5096 | - np = (struct dma_desc *)(priv->dma_erx + next_entry); |
||
5097 | + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); |
||
5098 | else |
||
5099 | - np = priv->dma_rx + next_entry; |
||
5100 | + np = rx_q->dma_rx + next_entry; |
||
5101 | |||
5102 | prefetch(np); |
||
5103 | |||
5104 | if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) |
||
5105 | priv->hw->desc->rx_extended_status(&priv->dev->stats, |
||
5106 | &priv->xstats, |
||
5107 | - priv->dma_erx + |
||
5108 | + rx_q->dma_erx + |
||
5109 | entry); |
||
5110 | if (unlikely(status == discard_frame)) { |
||
5111 | priv->dev->stats.rx_errors++; |
||
5112 | @@ -2550,9 +3355,9 @@ static int stmmac_rx(struct stmmac_priv |
||
5113 | * them in stmmac_rx_refill() function so that |
||
5114 | * device can reuse it. |
||
5115 | */ |
||
5116 | - priv->rx_skbuff[entry] = NULL; |
||
5117 | + rx_q->rx_skbuff[entry] = NULL; |
||
5118 | dma_unmap_single(priv->device, |
||
5119 | - priv->rx_skbuff_dma[entry], |
||
5120 | + rx_q->rx_skbuff_dma[entry], |
||
5121 | priv->dma_buf_sz, |
||
5122 | DMA_FROM_DEVICE); |
||
5123 | } |
||
5124 | @@ -2600,7 +3405,7 @@ static int stmmac_rx(struct stmmac_priv |
||
5125 | */ |
||
5126 | if (unlikely(!priv->plat->has_gmac4 && |
||
5127 | ((frame_len < priv->rx_copybreak) || |
||
5128 | - stmmac_rx_threshold_count(priv)))) { |
||
5129 | + stmmac_rx_threshold_count(rx_q)))) { |
||
5130 | skb = netdev_alloc_skb_ip_align(priv->dev, |
||
5131 | frame_len); |
||
5132 | if (unlikely(!skb)) { |
||
5133 | @@ -2612,21 +3417,21 @@ static int stmmac_rx(struct stmmac_priv |
||
5134 | } |
||
5135 | |||
5136 | dma_sync_single_for_cpu(priv->device, |
||
5137 | - priv->rx_skbuff_dma |
||
5138 | + rx_q->rx_skbuff_dma |
||
5139 | [entry], frame_len, |
||
5140 | DMA_FROM_DEVICE); |
||
5141 | skb_copy_to_linear_data(skb, |
||
5142 | - priv-> |
||
5143 | + rx_q-> |
||
5144 | rx_skbuff[entry]->data, |
||
5145 | frame_len); |
||
5146 | |||
5147 | skb_put(skb, frame_len); |
||
5148 | dma_sync_single_for_device(priv->device, |
||
5149 | - priv->rx_skbuff_dma |
||
5150 | + rx_q->rx_skbuff_dma |
||
5151 | [entry], frame_len, |
||
5152 | DMA_FROM_DEVICE); |
||
5153 | } else { |
||
5154 | - skb = priv->rx_skbuff[entry]; |
||
5155 | + skb = rx_q->rx_skbuff[entry]; |
||
5156 | if (unlikely(!skb)) { |
||
5157 | netdev_err(priv->dev, |
||
5158 | "%s: Inconsistent Rx chain\n", |
||
5159 | @@ -2635,12 +3440,12 @@ static int stmmac_rx(struct stmmac_priv |
||
5160 | break; |
||
5161 | } |
||
5162 | prefetch(skb->data - NET_IP_ALIGN); |
||
5163 | - priv->rx_skbuff[entry] = NULL; |
||
5164 | - priv->rx_zeroc_thresh++; |
||
5165 | + rx_q->rx_skbuff[entry] = NULL; |
||
5166 | + rx_q->rx_zeroc_thresh++; |
||
5167 | |||
5168 | skb_put(skb, frame_len); |
||
5169 | dma_unmap_single(priv->device, |
||
5170 | - priv->rx_skbuff_dma[entry], |
||
5171 | + rx_q->rx_skbuff_dma[entry], |
||
5172 | priv->dma_buf_sz, |
||
5173 | DMA_FROM_DEVICE); |
||
5174 | } |
||
5175 | @@ -2662,7 +3467,7 @@ static int stmmac_rx(struct stmmac_priv |
||
5176 | else |
||
5177 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
||
5178 | |||
5179 | - napi_gro_receive(&priv->napi, skb); |
||
5180 | + napi_gro_receive(&rx_q->napi, skb); |
||
5181 | |||
5182 | priv->dev->stats.rx_packets++; |
||
5183 | priv->dev->stats.rx_bytes += frame_len; |
||
5184 | @@ -2670,7 +3475,7 @@ static int stmmac_rx(struct stmmac_priv |
||
5185 | entry = next_entry; |
||
5186 | } |
||
5187 | |||
5188 | - stmmac_rx_refill(priv); |
||
5189 | + stmmac_rx_refill(priv, queue); |
||
5190 | |||
5191 | priv->xstats.rx_pkt_n += count; |
||
5192 | |||
5193 | @@ -2687,16 +3492,24 @@ static int stmmac_rx(struct stmmac_priv |
||
5194 | */ |
||
5195 | static int stmmac_poll(struct napi_struct *napi, int budget) |
||
5196 | { |
||
5197 | - struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); |
||
5198 | + struct stmmac_rx_queue *rx_q = |
||
5199 | + container_of(napi, struct stmmac_rx_queue, napi); |
||
5200 | + struct stmmac_priv *priv = rx_q->priv_data; |
||
5201 | + u32 tx_count = priv->plat->tx_queues_to_use; |
||
5202 | + u32 chan = rx_q->queue_index; |
||
5203 | int work_done = 0; |
||
5204 | + u32 queue; |
||
5205 | |||
5206 | priv->xstats.napi_poll++; |
||
5207 | - stmmac_tx_clean(priv); |
||
5208 | |||
5209 | - work_done = stmmac_rx(priv, budget); |
||
5210 | + /* check all the queues */ |
||
5211 | + for (queue = 0; queue < tx_count; queue++) |
||
5212 | + stmmac_tx_clean(priv, queue); |
||
5213 | + |
||
5214 | + work_done = stmmac_rx(priv, budget, rx_q->queue_index); |
||
5215 | if (work_done < budget) { |
||
5216 | napi_complete_done(napi, work_done); |
||
5217 | - stmmac_enable_dma_irq(priv); |
||
5218 | + stmmac_enable_dma_irq(priv, chan); |
||
5219 | } |
||
5220 | return work_done; |
||
5221 | } |
||
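With one NAPI context embedded in every RX queue, the poll callback recovers its own queue with container_of() and only receives on that queue, while TX completion is still swept across all TX queues. A self-contained sketch of the container_of() trick (the struct layout is invented for the example):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct napi_struct { int weight; };    /* invented stand-in */

    struct rx_queue {
        unsigned int queue_index;
        struct napi_struct napi;    /* one NAPI context per RX queue */
    };

    static int poll(struct napi_struct *napi, int budget)
    {
        /* recover the queue that owns this NAPI context */
        struct rx_queue *rx_q = container_of(napi, struct rx_queue, napi);

        printf("polling RX queue %u with budget %d\n", rx_q->queue_index, budget);
        return 0;
    }

    int main(void)
    {
        struct rx_queue q = { .queue_index = 2, .napi = { .weight = 64 } };

        return poll(&q.napi, 64);
    }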
5222 | @@ -2712,9 +3525,12 @@ static int stmmac_poll(struct napi_struc |
||
5223 | static void stmmac_tx_timeout(struct net_device *dev) |
||
5224 | { |
||
5225 | struct stmmac_priv *priv = netdev_priv(dev); |
||
5226 | + u32 tx_count = priv->plat->tx_queues_to_use; |
||
5227 | + u32 chan; |
||
5228 | |||
5229 | /* Clear Tx resources and restart transmitting again */ |
||
5230 | - stmmac_tx_err(priv); |
||
5231 | + for (chan = 0; chan < tx_count; chan++) |
||
5232 | + stmmac_tx_err(priv, chan); |
||
5233 | } |
||
5234 | |||
5235 | /** |
||
5236 | @@ -2837,6 +3653,12 @@ static irqreturn_t stmmac_interrupt(int |
||
5237 | { |
||
5238 | struct net_device *dev = (struct net_device *)dev_id; |
||
5239 | struct stmmac_priv *priv = netdev_priv(dev); |
||
5240 | + u32 rx_cnt = priv->plat->rx_queues_to_use; |
||
5241 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
5242 | + u32 queues_count; |
||
5243 | + u32 queue; |
||
5244 | + |
||
5245 | + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; |
||
5246 | |||
5247 | if (priv->irq_wake) |
||
5248 | pm_wakeup_event(priv->device, 0); |
||
5249 | @@ -2850,16 +3672,30 @@ static irqreturn_t stmmac_interrupt(int |
||
5250 | if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { |
||
5251 | int status = priv->hw->mac->host_irq_status(priv->hw, |
||
5252 | &priv->xstats); |
||
5253 | + |
||
5254 | if (unlikely(status)) { |
||
5255 | /* For LPI we need to save the tx status */ |
||
5256 | if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) |
||
5257 | priv->tx_path_in_lpi_mode = true; |
||
5258 | if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) |
||
5259 | priv->tx_path_in_lpi_mode = false; |
||
5260 | - if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr) |
||
5261 | - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, |
||
5262 | - priv->rx_tail_addr, |
||
5263 | - STMMAC_CHAN0); |
||
5264 | + } |
||
5265 | + |
||
5266 | + if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
||
5267 | + for (queue = 0; queue < queues_count; queue++) { |
||
5268 | + struct stmmac_rx_queue *rx_q = |
||
5269 | + &priv->rx_queue[queue]; |
||
5270 | + |
||
5271 | + status |= |
||
5272 | + priv->hw->mac->host_mtl_irq_status(priv->hw, |
||
5273 | + queue); |
||
5274 | + |
||
5275 | + if (status & CORE_IRQ_MTL_RX_OVERFLOW && |
||
5276 | + priv->hw->dma->set_rx_tail_ptr) |
||
5277 | + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, |
||
5278 | + rx_q->rx_tail_addr, |
||
5279 | + queue); |
||
5280 | + } |
||
5281 | } |
||
5282 | |||
5283 | /* PCS link status */ |
||
5284 | @@ -2944,7 +3780,7 @@ static void sysfs_display_ring(void *hea |
||
5285 | ep++; |
||
5286 | } else { |
||
5287 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
||
5288 | - i, (unsigned int)virt_to_phys(ep), |
||
5289 | + i, (unsigned int)virt_to_phys(p), |
||
5290 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), |
||
5291 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); |
||
5292 | p++; |
||
5293 | @@ -2957,17 +3793,40 @@ static int stmmac_sysfs_ring_read(struct |
||
5294 | { |
||
5295 | struct net_device *dev = seq->private; |
||
5296 | struct stmmac_priv *priv = netdev_priv(dev); |
||
5297 | + u32 rx_count = priv->plat->rx_queues_to_use; |
||
5298 | + u32 tx_count = priv->plat->tx_queues_to_use; |
||
5299 | + u32 queue; |
||
5300 | |||
5301 | - if (priv->extend_desc) { |
||
5302 | - seq_printf(seq, "Extended RX descriptor ring:\n"); |
||
5303 | - sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); |
||
5304 | - seq_printf(seq, "Extended TX descriptor ring:\n"); |
||
5305 | - sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); |
||
5306 | - } else { |
||
5307 | - seq_printf(seq, "RX descriptor ring:\n"); |
||
5308 | - sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); |
||
5309 | - seq_printf(seq, "TX descriptor ring:\n"); |
||
5310 | - sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); |
||
5311 | + for (queue = 0; queue < rx_count; queue++) { |
||
5312 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
5313 | + |
||
5314 | + seq_printf(seq, "RX Queue %d:\n", queue); |
||
5315 | + |
||
5316 | + if (priv->extend_desc) { |
||
5317 | + seq_printf(seq, "Extended descriptor ring:\n"); |
||
5318 | + sysfs_display_ring((void *)rx_q->dma_erx, |
||
5319 | + DMA_RX_SIZE, 1, seq); |
||
5320 | + } else { |
||
5321 | + seq_printf(seq, "Descriptor ring:\n"); |
||
5322 | + sysfs_display_ring((void *)rx_q->dma_rx, |
||
5323 | + DMA_RX_SIZE, 0, seq); |
||
5324 | + } |
||
5325 | + } |
||
5326 | + |
||
5327 | + for (queue = 0; queue < tx_count; queue++) { |
||
5328 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
5329 | + |
||
5330 | + seq_printf(seq, "TX Queue %d:\n", queue); |
||
5331 | + |
||
5332 | + if (priv->extend_desc) { |
||
5333 | + seq_printf(seq, "Extended descriptor ring:\n"); |
||
5334 | + sysfs_display_ring((void *)tx_q->dma_etx, |
||
5335 | + DMA_TX_SIZE, 1, seq); |
||
5336 | + } else { |
||
5337 | + seq_printf(seq, "Descriptor ring:\n"); |
||
5338 | + sysfs_display_ring((void *)tx_q->dma_tx, |
||
5339 | + DMA_TX_SIZE, 0, seq); |
||
5340 | + } |
||
5341 | } |
||
5342 | |||
5343 | return 0; |
||
5344 | @@ -3250,11 +4109,14 @@ int stmmac_dvr_probe(struct device *devi |
||
5345 | struct plat_stmmacenet_data *plat_dat, |
||
5346 | struct stmmac_resources *res) |
||
5347 | { |
||
5348 | - int ret = 0; |
||
5349 | struct net_device *ndev = NULL; |
||
5350 | struct stmmac_priv *priv; |
||
5351 | + int ret = 0; |
||
5352 | + u32 queue; |
||
5353 | |||
5354 | - ndev = alloc_etherdev(sizeof(struct stmmac_priv)); |
||
5355 | + ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
||
5356 | + MTL_MAX_TX_QUEUES, |
||
5357 | + MTL_MAX_RX_QUEUES); |
||
5358 | if (!ndev) |
||
5359 | return -ENOMEM; |
||
5360 | |||
5361 | @@ -3296,6 +4158,10 @@ int stmmac_dvr_probe(struct device *devi |
||
5362 | if (ret) |
||
5363 | goto error_hw_init; |
||
5364 | |||
5365 | + /* Configure real RX and TX queues */ |
||
5366 | + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); |
||
5367 | + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); |
||
5368 | + |
||
5369 | ndev->netdev_ops = &stmmac_netdev_ops; |
||
5370 | |||
5371 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
||
5372 | @@ -3328,7 +4194,12 @@ int stmmac_dvr_probe(struct device *devi |
||
5373 | "Enable RX Mitigation via HW Watchdog Timer\n"); |
||
5374 | } |
||
5375 | |||
5376 | - netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); |
||
5377 | + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { |
||
5378 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
5379 | + |
||
5380 | + netif_napi_add(ndev, &rx_q->napi, stmmac_poll, |
||
5381 | + (8 * priv->plat->rx_queues_to_use)); |
||
5382 | + } |
||
5383 | |||
5384 | spin_lock_init(&priv->lock); |
||
5385 | |||
5386 | @@ -3373,7 +4244,11 @@ error_netdev_register: |
||
5387 | priv->hw->pcs != STMMAC_PCS_RTBI) |
||
5388 | stmmac_mdio_unregister(ndev); |
||
5389 | error_mdio_register: |
||
5390 | - netif_napi_del(&priv->napi); |
||
5391 | + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { |
||
5392 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
5393 | + |
||
5394 | + netif_napi_del(&rx_q->napi); |
||
5395 | + } |
||
5396 | error_hw_init: |
||
5397 | free_netdev(ndev); |
||
5398 | |||
5399 | @@ -3394,10 +4269,9 @@ int stmmac_dvr_remove(struct device *dev |
||
5400 | |||
5401 | netdev_info(priv->dev, "%s: removing driver", __func__); |
||
5402 | |||
5403 | - priv->hw->dma->stop_rx(priv->ioaddr); |
||
5404 | - priv->hw->dma->stop_tx(priv->ioaddr); |
||
5405 | + stmmac_stop_all_dma(priv); |
||
5406 | |||
5407 | - stmmac_set_mac(priv->ioaddr, false); |
||
5408 | + priv->hw->mac->set_mac(priv->ioaddr, false); |
||
5409 | netif_carrier_off(ndev); |
||
5410 | unregister_netdev(ndev); |
||
5411 | if (priv->plat->stmmac_rst) |
||
5412 | @@ -3436,20 +4310,19 @@ int stmmac_suspend(struct device *dev) |
||
5413 | spin_lock_irqsave(&priv->lock, flags); |
||
5414 | |||
5415 | netif_device_detach(ndev); |
||
5416 | - netif_stop_queue(ndev); |
||
5417 | + stmmac_stop_all_queues(priv); |
||
5418 | |||
5419 | - napi_disable(&priv->napi); |
||
5420 | + stmmac_disable_all_queues(priv); |
||
5421 | |||
5422 | /* Stop TX/RX DMA */ |
||
5423 | - priv->hw->dma->stop_tx(priv->ioaddr); |
||
5424 | - priv->hw->dma->stop_rx(priv->ioaddr); |
||
5425 | + stmmac_stop_all_dma(priv); |
||
5426 | |||
5427 | /* Enable Power down mode by programming the PMT regs */ |
||
5428 | if (device_may_wakeup(priv->device)) { |
||
5429 | priv->hw->mac->pmt(priv->hw, priv->wolopts); |
||
5430 | priv->irq_wake = 1; |
||
5431 | } else { |
||
5432 | - stmmac_set_mac(priv->ioaddr, false); |
||
5433 | + priv->hw->mac->set_mac(priv->ioaddr, false); |
||
5434 | pinctrl_pm_select_sleep_state(priv->device); |
||
5435 | /* Disable clock in case of PWM is off */ |
||
5436 | clk_disable(priv->plat->pclk); |
||
5437 | @@ -3465,6 +4338,31 @@ int stmmac_suspend(struct device *dev) |
||
5438 | EXPORT_SYMBOL_GPL(stmmac_suspend); |
||
5439 | |||
5440 | /** |
||
5441 | + * stmmac_reset_queues_param - reset queue parameters |
||
5442 | + * @dev: device pointer |
5442 | + * @priv: driver private structure |
||
5443 | + */ |
||
5444 | +static void stmmac_reset_queues_param(struct stmmac_priv *priv) |
||
5445 | +{ |
||
5446 | + u32 rx_cnt = priv->plat->rx_queues_to_use; |
||
5447 | + u32 tx_cnt = priv->plat->tx_queues_to_use; |
||
5448 | + u32 queue; |
||
5449 | + |
||
5450 | + for (queue = 0; queue < rx_cnt; queue++) { |
||
5451 | + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
||
5452 | + |
||
5453 | + rx_q->cur_rx = 0; |
||
5454 | + rx_q->dirty_rx = 0; |
||
5455 | + } |
||
5456 | + |
||
5457 | + for (queue = 0; queue < tx_cnt; queue++) { |
||
5458 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
||
5459 | + |
||
5460 | + tx_q->cur_tx = 0; |
||
5461 | + tx_q->dirty_tx = 0; |
||
5462 | + } |
||
5463 | +} |
||
5464 | + |
||
5465 | +/** |
||
5466 | * stmmac_resume - resume callback |
||
5467 | * @dev: device pointer |
||
5468 | * Description: when resume this function is invoked to setup the DMA and CORE |
||
5469 | @@ -3504,10 +4402,8 @@ int stmmac_resume(struct device *dev) |
||
5470 | |||
5471 | spin_lock_irqsave(&priv->lock, flags); |
||
5472 | |||
5473 | - priv->cur_rx = 0; |
||
5474 | - priv->dirty_rx = 0; |
||
5475 | - priv->dirty_tx = 0; |
||
5476 | - priv->cur_tx = 0; |
||
5477 | + stmmac_reset_queues_param(priv); |
||
5478 | + |
||
5479 | /* reset private mss value to force mss context settings at |
||
5480 | * next tso xmit (only used for gmac4). |
||
5481 | */ |
||
5482 | @@ -3519,9 +4415,9 @@ int stmmac_resume(struct device *dev) |
||
5483 | stmmac_init_tx_coalesce(priv); |
||
5484 | stmmac_set_rx_mode(ndev); |
||
5485 | |||
5486 | - napi_enable(&priv->napi); |
||
5487 | + stmmac_enable_all_queues(priv); |
||
5488 | |||
5489 | - netif_start_queue(ndev); |
||
5490 | + stmmac_start_all_queues(priv); |
||
5491 | |||
5492 | spin_unlock_irqrestore(&priv->lock, flags); |
||
5493 | |||
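The remove/suspend/resume hunks above replace the single-queue calls (netif_stop_queue(), napi_disable(), the per-direction DMA stop routines) with stmmac_stop_all_queues(), stmmac_disable_all_queues(), stmmac_stop_all_dma() and their enable/start counterparts. Those helpers are introduced earlier in the patch and are not visible in this excerpt; as rough orientation only, the sketch below shows the shape such helpers usually take, using hypothetical my_* structures rather than the actual stmmac implementation.

#include <linux/netdevice.h>

struct my_rx_queue {
	struct napi_struct napi;
};

struct my_priv {
	struct net_device *ndev;
	unsigned int rx_count;
	struct my_rx_queue rx_queue[8];
};

/* Quiesce every per-queue NAPI context, e.g. on suspend. */
static void my_disable_all_queues(struct my_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->rx_count; q++)
		napi_disable(&priv->rx_queue[q].napi);
}

/* Re-enable them on resume ... */
static void my_enable_all_queues(struct my_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->rx_count; q++)
		napi_enable(&priv->rx_queue[q].napi);
}

/* ... and let the stack queue packets on every TX queue again. */
static void my_start_all_queues(struct my_priv *priv)
{
	netif_tx_start_all_queues(priv->ndev);
}

static void my_stop_all_queues(struct my_priv *priv)
{
	netif_tx_stop_all_queues(priv->ndev);
}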
5494 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c |
||
5495 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c |
||
5496 | @@ -32,6 +32,7 @@ |
||
5497 | */ |
||
5498 | struct stmmac_pci_dmi_data { |
||
5499 | const char *name; |
||
5500 | + const char *asset_tag; |
||
5501 | unsigned int func; |
||
5502 | int phy_addr; |
||
5503 | }; |
||
5504 | @@ -46,6 +47,7 @@ struct stmmac_pci_info { |
||
5505 | static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) |
||
5506 | { |
||
5507 | const char *name = dmi_get_system_info(DMI_BOARD_NAME); |
||
5508 | + const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG); |
||
5509 | unsigned int func = PCI_FUNC(info->pdev->devfn); |
||
5510 | struct stmmac_pci_dmi_data *dmi; |
||
5511 | |||
5512 | @@ -57,18 +59,19 @@ static int stmmac_pci_find_phy_addr(stru |
||
5513 | return 1; |
||
5514 | |||
5515 | for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) { |
||
5516 | - if (!strcmp(dmi->name, name) && dmi->func == func) |
||
5517 | + if (!strcmp(dmi->name, name) && dmi->func == func) { |
||
5518 | + /* If asset tag is provided, match on it as well. */ |
||
5519 | + if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag)) |
||
5520 | + continue; |
||
5521 | return dmi->phy_addr; |
||
5522 | + } |
||
5523 | } |
||
5524 | |||
5525 | return -ENODEV; |
||
5526 | } |
||
5527 | |||
5528 | -static void stmmac_default_data(struct plat_stmmacenet_data *plat) |
||
5529 | +static void common_default_data(struct plat_stmmacenet_data *plat) |
||
5530 | { |
||
5531 | - plat->bus_id = 1; |
||
5532 | - plat->phy_addr = 0; |
||
5533 | - plat->interface = PHY_INTERFACE_MODE_GMII; |
||
5534 | plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ |
||
5535 | plat->has_gmac = 1; |
||
5536 | plat->force_sf_dma_mode = 1; |
||
5537 | @@ -76,10 +79,6 @@ static void stmmac_default_data(struct p |
||
5538 | plat->mdio_bus_data->phy_reset = NULL; |
||
5539 | plat->mdio_bus_data->phy_mask = 0; |
||
5540 | |||
5541 | - plat->dma_cfg->pbl = 32; |
||
5542 | - plat->dma_cfg->pblx8 = true; |
||
5543 | - /* TODO: AXI */ |
||
5544 | - |
||
5545 | /* Set default value for multicast hash bins */ |
||
5546 | plat->multicast_filter_bins = HASH_TABLE_SIZE; |
||
5547 | |||
5548 | @@ -88,6 +87,31 @@ static void stmmac_default_data(struct p |
||
5549 | |||
5550 | /* Set the maxmtu to a default of JUMBO_LEN */ |
||
5551 | plat->maxmtu = JUMBO_LEN; |
||
5552 | + |
||
5553 | + /* Set default number of RX and TX queues to use */ |
||
5554 | + plat->tx_queues_to_use = 1; |
||
5555 | + plat->rx_queues_to_use = 1; |
||
5556 | + |
||
5557 | + /* Disable Priority config by default */ |
||
5558 | + plat->tx_queues_cfg[0].use_prio = false; |
||
5559 | + plat->rx_queues_cfg[0].use_prio = false; |
||
5560 | + |
||
5561 | + /* Disable RX queues routing by default */ |
||
5562 | + plat->rx_queues_cfg[0].pkt_route = 0x0; |
||
5563 | +} |
||
5564 | + |
||
5565 | +static void stmmac_default_data(struct plat_stmmacenet_data *plat) |
||
5566 | +{ |
||
5567 | + /* Set common default data first */ |
||
5568 | + common_default_data(plat); |
||
5569 | + |
||
5570 | + plat->bus_id = 1; |
||
5571 | + plat->phy_addr = 0; |
||
5572 | + plat->interface = PHY_INTERFACE_MODE_GMII; |
||
5573 | + |
||
5574 | + plat->dma_cfg->pbl = 32; |
||
5575 | + plat->dma_cfg->pblx8 = true; |
||
5576 | + /* TODO: AXI */ |
||
5577 | } |
||
5578 | |||
5579 | static int quark_default_data(struct plat_stmmacenet_data *plat, |
||
5580 | @@ -96,6 +120,9 @@ static int quark_default_data(struct pla |
||
5581 | struct pci_dev *pdev = info->pdev; |
||
5582 | int ret; |
||
5583 | |||
5584 | + /* Set common default data first */ |
||
5585 | + common_default_data(plat); |
||
5586 | + |
||
5587 | /* |
||
5588 | * Refuse to load the driver and register net device if MAC controller |
||
5589 | * does not connect to any PHY interface. |
||
5590 | @@ -107,27 +134,12 @@ static int quark_default_data(struct pla |
||
5591 | plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn); |
||
5592 | plat->phy_addr = ret; |
||
5593 | plat->interface = PHY_INTERFACE_MODE_RMII; |
||
5594 | - plat->clk_csr = 2; |
||
5595 | - plat->has_gmac = 1; |
||
5596 | - plat->force_sf_dma_mode = 1; |
||
5597 | - |
||
5598 | - plat->mdio_bus_data->phy_reset = NULL; |
||
5599 | - plat->mdio_bus_data->phy_mask = 0; |
||
5600 | |||
5601 | plat->dma_cfg->pbl = 16; |
||
5602 | plat->dma_cfg->pblx8 = true; |
||
5603 | plat->dma_cfg->fixed_burst = 1; |
||
5604 | /* AXI (TODO) */ |
||
5605 | |||
5606 | - /* Set default value for multicast hash bins */ |
||
5607 | - plat->multicast_filter_bins = HASH_TABLE_SIZE; |
||
5608 | - |
||
5609 | - /* Set default value for unicast filter entries */ |
||
5610 | - plat->unicast_filter_entries = 1; |
||
5611 | - |
||
5612 | - /* Set the maxmtu to a default of JUMBO_LEN */ |
||
5613 | - plat->maxmtu = JUMBO_LEN; |
||
5614 | - |
||
5615 | return 0; |
||
5616 | } |
||
5617 | |||
5618 | @@ -142,6 +154,24 @@ static struct stmmac_pci_dmi_data quark_ |
||
5619 | .func = 6, |
||
5620 | .phy_addr = 1, |
||
5621 | }, |
||
5622 | + { |
||
5623 | + .name = "SIMATIC IOT2000", |
||
5624 | + .asset_tag = "6ES7647-0AA00-0YA2", |
||
5625 | + .func = 6, |
||
5626 | + .phy_addr = 1, |
||
5627 | + }, |
||
5628 | + { |
||
5629 | + .name = "SIMATIC IOT2000", |
||
5630 | + .asset_tag = "6ES7647-0AA00-1YA2", |
||
5631 | + .func = 6, |
||
5632 | + .phy_addr = 1, |
||
5633 | + }, |
||
5634 | + { |
||
5635 | + .name = "SIMATIC IOT2000", |
||
5636 | + .asset_tag = "6ES7647-0AA00-1YA2", |
||
5637 | + .func = 7, |
||
5638 | + .phy_addr = 1, |
||
5639 | + }, |
||
5640 | {} |
||
5641 | }; |
||
5642 | |||
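The PCI glue changes above let several boards share one DMI board name and still resolve to the right PHY address by additionally comparing the DMI asset tag, as done for the SIMATIC IOT2000 variants; entries without an .asset_tag keep matching on board name and PCI function alone. The self-contained model below restates that lookup; the example_* names and the "EXAMPLE BOARD" entry are made up for illustration, and only the IOT2000 entry mirrors data from the patch.

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/string.h>

struct example_dmi_entry {
	const char *name;
	const char *asset_tag;	/* optional: NULL means match on name/func only */
	unsigned int func;
	int phy_addr;
};

static const struct example_dmi_entry example_entries[] = {
	{ .name = "EXAMPLE BOARD", .func = 6, .phy_addr = 3 },
	{ .name = "SIMATIC IOT2000", .asset_tag = "6ES7647-0AA00-0YA2",
	  .func = 6, .phy_addr = 1 },
	{ /* sentinel */ }
};

static int example_find_phy_addr(unsigned int func)
{
	const char *name = dmi_get_system_info(DMI_BOARD_NAME);
	const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
	const struct example_dmi_entry *e;

	if (!name)
		return -ENODEV;

	for (e = example_entries; e->name && *e->name; e++) {
		if (strcmp(e->name, name) || e->func != func)
			continue;
		/* Compare the asset tag only when the entry provides one. */
		if (e->asset_tag && (!asset_tag || strcmp(e->asset_tag, asset_tag)))
			continue;
		return e->phy_addr;
	}
	return -ENODEV;
}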
5643 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c |
||
5644 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c |
||
5645 | @@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_set |
||
5646 | if (!np) |
||
5647 | return NULL; |
||
5648 | |||
5649 | - axi = kzalloc(sizeof(*axi), GFP_KERNEL); |
||
5650 | + axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); |
||
5651 | if (!axi) { |
||
5652 | of_node_put(np); |
||
5653 | return ERR_PTR(-ENOMEM); |
||
5654 | @@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_set |
||
5655 | } |
||
5656 | |||
5657 | /** |
||
5658 | + * stmmac_mtl_setup - parse DT parameters for multiple queues configuration |
||
5659 | + * @pdev: platform device |
||
5660 | + */ |
||
5661 | +static void stmmac_mtl_setup(struct platform_device *pdev, |
||
5662 | + struct plat_stmmacenet_data *plat) |
||
5663 | +{ |
||
5664 | + struct device_node *q_node; |
||
5665 | + struct device_node *rx_node; |
||
5666 | + struct device_node *tx_node; |
||
5667 | + u8 queue = 0; |
||
5668 | + |
||
5669 | + /* For backwards-compatibility with device trees that don't have any |
||
5670 | + * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back |
||
5671 | + * to a single RX queue and a single TX queue. |
||
5672 | + */ |
||
5673 | + plat->rx_queues_to_use = 1; |
||
5674 | + plat->tx_queues_to_use = 1; |
||
5675 | + |
||
5676 | + rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); |
||
5677 | + if (!rx_node) |
||
5678 | + return; |
||
5679 | + |
||
5680 | + tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0); |
||
5681 | + if (!tx_node) { |
||
5682 | + of_node_put(rx_node); |
||
5683 | + return; |
||
5684 | + } |
||
5685 | + |
||
5686 | + /* Processing RX queues common config */ |
||
5687 | + if (of_property_read_u8(rx_node, "snps,rx-queues-to-use", |
||
5688 | + &plat->rx_queues_to_use)) |
||
5689 | + plat->rx_queues_to_use = 1; |
||
5690 | + |
||
5691 | + if (of_property_read_bool(rx_node, "snps,rx-sched-sp")) |
||
5692 | + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; |
||
5693 | + else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp")) |
||
5694 | + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP; |
||
5695 | + else |
||
5696 | + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; |
||
5697 | + |
||
5698 | + /* Processing individual RX queue config */ |
||
5699 | + for_each_child_of_node(rx_node, q_node) { |
||
5700 | + if (queue >= plat->rx_queues_to_use) |
||
5701 | + break; |
||
5702 | + |
||
5703 | + if (of_property_read_bool(q_node, "snps,dcb-algorithm")) |
||
5704 | + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; |
||
5705 | + else if (of_property_read_bool(q_node, "snps,avb-algorithm")) |
||
5706 | + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; |
||
5707 | + else |
||
5708 | + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; |
||
5709 | + |
||
5710 | + if (of_property_read_u8(q_node, "snps,map-to-dma-channel", |
||
5711 | + &plat->rx_queues_cfg[queue].chan)) |
||
5712 | + plat->rx_queues_cfg[queue].chan = queue; |
||
5713 | + /* TODO: Dynamic mapping to be included in the future */ |
||
5714 | + |
||
5715 | + if (of_property_read_u32(q_node, "snps,priority", |
||
5716 | + &plat->rx_queues_cfg[queue].prio)) { |
||
5717 | + plat->rx_queues_cfg[queue].prio = 0; |
||
5718 | + plat->rx_queues_cfg[queue].use_prio = false; |
||
5719 | + } else { |
||
5720 | + plat->rx_queues_cfg[queue].use_prio = true; |
||
5721 | + } |
||
5722 | + |
||
5723 | + /* RX queue specific packet type routing */ |
||
5724 | + if (of_property_read_bool(q_node, "snps,route-avcp")) |
||
5725 | + plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ; |
||
5726 | + else if (of_property_read_bool(q_node, "snps,route-ptp")) |
||
5727 | + plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ; |
||
5728 | + else if (of_property_read_bool(q_node, "snps,route-dcbcp")) |
||
5729 | + plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ; |
||
5730 | + else if (of_property_read_bool(q_node, "snps,route-up")) |
||
5731 | + plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ; |
||
5732 | + else if (of_property_read_bool(q_node, "snps,route-multi-broad")) |
||
5733 | + plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ; |
||
5734 | + else |
||
5735 | + plat->rx_queues_cfg[queue].pkt_route = 0x0; |
||
5736 | + |
||
5737 | + queue++; |
||
5738 | + } |
||
5739 | + |
||
5740 | + /* Processing TX queues common config */ |
||
5741 | + if (of_property_read_u8(tx_node, "snps,tx-queues-to-use", |
||
5742 | + &plat->tx_queues_to_use)) |
||
5743 | + plat->tx_queues_to_use = 1; |
||
5744 | + |
||
5745 | + if (of_property_read_bool(tx_node, "snps,tx-sched-wrr")) |
||
5746 | + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; |
||
5747 | + else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq")) |
||
5748 | + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ; |
||
5749 | + else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr")) |
||
5750 | + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR; |
||
5751 | + else if (of_property_read_bool(tx_node, "snps,tx-sched-sp")) |
||
5752 | + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; |
||
5753 | + else |
||
5754 | + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; |
||
5755 | + |
||
5756 | + queue = 0; |
||
5757 | + |
||
5758 | + /* Processing individual TX queue config */ |
||
5759 | + for_each_child_of_node(tx_node, q_node) { |
||
5760 | + if (queue >= plat->tx_queues_to_use) |
||
5761 | + break; |
||
5762 | + |
||
5763 | + if (of_property_read_u8(q_node, "snps,weight", |
||
5764 | + &plat->tx_queues_cfg[queue].weight)) |
||
5765 | + plat->tx_queues_cfg[queue].weight = 0x10 + queue; |
||
5766 | + |
||
5767 | + if (of_property_read_bool(q_node, "snps,dcb-algorithm")) { |
||
5768 | + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; |
||
5769 | + } else if (of_property_read_bool(q_node, |
||
5770 | + "snps,avb-algorithm")) { |
||
5771 | + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; |
||
5772 | + |
||
5773 | + /* Credit Base Shaper parameters used by AVB */ |
||
5774 | + if (of_property_read_u32(q_node, "snps,send_slope", |
||
5775 | + &plat->tx_queues_cfg[queue].send_slope)) |
||
5776 | + plat->tx_queues_cfg[queue].send_slope = 0x0; |
||
5777 | + if (of_property_read_u32(q_node, "snps,idle_slope", |
||
5778 | + &plat->tx_queues_cfg[queue].idle_slope)) |
||
5779 | + plat->tx_queues_cfg[queue].idle_slope = 0x0; |
||
5780 | + if (of_property_read_u32(q_node, "snps,high_credit", |
||
5781 | + &plat->tx_queues_cfg[queue].high_credit)) |
||
5782 | + plat->tx_queues_cfg[queue].high_credit = 0x0; |
||
5783 | + if (of_property_read_u32(q_node, "snps,low_credit", |
||
5784 | + &plat->tx_queues_cfg[queue].low_credit)) |
||
5785 | + plat->tx_queues_cfg[queue].low_credit = 0x0; |
||
5786 | + } else { |
||
5787 | + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; |
||
5788 | + } |
||
5789 | + |
||
5790 | + if (of_property_read_u32(q_node, "snps,priority", |
||
5791 | + &plat->tx_queues_cfg[queue].prio)) { |
||
5792 | + plat->tx_queues_cfg[queue].prio = 0; |
||
5793 | + plat->tx_queues_cfg[queue].use_prio = false; |
||
5794 | + } else { |
||
5795 | + plat->tx_queues_cfg[queue].use_prio = true; |
||
5796 | + } |
||
5797 | + |
||
5798 | + queue++; |
||
5799 | + } |
||
5800 | + |
||
5801 | + of_node_put(rx_node); |
||
5802 | + of_node_put(tx_node); |
||
5803 | + of_node_put(q_node); |
||
5804 | +} |
||
5805 | + |
||
5806 | +/** |
||
5807 | * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources |
||
5808 | * @plat: driver data platform structure |
||
5809 | * @np: device tree node |
||
5810 | @@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_d |
||
5811 | |||
5812 | plat->axi = stmmac_axi_setup(pdev); |
||
5813 | |||
5814 | + stmmac_mtl_setup(pdev, plat); |
||
5815 | + |
||
5816 | /* clock setup */ |
||
5817 | plat->stmmac_clk = devm_clk_get(&pdev->dev, |
||
5818 | STMMAC_RESOURCE_NAME); |
||
5819 | @@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_d |
||
5820 | clk_prepare_enable(plat->pclk); |
||
5821 | |||
5822 | /* Fall-back to main clock in case of no PTP ref is passed */ |
||
5823 | - plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); |
||
5824 | + plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref"); |
||
5825 | if (IS_ERR(plat->clk_ptp_ref)) { |
||
5826 | plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); |
||
5827 | plat->clk_ptp_ref = NULL; |
||
5828 | dev_warn(&pdev->dev, "PTP uses main clock\n"); |
||
5829 | } else { |
||
5830 | - clk_prepare_enable(plat->clk_ptp_ref); |
||
5831 | plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); |
||
5832 | dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); |
||
5833 | } |
||
5834 | --- a/include/linux/stmmac.h |
||
5835 | +++ b/include/linux/stmmac.h |
||
5836 | @@ -28,6 +28,9 @@ |
||
5837 | |||
5838 | #include <linux/platform_device.h> |
||
5839 | |||
5840 | +#define MTL_MAX_RX_QUEUES 8 |
||
5841 | +#define MTL_MAX_TX_QUEUES 8 |
||
5842 | + |
||
5843 | #define STMMAC_RX_COE_NONE 0 |
||
5844 | #define STMMAC_RX_COE_TYPE1 1 |
||
5845 | #define STMMAC_RX_COE_TYPE2 2 |
||
5846 | @@ -44,6 +47,18 @@ |
||
5847 | #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ |
||
5848 | #define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ |
||
5849 | |||
5850 | +/* MTL algorithms identifiers */ |
||
5851 | +#define MTL_TX_ALGORITHM_WRR 0x0 |
||
5852 | +#define MTL_TX_ALGORITHM_WFQ 0x1 |
||
5853 | +#define MTL_TX_ALGORITHM_DWRR 0x2 |
||
5854 | +#define MTL_TX_ALGORITHM_SP 0x3 |
||
5855 | +#define MTL_RX_ALGORITHM_SP 0x4 |
||
5856 | +#define MTL_RX_ALGORITHM_WSP 0x5 |
||
5857 | + |
||
5858 | +/* RX/TX Queue Mode */ |
||
5859 | +#define MTL_QUEUE_AVB 0x0 |
||
5860 | +#define MTL_QUEUE_DCB 0x1 |
||
5861 | + |
||
5862 | /* The MDC clock could be set higher than the IEEE 802.3 |
||
5863 | * specified frequency limit 0f 2.5 MHz, by programming a clock divider |
||
5864 | * of value different than the above defined values. The resultant MDIO |
||
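The identifiers added above (MTL_MAX_*_QUEUES, the RX/TX scheduling algorithms and the AVB/DCB queue modes) are what the DT parsing in stmmac_platform.c and the PCI defaults in stmmac_pci.c ultimately write into plat_stmmacenet_data. A minimal single-queue default configuration using them could look like the sketch below; it relies on the rx_queues_cfg/tx_queues_cfg fields introduced further down in this patch and is meant as orientation, not as code taken from the driver.

#include <linux/stmmac.h>

/* Illustrative single-queue defaults, in the spirit of what the PCI
 * common_default_data() helper sets up.
 */
static void example_single_queue_defaults(struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* Strict priority is the natural choice with only one queue. */
	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	/* Queue 0: plain DCB queue on DMA channel 0, no routing, no priority. */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;
	plat->rx_queues_cfg[0].pkt_route = 0x0;
	plat->rx_queues_cfg[0].use_prio = false;

	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].weight = 0x10;
	plat->tx_queues_cfg[0].use_prio = false;
}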
5865 | @@ -109,6 +124,26 @@ struct stmmac_axi { |
||
5866 | bool axi_rb; |
||
5867 | }; |
||
5868 | |||
5869 | +struct stmmac_rxq_cfg { |
||
5870 | + u8 mode_to_use; |
||
5871 | + u8 chan; |
||
5872 | + u8 pkt_route; |
||
5873 | + bool use_prio; |
||
5874 | + u32 prio; |
||
5875 | +}; |
||
5876 | + |
||
5877 | +struct stmmac_txq_cfg { |
||
5878 | + u8 weight; |
||
5879 | + u8 mode_to_use; |
||
5880 | + /* Credit Based Shaper parameters */ |
||
5881 | + u32 send_slope; |
||
5882 | + u32 idle_slope; |
||
5883 | + u32 high_credit; |
||
5884 | + u32 low_credit; |
||
5885 | + bool use_prio; |
||
5886 | + u32 prio; |
||
5887 | +}; |
||
5888 | + |
||
5889 | struct plat_stmmacenet_data { |
||
5890 | int bus_id; |
||
5891 | int phy_addr; |
||
5892 | @@ -133,6 +168,12 @@ struct plat_stmmacenet_data { |
||
5893 | int unicast_filter_entries; |
||
5894 | int tx_fifo_size; |
||
5895 | int rx_fifo_size; |
||
5896 | + u8 rx_queues_to_use; |
||
5897 | + u8 tx_queues_to_use; |
||
5898 | + u8 rx_sched_algorithm; |
||
5899 | + u8 tx_sched_algorithm; |
||
5900 | + struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; |
||
5901 | + struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; |
||
5902 | void (*fix_mac_speed)(void *priv, unsigned int speed); |
||
5903 | int (*init)(struct platform_device *pdev, void *priv); |
||
5904 | void (*exit)(struct platform_device *pdev, void *priv); |