OpenWrt – Blame information for rev 1
Rev | Author | Line No. | Line |
---|---|---|---|
1 | office | 1 | /* This program is free software; you can redistribute it and/or modify |
2 | * it under the terms of the GNU General Public License as published by |
||
3 | * the Free Software Foundation; version 2 of the License |
||
4 | * |
||
5 | * This program is distributed in the hope that it will be useful, |
||
6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
||
7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||
8 | * GNU General Public License for more details. |
||
9 | * |
||
10 | * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> |
||
11 | * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> |
||
12 | * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> |
||
13 | */ |
||
14 | |||
15 | #include <linux/module.h> |
||
16 | #include <linux/kernel.h> |
||
17 | #include <linux/types.h> |
||
18 | #include <linux/dma-mapping.h> |
||
19 | #include <linux/init.h> |
||
20 | #include <linux/skbuff.h> |
||
21 | #include <linux/etherdevice.h> |
||
22 | #include <linux/ethtool.h> |
||
23 | #include <linux/platform_device.h> |
||
24 | #include <linux/of_device.h> |
||
25 | #include <linux/clk.h> |
||
26 | #include <linux/of_net.h> |
||
27 | #include <linux/of_mdio.h> |
||
28 | #include <linux/if_vlan.h> |
||
29 | #include <linux/reset.h> |
||
30 | #include <linux/tcp.h> |
||
31 | #include <linux/io.h> |
||
32 | #include <linux/bug.h> |
||
33 | #include <linux/netfilter.h> |
||
34 | #include <net/netfilter/nf_flow_table.h> |
||
35 | |||
36 | #include <asm/mach-ralink/ralink_regs.h> |
||
37 | |||
38 | #include "mtk_eth_soc.h" |
||
39 | #include "mdio.h" |
||
40 | #include "ethtool.h" |
||
41 | |||
42 | #define MAX_RX_LENGTH 1536 |
||
43 | #define FE_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) |
||
44 | #define FE_RX_HLEN (NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN) |
||
45 | #define DMA_DUMMY_DESC 0xffffffff |
||
46 | #define FE_DEFAULT_MSG_ENABLE \ |
||
47 | (NETIF_MSG_DRV | \ |
||
48 | NETIF_MSG_PROBE | \ |
||
49 | NETIF_MSG_LINK | \ |
||
50 | NETIF_MSG_TIMER | \ |
||
51 | NETIF_MSG_IFDOWN | \ |
||
52 | NETIF_MSG_IFUP | \ |
||
53 | NETIF_MSG_RX_ERR | \ |
||
54 | NETIF_MSG_TX_ERR) |
||
55 | |||
56 | #define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) |
||
57 | #define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1)) |
||
58 | #define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1)) |
||
59 | #define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1)) |
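/* The two NEXT_*_DESP_IDX macros above wrap the ring index with a bitwise
 * AND, which assumes tx_ring_size and rx_ring_size are powers of two.
 */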
||
60 | |||
61 | #define SYSC_REG_RSTCTRL 0x34 |
||
62 | |||
63 | static int fe_msg_level = -1; |
||
64 | module_param_named(msg_level, fe_msg_level, int, 0); |
||
65 | MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); |
||
66 | |||
67 | static const u16 fe_reg_table_default[FE_REG_COUNT] = { |
||
68 | [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG, |
||
69 | [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG, |
||
70 | [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG, |
||
71 | [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0, |
||
72 | [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0, |
||
73 | [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0, |
||
74 | [FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0, |
||
75 | [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0, |
||
76 | [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0, |
||
77 | [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0, |
||
78 | [FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0, |
||
79 | [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE, |
||
80 | [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS, |
||
81 | [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0, |
||
82 | [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT, |
||
83 | [FE_REG_FE_RST_GL] = FE_FE_RST_GL, |
||
84 | }; |
||
85 | |||
86 | static const u16 *fe_reg_table = fe_reg_table_default; |
||
87 | |||
88 | struct fe_work_t { |
||
89 | int bitnr; |
||
90 | void (*action)(struct fe_priv *); |
||
91 | }; |
||
92 | |||
93 | static void __iomem *fe_base; |
||
94 | |||
95 | void fe_w32(u32 val, unsigned reg) |
||
96 | { |
||
97 | __raw_writel(val, fe_base + reg); |
||
98 | } |
||
99 | |||
100 | u32 fe_r32(unsigned reg) |
||
101 | { |
||
102 | return __raw_readl(fe_base + reg); |
||
103 | } |
||
104 | |||
105 | void fe_reg_w32(u32 val, enum fe_reg reg) |
||
106 | { |
||
107 | fe_w32(val, fe_reg_table[reg]); |
||
108 | } |
||
109 | |||
110 | u32 fe_reg_r32(enum fe_reg reg) |
||
111 | { |
||
112 | return fe_r32(fe_reg_table[reg]); |
||
113 | } |
||
114 | |||
115 | void fe_m32(struct fe_priv *eth, u32 clear, u32 set, unsigned reg) |
||
116 | { |
||
117 | u32 val; |
||
118 | |||
119 | spin_lock(ð->page_lock); |
||
120 | val = __raw_readl(fe_base + reg); |
||
121 | val &= ~clear; |
||
122 | val |= set; |
||
123 | __raw_writel(val, fe_base + reg); |
||
124 | spin_unlock(ð->page_lock); |
||
125 | } |
||
126 | |||
127 | void fe_reset(u32 reset_bits) |
||
128 | { |
||
129 | u32 t; |
||
130 | |||
131 | t = rt_sysc_r32(SYSC_REG_RSTCTRL); |
||
132 | t |= reset_bits; |
||
133 | rt_sysc_w32(t, SYSC_REG_RSTCTRL); |
||
134 | usleep_range(10, 20); |
||
135 | |||
136 | t &= ~reset_bits; |
||
137 | rt_sysc_w32(t, SYSC_REG_RSTCTRL); |
||
138 | usleep_range(10, 20); |
||
139 | } |
||
140 | |||
141 | static inline void fe_int_disable(u32 mask) |
||
142 | { |
||
143 | fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask, |
||
144 | FE_REG_FE_INT_ENABLE); |
||
145 | /* flush write */ |
||
146 | fe_reg_r32(FE_REG_FE_INT_ENABLE); |
||
147 | } |
||
148 | |||
149 | static inline void fe_int_enable(u32 mask) |
||
150 | { |
||
151 | fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask, |
||
152 | FE_REG_FE_INT_ENABLE); |
||
153 | /* flush write */ |
||
154 | fe_reg_r32(FE_REG_FE_INT_ENABLE); |
||
155 | } |
||
156 | |||
157 | static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac) |
||
158 | { |
||
159 | unsigned long flags; |
||
160 | |||
161 | spin_lock_irqsave(&priv->page_lock, flags); |
||
162 | fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH); |
||
163 | fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], |
||
164 | FE_GDMA1_MAC_ADRL); |
||
165 | spin_unlock_irqrestore(&priv->page_lock, flags); |
||
166 | } |
||
167 | |||
168 | static int fe_set_mac_address(struct net_device *dev, void *p) |
||
169 | { |
||
170 | int ret = eth_mac_addr(dev, p); |
||
171 | |||
172 | if (!ret) { |
||
173 | struct fe_priv *priv = netdev_priv(dev); |
||
174 | |||
175 | if (priv->soc->set_mac) |
||
176 | priv->soc->set_mac(priv, dev->dev_addr); |
||
177 | else |
||
178 | fe_hw_set_macaddr(priv, p); |
||
179 | } |
||
180 | |||
181 | return ret; |
||
182 | } |
||
183 | |||
184 | static inline int fe_max_frag_size(int mtu) |
||
185 | { |
||
186 | /* make sure buf_size will be at least MAX_RX_LENGTH */ |
||
187 | if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH) |
||
188 | mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN; |
||
189 | |||
190 | return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) + |
||
191 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
||
192 | } |
||
193 | |||
194 | static inline int fe_max_buf_size(int frag_size) |
||
195 | { |
||
196 | int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - |
||
197 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
||
198 | |||
199 | BUG_ON(buf_size < MAX_RX_LENGTH); |
||
200 | return buf_size; |
||
201 | } |
||
202 | |||
203 | static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd) |
||
204 | { |
||
205 | rxd->rxd1 = dma_rxd->rxd1; |
||
206 | rxd->rxd2 = dma_rxd->rxd2; |
||
207 | rxd->rxd3 = dma_rxd->rxd3; |
||
208 | rxd->rxd4 = dma_rxd->rxd4; |
||
209 | } |
||
210 | |||
211 | static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd) |
||
212 | { |
||
213 | dma_txd->txd1 = txd->txd1; |
||
214 | dma_txd->txd3 = txd->txd3; |
||
215 | dma_txd->txd4 = txd->txd4; |
||
216 | /* clean dma done flag last */ |
||
217 | dma_txd->txd2 = txd->txd2; |
||
218 | } |
||
219 | |||
220 | static void fe_clean_rx(struct fe_priv *priv) |
||
221 | { |
||
222 | struct fe_rx_ring *ring = &priv->rx_ring; |
||
223 | struct page *page; |
||
224 | int i; |
||
225 | |||
226 | if (ring->rx_data) { |
||
227 | for (i = 0; i < ring->rx_ring_size; i++) |
||
228 | if (ring->rx_data[i]) { |
||
229 | if (ring->rx_dma && ring->rx_dma[i].rxd1) |
||
230 | dma_unmap_single(&priv->netdev->dev, |
||
231 | ring->rx_dma[i].rxd1, |
||
232 | ring->rx_buf_size, |
||
233 | DMA_FROM_DEVICE); |
||
234 | skb_free_frag(ring->rx_data[i]); |
||
235 | } |
||
236 | |||
237 | kfree(ring->rx_data); |
||
238 | ring->rx_data = NULL; |
||
239 | } |
||
240 | |||
241 | if (ring->rx_dma) { |
||
242 | dma_free_coherent(&priv->netdev->dev, |
||
243 | ring->rx_ring_size * sizeof(*ring->rx_dma), |
||
244 | ring->rx_dma, |
||
245 | ring->rx_phys); |
||
246 | ring->rx_dma = NULL; |
||
247 | } |
||
248 | |||
249 | if (!ring->frag_cache.va) |
||
250 | return; |
||
251 | |||
252 | page = virt_to_page(ring->frag_cache.va); |
||
253 | __page_frag_cache_drain(page, ring->frag_cache.pagecnt_bias); |
||
254 | memset(&ring->frag_cache, 0, sizeof(ring->frag_cache)); |
||
255 | } |
||
256 | |||
257 | static int fe_alloc_rx(struct fe_priv *priv) |
||
258 | { |
||
259 | struct net_device *netdev = priv->netdev; |
||
260 | struct fe_rx_ring *ring = &priv->rx_ring; |
||
261 | int i, pad; |
||
262 | |||
263 | ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data), |
||
264 | GFP_KERNEL); |
||
265 | if (!ring->rx_data) |
||
266 | goto no_rx_mem; |
||
267 | |||
268 | for (i = 0; i < ring->rx_ring_size; i++) { |
||
269 | ring->rx_data[i] = page_frag_alloc(&ring->frag_cache, |
||
270 | ring->frag_size, |
||
271 | GFP_KERNEL); |
||
272 | if (!ring->rx_data[i]) |
||
273 | goto no_rx_mem; |
||
274 | } |
||
275 | |||
276 | ring->rx_dma = dma_alloc_coherent(&netdev->dev, |
||
277 | ring->rx_ring_size * sizeof(*ring->rx_dma), |
||
278 | &ring->rx_phys, |
||
279 | GFP_ATOMIC | __GFP_ZERO); |
||
280 | if (!ring->rx_dma) |
||
281 | goto no_rx_mem; |
||
282 | |||
283 | if (priv->flags & FE_FLAG_RX_2B_OFFSET) |
||
284 | pad = 0; |
||
285 | else |
||
286 | pad = NET_IP_ALIGN; |
||
287 | for (i = 0; i < ring->rx_ring_size; i++) { |
||
288 | dma_addr_t dma_addr = dma_map_single(&netdev->dev, |
||
289 | ring->rx_data[i] + NET_SKB_PAD + pad, |
||
290 | ring->rx_buf_size, |
||
291 | DMA_FROM_DEVICE); |
||
292 | if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) |
||
293 | goto no_rx_mem; |
||
294 | ring->rx_dma[i].rxd1 = (unsigned int)dma_addr; |
||
295 | |||
296 | if (priv->flags & FE_FLAG_RX_SG_DMA) |
||
297 | ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); |
||
298 | else |
||
299 | ring->rx_dma[i].rxd2 = RX_DMA_LSO; |
||
300 | } |
||
301 | ring->rx_calc_idx = ring->rx_ring_size - 1; |
||
302 | /* make sure that all changes to the dma ring are flushed before we |
||
303 | * continue |
||
304 | */ |
||
305 | wmb(); |
||
306 | |||
307 | fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0); |
||
308 | fe_reg_w32(ring->rx_ring_size, FE_REG_RX_MAX_CNT0); |
||
309 | fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0); |
||
310 | fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG); |
||
311 | |||
312 | return 0; |
||
313 | |||
314 | no_rx_mem: |
||
315 | return -ENOMEM; |
||
316 | } |
||
317 | |||
318 | static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf) |
||
319 | { |
||
320 | if (dma_unmap_len(tx_buf, dma_len0)) |
||
321 | dma_unmap_page(dev, |
||
322 | dma_unmap_addr(tx_buf, dma_addr0), |
||
323 | dma_unmap_len(tx_buf, dma_len0), |
||
324 | DMA_TO_DEVICE); |
||
325 | |||
326 | if (dma_unmap_len(tx_buf, dma_len1)) |
||
327 | dma_unmap_page(dev, |
||
328 | dma_unmap_addr(tx_buf, dma_addr1), |
||
329 | dma_unmap_len(tx_buf, dma_len1), |
||
330 | DMA_TO_DEVICE); |
||
331 | |||
332 | dma_unmap_len_set(tx_buf, dma_addr0, 0); |
||
333 | dma_unmap_len_set(tx_buf, dma_addr1, 0); |
||
334 | if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC)) |
||
335 | dev_kfree_skb_any(tx_buf->skb); |
||
336 | tx_buf->skb = NULL; |
||
337 | } |
||
338 | |||
339 | static void fe_clean_tx(struct fe_priv *priv) |
||
340 | { |
||
341 | int i; |
||
342 | struct device *dev = &priv->netdev->dev; |
||
343 | struct fe_tx_ring *ring = &priv->tx_ring; |
||
344 | |||
345 | if (ring->tx_buf) { |
||
346 | for (i = 0; i < ring->tx_ring_size; i++) |
||
347 | fe_txd_unmap(dev, &ring->tx_buf[i]); |
||
348 | kfree(ring->tx_buf); |
||
349 | ring->tx_buf = NULL; |
||
350 | } |
||
351 | |||
352 | if (ring->tx_dma) { |
||
353 | dma_free_coherent(dev, |
||
354 | ring->tx_ring_size * sizeof(*ring->tx_dma), |
||
355 | ring->tx_dma, |
||
356 | ring->tx_phys); |
||
357 | ring->tx_dma = NULL; |
||
358 | } |
||
359 | |||
360 | netdev_reset_queue(priv->netdev); |
||
361 | } |
||
362 | |||
363 | static int fe_alloc_tx(struct fe_priv *priv) |
||
364 | { |
||
365 | int i; |
||
366 | struct fe_tx_ring *ring = &priv->tx_ring; |
||
367 | |||
368 | ring->tx_free_idx = 0; |
||
369 | ring->tx_next_idx = 0; |
||
370 | ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, |
||
371 | MAX_SKB_FRAGS); |
||
372 | |||
373 | ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), |
||
374 | GFP_KERNEL); |
||
375 | if (!ring->tx_buf) |
||
376 | goto no_tx_mem; |
||
377 | |||
378 | ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev, |
||
379 | ring->tx_ring_size * sizeof(*ring->tx_dma), |
||
380 | &ring->tx_phys, |
||
381 | GFP_ATOMIC | __GFP_ZERO); |
||
382 | if (!ring->tx_dma) |
||
383 | goto no_tx_mem; |
||
384 | |||
385 | for (i = 0; i < ring->tx_ring_size; i++) { |
||
386 | if (priv->soc->tx_dma) |
||
387 | priv->soc->tx_dma(&ring->tx_dma[i]); |
||
388 | ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF; |
||
389 | } |
||
390 | /* make sure that all changes to the dma ring are flushed before we |
||
391 | * continue |
||
392 | */ |
||
393 | wmb(); |
||
394 | |||
395 | fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0); |
||
396 | fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0); |
||
397 | fe_reg_w32(0, FE_REG_TX_CTX_IDX0); |
||
398 | fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG); |
||
399 | |||
400 | return 0; |
||
401 | |||
402 | no_tx_mem: |
||
403 | return -ENOMEM; |
||
404 | } |
||
405 | |||
406 | static int fe_init_dma(struct fe_priv *priv) |
||
407 | { |
||
408 | int err; |
||
409 | |||
410 | err = fe_alloc_tx(priv); |
||
411 | if (err) |
||
412 | return err; |
||
413 | |||
414 | err = fe_alloc_rx(priv); |
||
415 | if (err) |
||
416 | return err; |
||
417 | |||
418 | return 0; |
||
419 | } |
||
420 | |||
421 | static void fe_free_dma(struct fe_priv *priv) |
||
422 | { |
||
423 | fe_clean_tx(priv); |
||
424 | fe_clean_rx(priv); |
||
425 | } |
||
426 | |||
427 | void fe_stats_update(struct fe_priv *priv) |
||
428 | { |
||
429 | struct fe_hw_stats *hwstats = priv->hw_stats; |
||
430 | unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE]; |
||
431 | u64 stats; |
||
432 | |||
433 | u64_stats_update_begin(&hwstats->syncp); |
||
434 | |||
435 | if (IS_ENABLED(CONFIG_SOC_MT7621)) { |
||
436 | hwstats->rx_bytes += fe_r32(base); |
||
437 | stats = fe_r32(base + 0x04); |
||
438 | if (stats) |
||
439 | hwstats->rx_bytes += (stats << 32); |
||
440 | hwstats->rx_packets += fe_r32(base + 0x08); |
||
441 | hwstats->rx_overflow += fe_r32(base + 0x10); |
||
442 | hwstats->rx_fcs_errors += fe_r32(base + 0x14); |
||
443 | hwstats->rx_short_errors += fe_r32(base + 0x18); |
||
444 | hwstats->rx_long_errors += fe_r32(base + 0x1c); |
||
445 | hwstats->rx_checksum_errors += fe_r32(base + 0x20); |
||
446 | hwstats->rx_flow_control_packets += fe_r32(base + 0x24); |
||
447 | hwstats->tx_skip += fe_r32(base + 0x28); |
||
448 | hwstats->tx_collisions += fe_r32(base + 0x2c); |
||
449 | hwstats->tx_bytes += fe_r32(base + 0x30); |
||
450 | stats = fe_r32(base + 0x34); |
||
451 | if (stats) |
||
452 | hwstats->tx_bytes += (stats << 32); |
||
453 | hwstats->tx_packets += fe_r32(base + 0x38); |
||
454 | } else { |
||
455 | hwstats->tx_bytes += fe_r32(base); |
||
456 | hwstats->tx_packets += fe_r32(base + 0x04); |
||
457 | hwstats->tx_skip += fe_r32(base + 0x08); |
||
458 | hwstats->tx_collisions += fe_r32(base + 0x0c); |
||
459 | hwstats->rx_bytes += fe_r32(base + 0x20); |
||
460 | hwstats->rx_packets += fe_r32(base + 0x24); |
||
461 | hwstats->rx_overflow += fe_r32(base + 0x28); |
||
462 | hwstats->rx_fcs_errors += fe_r32(base + 0x2c); |
||
463 | hwstats->rx_short_errors += fe_r32(base + 0x30); |
||
464 | hwstats->rx_long_errors += fe_r32(base + 0x34); |
||
465 | hwstats->rx_checksum_errors += fe_r32(base + 0x38); |
||
466 | hwstats->rx_flow_control_packets += fe_r32(base + 0x3c); |
||
467 | } |
||
468 | |||
469 | u64_stats_update_end(&hwstats->syncp); |
||
470 | } |
||
471 | |||
472 | static void fe_get_stats64(struct net_device *dev, |
||
473 | struct rtnl_link_stats64 *storage) |
||
474 | { |
||
475 | struct fe_priv *priv = netdev_priv(dev); |
||
476 | struct fe_hw_stats *hwstats = priv->hw_stats; |
||
477 | unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE]; |
||
478 | unsigned int start; |
||
479 | |||
480 | if (!base) { |
||
481 | netdev_stats_to_stats64(storage, &dev->stats); |
||
482 | return; |
||
483 | } |
||
484 | |||
485 | if (netif_running(dev) && netif_device_present(dev)) { |
||
486 | if (spin_trylock_bh(&hwstats->stats_lock)) { |
||
487 | fe_stats_update(priv); |
||
488 | spin_unlock_bh(&hwstats->stats_lock); |
||
489 | } |
||
490 | } |
||
491 | |||
492 | do { |
||
493 | start = u64_stats_fetch_begin_irq(&hwstats->syncp); |
||
494 | storage->rx_packets = hwstats->rx_packets; |
||
495 | storage->tx_packets = hwstats->tx_packets; |
||
496 | storage->rx_bytes = hwstats->rx_bytes; |
||
497 | storage->tx_bytes = hwstats->tx_bytes; |
||
498 | storage->collisions = hwstats->tx_collisions; |
||
499 | storage->rx_length_errors = hwstats->rx_short_errors + |
||
500 | hwstats->rx_long_errors; |
||
501 | storage->rx_over_errors = hwstats->rx_overflow; |
||
502 | storage->rx_crc_errors = hwstats->rx_fcs_errors; |
||
503 | storage->rx_errors = hwstats->rx_checksum_errors; |
||
504 | storage->tx_aborted_errors = hwstats->tx_skip; |
||
505 | } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); |
||
506 | |||
507 | storage->tx_errors = priv->netdev->stats.tx_errors; |
||
508 | storage->rx_dropped = priv->netdev->stats.rx_dropped; |
||
509 | storage->tx_dropped = priv->netdev->stats.tx_dropped; |
||
510 | } |
||
511 | |||
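/* The frame engine has a small TX VID table indexed by the low 4 bits of
 * the VID; this hook keeps that table in sync and turns TX VLAN offload
 * back off when two configured VIDs would collide in the same slot.
 */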
512 | static int fe_vlan_rx_add_vid(struct net_device *dev, |
||
513 | __be16 proto, u16 vid) |
||
514 | { |
||
515 | struct fe_priv *priv = netdev_priv(dev); |
||
516 | u32 idx = (vid & 0xf); |
||
517 | u32 vlan_cfg; |
||
518 | |||
519 | if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) && |
||
520 | (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) |
||
521 | return 0; |
||
522 | |||
523 | if (test_bit(idx, &priv->vlan_map)) { |
||
524 | netdev_warn(dev, "disable tx vlan offload\n"); |
||
525 | dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX; |
||
526 | netdev_update_features(dev); |
||
527 | } else { |
||
528 | vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] + |
||
529 | ((idx >> 1) << 2)); |
||
530 | if (idx & 0x1) { |
||
531 | vlan_cfg &= 0xffff; |
||
532 | vlan_cfg |= (vid << 16); |
||
533 | } else { |
||
534 | vlan_cfg &= 0xffff0000; |
||
535 | vlan_cfg |= vid; |
||
536 | } |
||
537 | fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] + |
||
538 | ((idx >> 1) << 2)); |
||
539 | set_bit(idx, &priv->vlan_map); |
||
540 | } |
||
541 | |||
542 | return 0; |
||
543 | } |
||
544 | |||
545 | static int fe_vlan_rx_kill_vid(struct net_device *dev, |
||
546 | __be16 proto, u16 vid) |
||
547 | { |
||
548 | struct fe_priv *priv = netdev_priv(dev); |
||
549 | u32 idx = (vid & 0xf); |
||
550 | |||
551 | if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) && |
||
552 | (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) |
||
553 | return 0; |
||
554 | |||
555 | clear_bit(idx, &priv->vlan_map); |
||
556 | |||
557 | return 0; |
||
558 | } |
||
559 | |||
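/* Number of unused TX descriptors: ring size minus the distance between
 * the software producer (tx_next_idx) and the cleanup consumer
 * (tx_free_idx), taken modulo the power-of-two ring size.
 */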
560 | static inline u32 fe_empty_txd(struct fe_tx_ring *ring) |
||
561 | { |
||
562 | barrier(); |
||
563 | return (u32)(ring->tx_ring_size - |
||
564 | ((ring->tx_next_idx - ring->tx_free_idx) & |
||
565 | (ring->tx_ring_size - 1))); |
||
566 | } |
||
567 | |||
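/* Scratch state while mapping one skb onto the TX ring: each hardware
 * descriptor carries up to two buffer fragments (txd1/txd3, lengths in
 * txd2), so a shadow descriptor is filled here and flushed to the ring
 * after every second fragment.
 */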
568 | struct fe_map_state { |
||
569 | struct device *dev; |
||
570 | struct fe_tx_dma txd; |
||
571 | u32 def_txd4; |
||
572 | int ring_idx; |
||
573 | int i; |
||
574 | }; |
||
575 | |||
576 | static void fe_tx_dma_write_desc(struct fe_tx_ring *ring, struct fe_map_state *st) |
||
577 | { |
||
578 | fe_set_txd(&st->txd, &ring->tx_dma[st->ring_idx]); |
||
579 | memset(&st->txd, 0, sizeof(st->txd)); |
||
580 | st->txd.txd4 = st->def_txd4; |
||
581 | st->ring_idx = NEXT_TX_DESP_IDX(st->ring_idx); |
||
582 | } |
||
583 | |||
584 | static int __fe_tx_dma_map_page(struct fe_tx_ring *ring, struct fe_map_state *st, |
||
585 | struct page *page, size_t offset, size_t size) |
||
586 | { |
||
587 | struct device *dev = st->dev; |
||
588 | struct fe_tx_buf *tx_buf; |
||
589 | dma_addr_t mapped_addr; |
||
590 | |||
591 | mapped_addr = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE); |
||
592 | if (unlikely(dma_mapping_error(dev, mapped_addr))) |
||
593 | return -EIO; |
||
594 | |||
595 | if (st->i && !(st->i & 1)) |
||
596 | fe_tx_dma_write_desc(ring, st); |
||
597 | |||
598 | tx_buf = &ring->tx_buf[st->ring_idx]; |
||
599 | if (st->i & 1) { |
||
600 | st->txd.txd3 = mapped_addr; |
||
601 | st->txd.txd2 |= TX_DMA_PLEN1(size); |
||
602 | dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); |
||
603 | dma_unmap_len_set(tx_buf, dma_len1, size); |
||
604 | } else { |
||
605 | tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; |
||
606 | st->txd.txd1 = mapped_addr; |
||
607 | st->txd.txd2 = TX_DMA_PLEN0(size); |
||
608 | dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); |
||
609 | dma_unmap_len_set(tx_buf, dma_len0, size); |
||
610 | } |
||
611 | st->i++; |
||
612 | |||
613 | return 0; |
||
614 | } |
||
615 | |||
616 | static int fe_tx_dma_map_page(struct fe_tx_ring *ring, struct fe_map_state *st, |
||
617 | struct page *page, size_t offset, size_t size) |
||
618 | { |
||
619 | int cur_size; |
||
620 | int ret; |
||
621 | |||
622 | while (size > 0) { |
||
623 | cur_size = min_t(size_t, size, TX_DMA_BUF_LEN); |
||
624 | |||
625 | ret = __fe_tx_dma_map_page(ring, st, page, offset, cur_size); |
||
626 | if (ret) |
||
627 | return ret; |
||
628 | |||
629 | size -= cur_size; |
||
630 | offset += cur_size; |
||
631 | } |
||
632 | |||
633 | return 0; |
||
634 | } |
||
635 | |||
636 | static int fe_tx_dma_map_skb(struct fe_tx_ring *ring, struct fe_map_state *st, |
||
637 | struct sk_buff *skb) |
||
638 | { |
||
639 | struct page *page = virt_to_page(skb->data); |
||
640 | size_t offset = offset_in_page(skb->data); |
||
641 | size_t size = skb_headlen(skb); |
||
642 | |||
643 | return fe_tx_dma_map_page(ring, st, page, offset, size); |
||
644 | } |
||
645 | |||
646 | static inline struct sk_buff * |
||
647 | fe_next_frag(struct sk_buff *head, struct sk_buff *skb) |
||
648 | { |
||
649 | if (skb != head) |
||
650 | return skb->next; |
||
651 | |||
652 | if (skb_has_frag_list(skb)) |
||
653 | return skb_shinfo(skb)->frag_list; |
||
654 | |||
655 | return NULL; |
||
656 | } |
||
657 | |||
658 | |||
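/* Map an skb (including any frag list) onto the TX ring: txd4 carries the
 * checksum/VLAN/TSO offload flags, the linear data and page fragments are
 * packed two per descriptor, and the hardware CTX index is only kicked
 * once the queue is stopped or xmit_more is clear.
 */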
659 | static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev, |
||
660 | int tx_num, struct fe_tx_ring *ring) |
||
661 | { |
||
662 | struct fe_priv *priv = netdev_priv(dev); |
||
663 | struct fe_map_state st = { |
||
664 | .dev = &dev->dev, |
||
665 | .ring_idx = ring->tx_next_idx, |
||
666 | }; |
||
667 | struct sk_buff *head = skb; |
||
668 | struct fe_tx_buf *tx_buf; |
||
669 | unsigned int nr_frags; |
||
670 | int i, j; |
||
671 | |||
672 | /* init tx descriptor */ |
||
673 | if (priv->soc->tx_dma) |
||
674 | priv->soc->tx_dma(&st.txd); |
||
675 | else |
||
676 | st.txd.txd4 = TX_DMA_DESP4_DEF; |
||
677 | st.def_txd4 = st.txd.txd4; |
||
678 | |||
679 | /* TX Checksum offload */ |
||
680 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
||
681 | st.txd.txd4 |= TX_DMA_CHKSUM; |
||
682 | |||
683 | /* VLAN header offload */ |
||
684 | if (skb_vlan_tag_present(skb)) { |
||
685 | u16 tag = skb_vlan_tag_get(skb); |
||
686 | |||
687 | if (IS_ENABLED(CONFIG_SOC_MT7621)) |
||
688 | st.txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag; |
||
689 | else |
||
690 | st.txd.txd4 |= TX_DMA_INS_VLAN | |
||
691 | ((tag >> VLAN_PRIO_SHIFT) << 4) | |
||
692 | (tag & 0xF); |
||
693 | } |
||
694 | |||
695 | /* TSO: fill MSS info in tcp checksum field */ |
||
696 | if (skb_is_gso(skb)) { |
||
697 | if (skb_cow_head(skb, 0)) { |
||
698 | netif_warn(priv, tx_err, dev, |
||
699 | "GSO expand head fail.\n"); |
||
700 | goto err_out; |
||
701 | } |
||
702 | if (skb_shinfo(skb)->gso_type & |
||
703 | (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { |
||
704 | st.txd.txd4 |= TX_DMA_TSO; |
||
705 | tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); |
||
706 | } |
||
707 | } |
||
708 | |||
709 | next_frag: |
||
710 | if (skb_headlen(skb) && fe_tx_dma_map_skb(ring, &st, skb)) |
||
711 | goto err_dma; |
||
712 | |||
713 | /* TX SG offload */ |
||
714 | nr_frags = skb_shinfo(skb)->nr_frags; |
||
715 | for (i = 0; i < nr_frags; i++) { |
||
716 | struct skb_frag_struct *frag; |
||
717 | |||
718 | frag = &skb_shinfo(skb)->frags[i]; |
||
719 | if (fe_tx_dma_map_page(ring, &st, skb_frag_page(frag), |
||
720 | frag->page_offset, skb_frag_size(frag))) |
||
721 | goto err_dma; |
||
722 | } |
||
723 | |||
724 | skb = fe_next_frag(head, skb); |
||
725 | if (skb) |
||
726 | goto next_frag; |
||
727 | |||
728 | /* set last segment */ |
||
729 | if (st.i & 0x1) |
||
730 | st.txd.txd2 |= TX_DMA_LS0; |
||
731 | else |
||
732 | st.txd.txd2 |= TX_DMA_LS1; |
||
733 | |||
734 | /* store skb to cleanup */ |
||
735 | tx_buf = &ring->tx_buf[st.ring_idx]; |
||
736 | tx_buf->skb = head; |
||
737 | |||
738 | netdev_sent_queue(dev, head->len); |
||
739 | skb_tx_timestamp(head); |
||
740 | |||
741 | fe_tx_dma_write_desc(ring, &st); |
||
742 | ring->tx_next_idx = st.ring_idx; |
||
743 | |||
744 | /* make sure that all changes to the dma ring are flushed before we |
||
745 | * continue |
||
746 | */ |
||
747 | wmb(); |
||
748 | if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) { |
||
749 | netif_stop_queue(dev); |
||
750 | smp_mb(); |
||
751 | if (unlikely(fe_empty_txd(ring) > ring->tx_thresh)) |
||
752 | netif_wake_queue(dev); |
||
753 | } |
||
754 | |||
755 | if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !head->xmit_more) |
||
756 | fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0); |
||
757 | |||
758 | return 0; |
||
759 | |||
760 | err_dma: |
||
761 | j = ring->tx_next_idx; |
||
762 | for (i = 0; i < tx_num; i++) { |
||
763 | /* unmap dma */ |
||
764 | fe_txd_unmap(&dev->dev, &ring->tx_buf[j]); |
||
765 | ring->tx_dma[j].txd2 = TX_DMA_DESP2_DEF; |
||
766 | |||
767 | j = NEXT_TX_DESP_IDX(j); |
||
768 | } |
||
769 | /* make sure that all changes to the dma ring are flushed before we |
||
770 | * continue |
||
771 | */ |
||
772 | wmb(); |
||
773 | |||
774 | err_out: |
||
775 | return -1; |
||
776 | } |
||
777 | |||
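/* Pad runt frames in software when the hardware either cannot pad to 64
 * bytes or has the known padding bug; frames that already carry an 802.1Q
 * tag in their data are padded to the larger VLAN minimum length.
 */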
778 | static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv) |
||
779 | { |
||
780 | unsigned int len; |
||
781 | int ret; |
||
782 | |||
783 | ret = 0; |
||
784 | if (unlikely(skb->len < VLAN_ETH_ZLEN)) { |
||
785 | if ((priv->flags & FE_FLAG_PADDING_64B) && |
||
786 | !(priv->flags & FE_FLAG_PADDING_BUG)) |
||
787 | return ret; |
||
788 | |||
789 | if (skb_vlan_tag_present(skb)) |
||
790 | len = ETH_ZLEN; |
||
791 | else if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) |
||
792 | len = VLAN_ETH_ZLEN; |
||
793 | else if (!(priv->flags & FE_FLAG_PADDING_64B)) |
||
794 | len = ETH_ZLEN; |
||
795 | else |
||
796 | return ret; |
||
797 | |||
798 | if (skb->len < len) { |
||
799 | ret = skb_pad(skb, len - skb->len); |
||
800 | if (ret < 0) |
||
801 | return ret; |
||
802 | skb->len = len; |
||
803 | skb_set_tail_pointer(skb, len); |
||
804 | } |
||
805 | } |
||
806 | |||
807 | return ret; |
||
808 | } |
||
809 | |||
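/* Worst-case TX descriptor count for an skb and its frag list: two
 * fragments fit in each descriptor, and for GSO every page fragment may be
 * split into TX_DMA_BUF_LEN sized pieces.
 */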
810 | static inline int fe_cal_txd_req(struct sk_buff *skb) |
||
811 | { |
||
812 | struct sk_buff *head = skb; |
||
813 | int i, nfrags = 0; |
||
814 | struct skb_frag_struct *frag; |
||
815 | |||
816 | next_frag: |
||
817 | nfrags++; |
||
818 | if (skb_is_gso(skb)) { |
||
819 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
||
820 | frag = &skb_shinfo(skb)->frags[i]; |
||
821 | nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN); |
||
822 | } |
||
823 | } else { |
||
824 | nfrags += skb_shinfo(skb)->nr_frags; |
||
825 | } |
||
826 | |||
827 | skb = fe_next_frag(head, skb); |
||
828 | if (skb) |
||
829 | goto next_frag; |
||
830 | |||
831 | return DIV_ROUND_UP(nfrags, 2); |
||
832 | } |
||
833 | |||
834 | static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev) |
||
835 | { |
||
836 | struct fe_priv *priv = netdev_priv(dev); |
||
837 | struct fe_tx_ring *ring = &priv->tx_ring; |
||
838 | struct net_device_stats *stats = &dev->stats; |
||
839 | int tx_num; |
||
840 | int len = skb->len; |
||
841 | |||
842 | if (fe_skb_padto(skb, priv)) { |
||
843 | netif_warn(priv, tx_err, dev, "tx padding failed!\n"); |
||
844 | return NETDEV_TX_OK; |
||
845 | } |
||
846 | |||
847 | tx_num = fe_cal_txd_req(skb); |
||
848 | if (unlikely(fe_empty_txd(ring) <= tx_num)) { |
||
849 | netif_stop_queue(dev); |
||
850 | netif_err(priv, tx_queued, dev, |
||
851 | "Tx Ring full when queue awake!\n"); |
||
852 | return NETDEV_TX_BUSY; |
||
853 | } |
||
854 | |||
855 | if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) { |
||
856 | stats->tx_dropped++; |
||
857 | } else { |
||
858 | stats->tx_packets++; |
||
859 | stats->tx_bytes += len; |
||
860 | } |
||
861 | |||
862 | return NETDEV_TX_OK; |
||
863 | } |
||
864 | |||
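/* NAPI RX path: a replacement buffer is allocated and DMA-mapped before the
 * filled one is handed to the stack, so received data is never copied; if
 * the allocation or mapping fails the descriptor is recycled and the frame
 * is dropped.
 */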
865 | static int fe_poll_rx(struct napi_struct *napi, int budget, |
||
866 | struct fe_priv *priv, u32 rx_intr) |
||
867 | { |
||
868 | struct net_device *netdev = priv->netdev; |
||
869 | struct net_device_stats *stats = &netdev->stats; |
||
870 | struct fe_soc_data *soc = priv->soc; |
||
871 | struct fe_rx_ring *ring = &priv->rx_ring; |
||
872 | int idx = ring->rx_calc_idx; |
||
873 | u32 checksum_bit; |
||
874 | struct sk_buff *skb; |
||
875 | u8 *data, *new_data; |
||
876 | struct fe_rx_dma *rxd, trxd; |
||
877 | int done = 0, pad; |
||
878 | |||
879 | if (netdev->features & NETIF_F_RXCSUM) |
||
880 | checksum_bit = soc->checksum_bit; |
||
881 | else |
||
882 | checksum_bit = 0; |
||
883 | |||
884 | if (priv->flags & FE_FLAG_RX_2B_OFFSET) |
||
885 | pad = 0; |
||
886 | else |
||
887 | pad = NET_IP_ALIGN; |
||
888 | |||
889 | while (done < budget) { |
||
890 | unsigned int pktlen; |
||
891 | dma_addr_t dma_addr; |
||
892 | |||
893 | idx = NEXT_RX_DESP_IDX(idx); |
||
894 | rxd = &ring->rx_dma[idx]; |
||
895 | data = ring->rx_data[idx]; |
||
896 | |||
897 | fe_get_rxd(&trxd, rxd); |
||
898 | if (!(trxd.rxd2 & RX_DMA_DONE)) |
||
899 | break; |
||
900 | |||
901 | /* alloc new buffer */ |
||
902 | new_data = page_frag_alloc(&ring->frag_cache, ring->frag_size, |
||
903 | GFP_ATOMIC); |
||
904 | if (unlikely(!new_data)) { |
||
905 | stats->rx_dropped++; |
||
906 | goto release_desc; |
||
907 | } |
||
908 | dma_addr = dma_map_single(&netdev->dev, |
||
909 | new_data + NET_SKB_PAD + pad, |
||
910 | ring->rx_buf_size, |
||
911 | DMA_FROM_DEVICE); |
||
912 | if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { |
||
913 | skb_free_frag(new_data); |
||
914 | goto release_desc; |
||
915 | } |
||
916 | |||
917 | /* receive data */ |
||
918 | skb = build_skb(data, ring->frag_size); |
||
919 | if (unlikely(!skb)) { |
||
920 | skb_free_frag(new_data); |
||
921 | goto release_desc; |
||
922 | } |
||
923 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); |
||
924 | |||
925 | dma_unmap_single(&netdev->dev, trxd.rxd1, |
||
926 | ring->rx_buf_size, DMA_FROM_DEVICE); |
||
927 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); |
||
928 | skb->dev = netdev; |
||
929 | skb_put(skb, pktlen); |
||
930 | if (trxd.rxd4 & checksum_bit) |
||
931 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
||
932 | else |
||
933 | skb_checksum_none_assert(skb); |
||
934 | skb->protocol = eth_type_trans(skb, netdev); |
||
935 | |||
936 | #ifdef CONFIG_NET_MEDIATEK_OFFLOAD |
||
937 | if (mtk_offload_check_rx(priv, skb, trxd.rxd4) == 0) { |
||
938 | #endif |
||
939 | stats->rx_packets++; |
||
940 | stats->rx_bytes += pktlen; |
||
941 | |||
942 | napi_gro_receive(napi, skb); |
||
943 | #ifdef CONFIG_NET_MEDIATEK_OFFLOAD |
||
944 | } else { |
||
945 | dev_kfree_skb(skb); |
||
946 | } |
||
947 | #endif |
||
948 | ring->rx_data[idx] = new_data; |
||
949 | rxd->rxd1 = (unsigned int)dma_addr; |
||
950 | |||
951 | release_desc: |
||
952 | if (priv->flags & FE_FLAG_RX_SG_DMA) |
||
953 | rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); |
||
954 | else |
||
955 | rxd->rxd2 = RX_DMA_LSO; |
||
956 | |||
957 | ring->rx_calc_idx = idx; |
||
958 | /* make sure that all changes to the dma ring are flushed before |
||
959 | * we continue |
||
960 | */ |
||
961 | wmb(); |
||
962 | fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0); |
||
963 | done++; |
||
964 | } |
||
965 | |||
966 | if (done < budget) |
||
967 | fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS); |
||
968 | |||
969 | return done; |
||
970 | } |
||
971 | |||
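/* Reclaim completed TX buffers between the software free index and the
 * hardware DTX index; *tx_again is set when the hardware index moved while
 * cleaning, so fe_poll() keeps the NAPI context scheduled.
 */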
972 | static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr, |
||
973 | int *tx_again) |
||
974 | { |
||
975 | struct net_device *netdev = priv->netdev; |
||
976 | struct device *dev = &netdev->dev; |
||
977 | unsigned int bytes_compl = 0; |
||
978 | struct sk_buff *skb; |
||
979 | struct fe_tx_buf *tx_buf; |
||
980 | int done = 0; |
||
981 | u32 idx, hwidx; |
||
982 | struct fe_tx_ring *ring = &priv->tx_ring; |
||
983 | |||
984 | idx = ring->tx_free_idx; |
||
985 | hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0); |
||
986 | |||
987 | while ((idx != hwidx) && budget) { |
||
988 | tx_buf = &ring->tx_buf[idx]; |
||
989 | skb = tx_buf->skb; |
||
990 | |||
991 | if (!skb) |
||
992 | break; |
||
993 | |||
994 | if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { |
||
995 | bytes_compl += skb->len; |
||
996 | done++; |
||
997 | budget--; |
||
998 | } |
||
999 | fe_txd_unmap(dev, tx_buf); |
||
1000 | idx = NEXT_TX_DESP_IDX(idx); |
||
1001 | } |
||
1002 | ring->tx_free_idx = idx; |
||
1003 | |||
1004 | if (idx == hwidx) { |
||
1005 | /* read the hw index again to make sure no new tx packets arrived */ |
||
1006 | hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0); |
||
1007 | if (idx == hwidx) |
||
1008 | fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS); |
||
1009 | else |
||
1010 | *tx_again = 1; |
||
1011 | } else { |
||
1012 | *tx_again = 1; |
||
1013 | } |
||
1014 | |||
1015 | if (done) { |
||
1016 | netdev_completed_queue(netdev, done, bytes_compl); |
||
1017 | smp_mb(); |
||
1018 | if (unlikely(netif_queue_stopped(netdev) && |
||
1019 | (fe_empty_txd(ring) > ring->tx_thresh))) |
||
1020 | netif_wake_queue(netdev); |
||
1021 | } |
||
1022 | |||
1023 | return done; |
||
1024 | } |
||
1025 | |||
1026 | static int fe_poll(struct napi_struct *napi, int budget) |
||
1027 | { |
||
1028 | struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi); |
||
1029 | struct fe_hw_stats *hwstat = priv->hw_stats; |
||
1030 | int tx_done, rx_done, tx_again; |
||
1031 | u32 status, fe_status, status_reg, mask; |
||
1032 | u32 tx_intr, rx_intr, status_intr; |
||
1033 | |||
1034 | status = fe_reg_r32(FE_REG_FE_INT_STATUS); |
||
1035 | fe_status = status; |
||
1036 | tx_intr = priv->soc->tx_int; |
||
1037 | rx_intr = priv->soc->rx_int; |
||
1038 | status_intr = priv->soc->status_int; |
||
1039 | tx_done = 0; |
||
1040 | rx_done = 0; |
||
1041 | tx_again = 0; |
||
1042 | |||
1043 | if (fe_reg_table[FE_REG_FE_INT_STATUS2]) { |
||
1044 | fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2); |
||
1045 | status_reg = FE_REG_FE_INT_STATUS2; |
||
1046 | } else { |
||
1047 | status_reg = FE_REG_FE_INT_STATUS; |
||
1048 | } |
||
1049 | |||
1050 | if (status & tx_intr) |
||
1051 | tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again); |
||
1052 | |||
1053 | if (status & rx_intr) |
||
1054 | rx_done = fe_poll_rx(napi, budget, priv, rx_intr); |
||
1055 | |||
1056 | if (unlikely(fe_status & status_intr)) { |
||
1057 | if (hwstat && spin_trylock(&hwstat->stats_lock)) { |
||
1058 | fe_stats_update(priv); |
||
1059 | spin_unlock(&hwstat->stats_lock); |
||
1060 | } |
||
1061 | fe_reg_w32(status_intr, status_reg); |
||
1062 | } |
||
1063 | |||
1064 | if (unlikely(netif_msg_intr(priv))) { |
||
1065 | mask = fe_reg_r32(FE_REG_FE_INT_ENABLE); |
||
1066 | netdev_info(priv->netdev, |
||
1067 | "done tx %d, rx %d, intr 0x%08x/0x%x\n", |
||
1068 | tx_done, rx_done, status, mask); |
||
1069 | } |
||
1070 | |||
1071 | if (!tx_again && (rx_done < budget)) { |
||
1072 | status = fe_reg_r32(FE_REG_FE_INT_STATUS); |
||
1073 | if (status & (tx_intr | rx_intr)) { |
||
1074 | /* let napi poll again */ |
||
1075 | rx_done = budget; |
||
1076 | goto poll_again; |
||
1077 | } |
||
1078 | |||
1079 | napi_complete_done(napi, rx_done); |
||
1080 | fe_int_enable(tx_intr | rx_intr); |
||
1081 | } else { |
||
1082 | rx_done = budget; |
||
1083 | } |
||
1084 | |||
1085 | poll_again: |
||
1086 | return rx_done; |
||
1087 | } |
||
1088 | |||
1089 | static void fe_tx_timeout(struct net_device *dev) |
||
1090 | { |
||
1091 | struct fe_priv *priv = netdev_priv(dev); |
||
1092 | struct fe_tx_ring *ring = &priv->tx_ring; |
||
1093 | |||
1094 | priv->netdev->stats.tx_errors++; |
||
1095 | netif_err(priv, tx_err, dev, |
||
1096 | "transmit timed out\n"); |
||
1097 | netif_info(priv, drv, dev, "dma_cfg:%08x\n", |
||
1098 | fe_reg_r32(FE_REG_PDMA_GLO_CFG)); |
||
1099 | netif_info(priv, drv, dev, "tx_ring=%d, " |
||
1100 | "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n", |
||
1101 | 0, fe_reg_r32(FE_REG_TX_BASE_PTR0), |
||
1102 | fe_reg_r32(FE_REG_TX_MAX_CNT0), |
||
1103 | fe_reg_r32(FE_REG_TX_CTX_IDX0), |
||
1104 | fe_reg_r32(FE_REG_TX_DTX_IDX0), |
||
1105 | ring->tx_free_idx, |
||
1106 | ring->tx_next_idx); |
||
1107 | netif_info(priv, drv, dev, |
||
1108 | "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n", |
||
1109 | 0, fe_reg_r32(FE_REG_RX_BASE_PTR0), |
||
1110 | fe_reg_r32(FE_REG_RX_MAX_CNT0), |
||
1111 | fe_reg_r32(FE_REG_RX_CALC_IDX0), |
||
1112 | fe_reg_r32(FE_REG_RX_DRX_IDX0)); |
||
1113 | |||
1114 | if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags)) |
||
1115 | schedule_work(&priv->pending_work); |
||
1116 | } |
||
1117 | |||
1118 | static irqreturn_t fe_handle_irq(int irq, void *dev) |
||
1119 | { |
||
1120 | struct fe_priv *priv = netdev_priv(dev); |
||
1121 | u32 status, int_mask; |
||
1122 | |||
1123 | status = fe_reg_r32(FE_REG_FE_INT_STATUS); |
||
1124 | |||
1125 | if (unlikely(!status)) |
||
1126 | return IRQ_NONE; |
||
1127 | |||
1128 | int_mask = (priv->soc->rx_int | priv->soc->tx_int); |
||
1129 | if (likely(status & int_mask)) { |
||
1130 | if (likely(napi_schedule_prep(&priv->rx_napi))) { |
||
1131 | fe_int_disable(int_mask); |
||
1132 | __napi_schedule(&priv->rx_napi); |
||
1133 | } |
||
1134 | } else { |
||
1135 | fe_reg_w32(status, FE_REG_FE_INT_STATUS); |
||
1136 | } |
||
1137 | |||
1138 | return IRQ_HANDLED; |
||
1139 | } |
||
1140 | |||
1141 | #ifdef CONFIG_NET_POLL_CONTROLLER |
||
1142 | static void fe_poll_controller(struct net_device *dev) |
||
1143 | { |
||
1144 | struct fe_priv *priv = netdev_priv(dev); |
||
1145 | u32 int_mask = priv->soc->tx_int | priv->soc->rx_int; |
||
1146 | |||
1147 | fe_int_disable(int_mask); |
||
1148 | fe_handle_irq(dev->irq, dev); |
||
1149 | fe_int_enable(int_mask); |
||
1150 | } |
||
1151 | #endif |
||
1152 | |||
1153 | int fe_set_clock_cycle(struct fe_priv *priv) |
||
1154 | { |
||
1155 | unsigned long sysclk = priv->sysclk; |
||
1156 | |||
1157 | sysclk /= FE_US_CYC_CNT_DIVISOR; |
||
1158 | sysclk <<= FE_US_CYC_CNT_SHIFT; |
||
1159 | |||
1160 | fe_w32((fe_r32(FE_FE_GLO_CFG) & |
||
1161 | ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | |
||
1162 | sysclk, |
||
1163 | FE_FE_GLO_CFG); |
||
1164 | return 0; |
||
1165 | } |
||
1166 | |||
1167 | void fe_fwd_config(struct fe_priv *priv) |
||
1168 | { |
||
1169 | u32 fwd_cfg; |
||
1170 | |||
1171 | fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG); |
||
1172 | |||
1173 | /* disable jumbo frame */ |
||
1174 | if (priv->flags & FE_FLAG_JUMBO_FRAME) |
||
1175 | fwd_cfg &= ~FE_GDM1_JMB_EN; |
||
1176 | |||
1177 | /* forward unicast/multicast/broadcast frames to the cpu */ |
||
1178 | fwd_cfg &= ~0xffff; |
||
1179 | |||
1180 | fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG); |
||
1181 | } |
||
1182 | |||
1183 | static void fe_rxcsum_config(bool enable) |
||
1184 | { |
||
1185 | if (enable) |
||
1186 | fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN | |
||
1187 | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), |
||
1188 | FE_GDMA1_FWD_CFG); |
||
1189 | else |
||
1190 | fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN | |
||
1191 | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), |
||
1192 | FE_GDMA1_FWD_CFG); |
||
1193 | } |
||
1194 | |||
1195 | static void fe_txcsum_config(bool enable) |
||
1196 | { |
||
1197 | if (enable) |
||
1198 | fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN | |
||
1199 | FE_TCS_GEN_EN | FE_UCS_GEN_EN), |
||
1200 | FE_CDMA_CSG_CFG); |
||
1201 | else |
||
1202 | fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN | |
||
1203 | FE_TCS_GEN_EN | FE_UCS_GEN_EN), |
||
1204 | FE_CDMA_CSG_CFG); |
||
1205 | } |
||
1206 | |||
1207 | void fe_csum_config(struct fe_priv *priv) |
||
1208 | { |
||
1209 | struct net_device *dev = priv_netdev(priv); |
||
1210 | |||
1211 | fe_txcsum_config((dev->features & NETIF_F_IP_CSUM)); |
||
1212 | fe_rxcsum_config((dev->features & NETIF_F_RXCSUM)); |
||
1213 | } |
||
1214 | |||
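/* One-time MAC/DMA setup: request the interrupt, program the MAC address,
 * disable delayed interrupts, preload the TX VID table and apply the
 * SoC-specific forwarding configuration and soft reset.
 */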
1215 | static int fe_hw_init(struct net_device *dev) |
||
1216 | { |
||
1217 | struct fe_priv *priv = netdev_priv(dev); |
||
1218 | int i, err; |
||
1219 | |||
1220 | err = devm_request_irq(priv->dev, dev->irq, fe_handle_irq, 0, |
||
1221 | dev_name(priv->dev), dev); |
||
1222 | if (err) |
||
1223 | return err; |
||
1224 | |||
1225 | if (priv->soc->set_mac) |
||
1226 | priv->soc->set_mac(priv, dev->dev_addr); |
||
1227 | else |
||
1228 | fe_hw_set_macaddr(priv, dev->dev_addr); |
||
1229 | |||
1230 | /* disable delay interrupt */ |
||
1231 | fe_reg_w32(0, FE_REG_DLY_INT_CFG); |
||
1232 | |||
1233 | fe_int_disable(priv->soc->tx_int | priv->soc->rx_int); |
||
1234 | |||
1235 | /* frame engine will push the VLAN tag according to the VIDX field in the Tx desc */ |
||
1236 | if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) |
||
1237 | for (i = 0; i < 16; i += 2) |
||
1238 | fe_w32(((i + 1) << 16) + i, |
||
1239 | fe_reg_table[FE_REG_FE_DMA_VID_BASE] + |
||
1240 | (i * 2)); |
||
1241 | |||
1242 | if (priv->soc->fwd_config(priv)) |
||
1243 | netdev_err(dev, "unable to get clock\n"); |
||
1244 | |||
1245 | if (fe_reg_table[FE_REG_FE_RST_GL]) { |
||
1246 | fe_reg_w32(1, FE_REG_FE_RST_GL); |
||
1247 | fe_reg_w32(0, FE_REG_FE_RST_GL); |
||
1248 | } |
||
1249 | |||
1250 | return 0; |
||
1251 | } |
||
1252 | |||
1253 | static int fe_open(struct net_device *dev) |
||
1254 | { |
||
1255 | struct fe_priv *priv = netdev_priv(dev); |
||
1256 | unsigned long flags; |
||
1257 | u32 val; |
||
1258 | int err; |
||
1259 | |||
1260 | err = fe_init_dma(priv); |
||
1261 | if (err) { |
||
1262 | fe_free_dma(priv); |
||
1263 | return err; |
||
1264 | } |
||
1265 | |||
1266 | spin_lock_irqsave(&priv->page_lock, flags); |
||
1267 | |||
1268 | val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; |
||
1269 | if (priv->flags & FE_FLAG_RX_2B_OFFSET) |
||
1270 | val |= FE_RX_2B_OFFSET; |
||
1271 | val |= priv->soc->pdma_glo_cfg; |
||
1272 | fe_reg_w32(val, FE_REG_PDMA_GLO_CFG); |
||
1273 | |||
1274 | spin_unlock_irqrestore(&priv->page_lock, flags); |
||
1275 | |||
1276 | if (priv->phy) |
||
1277 | priv->phy->start(priv); |
||
1278 | |||
1279 | if (priv->soc->has_carrier && priv->soc->has_carrier(priv)) |
||
1280 | netif_carrier_on(dev); |
||
1281 | |||
1282 | napi_enable(&priv->rx_napi); |
||
1283 | fe_int_enable(priv->soc->tx_int | priv->soc->rx_int); |
||
1284 | netif_start_queue(dev); |
||
1285 | #ifdef CONFIG_NET_MEDIATEK_OFFLOAD |
||
1286 | mtk_ppe_probe(priv); |
||
1287 | #endif |
||
1288 | |||
1289 | return 0; |
||
1290 | } |
||
1291 | |||
1292 | static int fe_stop(struct net_device *dev) |
||
1293 | { |
||
1294 | struct fe_priv *priv = netdev_priv(dev); |
||
1295 | unsigned long flags; |
||
1296 | int i; |
||
1297 | |||
1298 | netif_tx_disable(dev); |
||
1299 | fe_int_disable(priv->soc->tx_int | priv->soc->rx_int); |
||
1300 | napi_disable(&priv->rx_napi); |
||
1301 | |||
1302 | if (priv->phy) |
||
1303 | priv->phy->stop(priv); |
||
1304 | |||
1305 | spin_lock_irqsave(&priv->page_lock, flags); |
||
1306 | |||
1307 | fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) & |
||
1308 | ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN), |
||
1309 | FE_REG_PDMA_GLO_CFG); |
||
1310 | spin_unlock_irqrestore(&priv->page_lock, flags); |
||
1311 | |||
1312 | /* wait for dma to stop */ |
||
1313 | for (i = 0; i < 10; i++) { |
||
1314 | if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) & |
||
1315 | (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) { |
||
1316 | msleep(20); |
||
1317 | continue; |
||
1318 | } |
||
1319 | break; |
||
1320 | } |
||
1321 | |||
1322 | fe_free_dma(priv); |
||
1323 | |||
1324 | #ifdef CONFIG_NET_MEDIATEK_OFFLOAD |
||
1325 | mtk_ppe_remove(priv); |
||
1326 | #endif |
||
1327 | |||
1328 | return 0; |
||
1329 | } |
||
1330 | |||
1331 | static int __init fe_init(struct net_device *dev) |
||
1332 | { |
||
1333 | struct fe_priv *priv = netdev_priv(dev); |
||
1334 | struct device_node *port; |
||
1335 | const char *mac_addr; |
||
1336 | int err; |
||
1337 | |||
1338 | priv->soc->reset_fe(); |
||
1339 | |||
1340 | if (priv->soc->switch_init) |
||
1341 | if (priv->soc->switch_init(priv)) { |
||
1342 | netdev_err(dev, "failed to initialize switch core\n"); |
||
1343 | return -ENODEV; |
||
1344 | } |
||
1345 | |||
1346 | mac_addr = of_get_mac_address(priv->dev->of_node); |
||
1347 | if (mac_addr) |
||
1348 | ether_addr_copy(dev->dev_addr, mac_addr); |
||
1349 | |||
1350 | /* If the mac address is invalid, use random mac address */ |
||
1351 | if (!is_valid_ether_addr(dev->dev_addr)) { |
||
1352 | random_ether_addr(dev->dev_addr); |
||
1353 | dev_err(priv->dev, "generated random MAC address %pM\n", |
||
1354 | dev->dev_addr); |
||
1355 | } |
||
1356 | |||
1357 | err = fe_mdio_init(priv); |
||
1358 | if (err) |
||
1359 | return err; |
||
1360 | |||
1361 | if (priv->soc->port_init) |
||
1362 | for_each_child_of_node(priv->dev->of_node, port) |
||
1363 | if (of_device_is_compatible(port, "mediatek,eth-port") && |
||
1364 | of_device_is_available(port)) |
||
1365 | priv->soc->port_init(priv, port); |
||
1366 | |||
1367 | if (priv->phy) { |
||
1368 | err = priv->phy->connect(priv); |
||
1369 | if (err) |
||
1370 | goto err_phy_disconnect; |
||
1371 | } |
||
1372 | |||
1373 | err = fe_hw_init(dev); |
||
1374 | if (err) |
||
1375 | goto err_phy_disconnect; |
||
1376 | |||
1377 | if ((priv->flags & FE_FLAG_HAS_SWITCH) && priv->soc->switch_config) |
||
1378 | priv->soc->switch_config(priv); |
||
1379 | |||
1380 | return 0; |
||
1381 | |||
1382 | err_phy_disconnect: |
||
1383 | if (priv->phy) |
||
1384 | priv->phy->disconnect(priv); |
||
1385 | fe_mdio_cleanup(priv); |
||
1386 | |||
1387 | return err; |
||
1388 | } |
||
1389 | |||
1390 | static void fe_uninit(struct net_device *dev) |
||
1391 | { |
||
1392 | struct fe_priv *priv = netdev_priv(dev); |
||
1393 | |||
1394 | if (priv->phy) |
||
1395 | priv->phy->disconnect(priv); |
||
1396 | fe_mdio_cleanup(priv); |
||
1397 | |||
1398 | fe_reg_w32(0, FE_REG_FE_INT_ENABLE); |
||
1399 | free_irq(dev->irq, dev); |
||
1400 | } |
||
1401 | |||
1402 | static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
||
1403 | { |
||
1404 | struct fe_priv *priv = netdev_priv(dev); |
||
1405 | |||
1406 | if (!priv->phy_dev) |
||
1407 | return -ENODEV; |
||
1408 | |||
1409 | switch (cmd) { |
||
1410 | case SIOCETHTOOL: |
||
1411 | return phy_ethtool_ioctl(priv->phy_dev, |
||
1412 | (void *) ifr->ifr_data); |
||
1413 | case SIOCGMIIPHY: |
||
1414 | case SIOCGMIIREG: |
||
1415 | case SIOCSMIIREG: |
||
1416 | return phy_mii_ioctl(priv->phy_dev, ifr, cmd); |
||
1417 | default: |
||
1418 | break; |
||
1419 | } |
||
1420 | |||
1421 | return -EOPNOTSUPP; |
||
1422 | } |
||
1423 | |||
1424 | static int fe_change_mtu(struct net_device *dev, int new_mtu) |
||
1425 | { |
||
1426 | struct fe_priv *priv = netdev_priv(dev); |
||
1427 | int frag_size, old_mtu; |
||
1428 | u32 fwd_cfg; |
||
1429 | |||
1430 | old_mtu = dev->mtu; |
||
1431 | dev->mtu = new_mtu; |
||
1432 | |||
1433 | if (!(priv->flags & FE_FLAG_JUMBO_FRAME)) |
||
1434 | return 0; |
||
1435 | |||
1436 | /* return early if the buffer sizes will not change */ |
||
1437 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) |
||
1438 | return 0; |
||
1439 | if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN) |
||
1440 | return 0; |
||
1441 | |||
1442 | if (new_mtu <= ETH_DATA_LEN) |
||
1443 | priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN); |
||
1444 | else |
||
1445 | priv->rx_ring.frag_size = PAGE_SIZE; |
||
1446 | priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size); |
||
1447 | |||
1448 | if (!netif_running(dev)) |
||
1449 | return 0; |
||
1450 | |||
1451 | fe_stop(dev); |
||
1452 | if (!IS_ENABLED(CONFIG_SOC_MT7621)) { |
||
1453 | fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG); |
||
1454 | if (new_mtu <= ETH_DATA_LEN) { |
||
1455 | fwd_cfg &= ~FE_GDM1_JMB_EN; |
||
1456 | } else { |
||
1457 | frag_size = fe_max_frag_size(new_mtu); |
||
1458 | fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT); |
||
1459 | fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) << |
||
1460 | FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN; |
||
1461 | } |
||
1462 | fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG); |
||
1463 | } |
||
1464 | |||
1465 | return fe_open(dev); |
||
1466 | } |
||
1467 | |||
1468 | #ifdef CONFIG_NET_MEDIATEK_OFFLOAD |
||
1469 | static int |
||
1470 | fe_flow_offload(enum flow_offload_type type, struct flow_offload *flow, |
||
1471 | struct flow_offload_hw_path *src, |
||
1472 | struct flow_offload_hw_path *dest) |
||
1473 | { |
||
1474 | struct fe_priv *priv; |
||
1475 | |||
1476 | if (src->dev != dest->dev) |
||
1477 | return -EINVAL; |
||
1478 | |||
1479 | priv = netdev_priv(src->dev); |
||
1480 | |||
1481 | return mtk_flow_offload(priv, type, flow, src, dest); |
||
1482 | } |
||
1483 | #endif |
||
1484 | |||
1485 | static const struct net_device_ops fe_netdev_ops = { |
||
1486 | .ndo_init = fe_init, |
||
1487 | .ndo_uninit = fe_uninit, |
||
1488 | .ndo_open = fe_open, |
||
1489 | .ndo_stop = fe_stop, |
||
1490 | .ndo_start_xmit = fe_start_xmit, |
||
1491 | .ndo_set_mac_address = fe_set_mac_address, |
||
1492 | .ndo_validate_addr = eth_validate_addr, |
||
1493 | .ndo_do_ioctl = fe_do_ioctl, |
||
1494 | .ndo_change_mtu = fe_change_mtu, |
||
1495 | .ndo_tx_timeout = fe_tx_timeout, |
||
1496 | .ndo_get_stats64 = fe_get_stats64, |
||
1497 | .ndo_vlan_rx_add_vid = fe_vlan_rx_add_vid, |
||
1498 | .ndo_vlan_rx_kill_vid = fe_vlan_rx_kill_vid, |
||
1499 | #ifdef CONFIG_NET_POLL_CONTROLLER |
||
1500 | .ndo_poll_controller = fe_poll_controller, |
||
1501 | #endif |
||
1502 | #ifdef CONFIG_NET_MEDIATEK_OFFLOAD |
||
1503 | .ndo_flow_offload = fe_flow_offload, |
||
1504 | #endif |
||
1505 | }; |
||
1506 | |||
1507 | static void fe_reset_pending(struct fe_priv *priv) |
||
1508 | { |
||
1509 | struct net_device *dev = priv->netdev; |
||
1510 | int err; |
||
1511 | |||
1512 | rtnl_lock(); |
||
1513 | fe_stop(dev); |
||
1514 | |||
1515 | err = fe_open(dev); |
||
1516 | if (err) { |
||
1517 | netif_alert(priv, ifup, dev, |
||
1518 | "Driver up/down cycle failed, closing device.\n"); |
||
1519 | dev_close(dev); |
||
1520 | } |
||
1521 | rtnl_unlock(); |
||
1522 | } |
||
1523 | |||
1524 | static const struct fe_work_t fe_work[] = { |
||
1525 | {FE_FLAG_RESET_PENDING, fe_reset_pending}, |
||
1526 | }; |
||
1527 | |||
1528 | static void fe_pending_work(struct work_struct *work) |
||
1529 | { |
||
1530 | struct fe_priv *priv = container_of(work, struct fe_priv, pending_work); |
||
1531 | int i; |
||
1532 | bool pending; |
||
1533 | |||
1534 | for (i = 0; i < ARRAY_SIZE(fe_work); i++) { |
||
1535 | pending = test_and_clear_bit(fe_work[i].bitnr, |
||
1536 | priv->pending_flags); |
||
1537 | if (pending) |
||
1538 | fe_work[i].action(priv); |
||
1539 | } |
||
1540 | } |
||
1541 | |||
1542 | static int fe_probe(struct platform_device *pdev) |
||
1543 | { |
||
1544 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
||
1545 | const struct of_device_id *match; |
||
1546 | struct fe_soc_data *soc; |
||
1547 | struct net_device *netdev; |
||
1548 | struct fe_priv *priv; |
||
1549 | struct clk *sysclk; |
||
1550 | int err, napi_weight; |
||
1551 | |||
1552 | device_reset(&pdev->dev); |
||
1553 | |||
1554 | match = of_match_device(of_fe_match, &pdev->dev); |
||
1555 | soc = (struct fe_soc_data *)match->data; |
||
1556 | |||
1557 | if (soc->reg_table) |
||
1558 | fe_reg_table = soc->reg_table; |
||
1559 | else |
||
1560 | soc->reg_table = fe_reg_table; |
||
1561 | |||
1562 | fe_base = devm_ioremap_resource(&pdev->dev, res); |
||
1563 | if (IS_ERR(fe_base)) { |
||
1564 | err = -EADDRNOTAVAIL; |
||
1565 | goto err_out; |
||
1566 | } |
||
1567 | |||
1568 | netdev = alloc_etherdev(sizeof(*priv)); |
||
1569 | if (!netdev) { |
||
1570 | dev_err(&pdev->dev, "alloc_etherdev failed\n"); |
||
1571 | err = -ENOMEM; |
||
1572 | goto err_iounmap; |
||
1573 | } |
||
1574 | |||
1575 | SET_NETDEV_DEV(netdev, &pdev->dev); |
||
1576 | netdev->netdev_ops = &fe_netdev_ops; |
||
1577 | netdev->base_addr = (unsigned long)fe_base; |
||
1578 | |||
1579 | netdev->irq = platform_get_irq(pdev, 0); |
||
1580 | if (netdev->irq < 0) { |
||
1581 | dev_err(&pdev->dev, "no IRQ resource found\n"); |
||
1582 | err = -ENXIO; |
||
1583 | goto err_free_dev; |
||
1584 | } |
||
1585 | |||
1586 | if (soc->init_data) |
||
1587 | soc->init_data(soc, netdev); |
||
1588 | netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_CTAG_TX; |
||
1589 | netdev->features |= netdev->hw_features; |
||
1590 | |||
1591 | if (IS_ENABLED(CONFIG_SOC_MT7621)) |
||
1592 | netdev->max_mtu = 2048; |
||
1593 | |||
1594 | /* fake rx vlan filter function to support the tx vlan offload function */ |
||
1595 | if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) |
||
1596 | netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
||
1597 | |||
1598 | priv = netdev_priv(netdev); |
||
1599 | spin_lock_init(&priv->page_lock); |
||
1600 | if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) { |
||
1601 | priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL); |
||
1602 | if (!priv->hw_stats) { |
||
1603 | err = -ENOMEM; |
||
1604 | goto err_free_dev; |
||
1605 | } |
||
1606 | spin_lock_init(&priv->hw_stats->stats_lock); |
||
1607 | } |
||
1608 | |||
1609 | sysclk = devm_clk_get(&pdev->dev, NULL); |
||
1610 | if (!IS_ERR(sysclk)) { |
||
1611 | priv->sysclk = clk_get_rate(sysclk); |
||
1612 | } else if ((priv->flags & FE_FLAG_CALIBRATE_CLK)) { |
||
1613 | dev_err(&pdev->dev, "this soc needs a clk for calibration\n"); |
||
1614 | err = -ENXIO; |
||
1615 | goto err_free_dev; |
||
1616 | } |
||
1617 | |||
1618 | priv->switch_np = of_parse_phandle(pdev->dev.of_node, "mediatek,switch", 0); |
||
1619 | if ((priv->flags & FE_FLAG_HAS_SWITCH) && !priv->switch_np) { |
||
1620 | dev_err(&pdev->dev, "failed to read switch phandle\n"); |
||
1621 | err = -ENODEV; |
||
1622 | goto err_free_dev; |
||
1623 | } |
||
1624 | |||
1625 | priv->netdev = netdev; |
||
1626 | priv->dev = &pdev->dev; |
||
1627 | priv->soc = soc; |
||
1628 | priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE); |
||
1629 | priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN); |
||
1630 | priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size); |
||
1631 | priv->tx_ring.tx_ring_size = NUM_DMA_DESC; |
||
1632 | priv->rx_ring.rx_ring_size = NUM_DMA_DESC; |
||
1633 | INIT_WORK(&priv->pending_work, fe_pending_work); |
||
1634 | u64_stats_init(&priv->hw_stats->syncp); |
||
1635 | |||
1636 | napi_weight = 16; |
||
1637 | if (priv->flags & FE_FLAG_NAPI_WEIGHT) { |
||
1638 | napi_weight *= 4; |
||
1639 | priv->tx_ring.tx_ring_size *= 4; |
||
1640 | priv->rx_ring.rx_ring_size *= 4; |
||
1641 | } |
||
1642 | netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight); |
||
1643 | fe_set_ethtool_ops(netdev); |
||
1644 | |||
1645 | err = register_netdev(netdev); |
||
1646 | if (err) { |
||
1647 | dev_err(&pdev->dev, "error bringing up device\n"); |
||
1648 | goto err_free_dev; |
||
1649 | } |
||
1650 | |||
1651 | platform_set_drvdata(pdev, netdev); |
||
1652 | |||
1653 | netif_info(priv, probe, netdev, "mediatek frame engine at 0x%08lx, irq %d\n", |
||
1654 | netdev->base_addr, netdev->irq); |
||
1655 | |||
1656 | return 0; |
||
1657 | |||
1658 | err_free_dev: |
||
1659 | free_netdev(netdev); |
||
1660 | err_iounmap: |
||
1661 | devm_iounmap(&pdev->dev, fe_base); |
||
1662 | err_out: |
||
1663 | return err; |
||
1664 | } |
||
1665 | |||
1666 | static int fe_remove(struct platform_device *pdev) |
||
1667 | { |
||
1668 | struct net_device *dev = platform_get_drvdata(pdev); |
||
1669 | struct fe_priv *priv = netdev_priv(dev); |
||
1670 | |||
1671 | netif_napi_del(&priv->rx_napi); |
||
1672 | kfree(priv->hw_stats); |
||
1673 | |||
1674 | cancel_work_sync(&priv->pending_work); |
||
1675 | |||
1676 | unregister_netdev(dev); |
||
1677 | free_netdev(dev); |
||
1678 | platform_set_drvdata(pdev, NULL); |
||
1679 | |||
1680 | return 0; |
||
1681 | } |
||
1682 | |||
1683 | static struct platform_driver fe_driver = { |
||
1684 | .probe = fe_probe, |
||
1685 | .remove = fe_remove, |
||
1686 | .driver = { |
||
1687 | .name = "mtk_soc_eth", |
||
1688 | .owner = THIS_MODULE, |
||
1689 | .of_match_table = of_fe_match, |
||
1690 | }, |
||
1691 | }; |
||
1692 | |||
1693 | module_platform_driver(fe_driver); |
||
1694 | |||
1695 | MODULE_LICENSE("GPL"); |
||
1696 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); |
||
1697 | MODULE_DESCRIPTION("Ethernet driver for Ralink SoC"); |
||
1698 | MODULE_VERSION(MTK_FE_DRV_VERSION); |