OpenWrt – Blame information for rev 1
?pathlinks?
Rev | Author | Line No. | Line |
---|---|---|---|
1 | office | 1 | /* |
2 | * Atheros AR71xx built-in ethernet mac driver |
||
3 | * |
||
4 | * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> |
||
5 | * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> |
||
6 | * |
||
7 | * Based on Atheros' AG7100 driver |
||
8 | * |
||
9 | * This program is free software; you can redistribute it and/or modify it |
||
10 | * under the terms of the GNU General Public License version 2 as published |
||
11 | * by the Free Software Foundation. |
||
12 | */ |
||
13 | |||
14 | #include "ag71xx.h" |
||
15 | |||
/* Default netif_msg mask used when the msg_level module param is -1. */
#define AG71XX_DEFAULT_MSG_ENABLE \
	(NETIF_MSG_DRV \
	| NETIF_MSG_PROBE \
	| NETIF_MSG_LINK \
	| NETIF_MSG_TIMER \
	| NETIF_MSG_IFDOWN \
	| NETIF_MSG_IFUP \
	| NETIF_MSG_RX_ERR \
	| NETIF_MSG_TX_ERR)
||
25 | |||
26 | static int ag71xx_msg_level = -1; |
||
27 | |||
28 | module_param_named(msg_level, ag71xx_msg_level, int, 0); |
||
29 | MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); |
||
30 | |||
31 | #define ETH_SWITCH_HEADER_LEN 2 |
||
32 | |||
33 | static int ag71xx_tx_packets(struct ag71xx *ag, bool flush); |
||
34 | static void ag71xx_qca955x_sgmii_init(void); |
||
35 | |||
36 | static inline unsigned int ag71xx_max_frame_len(unsigned int mtu) |
||
37 | { |
||
38 | return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; |
||
39 | } |
||
40 | |||
/* Dump the TX and RX DMA engine registers via the driver's DBG macro. */
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_TX_DESC),
		ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_RX_DESC),
		ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}
||
55 | |||
/* Dump the MAC configuration and FIFO configuration registers. */
static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
		ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
		ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}
||
81 | |||
/* Decode an interrupt status word into its symbolic flag names for DBG. */
static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
		ag->dev->name, label, intr,
		(intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
		(intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
		(intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
		(intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
		(intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
		(intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
||
93 | |||
/*
 * Drop every TX descriptor still pending between dirty and curr: free the
 * attached skbs, count frames the hardware never finished as TX errors,
 * and report the reclaimed work to the byte-queue-limit layer.
 */
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			/* hardware had not completed this one - reset its
			 * control word and account a TX error */
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}
||
125 | |||
126 | static void ag71xx_ring_tx_init(struct ag71xx *ag) |
||
127 | { |
||
128 | struct ag71xx_ring *ring = &ag->tx_ring; |
||
129 | int ring_size = BIT(ring->order); |
||
130 | int ring_mask = ring_size - 1; |
||
131 | int i; |
||
132 | |||
133 | for (i = 0; i < ring_size; i++) { |
||
134 | struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); |
||
135 | |||
136 | desc->next = (u32) (ring->descs_dma + |
||
137 | AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); |
||
138 | |||
139 | desc->ctrl = DESC_EMPTY; |
||
140 | ring->buf[i].skb = NULL; |
||
141 | } |
||
142 | |||
143 | /* flush descriptors */ |
||
144 | wmb(); |
||
145 | |||
146 | ring->curr = 0; |
||
147 | ring->dirty = 0; |
||
148 | netdev_reset_queue(ag->dev); |
||
149 | } |
||
150 | |||
151 | static void ag71xx_ring_rx_clean(struct ag71xx *ag) |
||
152 | { |
||
153 | struct ag71xx_ring *ring = &ag->rx_ring; |
||
154 | int ring_size = BIT(ring->order); |
||
155 | int i; |
||
156 | |||
157 | if (!ring->buf) |
||
158 | return; |
||
159 | |||
160 | for (i = 0; i < ring_size; i++) |
||
161 | if (ring->buf[i].rx_buf) { |
||
162 | dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr, |
||
163 | ag->rx_buf_size, DMA_FROM_DEVICE); |
||
164 | skb_free_frag(ring->buf[i].rx_buf); |
||
165 | } |
||
166 | } |
||
167 | |||
168 | static int ag71xx_buffer_offset(struct ag71xx *ag) |
||
169 | { |
||
170 | int offset = NET_SKB_PAD; |
||
171 | |||
172 | /* |
||
173 | * On AR71xx/AR91xx packets must be 4-byte aligned. |
||
174 | * |
||
175 | * When using builtin AR8216 support, hardware adds a 2-byte header, |
||
176 | * so we don't need any extra alignment in that case. |
||
177 | */ |
||
178 | if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag)) |
||
179 | return offset; |
||
180 | |||
181 | return offset + NET_IP_ALIGN; |
||
182 | } |
||
183 | |||
184 | static int ag71xx_buffer_size(struct ag71xx *ag) |
||
185 | { |
||
186 | return ag->rx_buf_size + |
||
187 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
||
188 | } |
||
189 | |||
/*
 * Allocate and DMA-map one RX buffer for the slot "buf" and point the
 * matching hardware descriptor at it (data address shifted by "offset").
 * The allocator is passed in so callers can use netdev_alloc_frag or
 * napi_alloc_frag as appropriate. Returns false on allocation failure.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() - confirm whether mapping can fail on this platform.
 */
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	/* descriptor index recovered from the buf slot's position in the ring */
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}
||
208 | |||
209 | static int ag71xx_ring_rx_init(struct ag71xx *ag) |
||
210 | { |
||
211 | struct ag71xx_ring *ring = &ag->rx_ring; |
||
212 | int ring_size = BIT(ring->order); |
||
213 | int ring_mask = BIT(ring->order) - 1; |
||
214 | unsigned int i; |
||
215 | int ret; |
||
216 | int offset = ag71xx_buffer_offset(ag); |
||
217 | |||
218 | ret = 0; |
||
219 | for (i = 0; i < ring_size; i++) { |
||
220 | struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); |
||
221 | |||
222 | desc->next = (u32) (ring->descs_dma + |
||
223 | AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); |
||
224 | |||
225 | DBG("ag71xx: RX desc at %p, next is %08x\n", |
||
226 | desc, desc->next); |
||
227 | } |
||
228 | |||
229 | for (i = 0; i < ring_size; i++) { |
||
230 | struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); |
||
231 | |||
232 | if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset, |
||
233 | netdev_alloc_frag)) { |
||
234 | ret = -ENOMEM; |
||
235 | break; |
||
236 | } |
||
237 | |||
238 | desc->ctrl = DESC_EMPTY; |
||
239 | } |
||
240 | |||
241 | /* flush descriptors */ |
||
242 | wmb(); |
||
243 | |||
244 | ring->curr = 0; |
||
245 | ring->dirty = 0; |
||
246 | |||
247 | return ret; |
||
248 | } |
||
249 | |||
/*
 * Re-arm RX descriptors that the poll loop has consumed: for each slot
 * between dirty and curr, allocate a replacement buffer if the slot lost
 * its old one, and hand the descriptor back to the hardware.
 * Returns the number of descriptors refilled.
 */
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		/* stop refilling on the first allocation failure; the OOM
		 * timer will reschedule the poll loop to retry later */
		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
||
281 | |||
/*
 * Allocate the TX and RX rings as one buffer array and one coherent DMA
 * region; the RX halves alias the tail of the TX allocations. Returns 0
 * or -ENOMEM.
 *
 * NOTE(review): dma_alloc_coherent() is called with a NULL device and
 * GFP_ATOMIC - confirm whether &ag->dev->dev (used for streaming mappings
 * elsewhere) and GFP_KERNEL would be appropriate here.
 */
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	/* RX ring shares the tail of the TX allocations */
	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
||
308 | |||
309 | static void ag71xx_rings_free(struct ag71xx *ag) |
||
310 | { |
||
311 | struct ag71xx_ring *tx = &ag->tx_ring; |
||
312 | struct ag71xx_ring *rx = &ag->rx_ring; |
||
313 | int ring_size = BIT(tx->order) + BIT(rx->order); |
||
314 | |||
315 | if (tx->descs_cpu) |
||
316 | dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE, |
||
317 | tx->descs_cpu, tx->descs_dma); |
||
318 | |||
319 | kfree(tx->buf); |
||
320 | |||
321 | tx->descs_cpu = NULL; |
||
322 | rx->descs_cpu = NULL; |
||
323 | tx->buf = NULL; |
||
324 | rx->buf = NULL; |
||
325 | } |
||
326 | |||
/* Tear down both rings: free buffers, release memory, reset BQL state. */
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}
||
335 | |||
336 | static unsigned char *ag71xx_speed_str(struct ag71xx *ag) |
||
337 | { |
||
338 | switch (ag->speed) { |
||
339 | case SPEED_1000: |
||
340 | return "1000"; |
||
341 | case SPEED_100: |
||
342 | return "100"; |
||
343 | case SPEED_10: |
||
344 | return "10"; |
||
345 | } |
||
346 | |||
347 | return "?"; |
||
348 | } |
||
349 | |||
/*
 * Program the station MAC address into the two hardware address registers:
 * ADDR1 holds bytes 5..2 (MSB first), ADDR2 holds bytes 1..0 in its top
 * half.
 */
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
||
362 | |||
/*
 * Bring the DMA engine to a known idle state: stop both directions, park
 * the descriptor pointers on the dedicated stop descriptor, and clear all
 * pending status bits. Order of the register writes is significant.
 */
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts; the packet-received/packet-sent
	 * bits are counters, so a single write may not clear them - 256
	 * iterations covers the maximum possible backlog */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
||
410 | |||
/* Initial register values used by ag71xx_hw_setup(). */

/* enable RX/TX and their synchronisation stages */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
||
429 | |||
/* Quiesce the MAC: mask all interrupts, then stop the RX/TX DMA engines. */
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
||
437 | |||
/*
 * Program the baseline MAC and FIFO configuration. The FIFO threshold
 * constants differ between AR724x-class and older SoCs; the values come
 * from Atheros' reference driver.
 */
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (pdata->use_flow_control)
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero (set for real in ag71xx_open) */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0010ffff);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x015500aa);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
||
466 | |||
/*
 * Full hardware initialisation: reset the PHY (AR724x-class only), soft
 * reset the MAC, pulse the device reset line, then reprogram the MAC/FIFO
 * configuration and reset the DMA engine. Delays follow the reference
 * driver's timing requirements.
 */
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		/* split the PHY reset bits out so the PHY is reset first */
		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	/* MAC soft reset */
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
||
498 | |||
/*
 * Quick MAC-only reset used on link changes (AR724x-class): preserve the
 * MII config and RX descriptor pointer across the reset, reclaim pending
 * TX work, then restore state and reprogram the MAC address.
 */
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds;
	u32 mii_reg;

	/* only reset the MAC blocks, not the PHY */
	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	ag71xx_hw_stop(ag);
	wmb();

	/* save registers that must survive the reset */
	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	/* flush completed TX work before the descriptors are re-based */
	ag71xx_tx_packets(ag, true);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	/* restore the saved state */
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
||
538 | |||
/* Re-enable the datapath: start RX DMA, unmask interrupts, wake the queue. */
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}
||
549 | |||
/*
 * Apply the current PHY link state (ag->link / ag->speed / ag->duplex) to
 * the MAC. With "update" set this is a real link change: the hardware is
 * stopped on link-down, platform speed callbacks run, and the transition
 * is logged. With "update" clear (restart path) only the register
 * reprogramming is performed.
 */
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	/* AR724x-class MACs need a fast reset on every link change */
	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		/* caller must have set a valid speed before link-up */
		BUG();
		return;
	}

	/* SoC-specific FIFO_CFG3 thresholds (reference driver values) */
	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = 0x01f00140;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	if (update && pdata->enable_sgmii_fixup)
		ag71xx_qca955x_sgmii_init();

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (pdata->disable_inline_checksum_engine) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}
||
647 | |||
/* Public entry point for PHY link-change notifications (with logging). */
void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}
||
652 | |||
653 | static int ag71xx_hw_enable(struct ag71xx *ag) |
||
654 | { |
||
655 | int ret; |
||
656 | |||
657 | ret = ag71xx_rings_init(ag); |
||
658 | if (ret) |
||
659 | return ret; |
||
660 | |||
661 | napi_enable(&ag->napi); |
||
662 | ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); |
||
663 | ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); |
||
664 | netif_start_queue(ag->dev); |
||
665 | |||
666 | return 0; |
||
667 | } |
||
668 | |||
/*
 * Tear the datapath down: stop the queue and the hardware under the
 * driver lock, then disable NAPI / the OOM timer and release the rings
 * outside the lock (ring cleanup may sleep-free skbs).
 */
static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}
||
687 | |||
/*
 * net_device_ops .ndo_open: size the RX buffers for the current MTU,
 * program the max frame length and MAC address, bring up the datapath and
 * start the PHY. On failure the rings are cleaned up and the errno from
 * ag71xx_hw_enable() is returned.
 */
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	/* headroom for NET_SKB_PAD + NET_IP_ALIGN, rounded for the DMA engine */
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
||
714 | |||
/*
 * net_device_ops .ndo_stop: drop carrier, stop the PHY, then shut the
 * datapath down. Always succeeds.
 */
static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *priv = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(priv);
	ag71xx_hw_disable(priv);

	return 0;
}
||
725 | |||
/*
 * Split one DMA buffer of "len" bytes into ring descriptors of at most
 * ring->desc_split bytes each (or a single descriptor when splitting is
 * disabled). Returns the number of descriptors used, or -1 if the ring
 * has no free descriptor. The first descriptor is left marked EMPTY so
 * the hardware cannot start transmitting a half-built chain; the caller
 * clears that bit once all descriptors are filled.
 *
 * NOTE(review): cur_len (unsigned) is compared against split (int) -
 * assumes desc_split is never negative; confirm.
 */
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
||
774 | |||
/*
 * net_device_ops .ndo_start_xmit: map the skb for DMA, fill one or more
 * TX descriptors, account the frame with BQL, and kick the TX engine.
 * Frames of <= 4 bytes are dropped (the DMA engine hangs on them).
 * Always returns NETDEV_TX_OK; failures are accounted as tx_dropped.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() - confirm mapping cannot fail on this platform.
 */
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	/* prepend the 2-byte AR8216 switch header when required */
	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	/* the skb is owned by the last descriptor of the chain */
	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	/* hand the first descriptor to the hardware */
	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	/* stop the queue while fewer than ring_min free slots remain */
	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
||
844 | |||
/*
 * net_device_ops .ndo_do_ioctl: ethtool and MII ioctls are forwarded to
 * the attached PHY (when present); the HW-address ioctls copy the MAC
 * address to/from user space. Anything else is -EOPNOTSUPP.
 *
 * NOTE(review): sizeof(dev->dev_addr) assumes dev_addr is an array here;
 * on kernels where dev_addr is a pointer this would copy pointer-size
 * bytes - confirm against the target kernel version.
 */
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
||
886 | |||
887 | static void ag71xx_oom_timer_handler(unsigned long data) |
||
888 | { |
||
889 | struct net_device *dev = (struct net_device *) data; |
||
890 | struct ag71xx *ag = netdev_priv(dev); |
||
891 | |||
892 | napi_schedule(&ag->napi); |
||
893 | } |
||
894 | |||
/*
 * net_device_ops .ndo_tx_timeout: log (when enabled) and schedule a full
 * datapath restart from process context.
 */
static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}
||
904 | |||
905 | static void ag71xx_bit_set(void __iomem *reg, u32 bit) |
||
906 | { |
||
907 | u32 val = __raw_readl(reg) | bit; |
||
908 | __raw_writel(val, reg); |
||
909 | __raw_readl(reg); |
||
910 | } |
||
911 | |||
912 | static void ag71xx_bit_clear(void __iomem *reg, u32 bit) |
||
913 | { |
||
914 | u32 val = __raw_readl(reg) & ~bit; |
||
915 | __raw_writel(val, reg); |
||
916 | __raw_readl(reg); |
||
917 | } |
||
918 | |||
919 | static void ag71xx_qca955x_sgmii_init() |
||
920 | { |
||
921 | void __iomem *gmac_base; |
||
922 | u32 mr_an_status, sgmii_status; |
||
923 | u8 tries = 0; |
||
924 | |||
925 | gmac_base = ioremap_nocache(QCA955X_GMAC_BASE, QCA955X_GMAC_SIZE); |
||
926 | |||
927 | if (!gmac_base) |
||
928 | goto sgmii_out; |
||
929 | |||
930 | mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS); |
||
931 | if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY)) |
||
932 | goto sgmii_out; |
||
933 | |||
934 | __raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET , |
||
935 | gmac_base + QCA955X_GMAC_REG_SGMII_RESET); |
||
936 | __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET); |
||
937 | udelay(10); |
||
938 | |||
939 | /* Init sequence */ |
||
940 | ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, |
||
941 | QCA955X_SGMII_RESET_HW_RX_125M_N); |
||
942 | udelay(10); |
||
943 | |||
944 | ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, |
||
945 | QCA955X_SGMII_RESET_RX_125M_N); |
||
946 | udelay(10); |
||
947 | |||
948 | ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, |
||
949 | QCA955X_SGMII_RESET_TX_125M_N); |
||
950 | udelay(10); |
||
951 | |||
952 | ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, |
||
953 | QCA955X_SGMII_RESET_RX_CLK_N); |
||
954 | udelay(10); |
||
955 | |||
956 | ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, |
||
957 | QCA955X_SGMII_RESET_TX_CLK_N); |
||
958 | udelay(10); |
||
959 | |||
960 | do { |
||
961 | ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL, |
||
962 | QCA955X_MR_AN_CONTROL_PHY_RESET | |
||
963 | QCA955X_MR_AN_CONTROL_AN_ENABLE); |
||
964 | udelay(100); |
||
965 | ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL, |
||
966 | QCA955X_MR_AN_CONTROL_PHY_RESET); |
||
967 | mdelay(10); |
||
968 | sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) & 0xF; |
||
969 | |||
970 | if (tries++ >= QCA955X_SGMII_LINK_WAR_MAX_TRY) { |
||
971 | pr_warn("ag71xx: max retries for SGMII fixup exceeded!\n"); |
||
972 | break; |
||
973 | } |
||
974 | } while (!(sgmii_status == 0xf || sgmii_status == 0x10)); |
||
975 | |||
976 | sgmii_out: |
||
977 | iounmap(gmac_base); |
||
978 | } |
||
979 | |||
/*
 * Delayed-work handler scheduled by ag71xx_tx_packets() when the DMA
 * engine looks stuck: bounce the MAC (disable + enable) and, if a link
 * is up, reprogram the MAC for the current link parameters.
 * Runs under rtnl_lock because it touches device state that the
 * open/stop paths also modify.
 */
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		/* "false" = do not log the link state again */
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}
||
991 | |||
/*
 * Heuristic detection of a wedged DMA engine (AR724x errata path).
 *
 * Returns true when the TX queue has been idle-but-pending for more
 * than HZ/10 jiffies while the carrier is up AND the hardware state
 * machines are in one of two known-bad combinations.  The magic
 * state-machine field values below come from Atheros reference code;
 * their exact meaning is not documented here -- do not "clean up"
 * these constants without the datasheet.
 */
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	/* too early to judge: last TX was less than HZ/10 ago */
	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	/* bad combination #1: RX state machine wedged at 3/6 */
	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	/* bad combination #2: everything idle yet FIFO depth is zero */
	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
||
1016 | |||
/*
 * Reap completed TX descriptors.
 *
 * Walks the TX ring from ->dirty towards ->curr, freeing skbs whose
 * descriptors the hardware has marked empty.  A single packet may span
 * several descriptors (desc_split mode), so skb-less descriptors are
 * counted in 'n' and only folded into ->dirty when the descriptor that
 * carries the skb completes.  With flush=true every pending descriptor
 * is force-completed regardless of hardware state.
 *
 * Returns the number of packets (not descriptors) reaped.
 */
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			/* hardware still owns this one; check for a wedge
			 * (AR724x only) and schedule a MAC restart if so */
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			/* continuation descriptor of a split packet */
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		/* fold all descriptors of this packet into ->dirty ... */
		ring->dirty += n;

		/* ... and ack one "packet sent" status per descriptor */
		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	/* wake the queue once the ring is at most 3/4 full */
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	/* progress was made and nothing is stuck: no restart needed */
	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
||
1082 | |||
/*
 * Receive up to 'limit' packets from the RX ring (NAPI poll helper).
 *
 * Completed buffers are converted to skbs via build_skb() and queued
 * locally; the ring is refilled first and only then are the skbs
 * handed to the stack, so fresh buffers are available to the hardware
 * as early as possible.  Returns the number of descriptors processed
 * (including ones dropped due to allocation failure).
 */
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		/* curr lapping dirty by a full ring would mean ring
		 * bookkeeping is corrupt -- should never happen */
		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		/* ack one "packet received" status per descriptor */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		/* hardware length includes the FCS; strip it */
		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		/* NOTE(review): rx stats are bumped even when build_skb()
		 * fails below and the packet is dropped -- confirm intended */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		/* strip the AR8216 switch header if present */
		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	/* refill before delivering, so the hardware regains buffers */
	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
||
1165 | |||
/*
 * NAPI poll handler.
 *
 * Reaps TX completions, receives up to 'limit' packets, recovers from
 * RX overflow, and decides whether to stay in polling mode.  Interrupts
 * are re-enabled only after napi_complete() and only when no further
 * RX/TX work is pending, to avoid losing events in the race window.
 * On RX buffer exhaustion (OOM) polling stops and a timer retries the
 * refill later.
 */
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	/* flush the DDR write buffer before touching DMA descriptors */
	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	/* a NULL rx_buf at ->dirty means the refill ran out of memory */
	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		/* more RX arrived while we were processing */
		if (status & RX_STATUS_PR)
			goto more;

		/* more TX completed while we were processing */
		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	/* retry the refill from a timer; leave interrupts disabled */
	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
||
1231 | |||
/*
 * Hard interrupt handler.
 *
 * Acks bus-error conditions directly, and on RX/TX work masks the
 * poll interrupts and hands processing over to NAPI.  Returns
 * IRQ_NONE when the status register shows no pending source (shared
 * interrupt line etiquette).
 */
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		/* write-one-to-clear the bus error bits */
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		/* mask poll interrupts; ag71xx_poll() re-enables them */
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
||
1265 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	/* run the ISR by hand with the line masked */
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
||
1279 | |||
1280 | static int ag71xx_change_mtu(struct net_device *dev, int new_mtu) |
||
1281 | { |
||
1282 | struct ag71xx *ag = netdev_priv(dev); |
||
1283 | unsigned int max_frame_len; |
||
1284 | |||
1285 | max_frame_len = ag71xx_max_frame_len(new_mtu); |
||
1286 | if (new_mtu < 68 || max_frame_len > ag->max_frame_len) |
||
1287 | return -EINVAL; |
||
1288 | |||
1289 | if (netif_running(dev)) |
||
1290 | return -EBUSY; |
||
1291 | |||
1292 | dev->mtu = new_mtu; |
||
1293 | return 0; |
||
1294 | } |
||
1295 | |||
/* net_device_ops: open/stop/xmit/ioctl/timeout are defined elsewhere
 * in this file; mac address and addr validation use generic helpers */
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};
||
1309 | |||
1310 | static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode) |
||
1311 | { |
||
1312 | switch (mode) { |
||
1313 | case PHY_INTERFACE_MODE_MII: |
||
1314 | return "MII"; |
||
1315 | case PHY_INTERFACE_MODE_GMII: |
||
1316 | return "GMII"; |
||
1317 | case PHY_INTERFACE_MODE_RMII: |
||
1318 | return "RMII"; |
||
1319 | case PHY_INTERFACE_MODE_RGMII: |
||
1320 | return "RGMII"; |
||
1321 | case PHY_INTERFACE_MODE_SGMII: |
||
1322 | return "SGMII"; |
||
1323 | default: |
||
1324 | break; |
||
1325 | } |
||
1326 | |||
1327 | return "unknown"; |
||
1328 | } |
||
1329 | |||
1330 | |||
1331 | static int ag71xx_probe(struct platform_device *pdev) |
||
1332 | { |
||
1333 | struct net_device *dev; |
||
1334 | struct resource *res; |
||
1335 | struct ag71xx *ag; |
||
1336 | struct ag71xx_platform_data *pdata; |
||
1337 | int tx_size, err; |
||
1338 | |||
1339 | pdata = pdev->dev.platform_data; |
||
1340 | if (!pdata) { |
||
1341 | dev_err(&pdev->dev, "no platform data specified\n"); |
||
1342 | err = -ENXIO; |
||
1343 | goto err_out; |
||
1344 | } |
||
1345 | |||
1346 | if (pdata->mii_bus_dev == NULL && pdata->phy_mask) { |
||
1347 | dev_err(&pdev->dev, "no MII bus device specified\n"); |
||
1348 | err = -EINVAL; |
||
1349 | goto err_out; |
||
1350 | } |
||
1351 | |||
1352 | dev = alloc_etherdev(sizeof(*ag)); |
||
1353 | if (!dev) { |
||
1354 | dev_err(&pdev->dev, "alloc_etherdev failed\n"); |
||
1355 | err = -ENOMEM; |
||
1356 | goto err_out; |
||
1357 | } |
||
1358 | |||
1359 | if (!pdata->max_frame_len || !pdata->desc_pktlen_mask) |
||
1360 | return -EINVAL; |
||
1361 | |||
1362 | SET_NETDEV_DEV(dev, &pdev->dev); |
||
1363 | |||
1364 | ag = netdev_priv(dev); |
||
1365 | ag->pdev = pdev; |
||
1366 | ag->dev = dev; |
||
1367 | ag->msg_enable = netif_msg_init(ag71xx_msg_level, |
||
1368 | AG71XX_DEFAULT_MSG_ENABLE); |
||
1369 | spin_lock_init(&ag->lock); |
||
1370 | |||
1371 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base"); |
||
1372 | if (!res) { |
||
1373 | dev_err(&pdev->dev, "no mac_base resource found\n"); |
||
1374 | err = -ENXIO; |
||
1375 | goto err_out; |
||
1376 | } |
||
1377 | |||
1378 | ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1); |
||
1379 | if (!ag->mac_base) { |
||
1380 | dev_err(&pdev->dev, "unable to ioremap mac_base\n"); |
||
1381 | err = -ENOMEM; |
||
1382 | goto err_free_dev; |
||
1383 | } |
||
1384 | |||
1385 | dev->irq = platform_get_irq(pdev, 0); |
||
1386 | err = request_irq(dev->irq, ag71xx_interrupt, |
||
1387 | 0x0, |
||
1388 | dev->name, dev); |
||
1389 | if (err) { |
||
1390 | dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq); |
||
1391 | goto err_unmap_base; |
||
1392 | } |
||
1393 | |||
1394 | dev->base_addr = (unsigned long)ag->mac_base; |
||
1395 | dev->netdev_ops = &ag71xx_netdev_ops; |
||
1396 | dev->ethtool_ops = &ag71xx_ethtool_ops; |
||
1397 | |||
1398 | INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); |
||
1399 | |||
1400 | init_timer(&ag->oom_timer); |
||
1401 | ag->oom_timer.data = (unsigned long) dev; |
||
1402 | ag->oom_timer.function = ag71xx_oom_timer_handler; |
||
1403 | |||
1404 | tx_size = AG71XX_TX_RING_SIZE_DEFAULT; |
||
1405 | ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); |
||
1406 | |||
1407 | ag->max_frame_len = pdata->max_frame_len; |
||
1408 | ag->desc_pktlen_mask = pdata->desc_pktlen_mask; |
||
1409 | |||
1410 | if (!pdata->is_ar724x && !pdata->is_ar91xx) { |
||
1411 | ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; |
||
1412 | tx_size *= AG71XX_TX_RING_DS_PER_PKT; |
||
1413 | } |
||
1414 | ag->tx_ring.order = ag71xx_ring_size_order(tx_size); |
||
1415 | |||
1416 | ag->stop_desc = dma_alloc_coherent(NULL, |
||
1417 | sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL); |
||
1418 | |||
1419 | if (!ag->stop_desc) |
||
1420 | goto err_free_irq; |
||
1421 | |||
1422 | ag->stop_desc->data = 0; |
||
1423 | ag->stop_desc->ctrl = 0; |
||
1424 | ag->stop_desc->next = (u32) ag->stop_desc_dma; |
||
1425 | |||
1426 | memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN); |
||
1427 | |||
1428 | netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); |
||
1429 | |||
1430 | ag71xx_dump_regs(ag); |
||
1431 | |||
1432 | ag71xx_hw_init(ag); |
||
1433 | |||
1434 | ag71xx_dump_regs(ag); |
||
1435 | |||
1436 | err = ag71xx_phy_connect(ag); |
||
1437 | if (err) |
||
1438 | goto err_free_desc; |
||
1439 | |||
1440 | err = ag71xx_debugfs_init(ag); |
||
1441 | if (err) |
||
1442 | goto err_phy_disconnect; |
||
1443 | |||
1444 | platform_set_drvdata(pdev, dev); |
||
1445 | |||
1446 | err = register_netdev(dev); |
||
1447 | if (err) { |
||
1448 | dev_err(&pdev->dev, "unable to register net device\n"); |
||
1449 | goto err_debugfs_exit; |
||
1450 | } |
||
1451 | |||
1452 | pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n", |
||
1453 | dev->name, dev->base_addr, dev->irq, |
||
1454 | ag71xx_get_phy_if_mode_name(pdata->phy_if_mode)); |
||
1455 | |||
1456 | return 0; |
||
1457 | |||
1458 | err_debugfs_exit: |
||
1459 | ag71xx_debugfs_exit(ag); |
||
1460 | err_phy_disconnect: |
||
1461 | ag71xx_phy_disconnect(ag); |
||
1462 | err_free_desc: |
||
1463 | dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc, |
||
1464 | ag->stop_desc_dma); |
||
1465 | err_free_irq: |
||
1466 | free_irq(dev->irq, dev); |
||
1467 | err_unmap_base: |
||
1468 | iounmap(ag->mac_base); |
||
1469 | err_free_dev: |
||
1470 | kfree(dev); |
||
1471 | err_out: |
||
1472 | platform_set_drvdata(pdev, NULL); |
||
1473 | return err; |
||
1474 | } |
||
1475 | |||
1476 | static int ag71xx_remove(struct platform_device *pdev) |
||
1477 | { |
||
1478 | struct net_device *dev = platform_get_drvdata(pdev); |
||
1479 | |||
1480 | if (dev) { |
||
1481 | struct ag71xx *ag = netdev_priv(dev); |
||
1482 | |||
1483 | ag71xx_debugfs_exit(ag); |
||
1484 | ag71xx_phy_disconnect(ag); |
||
1485 | unregister_netdev(dev); |
||
1486 | free_irq(dev->irq, dev); |
||
1487 | iounmap(ag->mac_base); |
||
1488 | kfree(dev); |
||
1489 | platform_set_drvdata(pdev, NULL); |
||
1490 | } |
||
1491 | |||
1492 | return 0; |
||
1493 | } |
||
1494 | |||
/* platform driver glue; matched by name against the board setup code */
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	}
};
||
1502 | |||
1503 | static int __init ag71xx_module_init(void) |
||
1504 | { |
||
1505 | int ret; |
||
1506 | |||
1507 | ret = ag71xx_debugfs_root_init(); |
||
1508 | if (ret) |
||
1509 | goto err_out; |
||
1510 | |||
1511 | ret = ag71xx_mdio_driver_init(); |
||
1512 | if (ret) |
||
1513 | goto err_debugfs_exit; |
||
1514 | |||
1515 | ret = platform_driver_register(&ag71xx_driver); |
||
1516 | if (ret) |
||
1517 | goto err_mdio_exit; |
||
1518 | |||
1519 | return 0; |
||
1520 | |||
1521 | err_mdio_exit: |
||
1522 | ag71xx_mdio_driver_exit(); |
||
1523 | err_debugfs_exit: |
||
1524 | ag71xx_debugfs_root_exit(); |
||
1525 | err_out: |
||
1526 | return ret; |
||
1527 | } |
||
1528 | |||
/* Module exit: tear down in the reverse order of ag71xx_module_init() */
static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}
||
1535 | |||
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

/* module metadata */
MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);