OpenWrt – Blame information for rev 1
?pathlinks?
Rev | Author | Line No. | Line |
---|---|---|---|
1 | office | 1 | /* |
2 | * Cavium CNS3xxx Gigabit driver for Linux |
||
3 | * |
||
4 | * Copyright 2011 Gateworks Corporation |
||
5 | * Chris Lang <clang@gateworks.com> |
||
6 | * |
||
7 | * This program is free software; you can redistribute it and/or modify it |
||
8 | * under the terms of version 2 of the GNU General Public License |
||
9 | * as published by the Free Software Foundation. |
||
10 | * |
||
11 | */ |
||
12 | |||
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/platform_data/cns3xxx.h>
25 | |||
26 | #define DRV_NAME "cns3xxx_eth" |
||
27 | |||
28 | #define RX_DESCS 256 |
||
29 | #define TX_DESCS 128 |
||
30 | #define TX_DESC_RESERVE 20 |
||
31 | |||
32 | #define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS) |
||
33 | #define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS) |
||
34 | #define REGS_SIZE 336 |
||
35 | |||
36 | #define RX_BUFFER_ALIGN 64 |
||
37 | #define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1)) |
||
38 | |||
39 | #define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN) |
||
40 | #define RX_SEGMENT_ALLOC_SIZE 2048 |
||
41 | #define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE)) |
||
42 | #define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN) |
||
43 | #define MAX_MTU 9500 |
||
44 | |||
45 | #define NAPI_WEIGHT 64 |
||
46 | |||
47 | /* MDIO Defines */ |
||
48 | #define MDIO_CMD_COMPLETE 0x00008000 |
||
49 | #define MDIO_WRITE_COMMAND 0x00002000 |
||
50 | #define MDIO_READ_COMMAND 0x00004000 |
||
51 | #define MDIO_REG_OFFSET 8 |
||
52 | #define MDIO_VALUE_OFFSET 16 |
||
53 | |||
54 | /* Descritor Defines */ |
||
55 | #define END_OF_RING 0x40000000 |
||
56 | #define FIRST_SEGMENT 0x20000000 |
||
57 | #define LAST_SEGMENT 0x10000000 |
||
58 | #define FORCE_ROUTE 0x04000000 |
||
59 | #define UDP_CHECKSUM 0x00020000 |
||
60 | #define TCP_CHECKSUM 0x00010000 |
||
61 | |||
62 | /* Port Config Defines */ |
||
63 | #define PORT_BP_ENABLE 0x00020000 |
||
64 | #define PORT_DISABLE 0x00040000 |
||
65 | #define PORT_LEARN_DIS 0x00080000 |
||
66 | #define PORT_BLOCK_STATE 0x00100000 |
||
67 | #define PORT_BLOCK_MODE 0x00200000 |
||
68 | |||
69 | #define PROMISC_OFFSET 29 |
||
70 | |||
71 | /* Global Config Defines */ |
||
72 | #define UNKNOWN_VLAN_TO_CPU 0x02000000 |
||
73 | #define ACCEPT_CRC_PACKET 0x00200000 |
||
74 | #define CRC_STRIPPING 0x00100000 |
||
75 | |||
76 | /* VLAN Config Defines */ |
||
77 | #define NIC_MODE 0x00008000 |
||
78 | #define VLAN_UNAWARE 0x00000001 |
||
79 | |||
80 | /* DMA AUTO Poll Defines */ |
||
81 | #define TS_POLL_EN 0x00000020 |
||
82 | #define TS_SUSPEND 0x00000010 |
||
83 | #define FS_POLL_EN 0x00000002 |
||
84 | #define FS_SUSPEND 0x00000001 |
||
85 | |||
86 | /* DMA Ring Control Defines */ |
||
87 | #define QUEUE_THRESHOLD 0x000000f0 |
||
88 | #define CLR_FS_STATE 0x80000000 |
||
89 | |||
90 | /* Interrupt Status Defines */ |
||
91 | #define MAC0_STATUS_CHANGE 0x00004000 |
||
92 | #define MAC1_STATUS_CHANGE 0x00008000 |
||
93 | #define MAC2_STATUS_CHANGE 0x00010000 |
||
94 | #define MAC0_RX_ERROR 0x00100000 |
||
95 | #define MAC1_RX_ERROR 0x00200000 |
||
96 | #define MAC2_RX_ERROR 0x00400000 |
||
97 | |||
98 | struct tx_desc |
||
99 | { |
||
100 | u32 sdp; /* segment data pointer */ |
||
101 | |||
102 | union { |
||
103 | struct { |
||
104 | u32 sdl:16; /* segment data length */ |
||
105 | u32 tco:1; |
||
106 | u32 uco:1; |
||
107 | u32 ico:1; |
||
108 | u32 rsv_1:3; /* reserve */ |
||
109 | u32 pri:3; |
||
110 | u32 fp:1; /* force priority */ |
||
111 | u32 fr:1; |
||
112 | u32 interrupt:1; |
||
113 | u32 lsd:1; |
||
114 | u32 fsd:1; |
||
115 | u32 eor:1; |
||
116 | u32 cown:1; |
||
117 | }; |
||
118 | u32 config0; |
||
119 | }; |
||
120 | |||
121 | union { |
||
122 | struct { |
||
123 | u32 ctv:1; |
||
124 | u32 stv:1; |
||
125 | u32 sid:4; |
||
126 | u32 inss:1; |
||
127 | u32 dels:1; |
||
128 | u32 rsv_2:9; |
||
129 | u32 pmap:5; |
||
130 | u32 mark:3; |
||
131 | u32 ewan:1; |
||
132 | u32 fewan:1; |
||
133 | u32 rsv_3:5; |
||
134 | }; |
||
135 | u32 config1; |
||
136 | }; |
||
137 | |||
138 | union { |
||
139 | struct { |
||
140 | u32 c_vid:12; |
||
141 | u32 c_cfs:1; |
||
142 | u32 c_pri:3; |
||
143 | u32 s_vid:12; |
||
144 | u32 s_dei:1; |
||
145 | u32 s_pri:3; |
||
146 | }; |
||
147 | u32 config2; |
||
148 | }; |
||
149 | |||
150 | u8 alignment[16]; /* for 32 byte */ |
||
151 | }; |
||
152 | |||
153 | struct rx_desc |
||
154 | { |
||
155 | u32 sdp; /* segment data pointer */ |
||
156 | |||
157 | union { |
||
158 | struct { |
||
159 | u32 sdl:16; /* segment data length */ |
||
160 | u32 l4f:1; |
||
161 | u32 ipf:1; |
||
162 | u32 prot:4; |
||
163 | u32 hr:6; |
||
164 | u32 lsd:1; |
||
165 | u32 fsd:1; |
||
166 | u32 eor:1; |
||
167 | u32 cown:1; |
||
168 | }; |
||
169 | u32 config0; |
||
170 | }; |
||
171 | |||
172 | union { |
||
173 | struct { |
||
174 | u32 ctv:1; |
||
175 | u32 stv:1; |
||
176 | u32 unv:1; |
||
177 | u32 iwan:1; |
||
178 | u32 exdv:1; |
||
179 | u32 e_wan:1; |
||
180 | u32 rsv_1:2; |
||
181 | u32 sp:3; |
||
182 | u32 crc_err:1; |
||
183 | u32 un_eth:1; |
||
184 | u32 tc:2; |
||
185 | u32 rsv_2:1; |
||
186 | u32 ip_offset:5; |
||
187 | u32 rsv_3:11; |
||
188 | }; |
||
189 | u32 config1; |
||
190 | }; |
||
191 | |||
192 | union { |
||
193 | struct { |
||
194 | u32 c_vid:12; |
||
195 | u32 c_cfs:1; |
||
196 | u32 c_pri:3; |
||
197 | u32 s_vid:12; |
||
198 | u32 s_dei:1; |
||
199 | u32 s_pri:3; |
||
200 | }; |
||
201 | u32 config2; |
||
202 | }; |
||
203 | |||
204 | u8 alignment[16]; /* for 32 byte alignment */ |
||
205 | }; |
||
206 | |||
207 | |||
208 | struct switch_regs { |
||
209 | u32 phy_control; |
||
210 | u32 phy_auto_addr; |
||
211 | u32 mac_glob_cfg; |
||
212 | u32 mac_cfg[4]; |
||
213 | u32 mac_pri_ctrl[5], __res; |
||
214 | u32 etype[2]; |
||
215 | u32 udp_range[4]; |
||
216 | u32 prio_etype_udp; |
||
217 | u32 prio_ipdscp[8]; |
||
218 | u32 tc_ctrl; |
||
219 | u32 rate_ctrl; |
||
220 | u32 fc_glob_thrs; |
||
221 | u32 fc_port_thrs; |
||
222 | u32 mc_fc_glob_thrs; |
||
223 | u32 dc_glob_thrs; |
||
224 | u32 arl_vlan_cmd; |
||
225 | u32 arl_ctrl[3]; |
||
226 | u32 vlan_cfg; |
||
227 | u32 pvid[2]; |
||
228 | u32 vlan_ctrl[3]; |
||
229 | u32 session_id[8]; |
||
230 | u32 intr_stat; |
||
231 | u32 intr_mask; |
||
232 | u32 sram_test; |
||
233 | u32 mem_queue; |
||
234 | u32 farl_ctrl; |
||
235 | u32 fc_input_thrs, __res1[2]; |
||
236 | u32 clk_skew_ctrl; |
||
237 | u32 mac_glob_cfg_ext, __res2[2]; |
||
238 | u32 dma_ring_ctrl; |
||
239 | u32 dma_auto_poll_cfg; |
||
240 | u32 delay_intr_cfg, __res3; |
||
241 | u32 ts_dma_ctrl0; |
||
242 | u32 ts_desc_ptr0; |
||
243 | u32 ts_desc_base_addr0, __res4; |
||
244 | u32 fs_dma_ctrl0; |
||
245 | u32 fs_desc_ptr0; |
||
246 | u32 fs_desc_base_addr0, __res5; |
||
247 | u32 ts_dma_ctrl1; |
||
248 | u32 ts_desc_ptr1; |
||
249 | u32 ts_desc_base_addr1, __res6; |
||
250 | u32 fs_dma_ctrl1; |
||
251 | u32 fs_desc_ptr1; |
||
252 | u32 fs_desc_base_addr1; |
||
253 | u32 __res7[109]; |
||
254 | u32 mac_counter0[13]; |
||
255 | }; |
||
256 | |||
257 | struct _tx_ring { |
||
258 | struct tx_desc *desc; |
||
259 | dma_addr_t phys_addr; |
||
260 | struct tx_desc *cur_addr; |
||
261 | struct sk_buff *buff_tab[TX_DESCS]; |
||
262 | unsigned int phys_tab[TX_DESCS]; |
||
263 | u32 free_index; |
||
264 | u32 count_index; |
||
265 | u32 cur_index; |
||
266 | int num_used; |
||
267 | int num_count; |
||
268 | bool stopped; |
||
269 | }; |
||
270 | |||
271 | struct _rx_ring { |
||
272 | struct rx_desc *desc; |
||
273 | dma_addr_t phys_addr; |
||
274 | struct rx_desc *cur_addr; |
||
275 | void *buff_tab[RX_DESCS]; |
||
276 | unsigned int phys_tab[RX_DESCS]; |
||
277 | u32 cur_index; |
||
278 | u32 alloc_index; |
||
279 | int alloc_count; |
||
280 | }; |
||
281 | |||
282 | struct sw { |
||
283 | struct switch_regs __iomem *regs; |
||
284 | struct napi_struct napi; |
||
285 | struct cns3xxx_plat_info *plat; |
||
286 | struct _tx_ring tx_ring; |
||
287 | struct _rx_ring rx_ring; |
||
288 | struct sk_buff *frag_first; |
||
289 | struct sk_buff *frag_last; |
||
290 | struct device *dev; |
||
291 | int rx_irq; |
||
292 | int stat_irq; |
||
293 | }; |
||
294 | |||
295 | struct port { |
||
296 | struct net_device *netdev; |
||
297 | struct phy_device *phydev; |
||
298 | struct sw *sw; |
||
299 | int id; /* logical port ID */ |
||
300 | int speed, duplex; |
||
301 | }; |
||
302 | |||
303 | static spinlock_t mdio_lock; |
||
304 | static DEFINE_SPINLOCK(tx_lock); |
||
305 | static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */ |
||
306 | struct mii_bus *mdio_bus; |
||
307 | static int ports_open; |
||
308 | static struct port *switch_port_tab[4]; |
||
309 | struct net_device *napi_dev; |
||
310 | |||
311 | static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, |
||
312 | int write, u16 cmd) |
||
313 | { |
||
314 | int cycles = 0; |
||
315 | u32 temp = 0; |
||
316 | |||
317 | temp = __raw_readl(&mdio_regs->phy_control); |
||
318 | temp |= MDIO_CMD_COMPLETE; |
||
319 | __raw_writel(temp, &mdio_regs->phy_control); |
||
320 | udelay(10); |
||
321 | |||
322 | if (write) { |
||
323 | temp = (cmd << MDIO_VALUE_OFFSET); |
||
324 | temp |= MDIO_WRITE_COMMAND; |
||
325 | } else { |
||
326 | temp = MDIO_READ_COMMAND; |
||
327 | } |
||
328 | |||
329 | temp |= ((location & 0x1f) << MDIO_REG_OFFSET); |
||
330 | temp |= (phy_id & 0x1f); |
||
331 | |||
332 | __raw_writel(temp, &mdio_regs->phy_control); |
||
333 | |||
334 | while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0) |
||
335 | && cycles < 5000) { |
||
336 | udelay(1); |
||
337 | cycles++; |
||
338 | } |
||
339 | |||
340 | if (cycles == 5000) { |
||
341 | printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name, phy_id); |
||
342 | return -1; |
||
343 | } |
||
344 | |||
345 | temp = __raw_readl(&mdio_regs->phy_control); |
||
346 | temp |= MDIO_CMD_COMPLETE; |
||
347 | __raw_writel(temp, &mdio_regs->phy_control); |
||
348 | |||
349 | if (write) |
||
350 | return 0; |
||
351 | |||
352 | return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF); |
||
353 | } |
||
354 | |||
355 | static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location) |
||
356 | { |
||
357 | unsigned long flags; |
||
358 | int ret; |
||
359 | |||
360 | spin_lock_irqsave(&mdio_lock, flags); |
||
361 | ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0); |
||
362 | spin_unlock_irqrestore(&mdio_lock, flags); |
||
363 | return ret; |
||
364 | } |
||
365 | |||
366 | static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location, u16 val) |
||
367 | { |
||
368 | unsigned long flags; |
||
369 | int ret; |
||
370 | |||
371 | spin_lock_irqsave(&mdio_lock, flags); |
||
372 | ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val); |
||
373 | spin_unlock_irqrestore(&mdio_lock, flags); |
||
374 | return ret; |
||
375 | } |
||
376 | |||
377 | static int cns3xxx_mdio_register(void __iomem *base) |
||
378 | { |
||
379 | int err; |
||
380 | |||
381 | if (!(mdio_bus = mdiobus_alloc())) |
||
382 | return -ENOMEM; |
||
383 | |||
384 | mdio_regs = base; |
||
385 | |||
386 | spin_lock_init(&mdio_lock); |
||
387 | mdio_bus->name = "CNS3xxx MII Bus"; |
||
388 | mdio_bus->read = &cns3xxx_mdio_read; |
||
389 | mdio_bus->write = &cns3xxx_mdio_write; |
||
390 | strcpy(mdio_bus->id, "0"); |
||
391 | |||
392 | if ((err = mdiobus_register(mdio_bus))) |
||
393 | mdiobus_free(mdio_bus); |
||
394 | |||
395 | return err; |
||
396 | } |
||
397 | |||
398 | static void cns3xxx_mdio_remove(void) |
||
399 | { |
||
400 | mdiobus_unregister(mdio_bus); |
||
401 | mdiobus_free(mdio_bus); |
||
402 | } |
||
403 | |||
404 | static void enable_tx_dma(struct sw *sw) |
||
405 | { |
||
406 | __raw_writel(0x1, &sw->regs->ts_dma_ctrl0); |
||
407 | } |
||
408 | |||
409 | static void enable_rx_dma(struct sw *sw) |
||
410 | { |
||
411 | __raw_writel(0x1, &sw->regs->fs_dma_ctrl0); |
||
412 | } |
||
413 | |||
414 | static void cns3xxx_adjust_link(struct net_device *dev) |
||
415 | { |
||
416 | struct port *port = netdev_priv(dev); |
||
417 | struct phy_device *phydev = port->phydev; |
||
418 | |||
419 | if (!phydev->link) { |
||
420 | if (port->speed) { |
||
421 | port->speed = 0; |
||
422 | printk(KERN_INFO "%s: link down\n", dev->name); |
||
423 | } |
||
424 | return; |
||
425 | } |
||
426 | |||
427 | if (port->speed == phydev->speed && port->duplex == phydev->duplex) |
||
428 | return; |
||
429 | |||
430 | port->speed = phydev->speed; |
||
431 | port->duplex = phydev->duplex; |
||
432 | |||
433 | printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n", |
||
434 | dev->name, port->speed, port->duplex ? "full" : "half"); |
||
435 | } |
||
436 | |||
437 | static void eth_schedule_poll(struct sw *sw) |
||
438 | { |
||
439 | if (unlikely(!napi_schedule_prep(&sw->napi))) |
||
440 | return; |
||
441 | |||
442 | disable_irq_nosync(sw->rx_irq); |
||
443 | __napi_schedule(&sw->napi); |
||
444 | } |
||
445 | |||
446 | irqreturn_t eth_rx_irq(int irq, void *pdev) |
||
447 | { |
||
448 | struct net_device *dev = pdev; |
||
449 | struct sw *sw = netdev_priv(dev); |
||
450 | eth_schedule_poll(sw); |
||
451 | return (IRQ_HANDLED); |
||
452 | } |
||
453 | |||
454 | irqreturn_t eth_stat_irq(int irq, void *pdev) |
||
455 | { |
||
456 | struct net_device *dev = pdev; |
||
457 | struct sw *sw = netdev_priv(dev); |
||
458 | u32 cfg; |
||
459 | u32 stat = __raw_readl(&sw->regs->intr_stat); |
||
460 | __raw_writel(0xffffffff, &sw->regs->intr_stat); |
||
461 | |||
462 | if (stat & MAC2_RX_ERROR) |
||
463 | switch_port_tab[3]->netdev->stats.rx_dropped++; |
||
464 | if (stat & MAC1_RX_ERROR) |
||
465 | switch_port_tab[1]->netdev->stats.rx_dropped++; |
||
466 | if (stat & MAC0_RX_ERROR) |
||
467 | switch_port_tab[0]->netdev->stats.rx_dropped++; |
||
468 | |||
469 | if (stat & MAC0_STATUS_CHANGE) { |
||
470 | cfg = __raw_readl(&sw->regs->mac_cfg[0]); |
||
471 | switch_port_tab[0]->phydev->link = (cfg & 0x1); |
||
472 | switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1); |
||
473 | if (((cfg >> 2) & 0x3) == 2) |
||
474 | switch_port_tab[0]->phydev->speed = 1000; |
||
475 | else if (((cfg >> 2) & 0x3) == 1) |
||
476 | switch_port_tab[0]->phydev->speed = 100; |
||
477 | else |
||
478 | switch_port_tab[0]->phydev->speed = 10; |
||
479 | cns3xxx_adjust_link(switch_port_tab[0]->netdev); |
||
480 | } |
||
481 | |||
482 | if (stat & MAC1_STATUS_CHANGE) { |
||
483 | cfg = __raw_readl(&sw->regs->mac_cfg[1]); |
||
484 | switch_port_tab[1]->phydev->link = (cfg & 0x1); |
||
485 | switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1); |
||
486 | if (((cfg >> 2) & 0x3) == 2) |
||
487 | switch_port_tab[1]->phydev->speed = 1000; |
||
488 | else if (((cfg >> 2) & 0x3) == 1) |
||
489 | switch_port_tab[1]->phydev->speed = 100; |
||
490 | else |
||
491 | switch_port_tab[1]->phydev->speed = 10; |
||
492 | cns3xxx_adjust_link(switch_port_tab[1]->netdev); |
||
493 | } |
||
494 | |||
495 | if (stat & MAC2_STATUS_CHANGE) { |
||
496 | cfg = __raw_readl(&sw->regs->mac_cfg[3]); |
||
497 | switch_port_tab[3]->phydev->link = (cfg & 0x1); |
||
498 | switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1); |
||
499 | if (((cfg >> 2) & 0x3) == 2) |
||
500 | switch_port_tab[3]->phydev->speed = 1000; |
||
501 | else if (((cfg >> 2) & 0x3) == 1) |
||
502 | switch_port_tab[3]->phydev->speed = 100; |
||
503 | else |
||
504 | switch_port_tab[3]->phydev->speed = 10; |
||
505 | cns3xxx_adjust_link(switch_port_tab[3]->netdev); |
||
506 | } |
||
507 | |||
508 | return (IRQ_HANDLED); |
||
509 | } |
||
510 | |||
511 | |||
512 | static void cns3xxx_alloc_rx_buf(struct sw *sw, int received) |
||
513 | { |
||
514 | struct _rx_ring *rx_ring = &sw->rx_ring; |
||
515 | unsigned int i = rx_ring->alloc_index; |
||
516 | struct rx_desc *desc = &(rx_ring)->desc[i]; |
||
517 | void *buf; |
||
518 | unsigned int phys; |
||
519 | |||
520 | for (received += rx_ring->alloc_count; received > 0; received--) { |
||
521 | buf = napi_alloc_frag(RX_SEGMENT_ALLOC_SIZE); |
||
522 | if (!buf) |
||
523 | break; |
||
524 | |||
525 | phys = dma_map_single(sw->dev, buf + SKB_HEAD_ALIGN, |
||
526 | RX_SEGMENT_MRU, DMA_FROM_DEVICE); |
||
527 | if (dma_mapping_error(sw->dev, phys)) { |
||
528 | skb_free_frag(buf); |
||
529 | break; |
||
530 | } |
||
531 | |||
532 | desc->sdl = RX_SEGMENT_MRU; |
||
533 | desc->sdp = phys; |
||
534 | |||
535 | wmb(); |
||
536 | |||
537 | /* put the new buffer on RX-free queue */ |
||
538 | rx_ring->buff_tab[i] = buf; |
||
539 | rx_ring->phys_tab[i] = phys; |
||
540 | |||
541 | if (i == RX_DESCS - 1) { |
||
542 | desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU | END_OF_RING; |
||
543 | i = 0; |
||
544 | desc = &(rx_ring)->desc[i]; |
||
545 | } else { |
||
546 | desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU; |
||
547 | i++; |
||
548 | desc++; |
||
549 | } |
||
550 | } |
||
551 | |||
552 | rx_ring->alloc_count = received; |
||
553 | rx_ring->alloc_index = i; |
||
554 | } |
||
555 | |||
556 | static void eth_check_num_used(struct _tx_ring *tx_ring) |
||
557 | { |
||
558 | bool stop = false; |
||
559 | int i; |
||
560 | |||
561 | if (tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE) |
||
562 | stop = true; |
||
563 | |||
564 | if (tx_ring->stopped == stop) |
||
565 | return; |
||
566 | |||
567 | tx_ring->stopped = stop; |
||
568 | |||
569 | for (i = 0; i < 4; i++) { |
||
570 | struct port *port = switch_port_tab[i]; |
||
571 | struct net_device *dev; |
||
572 | |||
573 | if (!port) |
||
574 | continue; |
||
575 | |||
576 | dev = port->netdev; |
||
577 | |||
578 | if (stop) |
||
579 | netif_stop_queue(dev); |
||
580 | else |
||
581 | netif_wake_queue(dev); |
||
582 | } |
||
583 | } |
||
584 | |||
585 | static void eth_complete_tx(struct sw *sw) |
||
586 | { |
||
587 | struct _tx_ring *tx_ring = &sw->tx_ring; |
||
588 | struct tx_desc *desc; |
||
589 | int i; |
||
590 | int index; |
||
591 | int num_used = tx_ring->num_used; |
||
592 | struct sk_buff *skb; |
||
593 | |||
594 | index = tx_ring->free_index; |
||
595 | desc = &(tx_ring)->desc[index]; |
||
596 | |||
597 | for (i = 0; i < num_used; i++) { |
||
598 | if (!desc->cown) |
||
599 | break; |
||
600 | |||
601 | skb = tx_ring->buff_tab[index]; |
||
602 | tx_ring->buff_tab[index] = 0; |
||
603 | |||
604 | if (skb) |
||
605 | dev_kfree_skb_any(skb); |
||
606 | |||
607 | dma_unmap_single(sw->dev, tx_ring->phys_tab[index], desc->sdl, DMA_TO_DEVICE); |
||
608 | |||
609 | if (index == TX_DESCS - 1) { |
||
610 | index = 0; |
||
611 | desc = &(tx_ring)->desc[index]; |
||
612 | } else { |
||
613 | index++; |
||
614 | desc++; |
||
615 | } |
||
616 | } |
||
617 | |||
618 | tx_ring->free_index = index; |
||
619 | tx_ring->num_used -= i; |
||
620 | eth_check_num_used(tx_ring); |
||
621 | } |
||
622 | |||
623 | static int eth_poll(struct napi_struct *napi, int budget) |
||
624 | { |
||
625 | struct sw *sw = container_of(napi, struct sw, napi); |
||
626 | struct _rx_ring *rx_ring = &sw->rx_ring; |
||
627 | int received = 0; |
||
628 | unsigned int length; |
||
629 | unsigned int i = rx_ring->cur_index; |
||
630 | struct rx_desc *desc = &(rx_ring)->desc[i]; |
||
631 | unsigned int alloc_count = rx_ring->alloc_count; |
||
632 | |||
633 | while (desc->cown && alloc_count + received < RX_DESCS - 1) { |
||
634 | struct sk_buff *skb; |
||
635 | int reserve = SKB_HEAD_ALIGN; |
||
636 | |||
637 | if (received >= budget) |
||
638 | break; |
||
639 | |||
640 | /* process received frame */ |
||
641 | dma_unmap_single(sw->dev, rx_ring->phys_tab[i], RX_SEGMENT_MRU, DMA_FROM_DEVICE); |
||
642 | |||
643 | skb = build_skb(rx_ring->buff_tab[i], RX_SEGMENT_ALLOC_SIZE); |
||
644 | if (!skb) |
||
645 | break; |
||
646 | |||
647 | skb->dev = switch_port_tab[desc->sp]->netdev; |
||
648 | |||
649 | length = desc->sdl; |
||
650 | if (desc->fsd && !desc->lsd) |
||
651 | length = RX_SEGMENT_MRU; |
||
652 | |||
653 | if (!desc->fsd) { |
||
654 | reserve -= NET_IP_ALIGN; |
||
655 | if (!desc->lsd) |
||
656 | length += NET_IP_ALIGN; |
||
657 | } |
||
658 | |||
659 | skb_reserve(skb, reserve); |
||
660 | skb_put(skb, length); |
||
661 | |||
662 | if (!sw->frag_first) |
||
663 | sw->frag_first = skb; |
||
664 | else { |
||
665 | if (sw->frag_first == sw->frag_last) |
||
666 | skb_shinfo(sw->frag_first)->frag_list = skb; |
||
667 | else |
||
668 | sw->frag_last->next = skb; |
||
669 | sw->frag_first->len += skb->len; |
||
670 | sw->frag_first->data_len += skb->len; |
||
671 | sw->frag_first->truesize += skb->truesize; |
||
672 | } |
||
673 | sw->frag_last = skb; |
||
674 | |||
675 | if (desc->lsd) { |
||
676 | struct net_device *dev; |
||
677 | |||
678 | skb = sw->frag_first; |
||
679 | dev = skb->dev; |
||
680 | skb->protocol = eth_type_trans(skb, dev); |
||
681 | |||
682 | dev->stats.rx_packets++; |
||
683 | dev->stats.rx_bytes += skb->len; |
||
684 | |||
685 | /* RX Hardware checksum offload */ |
||
686 | skb->ip_summed = CHECKSUM_NONE; |
||
687 | switch (desc->prot) { |
||
688 | case 1: |
||
689 | case 2: |
||
690 | case 5: |
||
691 | case 6: |
||
692 | case 13: |
||
693 | case 14: |
||
694 | if (!desc->l4f) { |
||
695 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
||
696 | napi_gro_receive(napi, skb); |
||
697 | break; |
||
698 | } |
||
699 | /* fall through */ |
||
700 | default: |
||
701 | netif_receive_skb(skb); |
||
702 | break; |
||
703 | } |
||
704 | |||
705 | sw->frag_first = NULL; |
||
706 | sw->frag_last = NULL; |
||
707 | } |
||
708 | |||
709 | received++; |
||
710 | if (i == RX_DESCS - 1) { |
||
711 | i = 0; |
||
712 | desc = &(rx_ring)->desc[i]; |
||
713 | } else { |
||
714 | i++; |
||
715 | desc++; |
||
716 | } |
||
717 | } |
||
718 | |||
719 | rx_ring->cur_index = i; |
||
720 | |||
721 | cns3xxx_alloc_rx_buf(sw, received); |
||
722 | wmb(); |
||
723 | enable_rx_dma(sw); |
||
724 | |||
725 | if (received < budget && napi_complete_done(napi, received)) { |
||
726 | enable_irq(sw->rx_irq); |
||
727 | } |
||
728 | |||
729 | spin_lock_bh(&tx_lock); |
||
730 | eth_complete_tx(sw); |
||
731 | spin_unlock_bh(&tx_lock); |
||
732 | |||
733 | return received; |
||
734 | } |
||
735 | |||
736 | static void eth_set_desc(struct sw *sw, struct _tx_ring *tx_ring, int index, |
||
737 | int index_last, void *data, int len, u32 config0, |
||
738 | u32 pmap) |
||
739 | { |
||
740 | struct tx_desc *tx_desc = &(tx_ring)->desc[index]; |
||
741 | unsigned int phys; |
||
742 | |||
743 | phys = dma_map_single(sw->dev, data, len, DMA_TO_DEVICE); |
||
744 | tx_desc->sdp = phys; |
||
745 | tx_desc->pmap = pmap; |
||
746 | tx_ring->phys_tab[index] = phys; |
||
747 | |||
748 | config0 |= len; |
||
749 | |||
750 | if (index == TX_DESCS - 1) |
||
751 | config0 |= END_OF_RING; |
||
752 | |||
753 | if (index == index_last) |
||
754 | config0 |= LAST_SEGMENT; |
||
755 | |||
756 | wmb(); |
||
757 | tx_desc->config0 = config0; |
||
758 | } |
||
759 | |||
760 | static int eth_xmit(struct sk_buff *skb, struct net_device *dev) |
||
761 | { |
||
762 | struct port *port = netdev_priv(dev); |
||
763 | struct sw *sw = port->sw; |
||
764 | struct _tx_ring *tx_ring = &sw->tx_ring; |
||
765 | struct sk_buff *skb1; |
||
766 | char pmap = (1 << port->id); |
||
767 | int nr_frags = skb_shinfo(skb)->nr_frags; |
||
768 | int nr_desc = nr_frags; |
||
769 | int index0, index, index_last; |
||
770 | int len0; |
||
771 | int i; |
||
772 | u32 config0; |
||
773 | |||
774 | if (pmap == 8) |
||
775 | pmap = (1 << 4); |
||
776 | |||
777 | skb_walk_frags(skb, skb1) |
||
778 | nr_desc++; |
||
779 | |||
780 | eth_schedule_poll(sw); |
||
781 | spin_lock_bh(&tx_lock); |
||
782 | |||
783 | if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) { |
||
784 | spin_unlock_bh(&tx_lock); |
||
785 | return NETDEV_TX_BUSY; |
||
786 | } |
||
787 | |||
788 | index = index0 = tx_ring->cur_index; |
||
789 | index_last = (index0 + nr_desc) % TX_DESCS; |
||
790 | tx_ring->cur_index = (index_last + 1) % TX_DESCS; |
||
791 | |||
792 | spin_unlock_bh(&tx_lock); |
||
793 | |||
794 | config0 = FORCE_ROUTE; |
||
795 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
||
796 | config0 |= UDP_CHECKSUM | TCP_CHECKSUM; |
||
797 | |||
798 | len0 = skb->len; |
||
799 | |||
800 | /* fragments */ |
||
801 | for (i = 0; i < nr_frags; i++) { |
||
802 | struct skb_frag_struct *frag; |
||
803 | void *addr; |
||
804 | |||
805 | index = (index + 1) % TX_DESCS; |
||
806 | |||
807 | frag = &skb_shinfo(skb)->frags[i]; |
||
808 | addr = page_address(skb_frag_page(frag)) + frag->page_offset; |
||
809 | |||
810 | eth_set_desc(sw, tx_ring, index, index_last, addr, frag->size, |
||
811 | config0, pmap); |
||
812 | } |
||
813 | |||
814 | if (nr_frags) |
||
815 | len0 = skb->len - skb->data_len; |
||
816 | |||
817 | skb_walk_frags(skb, skb1) { |
||
818 | index = (index + 1) % TX_DESCS; |
||
819 | len0 -= skb1->len; |
||
820 | |||
821 | eth_set_desc(sw, tx_ring, index, index_last, skb1->data, |
||
822 | skb1->len, config0, pmap); |
||
823 | } |
||
824 | |||
825 | tx_ring->buff_tab[index0] = skb; |
||
826 | eth_set_desc(sw, tx_ring, index0, index_last, skb->data, len0, |
||
827 | config0 | FIRST_SEGMENT, pmap); |
||
828 | |||
829 | wmb(); |
||
830 | |||
831 | spin_lock(&tx_lock); |
||
832 | tx_ring->num_used += nr_desc + 1; |
||
833 | spin_unlock(&tx_lock); |
||
834 | |||
835 | dev->stats.tx_packets++; |
||
836 | dev->stats.tx_bytes += skb->len; |
||
837 | |||
838 | enable_tx_dma(sw); |
||
839 | |||
840 | return NETDEV_TX_OK; |
||
841 | } |
||
842 | |||
843 | static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) |
||
844 | { |
||
845 | struct port *port = netdev_priv(dev); |
||
846 | |||
847 | if (!netif_running(dev)) |
||
848 | return -EINVAL; |
||
849 | return phy_mii_ioctl(port->phydev, req, cmd); |
||
850 | } |
||
851 | |||
852 | /* ethtool support */ |
||
853 | |||
854 | static void cns3xxx_get_drvinfo(struct net_device *dev, |
||
855 | struct ethtool_drvinfo *info) |
||
856 | { |
||
857 | strcpy(info->driver, DRV_NAME); |
||
858 | strcpy(info->bus_info, "internal"); |
||
859 | } |
||
860 | |||
861 | static int cns3xxx_nway_reset(struct net_device *dev) |
||
862 | { |
||
863 | struct port *port = netdev_priv(dev); |
||
864 | return phy_start_aneg(port->phydev); |
||
865 | } |
||
866 | |||
867 | static struct ethtool_ops cns3xxx_ethtool_ops = { |
||
868 | .get_drvinfo = cns3xxx_get_drvinfo, |
||
869 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
||
870 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
||
871 | .nway_reset = cns3xxx_nway_reset, |
||
872 | .get_link = ethtool_op_get_link, |
||
873 | }; |
||
874 | |||
875 | |||
876 | static int init_rings(struct sw *sw) |
||
877 | { |
||
878 | int i; |
||
879 | struct _rx_ring *rx_ring = &sw->rx_ring; |
||
880 | struct _tx_ring *tx_ring = &sw->tx_ring; |
||
881 | |||
882 | __raw_writel(0, &sw->regs->fs_dma_ctrl0); |
||
883 | __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg); |
||
884 | __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); |
||
885 | __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); |
||
886 | __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); |
||
887 | |||
888 | rx_ring->desc = dmam_alloc_coherent(sw->dev, RX_POOL_ALLOC_SIZE, |
||
889 | &rx_ring->phys_addr, GFP_KERNEL); |
||
890 | if (!rx_ring->desc) |
||
891 | return -ENOMEM; |
||
892 | |||
893 | /* Setup RX buffers */ |
||
894 | memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE); |
||
895 | |||
896 | for (i = 0; i < RX_DESCS; i++) { |
||
897 | struct rx_desc *desc = &(rx_ring)->desc[i]; |
||
898 | void *buf; |
||
899 | |||
900 | buf = netdev_alloc_frag(RX_SEGMENT_ALLOC_SIZE); |
||
901 | if (!buf) |
||
902 | return -ENOMEM; |
||
903 | |||
904 | desc->sdl = RX_SEGMENT_MRU; |
||
905 | |||
906 | if (i == (RX_DESCS - 1)) |
||
907 | desc->eor = 1; |
||
908 | |||
909 | desc->fsd = 1; |
||
910 | desc->lsd = 1; |
||
911 | |||
912 | desc->sdp = dma_map_single(sw->dev, buf + SKB_HEAD_ALIGN, |
||
913 | RX_SEGMENT_MRU, DMA_FROM_DEVICE); |
||
914 | |||
915 | if (dma_mapping_error(sw->dev, desc->sdp)) |
||
916 | return -EIO; |
||
917 | |||
918 | rx_ring->buff_tab[i] = buf; |
||
919 | rx_ring->phys_tab[i] = desc->sdp; |
||
920 | desc->cown = 0; |
||
921 | } |
||
922 | __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0); |
||
923 | __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0); |
||
924 | |||
925 | tx_ring->desc = dmam_alloc_coherent(sw->dev, TX_POOL_ALLOC_SIZE, |
||
926 | &tx_ring->phys_addr, GFP_KERNEL); |
||
927 | if (!tx_ring->desc) |
||
928 | return -ENOMEM; |
||
929 | |||
930 | /* Setup TX buffers */ |
||
931 | memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE); |
||
932 | |||
933 | for (i = 0; i < TX_DESCS; i++) { |
||
934 | struct tx_desc *desc = &(tx_ring)->desc[i]; |
||
935 | tx_ring->buff_tab[i] = 0; |
||
936 | |||
937 | if (i == (TX_DESCS - 1)) |
||
938 | desc->eor = 1; |
||
939 | |||
940 | desc->cown = 1; |
||
941 | } |
||
942 | __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0); |
||
943 | __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0); |
||
944 | |||
945 | return 0; |
||
946 | } |
||
947 | |||
948 | static void destroy_rings(struct sw *sw) |
||
949 | { |
||
950 | int i; |
||
951 | |||
952 | for (i = 0; i < RX_DESCS; i++) { |
||
953 | struct _rx_ring *rx_ring = &sw->rx_ring; |
||
954 | struct rx_desc *desc = &(rx_ring)->desc[i]; |
||
955 | void *buf = sw->rx_ring.buff_tab[i]; |
||
956 | |||
957 | if (!buf) |
||
958 | continue; |
||
959 | |||
960 | dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU, DMA_FROM_DEVICE); |
||
961 | skb_free_frag(buf); |
||
962 | } |
||
963 | |||
964 | for (i = 0; i < TX_DESCS; i++) { |
||
965 | struct _tx_ring *tx_ring = &sw->tx_ring; |
||
966 | struct tx_desc *desc = &(tx_ring)->desc[i]; |
||
967 | struct sk_buff *skb = sw->tx_ring.buff_tab[i]; |
||
968 | |||
969 | if (!skb) |
||
970 | continue; |
||
971 | |||
972 | dma_unmap_single(sw->dev, desc->sdp, skb->len, DMA_TO_DEVICE); |
||
973 | dev_kfree_skb(skb); |
||
974 | } |
||
975 | } |
||
976 | |||
977 | static int eth_open(struct net_device *dev) |
||
978 | { |
||
979 | struct port *port = netdev_priv(dev); |
||
980 | struct sw *sw = port->sw; |
||
981 | u32 temp; |
||
982 | |||
983 | port->speed = 0; /* force "link up" message */ |
||
984 | phy_start(port->phydev); |
||
985 | |||
986 | netif_start_queue(dev); |
||
987 | |||
988 | if (!ports_open) { |
||
989 | request_irq(sw->rx_irq, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev); |
||
990 | request_irq(sw->stat_irq, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev); |
||
991 | napi_enable(&sw->napi); |
||
992 | netif_start_queue(napi_dev); |
||
993 | |||
994 | __raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE | |
||
995 | MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask); |
||
996 | |||
997 | temp = __raw_readl(&sw->regs->mac_cfg[2]); |
||
998 | temp &= ~(PORT_DISABLE); |
||
999 | __raw_writel(temp, &sw->regs->mac_cfg[2]); |
||
1000 | |||
1001 | temp = __raw_readl(&sw->regs->dma_auto_poll_cfg); |
||
1002 | temp &= ~(TS_SUSPEND | FS_SUSPEND); |
||
1003 | __raw_writel(temp, &sw->regs->dma_auto_poll_cfg); |
||
1004 | |||
1005 | enable_rx_dma(sw); |
||
1006 | } |
||
1007 | temp = __raw_readl(&sw->regs->mac_cfg[port->id]); |
||
1008 | temp &= ~(PORT_DISABLE); |
||
1009 | __raw_writel(temp, &sw->regs->mac_cfg[port->id]); |
||
1010 | |||
1011 | ports_open++; |
||
1012 | netif_carrier_on(dev); |
||
1013 | |||
1014 | return 0; |
||
1015 | } |
||
1016 | |||
/*
 * eth_close - ndo_stop handler: bring one switch port down.
 *
 * Disables this port's MAC, stops its queue and PHY.  When the last open
 * port closes, it also tears down the shared resources set up in
 * eth_open(): IRQs, NAPI, the CPU port (MAC 2) and the DMA engines.
 * The register-write ordering mirrors eth_open() in reverse; do not
 * reorder the steps.  Always returns 0.
 */
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct sw *sw = port->sw;
	u32 temp;

	ports_open--;

	/* Disable this port's MAC before stopping the queue/PHY. */
	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
	temp |= (PORT_DISABLE);
	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);

	netif_stop_queue(dev);

	phy_stop(port->phydev);

	if (!ports_open) {
		/* Last port closed: release the shared IRQs and NAPI. */
		disable_irq(sw->rx_irq);
		free_irq(sw->rx_irq, napi_dev);
		disable_irq(sw->stat_irq);
		free_irq(sw->stat_irq, napi_dev);
		napi_disable(&sw->napi);
		netif_stop_queue(napi_dev);

		/* Disable the CPU port (MAC 2). */
		temp = __raw_readl(&sw->regs->mac_cfg[2]);
		temp |= (PORT_DISABLE);
		__raw_writel(temp, &sw->regs->mac_cfg[2]);

		/* Suspend both DMA auto-poll engines. */
		__raw_writel(TS_SUSPEND | FS_SUSPEND,
			     &sw->regs->dma_auto_poll_cfg);
	}

	netif_carrier_off(dev);
	return 0;
}
||
1051 | |||
1052 | static void eth_rx_mode(struct net_device *dev) |
||
1053 | { |
||
1054 | struct port *port = netdev_priv(dev); |
||
1055 | struct sw *sw = port->sw; |
||
1056 | u32 temp; |
||
1057 | |||
1058 | temp = __raw_readl(&sw->regs->mac_glob_cfg); |
||
1059 | |||
1060 | if (dev->flags & IFF_PROMISC) { |
||
1061 | if (port->id == 3) |
||
1062 | temp |= ((1 << 2) << PROMISC_OFFSET); |
||
1063 | else |
||
1064 | temp |= ((1 << port->id) << PROMISC_OFFSET); |
||
1065 | } else { |
||
1066 | if (port->id == 3) |
||
1067 | temp &= ~((1 << 2) << PROMISC_OFFSET); |
||
1068 | else |
||
1069 | temp &= ~((1 << port->id) << PROMISC_OFFSET); |
||
1070 | } |
||
1071 | __raw_writel(temp, &sw->regs->mac_glob_cfg); |
||
1072 | } |
||
1073 | |||
1074 | static int eth_set_mac(struct net_device *netdev, void *p) |
||
1075 | { |
||
1076 | struct port *port = netdev_priv(netdev); |
||
1077 | struct sw *sw = port->sw; |
||
1078 | struct sockaddr *addr = p; |
||
1079 | u32 cycles = 0; |
||
1080 | |||
1081 | if (!is_valid_ether_addr(addr->sa_data)) |
||
1082 | return -EADDRNOTAVAIL; |
||
1083 | |||
1084 | /* Invalidate old ARL Entry */ |
||
1085 | if (port->id == 3) |
||
1086 | __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); |
||
1087 | else |
||
1088 | __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); |
||
1089 | __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) | |
||
1090 | (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])), |
||
1091 | &sw->regs->arl_ctrl[1]); |
||
1092 | |||
1093 | __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) | |
||
1094 | (1 << 1)), |
||
1095 | &sw->regs->arl_ctrl[2]); |
||
1096 | __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd); |
||
1097 | |||
1098 | while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) |
||
1099 | && cycles < 5000) { |
||
1100 | udelay(1); |
||
1101 | cycles++; |
||
1102 | } |
||
1103 | |||
1104 | cycles = 0; |
||
1105 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
||
1106 | |||
1107 | if (port->id == 3) |
||
1108 | __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); |
||
1109 | else |
||
1110 | __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); |
||
1111 | __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) | |
||
1112 | (addr->sa_data[2] << 8) | (addr->sa_data[3])), |
||
1113 | &sw->regs->arl_ctrl[1]); |
||
1114 | |||
1115 | __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) | |
||
1116 | (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]); |
||
1117 | __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd); |
||
1118 | |||
1119 | while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) |
||
1120 | && cycles < 5000) { |
||
1121 | udelay(1); |
||
1122 | cycles++; |
||
1123 | } |
||
1124 | return 0; |
||
1125 | } |
||
1126 | |||
/* net_device operations shared by all CNS3xxx switch ports. */
static const struct net_device_ops cns3xxx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_rx_mode,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_set_mac,
	.ndo_validate_addr = eth_validate_addr,
};
||
1136 | |||
1137 | static int eth_init_one(struct platform_device *pdev) |
||
1138 | { |
||
1139 | int i; |
||
1140 | struct port *port; |
||
1141 | struct sw *sw; |
||
1142 | struct net_device *dev; |
||
1143 | struct cns3xxx_plat_info *plat = pdev->dev.platform_data; |
||
1144 | char phy_id[MII_BUS_ID_SIZE + 3]; |
||
1145 | int err; |
||
1146 | u32 temp; |
||
1147 | struct resource *res; |
||
1148 | void __iomem *regs; |
||
1149 | |||
1150 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
||
1151 | regs = devm_ioremap_resource(&pdev->dev, res); |
||
1152 | if (IS_ERR(regs)) |
||
1153 | return PTR_ERR(regs); |
||
1154 | |||
1155 | err = cns3xxx_mdio_register(regs); |
||
1156 | if (err) |
||
1157 | return err; |
||
1158 | |||
1159 | if (!(napi_dev = alloc_etherdev(sizeof(struct sw)))) { |
||
1160 | err = -ENOMEM; |
||
1161 | goto err_remove_mdio; |
||
1162 | } |
||
1163 | |||
1164 | strcpy(napi_dev->name, "cns3xxx_eth"); |
||
1165 | napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST; |
||
1166 | |||
1167 | SET_NETDEV_DEV(napi_dev, &pdev->dev); |
||
1168 | sw = netdev_priv(napi_dev); |
||
1169 | memset(sw, 0, sizeof(struct sw)); |
||
1170 | sw->regs = regs; |
||
1171 | sw->dev = &pdev->dev; |
||
1172 | |||
1173 | sw->rx_irq = platform_get_irq_byname(pdev, "eth_rx"); |
||
1174 | sw->stat_irq = platform_get_irq_byname(pdev, "eth_stat"); |
||
1175 | |||
1176 | temp = __raw_readl(&sw->regs->phy_auto_addr); |
||
1177 | temp |= (3 << 30); /* maximum frame length: 9600 bytes */ |
||
1178 | __raw_writel(temp, &sw->regs->phy_auto_addr); |
||
1179 | |||
1180 | for (i = 0; i < 4; i++) { |
||
1181 | temp = __raw_readl(&sw->regs->mac_cfg[i]); |
||
1182 | temp |= (PORT_DISABLE); |
||
1183 | __raw_writel(temp, &sw->regs->mac_cfg[i]); |
||
1184 | } |
||
1185 | |||
1186 | temp = PORT_DISABLE; |
||
1187 | __raw_writel(temp, &sw->regs->mac_cfg[2]); |
||
1188 | |||
1189 | temp = __raw_readl(&sw->regs->vlan_cfg); |
||
1190 | temp |= NIC_MODE | VLAN_UNAWARE; |
||
1191 | __raw_writel(temp, &sw->regs->vlan_cfg); |
||
1192 | |||
1193 | __raw_writel(UNKNOWN_VLAN_TO_CPU | |
||
1194 | CRC_STRIPPING, &sw->regs->mac_glob_cfg); |
||
1195 | |||
1196 | if ((err = init_rings(sw)) != 0) { |
||
1197 | err = -ENOMEM; |
||
1198 | goto err_free; |
||
1199 | } |
||
1200 | platform_set_drvdata(pdev, napi_dev); |
||
1201 | |||
1202 | netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT); |
||
1203 | |||
1204 | for (i = 0; i < 3; i++) { |
||
1205 | if (!(plat->ports & (1 << i))) { |
||
1206 | continue; |
||
1207 | } |
||
1208 | |||
1209 | if (!(dev = alloc_etherdev(sizeof(struct port)))) { |
||
1210 | goto free_ports; |
||
1211 | } |
||
1212 | |||
1213 | port = netdev_priv(dev); |
||
1214 | port->netdev = dev; |
||
1215 | if (i == 2) |
||
1216 | port->id = 3; |
||
1217 | else |
||
1218 | port->id = i; |
||
1219 | port->sw = sw; |
||
1220 | |||
1221 | temp = __raw_readl(&sw->regs->mac_cfg[port->id]); |
||
1222 | temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS); |
||
1223 | __raw_writel(temp, &sw->regs->mac_cfg[port->id]); |
||
1224 | |||
1225 | SET_NETDEV_DEV(dev, &pdev->dev); |
||
1226 | dev->netdev_ops = &cns3xxx_netdev_ops; |
||
1227 | dev->ethtool_ops = &cns3xxx_ethtool_ops; |
||
1228 | dev->tx_queue_len = 1000; |
||
1229 | dev->max_mtu = MAX_MTU; |
||
1230 | dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST; |
||
1231 | |||
1232 | switch_port_tab[port->id] = port; |
||
1233 | memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN); |
||
1234 | |||
1235 | snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]); |
||
1236 | port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, |
||
1237 | PHY_INTERFACE_MODE_RGMII); |
||
1238 | if ((err = IS_ERR(port->phydev))) { |
||
1239 | switch_port_tab[port->id] = 0; |
||
1240 | free_netdev(dev); |
||
1241 | goto free_ports; |
||
1242 | } |
||
1243 | |||
1244 | port->phydev->irq = PHY_IGNORE_INTERRUPT; |
||
1245 | |||
1246 | if ((err = register_netdev(dev))) { |
||
1247 | phy_disconnect(port->phydev); |
||
1248 | switch_port_tab[port->id] = 0; |
||
1249 | free_netdev(dev); |
||
1250 | goto free_ports; |
||
1251 | } |
||
1252 | |||
1253 | printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]); |
||
1254 | netif_carrier_off(dev); |
||
1255 | dev = 0; |
||
1256 | } |
||
1257 | |||
1258 | return 0; |
||
1259 | |||
1260 | free_ports: |
||
1261 | err = -ENOMEM; |
||
1262 | for (--i; i >= 0; i--) { |
||
1263 | if (switch_port_tab[i]) { |
||
1264 | port = switch_port_tab[i]; |
||
1265 | dev = port->netdev; |
||
1266 | unregister_netdev(dev); |
||
1267 | phy_disconnect(port->phydev); |
||
1268 | switch_port_tab[i] = 0; |
||
1269 | free_netdev(dev); |
||
1270 | } |
||
1271 | } |
||
1272 | err_free: |
||
1273 | free_netdev(napi_dev); |
||
1274 | err_remove_mdio: |
||
1275 | cns3xxx_mdio_remove(); |
||
1276 | return err; |
||
1277 | } |
||
1278 | |||
1279 | static int eth_remove_one(struct platform_device *pdev) |
||
1280 | { |
||
1281 | struct net_device *dev = platform_get_drvdata(pdev); |
||
1282 | struct sw *sw = netdev_priv(dev); |
||
1283 | int i; |
||
1284 | |||
1285 | destroy_rings(sw); |
||
1286 | for (i = 3; i >= 0; i--) { |
||
1287 | if (switch_port_tab[i]) { |
||
1288 | struct port *port = switch_port_tab[i]; |
||
1289 | struct net_device *dev = port->netdev; |
||
1290 | unregister_netdev(dev); |
||
1291 | phy_disconnect(port->phydev); |
||
1292 | switch_port_tab[i] = 0; |
||
1293 | free_netdev(dev); |
||
1294 | } |
||
1295 | } |
||
1296 | |||
1297 | free_netdev(napi_dev); |
||
1298 | cns3xxx_mdio_remove(); |
||
1299 | |||
1300 | return 0; |
||
1301 | } |
||
1302 | |||
/* Platform driver glue; matched against the "cns3xxx_eth" platform device. */
static struct platform_driver cns3xxx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};
||
1308 | |||
/* Module entry point: register the platform driver. */
static int __init eth_init_module(void)
{
	return platform_driver_register(&cns3xxx_eth_driver);
}
||
1313 | |||
/* Module exit point: unregister the platform driver. */
static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&cns3xxx_eth_driver);
}
||
1318 | |||
module_init(eth_init_module);
module_exit(eth_cleanup_module);

/* Module metadata. */
MODULE_AUTHOR("Chris Lang");
MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cns3xxx_eth");