OpenWrt – Blame information for rev 1
?pathlinks?
Rev | Author | Line No. | Line |
---|---|---|---|
1 | office | 1 | /* |
2 | * ADM5120 built-in ethernet switch driver |
||
3 | * |
||
4 | * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org> |
||
5 | * |
||
6 | * This code was based on a driver for Linux 2.6.xx by Jeroen Vreeken. |
||
7 | * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005 |
||
8 | * NAPI extension for the Jeroen's driver |
||
9 | * Copyright Thomas Langer (Thomas.Langer@infineon.com), 2007 |
||
10 | * Copyright Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007 |
||
11 | * Inspiration for the Jeroen's driver came from the ADMtek 2.4 driver. |
||
12 | * Copyright ADMtek Inc. |
||
13 | * |
||
14 | * This program is free software; you can redistribute it and/or modify it |
||
15 | * under the terms of the GNU General Public License version 2 as published |
||
16 | * by the Free Software Foundation. |
||
17 | * |
||
18 | */ |
||
19 | |||
20 | #include <linux/kernel.h> |
||
21 | #include <linux/module.h> |
||
22 | #include <linux/errno.h> |
||
23 | #include <linux/interrupt.h> |
||
24 | #include <linux/ioport.h> |
||
25 | #include <linux/spinlock.h> |
||
26 | #include <linux/platform_device.h> |
||
27 | #include <linux/io.h> |
||
28 | #include <linux/irq.h> |
||
29 | |||
30 | #include <linux/netdevice.h> |
||
31 | #include <linux/etherdevice.h> |
||
32 | #include <linux/skbuff.h> |
||
33 | |||
34 | #include <asm/mipsregs.h> |
||
35 | |||
36 | #include <asm/mach-adm5120/adm5120_info.h> |
||
37 | #include <asm/mach-adm5120/adm5120_defs.h> |
||
38 | #include <asm/mach-adm5120/adm5120_switch.h> |
||
39 | |||
40 | #include "adm5120sw.h" |
||
41 | #include <linux/dma-mapping.h> |
||
42 | |||
43 | #define DRV_NAME "adm5120-switch" |
||
44 | #define DRV_DESC "ADM5120 built-in ethernet switch driver" |
||
45 | #define DRV_VERSION "0.1.1" |
||
46 | |||
47 | #define CONFIG_ADM5120_SWITCH_NAPI 1 |
||
48 | #undef CONFIG_ADM5120_SWITCH_DEBUG |
||
49 | |||
50 | /* ------------------------------------------------------------------------ */ |
||
51 | |||
52 | #ifdef CONFIG_ADM5120_SWITCH_DEBUG |
||
53 | #define SW_DBG(f, a...) printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a) |
||
54 | #else |
||
55 | #define SW_DBG(f, a...) do {} while (0) |
||
56 | #endif |
||
57 | #define SW_ERR(f, a...) printk(KERN_ERR "%s: " f, DRV_NAME , ## a) |
||
58 | #define SW_INFO(f, a...) printk(KERN_INFO "%s: " f, DRV_NAME , ## a) |
||
59 | |||
60 | #define SWITCH_NUM_PORTS 6 |
||
61 | #define ETH_CSUM_LEN 4 |
||
62 | |||
63 | #define RX_MAX_PKTLEN 1550 |
||
64 | #define RX_RING_SIZE 64 |
||
65 | |||
66 | #define TX_RING_SIZE 32 |
||
67 | #define TX_QUEUE_LEN 28 /* Limit ring entries actually used. */ |
||
68 | #define TX_TIMEOUT (HZ * 400) |
||
69 | |||
70 | #define RX_DESCS_SIZE (RX_RING_SIZE * sizeof(struct dma_desc *)) |
||
71 | #define RX_SKBS_SIZE (RX_RING_SIZE * sizeof(struct sk_buff *)) |
||
72 | #define TX_DESCS_SIZE (TX_RING_SIZE * sizeof(struct dma_desc *)) |
||
73 | #define TX_SKBS_SIZE (TX_RING_SIZE * sizeof(struct sk_buff *)) |
||
74 | |||
75 | #define SKB_ALLOC_LEN (RX_MAX_PKTLEN + 32) |
||
76 | #define SKB_RESERVE_LEN (NET_IP_ALIGN + NET_SKB_PAD) |
||
77 | |||
78 | #define SWITCH_INTS_HIGH (SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF) |
||
79 | #define SWITCH_INTS_LOW (SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF) |
||
80 | #define SWITCH_INTS_ERR (SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH) |
||
81 | #define SWITCH_INTS_Q (SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \ |
||
82 | SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \ |
||
83 | SWITCH_INT_CPQF | SWITCH_INT_GQF) |
||
84 | |||
85 | #define SWITCH_INTS_ALL (SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \ |
||
86 | SWITCH_INTS_ERR | SWITCH_INTS_Q | \ |
||
87 | SWITCH_INT_MD | SWITCH_INT_PSC) |
||
88 | |||
89 | #define SWITCH_INTS_USED (SWITCH_INTS_LOW | SWITCH_INT_PSC) |
||
90 | #define SWITCH_INTS_POLL (SWITCH_INT_RLD | SWITCH_INT_LDF | SWITCH_INT_SLD) |
||
91 | |||
92 | /* ------------------------------------------------------------------------ */ |
||
93 | |||
94 | struct adm5120_if_priv { |
||
95 | struct net_device *dev; |
||
96 | |||
97 | unsigned int vlan_no; |
||
98 | unsigned int port_mask; |
||
99 | |||
100 | #ifdef CONFIG_ADM5120_SWITCH_NAPI |
||
101 | struct napi_struct napi; |
||
102 | #endif |
||
103 | }; |
||
104 | |||
/*
 * Hardware DMA descriptor, shared between the CPU and the switch engine.
 * Layout must match the switch's expectations; 16-byte aligned.
 * The interleaved #defines document the bit fields of the member that
 * precedes them.
 */
struct dma_desc {
	__u32 buf1;	/* buffer 1 physical address + control bits */
#define DESC_OWN		(1UL << 31)	/* Owned by the switch */
#define DESC_EOR		(1UL << 28)	/* End of Ring */
#define DESC_ADDR_MASK		0x1FFFFFF
#define DESC_ADDR(x)	((__u32)(x) & DESC_ADDR_MASK)
	__u32 buf2;	/* optional second buffer (unused by this driver) */
#define DESC_BUF2_EN		(1UL << 31)	/* Buffer 2 enable */
	__u32 buflen;	/* length of buffer 1 in bytes */
	__u32 misc;	/* direction-dependent metadata, see below */
/* definitions for tx/rx descriptors */
#define DESC_PKTLEN_SHIFT	16
#define DESC_PKTLEN_MASK	0x7FF
/* tx descriptor specific part */
#define DESC_CSUM		(1UL << 31)	/* Append checksum */
#define DESC_DSTPORT_SHIFT	8
#define DESC_DSTPORT_MASK	0x3F
#define DESC_VLAN_MASK		0x3F
/* rx descriptor specific part */
#define DESC_SRCPORT_SHIFT	12
#define DESC_SRCPORT_MASK	0x7
#define DESC_DA_MASK		0x3
#define DESC_DA_SHIFT		4
#define DESC_IPCSUM_FAIL	(1UL << 3)	/* IP checksum fail */
#define DESC_VLAN_TAG		(1UL << 2)	/* VLAN tag present */
#define DESC_TYPE_MASK		0x3	/* mask for Packet type */
#define DESC_TYPE_IP		0x0	/* IP packet */
#define DESC_TYPE_PPPoE		0x1	/* PPPoE packet */
} __attribute__ ((aligned(16)));
||
134 | |||
135 | /* ------------------------------------------------------------------------ */ |
||
136 | |||
/* Number of logical interfaces (VLAN groups) actually registered. */
static int adm5120_nrdevs;

/* One net_device per configured VLAN group. */
static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SWITCH_NUM_PORTS];

/* TX/RX descriptor rings (DMA-coherent memory, shared with the switch). */
static struct dma_desc *txl_descs;
static struct dma_desc *rxl_descs;

/* Bus addresses of the descriptor rings. */
static dma_addr_t txl_descs_dma;
static dma_addr_t rxl_descs_dma;

/* Shadow arrays mapping ring entries to the skbs backing them. */
static struct sk_buff **txl_skbuff;
static struct sk_buff **rxl_skbuff;

/* Free-running ring indices; entry = index % RING_SIZE. */
static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
static unsigned int cur_txl, dirty_txl;

/* Open-interface refcount: interrupts are enabled while sw_used > 0. */
static unsigned int sw_used;

/* Protects the TX ring indices and descriptors. */
static DEFINE_SPINLOCK(tx_lock);
||
158 | |||
159 | /* ------------------------------------------------------------------------ */ |
||
160 | |||
/* Read a 32-bit switch register (uncached KSEG1 mapping of the MMIO base). */
static inline u32 sw_read_reg(u32 reg)
{
	return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}
||
165 | |||
/* Write a 32-bit switch register (uncached KSEG1 mapping of the MMIO base). */
static inline void sw_write_reg(u32 reg, u32 val)
{
	__raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}
||
170 | |||
171 | static inline void sw_int_mask(u32 mask) |
||
172 | { |
||
173 | u32 t; |
||
174 | |||
175 | t = sw_read_reg(SWITCH_REG_INT_MASK); |
||
176 | t |= mask; |
||
177 | sw_write_reg(SWITCH_REG_INT_MASK, t); |
||
178 | } |
||
179 | |||
180 | static inline void sw_int_unmask(u32 mask) |
||
181 | { |
||
182 | u32 t; |
||
183 | |||
184 | t = sw_read_reg(SWITCH_REG_INT_MASK); |
||
185 | t &= ~mask; |
||
186 | sw_write_reg(SWITCH_REG_INT_MASK, t); |
||
187 | } |
||
188 | |||
/* Acknowledge interrupts: the status register is write-1-to-clear. */
static inline void sw_int_ack(u32 mask)
{
	sw_write_reg(SWITCH_REG_INT_STATUS, mask);
}
||
193 | |||
194 | static inline u32 sw_int_status(void) |
||
195 | { |
||
196 | u32 t; |
||
197 | |||
198 | t = sw_read_reg(SWITCH_REG_INT_STATUS); |
||
199 | t &= ~sw_read_reg(SWITCH_REG_INT_MASK); |
||
200 | return t; |
||
201 | } |
||
202 | |||
/* Extract the ingress port number from an RX descriptor's misc word. */
static inline u32 desc_get_srcport(struct dma_desc *desc)
{
	return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
}
||
207 | |||
/* Extract the packet length (including the trailing CRC) from a descriptor. */
static inline u32 desc_get_pktlen(struct dma_desc *desc)
{
	return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
}
||
212 | |||
213 | static inline int desc_ipcsum_fail(struct dma_desc *desc) |
||
214 | { |
||
215 | return ((desc->misc & DESC_IPCSUM_FAIL) != 0); |
||
216 | } |
||
217 | |||
218 | /* ------------------------------------------------------------------------ */ |
||
219 | |||
220 | #ifdef CONFIG_ADM5120_SWITCH_DEBUG |
||
/*
 * Debug helper: decode and print one DMA descriptor.
 * @tx selects the tx (nonzero) or rx (zero) interpretation of the misc word.
 * Compiled only when CONFIG_ADM5120_SWITCH_DEBUG is defined.
 */
static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
{
	u32 t;

	SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

	t = desc->buf1;
	SW_DBG(" buf1 %08X addr=%08X; len=%08X %s%s\n", t,
		t & DESC_ADDR_MASK,
		desc->buflen,
		(t & DESC_OWN) ? "SWITCH" : "CPU",
		(t & DESC_EOR) ? " RE" : "");

	t = desc->buf2;
	SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
		t & DESC_ADDR_MASK,
		(t & DESC_BUF2_EN) ? " EN" : "");

	t = desc->misc;
	if (tx)
		SW_DBG(" misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
			(t & DESC_CSUM) ? " CSUM" : "",
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
			t & DESC_VLAN_MASK);
	else
		SW_DBG(" misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
			t,
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
			(t >> DESC_DA_SHIFT) & DESC_DA_MASK,
			(t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
			(t & DESC_VLAN_TAG) ? " VLAN" : "",
			(t & DESC_TYPE_MASK));
}
||
256 | |||
/*
 * Debug helper: print an interrupt status/mask value with one symbolic
 * tag per set bit.  Compiled only when CONFIG_ADM5120_SWITCH_DEBUG is on.
 */
static void sw_dump_intr_mask(char *label, u32 mask)
{
	SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		label, mask,
		(mask & SWITCH_INT_SHD) ? " SHD" : "",
		(mask & SWITCH_INT_SLD) ? " SLD" : "",
		(mask & SWITCH_INT_RHD) ? " RHD" : "",
		(mask & SWITCH_INT_RLD) ? " RLD" : "",
		(mask & SWITCH_INT_HDF) ? " HDF" : "",
		(mask & SWITCH_INT_LDF) ? " LDF" : "",
		(mask & SWITCH_INT_P0QF) ? " P0QF" : "",
		(mask & SWITCH_INT_P1QF) ? " P1QF" : "",
		(mask & SWITCH_INT_P2QF) ? " P2QF" : "",
		(mask & SWITCH_INT_P3QF) ? " P3QF" : "",
		(mask & SWITCH_INT_P4QF) ? " P4QF" : "",
		(mask & SWITCH_INT_CPQF) ? " CPQF" : "",
		(mask & SWITCH_INT_GQF) ? " GQF" : "",
		(mask & SWITCH_INT_MD) ? " MD" : "",
		(mask & SWITCH_INT_BCS) ? " BCS" : "",
		(mask & SWITCH_INT_PSC) ? " PSC" : "",
		(mask & SWITCH_INT_ID) ? " ID" : "",
		(mask & SWITCH_INT_W0TE) ? " W0TE" : "",
		(mask & SWITCH_INT_W1TE) ? " W1TE" : "",
		(mask & SWITCH_INT_RDE) ? " RDE" : "",
		(mask & SWITCH_INT_SDE) ? " SDE" : "",
		(mask & SWITCH_INT_CPUH) ? " CPUH" : "");
}
||
284 | |||
/*
 * Debug helper: dump the interesting switch registers (PHY status, port
 * and CPU-port configuration, VLAN groups, bandwidth control, PHY control,
 * interrupt state and the four descriptor base addresses).
 * Compiled only when CONFIG_ADM5120_SWITCH_DEBUG is defined.
 */
static void sw_dump_regs(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_PHY_STATUS);
	SW_DBG("phy_status: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	SW_DBG("cpup_conf: %08X%s%s%s\n", t,
		(t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
		(t & CPUP_CONF_CRCP) ? " CRCP" : "",
		(t & CPUP_CONF_BTM) ? " BTM" : "");

	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	SW_DBG("port_conf0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF1);
	SW_DBG("port_conf1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF2);
	SW_DBG("port_conf2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_VLAN_G1);
	SW_DBG("vlan g1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_VLAN_G2);
	SW_DBG("vlan g2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_BW_CNTL0);
	SW_DBG("bw_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_BW_CNTL1);
	SW_DBG("bw_cntl1: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL0);
	SW_DBG("phy_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL1);
	SW_DBG("phy_cntl1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL2);
	SW_DBG("phy_cntl2: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	SW_DBG("phy_cntl3: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL4);
	SW_DBG("phy_cntl4: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	sw_dump_intr_mask("int_status: ", t);

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	sw_dump_intr_mask("int_mask: ", t);

	t = sw_read_reg(SWITCH_REG_SHDA);
	SW_DBG("shda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_SLDA);
	SW_DBG("slda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RHDA);
	SW_DBG("rhda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RLDA);
	SW_DBG("rlda: %08X\n", t);
}
||
341 | #else |
||
342 | static inline void sw_dump_desc(char *label, struct dma_desc *desc, int tx) {} |
||
343 | static void sw_dump_intr_mask(char *label, u32 mask) {} |
||
344 | static inline void sw_dump_regs(void) {} |
||
345 | #endif /* CONFIG_ADM5120_SWITCH_DEBUG */ |
||
346 | |||
347 | /* ------------------------------------------------------------------------ */ |
||
348 | |||
/*
 * (Re)arm one RX descriptor with a fresh skb data buffer.
 * buf1 is written last because setting DESC_OWN hands the descriptor to
 * the switch; @end marks the last descriptor of the ring (DESC_EOR).
 */
static inline void adm5120_rx_dma_update(struct dma_desc *desc,
		struct sk_buff *skb, int end)
{
	desc->misc = 0;
	desc->buf2 = 0;
	desc->buflen = RX_MAX_PKTLEN;
	desc->buf1 = DESC_ADDR(skb->data) |
		DESC_OWN | (end ? DESC_EOR : 0);
}
||
358 | |||
/*
 * Give consumed RX ring entries back to the hardware, allocating a new skb
 * for every slot whose buffer was handed up the stack.  Advances dirty_rxl
 * towards cur_rxl; stops early (leaving the descriptor owned by the switch
 * with a zero-length buffer) if an skb allocation fails under memory
 * pressure.  Called from the RX path, hence GFP_ATOMIC.
 */
static void adm5120_switch_rx_refill(void)
{
	unsigned int entry;

	for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
		struct dma_desc *desc;
		struct sk_buff *skb;

		entry = dirty_rxl % RX_RING_SIZE;
		desc = &rxl_descs[entry];

		skb = rxl_skbuff[entry];
		if (skb == NULL) {
			skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
			if (skb) {
				skb_reserve(skb, SKB_RESERVE_LEN);
				rxl_skbuff[entry] = skb;
			} else {
				SW_ERR("no memory for skb\n");
				/* no buffer: park the descriptor with len 0,
				 * preserving only its End-of-Ring marker */
				desc->buflen = 0;
				desc->buf2 = 0;
				desc->misc = 0;
				desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
				break;
			}
		}

		/* rearm: buf1 (with DESC_OWN) is written last */
		desc->buf2 = 0;
		desc->buflen = RX_MAX_PKTLEN;
		desc->misc = 0;
		desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
				DESC_ADDR(skb->data);
	}
}
||
393 | |||
/*
 * Harvest completed RX descriptors and feed packets to the stack.
 * Processes at most @limit packets and returns the number delivered
 * (error/recycled frames do not count towards @limit).  Descriptors still
 * owned by the switch, or a full ring, terminate the loop.  The ring is
 * refilled incrementally while draining and once more at the end.
 */
static int adm5120_switch_rx(int limit)
{
	unsigned int done = 0;

	SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
			limit, cur_rxl, dirty_rxl);

	while (done < limit) {
		int entry = cur_rxl % RX_RING_SIZE;
		struct dma_desc *desc = &rxl_descs[entry];
		struct net_device *rdev;
		unsigned int port;

		/* still owned by the switch: nothing more to reap */
		if (desc->buf1 & DESC_OWN)
			break;

		/* whole ring consumed without a refill: stop */
		if (dirty_rxl + RX_RING_SIZE == cur_rxl)
			break;

		/* route the frame to the device owning the ingress port */
		port = desc_get_srcport(desc);
		rdev = adm5120_port[port];

		SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
				rxl_skbuff[entry]);

		if ((rdev) && netif_running(rdev)) {
			struct sk_buff *skb = rxl_skbuff[entry];
			int pktlen;

			/* strip the trailing hardware checksum */
			pktlen = desc_get_pktlen(desc);
			pktlen -= ETH_CSUM_LEN;

			if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
				rdev->stats.rx_errors++;
				if (pktlen == 0)
					rdev->stats.rx_length_errors++;
				if (desc_ipcsum_fail(desc))
					rdev->stats.rx_crc_errors++;
				SW_DBG("rx error, recycling skb %u\n", entry);
			} else {
				skb_put(skb, pktlen);

				skb->dev = rdev;
				skb->protocol = eth_type_trans(skb, rdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				dma_cache_wback_inv((unsigned long)skb->data,
					skb->len);

#ifdef CONFIG_ADM5120_SWITCH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif

				rdev->last_rx = jiffies;
				rdev->stats.rx_packets++;
				rdev->stats.rx_bytes += pktlen;

				/* buffer handed to the stack; refill will
				 * allocate a replacement for this slot */
				rxl_skbuff[entry] = NULL;
				done++;
			}
		} else {
			/* port not mapped to an active device: recycle */
			SW_DBG("no rx device, recycling skb %u\n", entry);
		}

		cur_rxl++;
		/* refill in batches so the switch never starves */
		if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
			adm5120_switch_rx_refill();
	}

	adm5120_switch_rx_refill();

	SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
			cur_rxl, dirty_rxl, done);

	return done;
}
||
472 | |||
/*
 * Reclaim completed TX descriptors: free the transmitted skbs, account
 * tx stats, and wake all interface queues once enough ring space is
 * available again.  Runs under tx_lock; callable from irq context
 * (dev_kfree_skb_irq).
 */
static void adm5120_switch_tx(void)
{
	unsigned int entry;

	spin_lock(&tx_lock);
	entry = dirty_txl % TX_RING_SIZE;
	while (dirty_txl != cur_txl) {
		struct dma_desc *desc = &txl_descs[entry];
		struct sk_buff *skb = txl_skbuff[entry];

		/* switch has not finished sending this one yet */
		if (desc->buf1 & DESC_OWN)
			break;

		if (netif_running(skb->dev)) {
			skb->dev->stats.tx_bytes += skb->len;
			skb->dev->stats.tx_packets++;
		}

		dev_kfree_skb_irq(skb);
		txl_skbuff[entry] = NULL;
		entry = (++dirty_txl) % TX_RING_SIZE;
	}

	/* wake queues once there is comfortable headroom in the ring */
	if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
		int i;
		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
				continue;
			netif_wake_queue(adm5120_devs[i]);
		}
	}
	spin_unlock(&tx_lock);
}
||
506 | |||
507 | #ifdef CONFIG_ADM5120_SWITCH_NAPI |
||
/*
 * NAPI poll callback: ack the poll interrupts, reclaim the TX ring, then
 * receive up to @limit packets.  Leaves polling mode (napi_complete +
 * re-enable interrupts) when the budget was not exhausted and no poll
 * interrupt is pending; otherwise stays scheduled.
 * Returns 0 when polling stopped, 1 while it continues.
 */
static int adm5120_if_poll(struct napi_struct *napi, int limit)
{
	struct adm5120_if_priv *priv = container_of(napi,
				struct adm5120_if_priv, napi);
	struct net_device *dev __maybe_unused = priv->dev;
	int done;
	u32 status;

	sw_int_ack(SWITCH_INTS_POLL);

	SW_DBG("%s: processing TX ring\n", dev->name);
	adm5120_switch_tx();

	SW_DBG("%s: processing RX ring\n", dev->name);
	done = adm5120_switch_rx(limit);

	/* anything new arrived while we were draining? */
	status = sw_int_status() & SWITCH_INTS_POLL;
	if ((done < limit) && (!status)) {
		SW_DBG("disable polling mode for %s\n", dev->name);
		napi_complete(napi);
		sw_int_unmask(SWITCH_INTS_POLL);
		return 0;
	}

	SW_DBG("%s still in polling mode, done=%d, status=%x\n",
			dev->name, done, status);
	return 1;
}
||
536 | #endif /* CONFIG_ADM5120_SWITCH_NAPI */ |
||
537 | |||
538 | |||
/*
 * Shared interrupt handler for the switch.  With NAPI, poll-class
 * interrupts are masked and deferred to adm5120_if_poll() (they are acked
 * there); everything else is acked immediately.  Without NAPI, RX and TX
 * work is done directly in interrupt context.
 */
static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
{
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_ALL;
	if (!status)
		return IRQ_NONE;	/* not ours (IRQ line is shared) */

#ifdef CONFIG_ADM5120_SWITCH_NAPI
	sw_int_ack(status & ~SWITCH_INTS_POLL);

	if (status & SWITCH_INTS_POLL) {
		struct net_device *dev = dev_id;
		struct adm5120_if_priv *priv = netdev_priv(dev);

		sw_dump_intr_mask("poll ints", status);
		SW_DBG("enable polling mode for %s\n", dev->name);
		/* mask poll interrupts until the NAPI handler finishes */
		sw_int_mask(SWITCH_INTS_POLL);
		napi_schedule(&priv->napi);
	}
#else
	sw_int_ack(status);

	if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF))
		adm5120_switch_rx(RX_RING_SIZE);

	if (status & SWITCH_INT_SLD)
		adm5120_switch_tx();
#endif

	return IRQ_HANDLED;
}
||
572 | |||
573 | static void adm5120_set_bw(char *matrix) |
||
574 | { |
||
575 | unsigned long val; |
||
576 | |||
577 | /* Port 0 to 3 are set using the bandwidth control 0 register */ |
||
578 | val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24); |
||
579 | sw_write_reg(SWITCH_REG_BW_CNTL0, val); |
||
580 | |||
581 | /* Port 4 and 5 are set using the bandwidth control 1 register */ |
||
582 | val = matrix[4]; |
||
583 | if (matrix[5] == 1) |
||
584 | sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000); |
||
585 | else |
||
586 | sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x8000000); |
||
587 | |||
588 | SW_DBG("D: ctl0 0x%ux, ctl1 0x%ux\n", sw_read_reg(SWITCH_REG_BW_CNTL0), |
||
589 | sw_read_reg(SWITCH_REG_BW_CNTL1)); |
||
590 | } |
||
591 | |||
592 | static void adm5120_switch_tx_ring_reset(struct dma_desc *desc, |
||
593 | struct sk_buff **skbl, int num) |
||
594 | { |
||
595 | memset(desc, 0, num * sizeof(*desc)); |
||
596 | desc[num-1].buf1 |= DESC_EOR; |
||
597 | memset(skbl, 0, sizeof(struct skb *) * num); |
||
598 | |||
599 | cur_txl = 0; |
||
600 | dirty_txl = 0; |
||
601 | } |
||
602 | |||
/*
 * Reset the RX ring: zero the descriptors, then allocate and arm a fresh
 * skb for every slot.  On allocation failure the remaining slots are left
 * zeroed (not owned by the switch).  Rewinds the ring indices.
 */
static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
		struct sk_buff **skbl, int num)
{
	int i;

	memset(desc, 0, num * sizeof(*desc));
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
		if (!skbl[i]) {
			/* out of memory: abandon the rest of the ring
			 * (i = num is a dead store kept from the original) */
			i = num;
			break;
		}
		skb_reserve(skbl[i], SKB_RESERVE_LEN);
		/* last descriptor gets the End-of-Ring marker */
		adm5120_rx_dma_update(&desc[i], skbl[i], (num - 1 == i));
	}

	cur_rxl = 0;
	dirty_rxl = 0;
}
||
622 | |||
623 | static int adm5120_switch_tx_ring_alloc(void) |
||
624 | { |
||
625 | int err; |
||
626 | |||
627 | txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma, |
||
628 | GFP_ATOMIC); |
||
629 | if (!txl_descs) { |
||
630 | err = -ENOMEM; |
||
631 | goto err; |
||
632 | } |
||
633 | |||
634 | txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL); |
||
635 | if (!txl_skbuff) { |
||
636 | err = -ENOMEM; |
||
637 | goto err; |
||
638 | } |
||
639 | |||
640 | return 0; |
||
641 | |||
642 | err: |
||
643 | return err; |
||
644 | } |
||
645 | |||
646 | static void adm5120_switch_tx_ring_free(void) |
||
647 | { |
||
648 | int i; |
||
649 | |||
650 | if (txl_skbuff) { |
||
651 | for (i = 0; i < TX_RING_SIZE; i++) |
||
652 | if (txl_skbuff[i]) |
||
653 | kfree_skb(txl_skbuff[i]); |
||
654 | kfree(txl_skbuff); |
||
655 | } |
||
656 | |||
657 | if (txl_descs) |
||
658 | dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs, |
||
659 | txl_descs_dma); |
||
660 | } |
||
661 | |||
/*
 * Allocate the RX descriptor ring (DMA-coherent), the skb shadow array,
 * and one receive skb per ring slot.  Returns 0 or -ENOMEM; on failure
 * already-allocated pieces are left for the caller to release via
 * adm5120_switch_rx_ring_free().
 */
static int adm5120_switch_rx_ring_alloc(void)
{
	int err;
	int i;

	/* init RX ring */
	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
			GFP_ATOMIC);
	if (!rxl_descs) {
		err = -ENOMEM;
		goto err;
	}

	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
	if (!rxl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
		if (!skb) {
			err = -ENOMEM;
			goto err;
		}
		rxl_skbuff[i] = skb;
		/* headroom for IP alignment, matching the refill path */
		skb_reserve(skb, SKB_RESERVE_LEN);
	}

	return 0;

err:
	return err;
}
||
697 | |||
698 | static void adm5120_switch_rx_ring_free(void) |
||
699 | { |
||
700 | int i; |
||
701 | |||
702 | if (rxl_skbuff) { |
||
703 | for (i = 0; i < RX_RING_SIZE; i++) |
||
704 | if (rxl_skbuff[i]) |
||
705 | kfree_skb(rxl_skbuff[i]); |
||
706 | kfree(rxl_skbuff); |
||
707 | } |
||
708 | |||
709 | if (rxl_descs) |
||
710 | dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs, |
||
711 | rxl_descs_dma); |
||
712 | } |
||
713 | |||
/*
 * Write the device's MAC address into the switch's address table for the
 * interface's VLAN, then busy-wait until the hardware signals completion
 * (MAC_WT0_MWD).  WT1 carries MAC bytes 2-5, WT0 bytes 0-1 plus control.
 */
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
		(mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	/* NOTE(review): the magic '<<3' presumably corresponds to
	 * MAC_WT0_WVN_SHIFT as used in adm5120_switch_set_vlan_mac —
	 * confirm against the register definitions */
	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (priv->vlan_no<<3);

	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	/* spin until the write-done bit is set */
	while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
		;
}
||
732 | |||
/*
 * Load the 6-entry VLAN port-membership matrix into the two VLAN group
 * registers (entries 0-3 into G1, 4-5 into G2), then rebuild the
 * port -> net_device lookup table: each port is mapped to the device of
 * the first VLAN whose mask contains it, or NULL if no VLAN claims it.
 */
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_VLAN_G1, val);
	val = matrix[4] + (matrix[5]<<8);
	sw_write_reg(SWITCH_REG_VLAN_G2, val);

	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SWITCH_NUM_PORTS; port++) {
		/* find the first VLAN whose membership mask contains port */
		for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS && !(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
			;
		if (vlan_port < SWITCH_NUM_PORTS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}
||
753 | |||
/*
 * Install @mac as a static entry for @vlan in the switch's MAC address
 * table, then poll until the hardware reports the write finished
 * (MAC_WT0_MWD).  WT1 carries MAC bytes 2-5, WT0 bytes 0-1 plus the
 * VLAN number and the static-entry aging code.
 */
static void adm5120_switch_set_vlan_mac(unsigned int vlan, unsigned char *mac)
{
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT)
		| (mac[4] << MAC_WT1_MAC4_SHIFT)
		| (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (vlan << MAC_WT0_WVN_SHIFT) |
		(MAC_WT0_WAF_STATIC << MAC_WT0_WAF_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	/* busy-wait for the table write to complete */
	do {
		t = sw_read_reg(SWITCH_REG_MAC_WT0);
	} while ((t & MAC_WT0_MWD) == 0);
}
||
772 | |||
773 | static void adm5120_switch_set_vlan_ports(unsigned int vlan, u32 ports) |
||
774 | { |
||
775 | unsigned int reg; |
||
776 | u32 t; |
||
777 | |||
778 | if (vlan < 4) |
||
779 | reg = SWITCH_REG_VLAN_G1; |
||
780 | else { |
||
781 | vlan -= 4; |
||
782 | reg = SWITCH_REG_VLAN_G2; |
||
783 | } |
||
784 | |||
785 | t = sw_read_reg(reg); |
||
786 | t &= ~(0xFF << (vlan*8)); |
||
787 | t |= (ports << (vlan*8)); |
||
788 | sw_write_reg(reg, t); |
||
789 | } |
||
790 | |||
791 | /* ------------------------------------------------------------------------ */ |
||
792 | |||
#ifdef CONFIG_ADM5120_SWITCH_NAPI
/* Enable NAPI polling for this interface's context. */
static inline void adm5120_if_napi_enable(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);
	napi_enable(&priv->napi);
}

/* Disable NAPI polling; waits for any in-flight poll to finish. */
static inline void adm5120_if_napi_disable(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);
	napi_disable(&priv->napi);
}
#else
/* No-op stand-ins when the driver is built without NAPI support. */
static inline void adm5120_if_napi_enable(struct net_device *dev) {}
static inline void adm5120_if_napi_disable(struct net_device *dev) {}
#endif /* CONFIG_ADM5120_SWITCH_NAPI */
||
809 | |||
/*
 * net_device open callback: enable NAPI, grab the shared switch IRQ,
 * unmask switch interrupts on the first open (sw_used refcount), enable
 * this interface's ports in PORT_CONF0 (a set bit disables a port) and
 * start the TX queue.  Returns 0 or a negative errno from request_irq.
 */
static int adm5120_if_open(struct net_device *dev)
{
	u32 t;
	int err;
	int i;

	adm5120_if_napi_enable(dev);

	err = request_irq(dev->irq, adm5120_switch_irq, IRQF_SHARED,
		dev->name, dev);
	if (err) {
		SW_ERR("unable to get irq for %s\n", dev->name);
		goto err;
	}

	if (!sw_used++)
		/* enable interrupts on first open */
		sw_int_unmask(SWITCH_INTS_USED);

	/* enable (additional) port */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if (dev == adm5120_devs[i])
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	netif_start_queue(dev);

	return 0;

err:
	adm5120_if_napi_disable(dev);
	return err;
}
||
845 | |||
846 | static int adm5120_if_stop(struct net_device *dev) |
||
847 | { |
||
848 | u32 t; |
||
849 | int i; |
||
850 | |||
851 | netif_stop_queue(dev); |
||
852 | adm5120_if_napi_disable(dev); |
||
853 | |||
854 | /* disable port if not assigned to other devices */ |
||
855 | t = sw_read_reg(SWITCH_REG_PORT_CONF0); |
||
856 | t |= SWITCH_PORTS_NOCPU; |
||
857 | for (i = 0; i < SWITCH_NUM_PORTS; i++) { |
||
858 | if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i])) |
||
859 | t &= ~adm5120_eth_vlans[i]; |
||
860 | } |
||
861 | sw_write_reg(SWITCH_REG_PORT_CONF0, t); |
||
862 | |||
863 | if (!--sw_used) |
||
864 | sw_int_mask(SWITCH_INTS_USED); |
||
865 | |||
866 | free_irq(dev->irq, dev); |
||
867 | |||
868 | return 0; |
||
869 | } |
||
870 | |||
871 | static int adm5120_if_hard_start_xmit(struct sk_buff *skb, |
||
872 | struct net_device *dev) |
||
873 | { |
||
874 | struct dma_desc *desc; |
||
875 | struct adm5120_if_priv *priv = netdev_priv(dev); |
||
876 | unsigned int entry; |
||
877 | unsigned long data; |
||
878 | int i; |
||
879 | |||
880 | /* lock switch irq */ |
||
881 | spin_lock_irq(&tx_lock); |
||
882 | |||
883 | /* calculate the next TX descriptor entry. */ |
||
884 | entry = cur_txl % TX_RING_SIZE; |
||
885 | |||
886 | desc = &txl_descs[entry]; |
||
887 | if (desc->buf1 & DESC_OWN) { |
||
888 | /* We want to write a packet but the TX queue is still |
||
889 | * occupied by the DMA. We are faster than the DMA... */ |
||
890 | SW_DBG("%s unable to transmit, packet dopped\n", dev->name); |
||
891 | dev_kfree_skb(skb); |
||
892 | dev->stats.tx_dropped++; |
||
893 | return 0; |
||
894 | } |
||
895 | |||
896 | txl_skbuff[entry] = skb; |
||
897 | data = (desc->buf1 & DESC_EOR); |
||
898 | data |= DESC_ADDR(skb->data); |
||
899 | |||
900 | desc->misc = |
||
901 | ((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) | |
||
902 | (0x1 << priv->vlan_no); |
||
903 | |||
904 | desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; |
||
905 | |||
906 | desc->buf1 = data | DESC_OWN; |
||
907 | sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL); |
||
908 | |||
909 | cur_txl++; |
||
910 | if (cur_txl == dirty_txl + TX_QUEUE_LEN) { |
||
911 | for (i = 0; i < SWITCH_NUM_PORTS; i++) { |
||
912 | if (!adm5120_devs[i]) |
||
913 | continue; |
||
914 | netif_stop_queue(adm5120_devs[i]); |
||
915 | } |
||
916 | } |
||
917 | |||
918 | dev->trans_start = jiffies; |
||
919 | |||
920 | spin_unlock_irq(&tx_lock); |
||
921 | |||
922 | return 0; |
||
923 | } |
||
924 | |||
/* TX watchdog callback: just log the timeout; no recovery is attempted. */
static void adm5120_if_tx_timeout(struct net_device *dev)
{
	SW_INFO("TX timeout on %s\n", dev->name);
}
||
929 | |||
930 | static void adm5120_if_set_rx_mode(struct net_device *dev) |
||
931 | { |
||
932 | struct adm5120_if_priv *priv = netdev_priv(dev); |
||
933 | u32 ports; |
||
934 | u32 t; |
||
935 | |||
936 | ports = adm5120_eth_vlans[priv->vlan_no] & SWITCH_PORTS_NOCPU; |
||
937 | |||
938 | t = sw_read_reg(SWITCH_REG_CPUP_CONF); |
||
939 | if (dev->flags & IFF_PROMISC) |
||
940 | /* enable unknown packets */ |
||
941 | t &= ~(ports << CPUP_CONF_DUNP_SHIFT); |
||
942 | else |
||
943 | /* disable unknown packets */ |
||
944 | t |= (ports << CPUP_CONF_DUNP_SHIFT); |
||
945 | |||
946 | if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI || |
||
947 | netdev_mc_count(dev)) |
||
948 | /* enable multicast packets */ |
||
949 | t &= ~(ports << CPUP_CONF_DMCP_SHIFT); |
||
950 | else |
||
951 | /* disable multicast packets */ |
||
952 | t |= (ports << CPUP_CONF_DMCP_SHIFT); |
||
953 | |||
954 | /* If there is any port configured to be in promiscuous mode, then the */ |
||
955 | /* Bridge Test Mode has to be activated. This will result in */ |
||
956 | /* transporting also packets learned in another VLAN to be forwarded */ |
||
957 | /* to the CPU. */ |
||
958 | /* The difficult scenario is when we want to build a bridge on the CPU.*/ |
||
959 | /* Assume we have port0 and the CPU port in VLAN0 and port1 and the */ |
||
960 | /* CPU port in VLAN1. Now we build a bridge on the CPU between */ |
||
961 | /* VLAN0 and VLAN1. Both ports of the VLANs are set in promisc mode. */ |
||
962 | /* Now assume a packet with ethernet source address 99 enters port 0 */ |
||
963 | /* It will be forwarded to the CPU because it is unknown. Then the */ |
||
964 | /* bridge in the CPU will send it to VLAN1 and it goes out at port 1. */ |
||
965 | /* When now a packet with ethernet destination address 99 comes in at */ |
||
966 | /* port 1 in VLAN1, then the switch has learned that this address is */ |
||
967 | /* located at port 0 in VLAN0. Therefore the switch will drop */ |
||
968 | /* this packet. In order to avoid this and to send the packet still */ |
||
969 | /* to the CPU, the Bridge Test Mode has to be activated. */ |
||
970 | |||
971 | /* Check if there is any vlan in promisc mode. */ |
||
972 | if (~t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT)) |
||
973 | t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */ |
||
974 | else |
||
975 | t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */ |
||
976 | |||
977 | sw_write_reg(SWITCH_REG_CPUP_CONF, t); |
||
978 | |||
979 | } |
||
980 | |||
/* ndo_set_mac_address hook: validate and store the new address via
 * eth_mac_addr(), then push it into the switch hardware. */
static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
{
	int err = eth_mac_addr(dev, p);

	if (err)
		return err;

	adm5120_write_mac(dev);
	return 0;
}
||
992 | |||
993 | static int adm5120_if_do_ioctl(struct net_device *dev, struct ifreq *rq, |
||
994 | int cmd) |
||
995 | { |
||
996 | int err; |
||
997 | struct adm5120_sw_info info; |
||
998 | struct adm5120_if_priv *priv = netdev_priv(dev); |
||
999 | |||
1000 | switch (cmd) { |
||
1001 | case SIOCGADMINFO: |
||
1002 | info.magic = 0x5120; |
||
1003 | info.ports = adm5120_nrdevs; |
||
1004 | info.vlan = priv->vlan_no; |
||
1005 | err = copy_to_user(rq->ifr_data, &info, sizeof(info)); |
||
1006 | if (err) |
||
1007 | return -EFAULT; |
||
1008 | break; |
||
1009 | case SIOCSMATRIX: |
||
1010 | if (!capable(CAP_NET_ADMIN)) |
||
1011 | return -EPERM; |
||
1012 | err = copy_from_user(adm5120_eth_vlans, rq->ifr_data, |
||
1013 | sizeof(adm5120_eth_vlans)); |
||
1014 | if (err) |
||
1015 | return -EFAULT; |
||
1016 | adm5120_set_vlan(adm5120_eth_vlans); |
||
1017 | break; |
||
1018 | case SIOCGMATRIX: |
||
1019 | err = copy_to_user(rq->ifr_data, adm5120_eth_vlans, |
||
1020 | sizeof(adm5120_eth_vlans)); |
||
1021 | if (err) |
||
1022 | return -EFAULT; |
||
1023 | break; |
||
1024 | default: |
||
1025 | return -EOPNOTSUPP; |
||
1026 | } |
||
1027 | return 0; |
||
1028 | } |
||
1029 | |||
1030 | static const struct net_device_ops adm5120sw_netdev_ops = { |
||
1031 | .ndo_open = adm5120_if_open, |
||
1032 | .ndo_stop = adm5120_if_stop, |
||
1033 | .ndo_start_xmit = adm5120_if_hard_start_xmit, |
||
1034 | .ndo_set_rx_mode = adm5120_if_set_rx_mode, |
||
1035 | .ndo_do_ioctl = adm5120_if_do_ioctl, |
||
1036 | .ndo_tx_timeout = adm5120_if_tx_timeout, |
||
1037 | .ndo_validate_addr = eth_validate_addr, |
||
1038 | .ndo_change_mtu = eth_change_mtu, |
||
1039 | .ndo_set_mac_address = adm5120_if_set_mac_address, |
||
1040 | }; |
||
1041 | |||
1042 | static struct net_device *adm5120_if_alloc(void) |
||
1043 | { |
||
1044 | struct net_device *dev; |
||
1045 | struct adm5120_if_priv *priv; |
||
1046 | |||
1047 | dev = alloc_etherdev(sizeof(*priv)); |
||
1048 | if (!dev) |
||
1049 | return NULL; |
||
1050 | |||
1051 | priv = netdev_priv(dev); |
||
1052 | priv->dev = dev; |
||
1053 | |||
1054 | dev->irq = ADM5120_IRQ_SWITCH; |
||
1055 | dev->netdev_ops = &adm5120sw_netdev_ops; |
||
1056 | dev->watchdog_timeo = TX_TIMEOUT; |
||
1057 | |||
1058 | #ifdef CONFIG_ADM5120_SWITCH_NAPI |
||
1059 | netif_napi_add(dev, &priv->napi, adm5120_if_poll, 64); |
||
1060 | #endif |
||
1061 | |||
1062 | return dev; |
||
1063 | } |
||
1064 | |||
1065 | /* ------------------------------------------------------------------------ */ |
||
1066 | |||
1067 | static void adm5120_switch_cleanup(void) |
||
1068 | { |
||
1069 | int i; |
||
1070 | |||
1071 | /* disable interrupts */ |
||
1072 | sw_int_mask(SWITCH_INTS_ALL); |
||
1073 | |||
1074 | for (i = 0; i < SWITCH_NUM_PORTS; i++) { |
||
1075 | struct net_device *dev = adm5120_devs[i]; |
||
1076 | if (dev) { |
||
1077 | unregister_netdev(dev); |
||
1078 | free_netdev(dev); |
||
1079 | } |
||
1080 | } |
||
1081 | |||
1082 | adm5120_switch_tx_ring_free(); |
||
1083 | adm5120_switch_rx_ring_free(); |
||
1084 | } |
||
1085 | |||
1086 | static int adm5120_switch_probe(struct platform_device *pdev) |
||
1087 | { |
||
1088 | u32 t; |
||
1089 | int i, err; |
||
1090 | |||
1091 | adm5120_nrdevs = adm5120_eth_num_ports; |
||
1092 | |||
1093 | t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP | |
||
1094 | SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT | |
||
1095 | SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT ; |
||
1096 | sw_write_reg(SWITCH_REG_CPUP_CONF, t); |
||
1097 | |||
1098 | t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) | |
||
1099 | (SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) | |
||
1100 | (SWITCH_PORTS_NOCPU); |
||
1101 | sw_write_reg(SWITCH_REG_PORT_CONF0, t); |
||
1102 | |||
1103 | /* setup ports to Autoneg/100M/Full duplex/Auto MDIX */ |
||
1104 | t = SWITCH_PORTS_PHY | |
||
1105 | (SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) | |
||
1106 | (SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) | |
||
1107 | (SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) | |
||
1108 | (SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) | |
||
1109 | PHY_CNTL2_RMAE; |
||
1110 | sw_write_reg(SWITCH_REG_PHY_CNTL2, t); |
||
1111 | |||
1112 | t = sw_read_reg(SWITCH_REG_PHY_CNTL3); |
||
1113 | t |= PHY_CNTL3_RNT; |
||
1114 | sw_write_reg(SWITCH_REG_PHY_CNTL3, t); |
||
1115 | |||
1116 | /* Force all the packets from all ports are low priority */ |
||
1117 | sw_write_reg(SWITCH_REG_PRI_CNTL, 0); |
||
1118 | |||
1119 | sw_int_mask(SWITCH_INTS_ALL); |
||
1120 | sw_int_ack(SWITCH_INTS_ALL); |
||
1121 | |||
1122 | err = adm5120_switch_rx_ring_alloc(); |
||
1123 | if (err) |
||
1124 | goto err; |
||
1125 | |||
1126 | err = adm5120_switch_tx_ring_alloc(); |
||
1127 | if (err) |
||
1128 | goto err; |
||
1129 | |||
1130 | adm5120_switch_tx_ring_reset(txl_descs, txl_skbuff, TX_RING_SIZE); |
||
1131 | adm5120_switch_rx_ring_reset(rxl_descs, rxl_skbuff, RX_RING_SIZE); |
||
1132 | |||
1133 | sw_write_reg(SWITCH_REG_SHDA, 0); |
||
1134 | sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs)); |
||
1135 | sw_write_reg(SWITCH_REG_RHDA, 0); |
||
1136 | sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs)); |
||
1137 | |||
1138 | for (i = 0; i < SWITCH_NUM_PORTS; i++) { |
||
1139 | struct net_device *dev; |
||
1140 | struct adm5120_if_priv *priv; |
||
1141 | |||
1142 | dev = adm5120_if_alloc(); |
||
1143 | if (!dev) { |
||
1144 | err = -ENOMEM; |
||
1145 | goto err; |
||
1146 | } |
||
1147 | |||
1148 | adm5120_devs[i] = dev; |
||
1149 | priv = netdev_priv(dev); |
||
1150 | |||
1151 | priv->vlan_no = i; |
||
1152 | priv->port_mask = adm5120_eth_vlans[i]; |
||
1153 | |||
1154 | memcpy(dev->dev_addr, adm5120_eth_macs[i], 6); |
||
1155 | adm5120_write_mac(dev); |
||
1156 | |||
1157 | err = register_netdev(dev); |
||
1158 | if (err) { |
||
1159 | SW_INFO("%s register failed, error=%d\n", |
||
1160 | dev->name, err); |
||
1161 | goto err; |
||
1162 | } |
||
1163 | } |
||
1164 | |||
1165 | /* setup vlan/port mapping after devs are filled up */ |
||
1166 | adm5120_set_vlan(adm5120_eth_vlans); |
||
1167 | |||
1168 | /* enable CPU port */ |
||
1169 | t = sw_read_reg(SWITCH_REG_CPUP_CONF); |
||
1170 | t &= ~CPUP_CONF_DCPUP; |
||
1171 | sw_write_reg(SWITCH_REG_CPUP_CONF, t); |
||
1172 | |||
1173 | return 0; |
||
1174 | |||
1175 | err: |
||
1176 | adm5120_switch_cleanup(); |
||
1177 | |||
1178 | SW_ERR("init failed\n"); |
||
1179 | return err; |
||
1180 | } |
||
1181 | |||
/* platform driver ->remove hook: undo everything done in probe */
static int adm5120_switch_remove(struct platform_device *pdev)
{
	adm5120_switch_cleanup();
	return 0;
}
||
1187 | |||
/* platform driver glue; bound by name to the "adm5120-switch"
 * platform device (presumably registered by board setup code —
 * not visible in this file) */
static struct platform_driver adm5120_switch_driver = {
	.probe = adm5120_switch_probe,
	.remove = adm5120_switch_remove,
	.driver = {
		.name = DRV_NAME,
	},
};
||
1195 | |||
1196 | /* -------------------------------------------------------------------------- */ |
||
1197 | |||
1198 | static int __init adm5120_switch_mod_init(void) |
||
1199 | { |
||
1200 | int err; |
||
1201 | |||
1202 | pr_info(DRV_DESC " version " DRV_VERSION "\n"); |
||
1203 | err = platform_driver_register(&adm5120_switch_driver); |
||
1204 | |||
1205 | return err; |
||
1206 | } |
||
1207 | |||
/* Module exit: unregister the platform driver, which triggers the
 * ->remove hook for the bound device. */
static void __exit adm5120_switch_mod_exit(void)
{
	platform_driver_unregister(&adm5120_switch_driver);
}
||
1212 | |||
/* module entry/exit hookup and module metadata */
module_init(adm5120_switch_mod_init);
module_exit(adm5120_switch_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);