/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2018 John Crispin <john@phrozen.org>
 */

#include "mtk_offload.h"

/* FOE entry states */
#define INVALID 0
#define UNBIND 1
#define BIND 2
#define FIN 3

/* FOE entry packet types */
#define IPV4_HNAPT 0
#define IPV4_HNAT 1

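/*
 * Lifecycle of an FOE entry, as used below: mtk_flow_offload() writes
 * entries directly in the BIND state, mtk_check_entry_available()
 * treats anything that is not BIND as a free slot, and mtk_ppe_stop()
 * forces every entry back to the invalid state on teardown.
 */
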
static u32
mtk_flow_hash_v4(struct flow_offload_tuple *tuple)
{
	u32 ports = ntohs(tuple->src_port) << 16 | ntohs(tuple->dst_port);
	u32 src = ntohl(tuple->dst_v4.s_addr);
	u32 dst = ntohl(tuple->src_v4.s_addr);
	u32 hash = (ports & src) | ((~ports) & dst);
	u32 hash_23_0 = hash & 0xffffff;
	u32 hash_31_24 = hash & 0xff000000;

	hash = ports ^ src ^ dst ^ ((hash_23_0 << 8) | (hash_31_24 >> 24));
	hash = ((hash & 0xffff0000) >> 16) ^ (hash & 0xffff);
	hash &= 0x7ff;
	hash *= 2;

	return hash;
}

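/*
 * The fold above reduces the 5-tuple to an 11-bit bucket index and then
 * doubles it, so each bucket owns a pair of consecutive slots in the
 * 4K-entry table (2048 buckets x 2 slots). Illustrative values: for
 * 192.168.1.100:5000 -> 8.8.8.8:53, ports = (5000 << 16) | 53
 * = 0x13880035. The final "hash *= 2" always yields an even first
 * slot; mtk_flow_offload() may fall back to hash + 1 on a collision.
 */
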
static int
mtk_foe_prepare_v4(struct mtk_foe_entry *entry,
		   struct flow_offload_tuple *tuple,
		   struct flow_offload_tuple *dest_tuple,
		   struct flow_offload_hw_path *src,
		   struct flow_offload_hw_path *dest)
{
	int is_mcast = !!is_multicast_ether_addr(dest->eth_dest);

	if (tuple->l4proto == IPPROTO_UDP)
		entry->ipv4_hnapt.bfib1.udp = 1;

	entry->ipv4_hnapt.etype = htons(ETH_P_IP);
	entry->ipv4_hnapt.bfib1.pkt_type = IPV4_HNAPT;
	entry->ipv4_hnapt.iblk2.fqos = 0;
	entry->ipv4_hnapt.bfib1.ttl = 1;
	entry->ipv4_hnapt.bfib1.cah = 1;
	entry->ipv4_hnapt.bfib1.ka = 1;
	entry->ipv4_hnapt.iblk2.mcast = is_mcast;
	entry->ipv4_hnapt.iblk2.dscp = 0;
	entry->ipv4_hnapt.iblk2.port_mg = 0x3f;
	entry->ipv4_hnapt.iblk2.port_ag = 0x1f;
#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	entry->ipv4_hnapt.iblk2.qid = 1;
	entry->ipv4_hnapt.iblk2.fqos = 1;
#endif
#ifdef CONFIG_RALINK
	entry->ipv4_hnapt.iblk2.dp = 1;
	if ((dest->flags & FLOW_OFFLOAD_PATH_VLAN) && (dest->vlan_id > 1))
		entry->ipv4_hnapt.iblk2.qid += 8;
#else
	/* derive the destination port from the netdev name, e.g. eth0 -> 1 */
	entry->ipv4_hnapt.iblk2.dp = (dest->dev->name[3] - '0') + 1;
#endif

	entry->ipv4_hnapt.sip = ntohl(tuple->src_v4.s_addr);
	entry->ipv4_hnapt.dip = ntohl(tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.sport = ntohs(tuple->src_port);
	entry->ipv4_hnapt.dport = ntohs(tuple->dst_port);

	/* the NAT-translated addresses come from the reverse tuple */
	entry->ipv4_hnapt.new_sip = ntohl(dest_tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.new_dip = ntohl(dest_tuple->src_v4.s_addr);
	entry->ipv4_hnapt.new_sport = ntohs(dest_tuple->dst_port);
	entry->ipv4_hnapt.new_dport = ntohs(dest_tuple->src_port);

	entry->bfib1.state = BIND;

	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		entry->bfib1.psn = 1;
		entry->ipv4_hnapt.etype = htons(ETH_P_PPP_SES);
		entry->ipv4_hnapt.pppoe_id = dest->pppoe_sid;
	}

	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) {
		entry->ipv4_hnapt.vlan1 = dest->vlan_id;
		entry->bfib1.vlan_layer = 1;

		switch (dest->vlan_proto) {
		case htons(ETH_P_8021Q):
			entry->ipv4_hnapt.bfib1.vpm = 1;
			break;
		case htons(ETH_P_8021AD):
			entry->ipv4_hnapt.bfib1.vpm = 2;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

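/*
 * In short: the entry matches the pre-NAT tuple (sip/dip/sport/dport)
 * and rewrites to the post-NAT addresses (new_*), which are taken from
 * the reverse-direction tuple. The same helper builds both the
 * original and the reply entry, with the tuple arguments swapped.
 */
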
static void
mtk_foe_set_mac(struct mtk_foe_entry *entry, u8 *smac, u8 *dmac)
{
	/* byte-swap the raw loads; on the little-endian SoCs this driver
	 * targets, this packs the MAC most-significant-byte first
	 */
	entry->ipv4_hnapt.dmac_hi = swab32(*((u32 *)dmac));
	entry->ipv4_hnapt.dmac_lo = swab16(*((u16 *)&dmac[4]));
	entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)smac));
	entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&smac[4]));
}

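/*
 * Illustrative only: for dmac = 00:11:22:33:44:55 on a little-endian
 * CPU, *(u32 *)dmac loads 0x33221100, so dmac_hi becomes
 * swab32(0x33221100) = 0x00112233 and dmac_lo = swab16(0x5544)
 * = 0x4455, i.e. the bytes end up in address order within each field.
 */
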
static int
mtk_check_entry_available(struct mtk_eth *eth, u32 hash)
{
	struct mtk_foe_entry entry = ((struct mtk_foe_entry *)eth->foe_table)[hash];

	return entry.bfib1.state != BIND;
}

static void
mtk_foe_write(struct mtk_eth *eth, u32 hash,
	      struct mtk_foe_entry *entry)
{
	struct mtk_foe_entry *table = (struct mtk_foe_entry *)eth->foe_table;

	memcpy(&table[hash], entry, sizeof(*entry));
}

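/*
 * eth->foe_table is allocated with dmam_alloc_coherent() in
 * mtk_init_foe_table() below, so a plain memcpy is enough for the PPE
 * to observe the new entry; no explicit cache maintenance is done here.
 */
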
int mtk_flow_offload(struct mtk_eth *eth,
		     enum flow_offload_type type,
		     struct flow_offload *flow,
		     struct flow_offload_hw_path *src,
		     struct flow_offload_hw_path *dest)
{
	struct flow_offload_tuple *otuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
	struct flow_offload_tuple *rtuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
	u32 time_stamp = mtk_r32(eth, 0x0010) & 0x7fff;
	u32 ohash, rhash;
	struct mtk_foe_entry orig = {
		.bfib1.time_stamp = time_stamp,
		.bfib1.psn = 0,
	};
	struct mtk_foe_entry reply = {
		.bfib1.time_stamp = time_stamp,
		.bfib1.psn = 0,
	};

	if (otuple->l4proto != IPPROTO_TCP && otuple->l4proto != IPPROTO_UDP)
		return -EINVAL;

	if (type == FLOW_OFFLOAD_DEL) {
		/* note: the FOE entries are not unbound here; they are
		 * left to the hardware aging set up in mtk_ppe_start()
		 */
		flow = NULL;
		synchronize_rcu();
		return 0;
	}

	switch (otuple->l3proto) {
	case AF_INET:
		if (mtk_foe_prepare_v4(&orig, otuple, rtuple, src, dest) ||
		    mtk_foe_prepare_v4(&reply, rtuple, otuple, dest, src))
			return -EINVAL;

		ohash = mtk_flow_hash_v4(otuple);
		rhash = mtk_flow_hash_v4(rtuple);
		break;

	case AF_INET6:
		return -EINVAL;

	default:
		return -EINVAL;
	}

	/* two-way hash: on a collision, fall back to the next slot */
	if (!mtk_check_entry_available(eth, ohash)) {
		if (!mtk_check_entry_available(eth, ohash + 1))
			return -EINVAL;
		ohash += 1;
	}
	if (!mtk_check_entry_available(eth, rhash)) {
		if (!mtk_check_entry_available(eth, rhash + 1))
			return -EINVAL;
		rhash += 1;
	}

	mtk_foe_set_mac(&orig, dest->eth_src, dest->eth_dest);
	mtk_foe_set_mac(&reply, src->eth_src, src->eth_dest);
	mtk_foe_write(eth, ohash, &orig);
	mtk_foe_write(eth, rhash, &reply);
	rcu_assign_pointer(eth->foe_flow_table[ohash], flow);
	rcu_assign_pointer(eth->foe_flow_table[rhash], flow);

	return 0;
}

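/*
 * Both the original and the reply hash slot are pointed at the same
 * struct flow_offload, so a hardware keep-alive on either direction
 * (see mtk_offload_keepalive() below) refreshes the shared timeout.
 */
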
#ifdef CONFIG_NET_MEDIATEK_HW_QOS

#define QDMA_TX_SCH_TX 0x1a14

static void mtk_ppe_scheduler(struct mtk_eth *eth, int id, u32 rate)
{
	int exp = 0, shift = 0;
	u32 reg = mtk_r32(eth, QDMA_TX_SCH_TX);
	u32 val = 0;

	if (rate)
		val = BIT(11);

	/* encode the rate as a 7-bit mantissa and a base-10 exponent */
	while (rate > 127) {
		rate /= 10;
		exp++;
	}

	val |= (rate & 0x7f) << 4;
	val |= exp & 0xf;
	if (id)
		shift = 16;
	reg &= ~(0xffff << shift);
	reg |= val << shift;
	mtk_w32(eth, reg, QDMA_TX_SCH_TX);
}

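/*
 * Worked example: mtk_ppe_scheduler(eth, 0, 500000) encodes the rate
 * (in kbit/s, assuming the QDMA unit) as mantissa 50 and base-10
 * exponent 4 (50 * 10^4 = 500000), so val = BIT(11) | (50 << 4) | 4
 * = 0xb24, written to the low half of QDMA_TX_SCH_TX for scheduler 0.
 */
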
#define QTX_CFG(x) (0x1800 + ((x) * 0x10))
#define QTX_SCH(x) (0x1804 + ((x) * 0x10))

static void mtk_ppe_queue(struct mtk_eth *eth, int id, int sched, int weight,
			  int resv, u32 min_rate, u32 max_rate)
{
	int max_exp = 0, min_exp = 0;
	u32 reg;

	if (id >= 16)
		return;

	reg = mtk_r32(eth, QTX_SCH(id));
	reg &= 0x70000000;

	if (sched)
		reg |= BIT(31);

	if (min_rate)
		reg |= BIT(27);

	if (max_rate)
		reg |= BIT(11);

	/* same mantissa/exponent encoding as mtk_ppe_scheduler() */
	while (max_rate > 127) {
		max_rate /= 10;
		max_exp++;
	}

	while (min_rate > 127) {
		min_rate /= 10;
		min_exp++;
	}

	reg |= (min_rate & 0x7f) << 20;
	reg |= (min_exp & 0xf) << 16;
	reg |= (weight & 0xf) << 12;
	reg |= (max_rate & 0x7f) << 4;
	reg |= max_exp & 0xf;
	mtk_w32(eth, reg, QTX_SCH(id));

	/* program the reserved buffer counts (low 16 bits of QTX_CFG) */
	resv &= 0xff;
	reg = mtk_r32(eth, QTX_CFG(id));
	reg &= 0xffff0000;
	reg |= (resv << 8) | resv;
	mtk_w32(eth, reg, QTX_CFG(id));
}
#endif

static int mtk_init_foe_table(struct mtk_eth *eth)
{
	if (eth->foe_table)
		return 0;

	eth->foe_flow_table = devm_kcalloc(eth->dev, MTK_PPE_ENTRY_CNT,
					   sizeof(*eth->foe_flow_table),
					   GFP_KERNEL);
	if (!eth->foe_flow_table)
		return -ENOMEM;

	/* map the FOE table */
	eth->foe_table = dmam_alloc_coherent(eth->dev, MTK_PPE_TBL_SZ,
					     &eth->foe_table_phys, GFP_KERNEL);
	if (!eth->foe_table) {
		dev_err(eth->dev, "failed to allocate foe table\n");
		devm_kfree(eth->dev, eth->foe_flow_table);
		return -ENOMEM;
	}

	return 0;
}

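/*
 * Two parallel tables share one index space: the DMA-coherent FOE
 * table walked by the PPE hardware, and foe_flow_table[], an array of
 * RCU-protected back-pointers to struct flow_offload that lets the
 * keep-alive path map a hardware hash back to its software flow.
 */
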
static int mtk_ppe_start(struct mtk_eth *eth)
{
	int ret;

	ret = mtk_init_foe_table(eth);
	if (ret)
		return ret;

	/* tell the PPE about the table's base address */
	mtk_w32(eth, eth->foe_table_phys, MTK_REG_PPE_TB_BASE);

	/* flush the table */
	memset(eth->foe_table, 0, MTK_PPE_TBL_SZ);

	/* setup hashing */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_HASH_MODE_MASK | MTK_PPE_TB_CFG_TBL_SZ_MASK,
		MTK_PPE_TB_CFG_HASH_MODE1 | MTK_PPE_TB_CFG_TBL_SZ_4K,
		MTK_REG_PPE_TB_CFG);

	/* set the default hashing seed */
	mtk_w32(eth, MTK_PPE_HASH_SEED, MTK_REG_PPE_HASH_SEED);

	/* each FOE entry is 64 bytes and is set up by CPU forwarding */
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_TB_CFG_ENTRY_SZ_MASK |
		MTK_PPE_TB_CFG_SMA_MASK,
		MTK_PPE_TB_CFG_ENTRY_SZ_64B | MTK_PPE_TB_CFG_SMA_FWD_CPU,
		MTK_REG_PPE_TB_CFG);

	/* check all IP protocols */
	mtk_w32(eth, 0xFFFFFFFF, MTK_REG_PPE_IP_PROT_CHK);

	/* setup caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE, MTK_PPE_CAH_CTRL_EN,
		MTK_REG_PPE_CAH_CTRL);

	/* enable FOE */
	mtk_m32(eth, 0, MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_IPV4_GREK_EN,
		MTK_REG_PPE_FLOW_CFG);

	/* setup flow entry un/bind aging */
	mtk_m32(eth, 0,
		MTK_PPE_TB_CFG_UNBD_AGE | MTK_PPE_TB_CFG_NTU_AGE |
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE,
		MTK_REG_PPE_TB_CFG);

	mtk_m32(eth, MTK_PPE_UNB_AGE_MNP_MASK | MTK_PPE_UNB_AGE_DLTA_MASK,
		MTK_PPE_UNB_AGE_MNP | MTK_PPE_UNB_AGE_DLTA,
		MTK_REG_PPE_UNB_AGE);
	mtk_m32(eth, MTK_PPE_BND_AGE0_NTU_DLTA_MASK |
		MTK_PPE_BND_AGE0_UDP_DLTA_MASK,
		MTK_PPE_BND_AGE0_NTU_DLTA | MTK_PPE_BND_AGE0_UDP_DLTA,
		MTK_REG_PPE_BND_AGE0);
	mtk_m32(eth, MTK_PPE_BND_AGE1_FIN_DLTA_MASK |
		MTK_PPE_BND_AGE1_TCP_DLTA_MASK,
		MTK_PPE_BND_AGE1_FIN_DLTA | MTK_PPE_BND_AGE1_TCP_DLTA,
		MTK_REG_PPE_BND_AGE1);

	/* setup flow entry keep alive */
	mtk_m32(eth, MTK_PPE_TB_CFG_KA_MASK, MTK_PPE_TB_CFG_KA,
		MTK_REG_PPE_TB_CFG);
	mtk_w32(eth, MTK_PPE_KA_UDP | MTK_PPE_KA_TCP | MTK_PPE_KA_T, MTK_REG_PPE_KA);

	/* setup flow entry rate limit */
	mtk_w32(eth, (0x3fff << 16) | 0x3fff, MTK_REG_PPE_BIND_LMT_0);
	mtk_w32(eth, MTK_PPE_NTU_KA | 0x3fff, MTK_REG_PPE_BIND_LMT_1);
	mtk_m32(eth, MTK_PPE_BNDR_RATE_MASK, 1, MTK_REG_PPE_BNDR);

	/* enable the PPE */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_EN, MTK_REG_PPE_GLO_CFG);

#ifdef CONFIG_RALINK
	/* set the default forwarding port to QDMA */
	mtk_w32(eth, 0x0, MTK_REG_PPE_DFT_CPORT);
#else
	/* set the default forwarding port to QDMA */
	mtk_w32(eth, 0x55555555, MTK_REG_PPE_DFT_CPORT);
#endif

	/* drop packets with TTL=0 */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_TTL0_DROP, MTK_REG_PPE_GLO_CFG);

	/* send all traffic from the GMACs to the PPE */
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(1));

	dev_info(eth->dev, "PPE started\n");

#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	mtk_ppe_scheduler(eth, 0, 500000);
	mtk_ppe_scheduler(eth, 1, 500000);
	mtk_ppe_queue(eth, 0, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 1, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 8, 1, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 9, 1, 7, 32, 250000, 0);
#endif

	return 0;
}

static int mtk_ppe_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;
	u32 r = 0;

	/* poll the busy flag for up to one second */
	while (1) {
		r = mtk_r32(eth, MTK_REG_PPE_GLO_CFG);
		if (!(r & MTK_PPE_GLO_CFG_BUSY))
			return 0;
		if (time_after(jiffies, t_start + HZ))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "ppe: table busy timeout - resetting\n");
	reset_control_reset(eth->rst_ppe);

	return -ETIMEDOUT;
}

static int mtk_ppe_stop(struct mtk_eth *eth)
{
	u32 r1 = 0, r2 = 0;
	int i;

	/* discard all traffic while we disable the PPE */
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(1));

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;

	/* invalidate all flow table entries */
	for (i = 0; i < MTK_PPE_ENTRY_CNT; i++)
		eth->foe_table[i].bfib1.state = FOE_STATE_INVALID;

	/* disable caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_CAH_CTRL_EN, 0,
		MTK_REG_PPE_CAH_CTRL);

	/* the cache flush has to happen before HNAT is disabled */
	mtk_m32(eth, MTK_PPE_GLO_CFG_EN, 0, MTK_REG_PPE_GLO_CFG);

	/* disable FOE */
	mtk_m32(eth,
		MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_FUC_FOE | MTK_PPE_FLOW_CFG_FMC_FOE,
		0, MTK_REG_PPE_FLOW_CFG);

	/* disable FOE aging */
	mtk_m32(eth, MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE | MTK_PPE_TB_CFG_UNBD_AGE |
		MTK_PPE_TB_CFG_NTU_AGE, 0, MTK_REG_PPE_TB_CFG);

	r1 = mtk_r32(eth, 0x100);
	r2 = mtk_r32(eth, 0x10c);

	dev_info(eth->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);

	if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
	    ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
		dev_info(eth->dev, "reset pse\n");
		mtk_w32(eth, 0x1, 0x4);
	}

	/* set the FOE entry base address to 0 */
	mtk_w32(eth, 0, MTK_REG_PPE_TB_BASE);

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;

	/* send all traffic back to the DMA engine */
#ifdef CONFIG_RALINK
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(1));
#else
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(1));
#endif
	return 0;
}

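/*
 * Teardown order matters here: traffic is first diverted away from the
 * PPE (the 0x7777 GDMA setting discards it), the engine is drained via
 * mtk_ppe_busy_wait(), entries are invalidated, and only then are the
 * cache, the FOE features and the aging timers turned off before
 * traffic is handed back to the DMA engine.
 */
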
static void mtk_offload_keepalive(struct fe_priv *eth, unsigned int hash)
{
	struct flow_offload *flow;

	rcu_read_lock();
	flow = rcu_dereference(eth->foe_flow_table[hash]);
	if (flow)
		flow->timeout = jiffies + 30 * HZ;
	rcu_read_unlock();
}

int mtk_offload_check_rx(struct fe_priv *eth, struct sk_buff *skb, u32 rxd4)
{
	unsigned int hash;

	switch (FIELD_GET(MTK_RXD4_CPU_REASON, rxd4)) {
	case MTK_CPU_REASON_KEEPALIVE_UC_OLD_HDR:
	case MTK_CPU_REASON_KEEPALIVE_MC_NEW_HDR:
	case MTK_CPU_REASON_KEEPALIVE_DUP_OLD_HDR:
		hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4);
		mtk_offload_keepalive(eth, hash);
		return -1;
	case MTK_CPU_REASON_PACKET_SAMPLING:
		return -1;
	default:
		return 0;
	}
}

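/*
 * The PPE tags punted packets with a "CPU reason" in the RX
 * descriptor. For keep-alive traps the FOE entry index is recovered
 * from rxd4 and used to refresh the matching software flow (see
 * mtk_offload_keepalive() above); the -1 return signals the caller
 * that the packet was consumed by the offload path (sampling packets
 * are likewise swallowed).
 */
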
int mtk_ppe_probe(struct mtk_eth *eth)
{
	int err;

	err = mtk_ppe_start(eth);
	if (err)
		return err;

	err = mtk_ppe_debugfs_init(eth);
	if (err)
		return err;

	return 0;
}

void mtk_ppe_remove(struct mtk_eth *eth)
{
	mtk_ppe_stop(eth);
}