commit 41b976414c88016e2c9d9b2f6667ee67a998d388
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:31 2015 +0100

    8139cp: Dump contents of descriptor ring on TX timeout

    We are seeing unexplained TX timeouts under heavy load. Let's try to get
    a better idea of what's going on.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit 7f4c685633e2df9ba10d49a31dda13715745db37
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:16 2015 +0100

    8139cp: Fix DMA unmapping of transmitted buffers

    The low 16 bits of the 'opts1' field in the TX descriptor are supposed
    to still contain the buffer length when the descriptor is handed back to
    us. In practice, at least on my hardware, they don't. So stash the
    original value of the opts1 field and get the length to unmap from
    there.

    There are other ways we could have worked out the length, but I actually
    want a stash of the opts1 field anyway so that I can dump it alongside
    the contents of the descriptor ring when we suffer a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

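A minimal sketch of the stash-and-unmap idea described above (the example_* helpers are hypothetical and exist only to show the flow; cp->tx_opts[] is the per-slot stash that the diff below adds to struct cp_private):

	/* Queueing: remember the opts1 value written into this TX slot. */
	static void example_queue_slot(struct cp_private *cp, struct cp_desc *txd,
				       unsigned entry, u32 opts1)
	{
		txd->opts1 = cpu_to_le32(opts1);
		cp->tx_opts[entry] = opts1;
	}

	/* Completion: unmap with the stashed length rather than whatever the
	 * hardware left in the descriptor's opts1 field. */
	static void example_complete_slot(struct cp_private *cp, struct cp_desc *txd,
					  unsigned tx_tail)
	{
		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 cp->tx_opts[tx_tail] & 0xffff, PCI_DMA_TODEVICE);
	}
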
commit 0a5aeee0b79fa99d8e04c98dd4e87d4f52aa497b
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:57 2015 +0100

    8139cp: Reduce duplicate csum/tso code in cp_start_xmit()

    We calculate the value of the opts1 descriptor field in three different
    places. With two different behaviours when given an invalid packet to
    be checksummed — none of them correct. Sort that out.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

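The fix below computes the protocol-dependent bits once, before the single-buffer and scatter-gather paths diverge, and every descriptor then ORs in only its own per-fragment bits. In outline (a sketch of the shape of the code, not the complete transmit path):

	opts1 = DescOwn;
	if (mss)
		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP)
			opts1 |= IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			opts1 |= IPCS | UDPCS;
		else
			goto out_dma_error;	/* one consistent path for invalid packets */
	}
	/* ...each descriptor later uses opts1 | <its own eor/len/frag bits>. */
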
commit a3b804043f490aeec57d8ca5baccdd35e6250857
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:38 2015 +0100

    8139cp: Fix TSO/scatter-gather descriptor setup

    When sending a TSO frame in multiple buffers, we were neglecting to set
    the first descriptor up in TSO mode.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

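Concretely, the first descriptor of a multi-buffer frame must carry the same LargeSend/MSS (or checksum) bits as the rest of the chain; a sketch of the corrected setup, reusing the opts1 value computed once up front:

	/* First descriptor of a scatter-gather frame: TSO/csum bits included. */
	ctrl = opts1 | first_eor | first_len | FirstFrag;
	txd->opts1 = cpu_to_le32(ctrl);
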
commit 26b0bad6ac3a0167792dc4ffb276c29bc597d239
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:06 2015 +0100

    8139cp: Fix tx_queued debug message to print correct slot numbers

    After a certain amount of staring at the debug output of this driver, I
    realised it was lying to me.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit aaa0062ecf4877a26dea66bee1039c6eaf906c94
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:43:41 2015 +0100

    8139cp: Do not re-enable RX interrupts in cp_tx_timeout()

    If an RX interrupt was already received but NAPI has not yet run when
    the TX timeout happens, we end up in cp_tx_timeout() with RX interrupts
    already disabled. Blindly re-enabling them will cause an IRQ storm.

    (This is made particularly horrid by the fact that cp_interrupt() always
    returns that it's handled the interrupt, even when it hasn't actually
    done anything. If it didn't do that, the core IRQ code would have
    detected the storm and handled it, I'd have had a clear smoking gun
    backtrace instead of just a spontaneously resetting router, and I'd have
    at *least* two days of my life back. Changing the return value of
    cp_interrupt() will be argued about under separate cover.)

    Unconditionally leave RX interrupts disabled after the reset, and
    schedule NAPI to check the receive ring and re-enable them.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

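The timeout handler therefore ends up doing roughly the following after it has reset the hardware and the rings (a sketch; cp_norx_intr_mask is the driver's interrupt mask with the RX bits cleared, and the NAPI poll routine re-enables RX interrupts once it has drained the ring):

	cpw16_f(IntrMask, cp_norx_intr_mask);	/* keep RX interrupts masked */
	netif_wake_queue(dev);
	napi_schedule(&cp->napi);		/* NAPI checks the RX ring and re-enables them */
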
commit 7a8a8e75d505147358b225173e890ada43a267e2
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:21:54 2015 +0100

    8139cp: Call __cp_set_rx_mode() from cp_tx_timeout()

    Unless we reset the RX config, on real hardware I don't seem to receive
    any packets after a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit fc27bd115b334e3ebdc682a42a47c3aea2566dcc
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:19:08 2015 +0100

    8139cp: Use dev_kfree_skb_any() instead of dev_kfree_skb() in cp_clean_rings()

    This can be called from cp_tx_timeout() with interrupts disabled.
    Spotted by Francois Romieu <romieu@fr.zoreil.com>

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
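For reference: dev_kfree_skb() must not be called in hard-IRQ context or with interrupts disabled, while dev_kfree_skb_any() checks the current context and defers the free when necessary. Since cp_tx_timeout() calls cp_clean_rings() under spin_lock_irqsave(), the cleanup loop has to use the context-agnostic variant, roughly:

	if (le32_to_cpu(desc->opts1) & LastFrag)
		dev_kfree_skb_any(skb);	/* safe whether or not IRQs are disabled */
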
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
 	NWayAdvert	= 0x66, /* MII ADVERTISE */
 	NWayLPAR	= 0x68, /* MII LPA */
 	NWayExpansion	= 0x6A, /* MII Expansion */
+	TxDmaOkLowDesc	= 0x82, /* Low 16 bit address of a Tx descriptor. */
 	Config5		= 0xD8, /* Config5 */
 	TxPoll		= 0xD9, /* Tell chip to check Tx descriptors for work */
 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
 	unsigned		tx_tail;
 	struct cp_desc		*tx_ring;
 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+	u32			tx_opts[CP_TX_RING_SIZE];
 
 	unsigned		rx_buf_sz;
 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -670,7 +672,7 @@ static void cp_tx (struct cp_private *cp
 		BUG_ON(!skb);
 
 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-				 le32_to_cpu(txd->opts1) & 0xffff,
+				 cp->tx_opts[tx_tail] & 0xffff,
 				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
@@ -738,7 +740,7 @@ static netdev_tx_t cp_start_xmit (struct
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor, flags;
+	u32 eor, opts1;
 	unsigned long intr_flags;
 	__le32 opts2;
 	int mss = 0;
@@ -758,6 +760,21 @@ static netdev_tx_t cp_start_xmit (struct
 		mss = skb_shinfo(skb)->gso_size;
 
 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+	opts1 = DescOwn;
+	if (mss)
+		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		const struct iphdr *ip = ip_hdr(skb);
+		if (ip->protocol == IPPROTO_TCP)
+			opts1 |= IPCS | TCPCS;
+		else if (ip->protocol == IPPROTO_UDP)
+			opts1 |= IPCS | UDPCS;
+		else {
+			WARN_ONCE(1,
+				  "Net bug: asked to checksum invalid Legacy IP packet\n");
+			goto out_dma_error;
+		}
+	}
 
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
@@ -773,31 +790,20 @@ static netdev_tx_t cp_start_xmit (struct
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
-		if (mss)
-			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			const struct iphdr *ip = ip_hdr(skb);
-			if (ip->protocol == IPPROTO_TCP)
-				flags |= IPCS | TCPCS;
-			else if (ip->protocol == IPPROTO_UDP)
-				flags |= IPCS | UDPCS;
-			else
-				WARN_ON(1);	/* we need a WARN() */
-		}
+		opts1 |= eor | len | FirstFrag | LastFrag;
 
-		txd->opts1 = cpu_to_le32(flags);
+		txd->opts1 = cpu_to_le32(opts1);
 		wmb();
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
+		cp->tx_opts[entry] = opts1;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+			  entry, skb->len);
 	} else {
 		struct cp_desc *txd;
-		u32 first_len, first_eor;
+		u32 first_len, first_eor, ctrl;
 		dma_addr_t first_mapping;
 		int frag, first_entry = entry;
-		const struct iphdr *ip = ip_hdr(skb);
 
 		/* We must give this initial chunk to the device last.
 		 * Otherwise we could race with the device.
@@ -810,14 +816,14 @@ static netdev_tx_t cp_start_xmit (struct
 			goto out_dma_error;
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 			u32 len;
-			u32 ctrl;
 			dma_addr_t mapping;
 
+			entry = NEXT_TX(entry);
+
 			len = skb_frag_size(this_frag);
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
@@ -829,19 +835,7 @@ static netdev_tx_t cp_start_xmit (struct
 
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			ctrl = eor | len | DescOwn;
-
-			if (mss)
-				ctrl |= LargeSend |
-					((mss & MSSMask) << MSSShift);
-			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= IPCS | TCPCS;
-				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= IPCS | UDPCS;
-				else
-					BUG();
-			}
+			ctrl = opts1 | eor | len;
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -854,8 +848,8 @@ static netdev_tx_t cp_start_xmit (struct
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
+			cp->tx_opts[entry] = ctrl;
 			cp->tx_skb[entry] = skb;
-			entry = NEXT_TX(entry);
 		}
 
 		txd = &cp->tx_ring[first_entry];
@@ -863,27 +857,17 @@ static netdev_tx_t cp_start_xmit (struct
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | TCPCS);
-			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | UDPCS);
-			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(first_eor | first_len |
-						 FirstFrag | DescOwn);
+		ctrl = opts1 | first_eor | first_len | FirstFrag;
+		txd->opts1 = cpu_to_le32(ctrl);
 		wmb();
+
+		cp->tx_opts[first_entry] = ctrl;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+			  first_entry, entry, skb->len);
 	}
-	cp->tx_head = entry;
+	cp->tx_head = NEXT_TX(entry);
 
 	netdev_sent_queue(dev, skb->len);
-	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
@@ -1120,6 +1104,7 @@ static int cp_init_rings (struct cp_priv
 {
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 
 	cp_init_rings_index(cp);
 
@@ -1156,7 +1141,7 @@ static void cp_clean_rings (struct cp_pr
 			desc = cp->rx_ring + i;
 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i]);
+			dev_kfree_skb_any(cp->rx_skb[i]);
 		}
 	}
 
@@ -1169,7 +1154,7 @@ static void cp_clean_rings (struct cp_pr
 					 le32_to_cpu(desc->opts1) & 0xffff,
 					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
-				dev_kfree_skb(skb);
+				dev_kfree_skb_any(skb);
 			cp->dev->stats.tx_dropped++;
 		}
 	}
@@ -1177,6 +1162,7 @@ static void cp_clean_rings (struct cp_pr
 
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 
 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1254,7 +1240,7 @@ static void cp_tx_timeout(struct net_dev
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
-	int rc;
+	int rc, i;
 
 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
 		    cpr8(Cmd), cpr16(CpCmd),
@@ -1262,13 +1248,26 @@ static void cp_tx_timeout(struct net_dev
 
 	spin_lock_irqsave(&cp->lock, flags);
 
+	netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+		  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+	for (i = 0; i < CP_TX_RING_SIZE; i++) {
+		netif_dbg(cp, tx_err, cp->dev,
+			  "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+			  i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+			  cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+			  le64_to_cpu(cp->tx_ring[i].addr),
+			  cp->tx_skb[i]);
+	}
+
 	cp_stop_hw(cp);
 	cp_clean_rings(cp);
 	rc = cp_init_rings(cp);
 	cp_start_hw(cp);
-	cp_enable_irq(cp);
+	__cp_set_rx_mode(dev);
+	cpw16_f(IntrMask, cp_norx_intr_mask);
 
 	netif_wake_queue(dev);
+	napi_schedule(&cp->napi);
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 }