/*
 * ADM5120 HCD (Host Controller Driver) for USB
 *
 * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
 *
 * This file was derived from: drivers/usb/host/ohci-q.c
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/irq.h>
#include <linux/slab.h>

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb, int status)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	urb_priv_free(ahcd, urb->hcpriv);

	if (likely(status == -EINPROGRESS))
		status = 0;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe), status);
#endif

	/* urb->complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(admhcd_to_hcd(ahcd), urb);
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb, status);
	spin_lock(&ahcd->lock);
}
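
/*
 * Note: because finish_urb() drops ahcd->lock around usb_hcd_giveback_urb(),
 * the completion handler may reenter this HCD and queue or unlink URBs.
 * Any list walk that calls finish_urb() (see ed_update() and
 * finish_unlinks() below) therefore rescans its list afterwards.
 */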

/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

#if 0	/* FIXME */
/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance(struct admhcd *ahcd, int interval, int load)
{
	int i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
			int j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ahcd->load[j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
#endif

/*-------------------------------------------------------------------------*/

#if 0	/* FIXME */
/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place. most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link(struct admhcd *ahcd, struct ed *ed)
{
	unsigned i;

	admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed **prev = &ahcd->periodic[i];
		__hc32 *prev_p = &ahcd->hcca->int_table[i];
		struct ed *here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb();
			*prev = ed;
			*prev_p = cpu_to_hc32(ahcd, ed->dma);
			wmb();
		}
		ahcd->load[i] += ed->load;
	}
	admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
}
#endif

/* link an ed into the HC chain */
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
	struct ed *old_tail;

	if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	ed->state = ED_OPER;

	old_tail = ahcd->ed_tails[ed->type];

	ed->ed_next = old_tail->ed_next;
	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed;
		ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
	}
	ed->ed_prev = old_tail;

	old_tail->ed_next = ed;
	old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_tails[ed->type] = ed;

	admhc_dma_enable(ahcd);

	return 0;
}
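
/*
 * Note on ordering in ed_schedule(): the new ED's ed_next/hwNextED links are
 * fully set up before old_tail->hwNextED is patched to point at it, so the
 * HC can never follow a half-initialized link. admhc_dma_enable() then
 * (re)starts list processing in case it was idle.
 */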

/*-------------------------------------------------------------------------*/

#if 0	/* FIXME */
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
{
	int i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed *temp;
		struct ed **prev = &ahcd->periodic[i];
		__hc32 *prev_p = &ahcd->hcca->int_table[i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ahcd->load[i] -= ed->load;
	}

	admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
	admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
#endif

/* unlink an ed from the HC chain.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't. ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new. The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately. HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->state = ED_UNLINK;

	/* remove this ED from the HC list */
	ed->ed_prev->hwNextED = ed->hwNextED;

	/* and remove it from our list also */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next)
		ed->ed_next->ed_prev = ed->ed_prev;

	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;
}
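
/*
 * Note: ed_deschedule() sets ED_SKIP (with a write barrier) before patching
 * the predecessor's hwNextED, so the HC stops starting new TDs on this ED
 * first. The unlinked ED's own hwNextED is left intact, letting the HC walk
 * off it safely, per the comment above.
 */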

/*-------------------------------------------------------------------------*/

static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
	struct ed *ed;
	struct td *td;

	ed = ed_alloc(ahcd, GFP_ATOMIC);
	if (!ed)
		goto err;

	/* dummy td; end of td list for this ed */
	td = td_alloc(ahcd, GFP_ATOMIC);
	if (!td)
		goto err_free_ed;

	switch (type) {
	case PIPE_INTERRUPT:
		info |= ED_INT;
		break;
	case PIPE_ISOCHRONOUS:
		info |= ED_ISO;
		break;
	}

	ed->dummy = td;
	ed->state = ED_IDLE;
	ed->type = type;

	ed->hwINFO = cpu_to_hc32(ahcd, info);
	ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
	ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */

	return ed;

err_free_ed:
	ed_free(ahcd, ed);
err:
	return NULL;
}
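
/*
 * Note: hwHeadP == hwTailP (both pointing at the dummy TD) is the "queue
 * empty" convention. td_fill() later turns the current dummy into a real
 * TD and advances hwTailP, so the HC always has a valid TD to point at.
 */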

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
	struct usb_device *udev, unsigned int pipe, int interval)
{
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave(&ahcd->lock, flags);

	ed = ep->hcpriv;
	if (!ed) {
		u32 info;

		/* FIXME: usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice(pipe);
		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
		if (udev->speed == USB_SPEED_FULL)
			info |= ED_SPEED_FULL;

		ed = ed_create(ahcd, usb_pipetype(pipe), info);
		if (ed)
			ep->hcpriv = ed;
	}

	spin_unlock_irqrestore(&ahcd->lock, flags);

	return ed;
}
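
/*
 * Note: the ED created here is cached in ep->hcpriv and reused for later
 * URBs on the same endpoint. ed_create() allocates with GFP_ATOMIC because
 * ahcd->lock is held with interrupts disabled at this point.
 */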

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SOFI) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
{

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
	ed_deschedule(ahcd, ed);

	/* add this ED into the remove list */
	ed->ed_rm_next = ahcd->ed_rm_list;
	ahcd->ed_rm_list = ed;

	/* enable SOF interrupt */
	admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
	/* flush those writes */
	admhc_writel_flush(ahcd);

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;
}
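
/*
 * Note: the ED now sits on ed_rm_list; finish_unlinks(), driven by the SOF
 * interrupt enabled above, completes the unlink once tick_before(tick,
 * ed->tick) no longer holds, i.e. once the HC has moved past this ED.
 */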

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
static void
td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td *td, *td_pt;
	struct urb_priv *urb_priv = urb->hcpriv;
	int hash;
	u32 cbl = 0;

#if 1
	if (index == (urb_priv->td_cnt - 1) &&
		((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
		cbl |= TD_IE;
#else
	if (index == (urb_priv->td_cnt - 1))
		cbl |= TD_IE;
#endif

	/* use this td as the next dummy */
	td_pt = urb_priv->td[index];

	/* fill the old dummy TD */
	td = urb_priv->td[index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	if (data)
		cbl |= (len & TD_BL_MASK);

	info |= TD_OWN;

	/* setup hardware specific fields */
	td->hwINFO = cpu_to_hc32(ahcd, info);
	td->hwDBP = cpu_to_hc32(ahcd, data);
	td->hwCBL = cpu_to_hc32(ahcd, cbl);
	td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);

	/* append to queue */
	list_add_tail(&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC(td->td_dma);
	td->td_hash = ahcd->td_hash[hash];
	ahcd->td_hash[hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb();
	td->ed->hwTailP = td->hwNextTD;
}
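
/*
 * Note: this is the standard OHCI dummy-TD trick referenced above (spec
 * 5.2.8.2). The ED's current dummy TD is filled in as the real TD and the
 * caller-allocated one becomes the new dummy; only the final hwTailP update
 * (after the wmb()) exposes the new work to the HC, so TDs can be appended
 * while the HC is running.
 */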

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout(urb->pipe);
	u32 toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */

	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		toggle = TD_T_CARRY;
	} else {
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;
	list_add(&urb_priv->pending, &ahcd->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		td_fill(ahcd, info, data, data_len, urb, cnt);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb, cnt);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
			data_len, urb, cnt);
		cnt++;

		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				0, 0, urb, cnt);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb, cnt++);
		}

		/* fill a TD for the ACK */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb, cnt++);

		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length, urb, cnt);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
}
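
/*
 * Note: urb_priv->td_cnt holds the total TD count, presumably sized when
 * urb_priv was allocated on the enqueue path (an assumption based on the
 * check above); the final consistency check flags any mismatch between
 * that estimate and the TDs actually created here.
 */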

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	u32 info;
	u32 bl;
	u32 tdDBP;
	int type = usb_pipetype(urb->pipe);
	int cc;
	int status = -EINPROGRESS;

	info = hc32_to_cpup(ahcd, &td->hwINFO);
	tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
	bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status */
	if (type == PIPE_ISOCHRONOUS) {
		/* TODO */
		int dlen = 0;

		/* NOTE: assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */
		if (info & TD_CC)	/* hc didn't touch? */
			return status;

		if (usb_pipeout(urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_CC_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdDBP - td->data_dma + bl;
		}

		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg(ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		/* update packet status if needed (short is normally ok) */
		if (cc == TD_CC_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
			status = cc_to_error[cc];

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0)
			urb->actual_length += tdDBP - td->data_dma + bl;

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}

	list_del(&td->td_list);
	urb_priv->td_idx++;

	return status;
}
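
/*
 * Note: both paths derive the byte count as (tdDBP - td->data_dma) + bl,
 * combining how far the HC advanced the buffer pointer (hwDBP) with the
 * buffer-length field extracted from hwCBL; the precise semantics of those
 * fields follow the ADM5120 TD layout as encoded by TD_BL_GET().
 */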

/*-------------------------------------------------------------------------*/

static void ed_halted(struct admhcd *ahcd, struct td *td, int cc)
{
	struct urb *urb = td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	/* Get rid of all later tds from this urb. We don't have
	 * to be careful: no errors and nothing was transferred.
	 * Also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;

		next = list_entry(tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		list_del(&next->td_list);
		/* account it as retired, like td_done() does; bumping
		 * td_cnt (the total) here would keep the URB from ever
		 * completing.
		 */
		urb_priv->td_idx++;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting: report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_CC_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol(urb->pipe))
			break;
		/* fallthrough */
	default:
		admhc_dbg(ahcd,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			hc32_to_cpu(ahcd, td->hwINFO),
			cc, cc_to_error[cc]);
	}
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks(struct admhcd *ahcd, u16 tick)
{
	struct ed *ed, **last;

rescan_all:
	for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head *entry, *tmp;
		int completed, modified;
		__hc32 *prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
			if (tick_before(tick, ed->tick)) {
skip_ed:
				last = &ed->ed_rm_next;
				continue;
			}
#if 0
			if (!list_empty(&ed->td_list)) {
				struct td *td;
				u32 head;

				td = list_entry(ed->td_list.next, struct td,
					td_list);
				head = hc32_to_cpu(ahcd, ed->hwHeadP) &
					TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
#endif
		}

		/* reentrancy: if we drop the schedule lock, someone might
		 * have modified this list. normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_rm_next;
		ed->ed_rm_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed. But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe(entry, tmp, &ed->td_list) {
			struct td *td;
			struct urb *urb;
			struct urb_priv *urb_priv;
			__hc32 savebits;
			u32 tdINFO;
			int status;

			td = list_entry(entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (!urb->unlinked) {
				prev = &td->hwNextTD;
				continue;
			}

			if (urb_priv == NULL)
				continue;

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* If this was unlinked, the TD may not have been
			 * retired ... so manually save the data toggle.
			 * The controller ignores the value we save for
			 * control and ISO endpoints.
			 */
			tdINFO = hc32_to_cpup(ahcd, &td->hwINFO);
			if ((tdINFO & TD_T) == TD_T_DATA0)
				ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_C);
			else if ((tdINFO & TD_T) == TD_T_DATA1)
				ed->hwHeadP |= cpu_to_hc32(ahcd, ED_C);

			/* HC may have partly processed this TD */
#ifdef ADMHC_VERBOSE_DEBUG
			urb_print(ahcd, urb, "PARTIAL", 0, -EINPROGRESS);
#endif
			status = td_done(ahcd, urb, td);

			/* if URB is done, clean up */
			if (urb_priv->td_idx == urb_priv->td_cnt) {
				modified = completed = 1;
				finish_urb(ahcd, urb, status);
			}
		}
		if (completed && !list_empty(&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
		ed->hwNextED = 0;
		wmb();
		ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty(&ed->td_list)) {
			if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
				ed_schedule(ahcd, ed);
		}

		if (modified)
			goto rescan_all;
	}
}
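
/*
 * Note: "completed" drives the rescan of this ED's TD list and "modified"
 * the rescan of the whole rm_list, because finish_urb() drops ahcd->lock
 * and the completion handler may have changed either list underneath us.
 */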

/*-------------------------------------------------------------------------*/
/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers. The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.
 */

static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
{
	struct list_head *entry, *tmp;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UNHALT", ed, 0);
#endif
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		__hc32 info;

		if (td->urb != urb)
			break;

		info = td->hwINFO;
		info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
		td->hwINFO = info;

		ed->hwHeadP = td->hwNextTD | toggle;
		wmb();
	}
}

static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
{
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	ed->hwHeadP = ed->hwTailP | toggle;
}
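
/*
 * Note: ed_intr_refill() resets the HC-visible head to the tail, i.e. an
 * empty queue, while preserving the ED_C data-toggle carry bit that was
 * masked out of the old head above.
 */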

static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
}

static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
		struct td *td)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
		(hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
}

static void ed_update(struct admhcd *ahcd, struct ed *ed)
{
	struct list_head *entry, *tmp;

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UPDATE", ed, 1);
#endif

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		struct urb *urb = td->urb;
		struct urb_priv *urb_priv = urb->hcpriv;
		int status;

		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		/* update URB's length and status from TD */
		status = td_done(ahcd, urb, td);
		if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
			ed_unhalt(ahcd, ed, urb);

		if (ed->type == PIPE_INTERRUPT)
			ed_intr_refill(ahcd, ed);

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_idx == urb_priv->td_cnt)
			finish_urb(ahcd, urb, status);

		/* clean schedule: unlink EDs that are no longer busy */
		if (list_empty(&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink(ahcd, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & cpu_to_hc32(ahcd,
				ED_SKIP | ED_DEQUEUE))
					== cpu_to_hc32(ahcd, ED_SKIP)) {
			td = list_entry(ed->td_list.next, struct td, td_list);
#if 0
			if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					admhc_writel(ahcd, OHCI_CLF,
						&ahcd->regs->cmdstatus);
					break;
				case PIPE_BULK:
					admhc_writel(ahcd, OHCI_BLF,
						&ahcd->regs->cmdstatus);
					break;
				}
			}
#else
			if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
#endif
		}
	}
}

/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
	struct ed *ed;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (ed->state != ED_OPER)
			continue;

		ed_update(ahcd, ed);
	}
}
964 | } |