OpenWrt – Blame information for rev 1
(path navigation links unavailable in this export)
Rev | Author | Line No. | Line |
---|---|---|---|
1 | office | 1 | From: Mark Brown <broonie@kernel.org> |
2 | Date: Tue, 9 Dec 2014 21:38:05 +0000 |
||
3 | Subject: [PATCH] spi: Pump transfers inside calling context for spi_sync() |
||
4 | |||
5 | If we are using the standard SPI message pump (which all drivers should be |
||
6 | transitioning over to) then special case the message enqueue and instead of |
||
7 | starting the worker thread to push messages to the hardware do so in the |
||
8 | context of the caller if the controller is idle. This avoids a context |
||
9 | switch in the common case where the controller has a single user in a |
||
10 | single thread, for short PIO transfers there may be no need to context |
||
11 | switch away from the calling context to complete the transfer. |
||
12 | |||
13 | The code is a bit more complex than is desirable in part due to the need |
||
14 | to handle drivers not using the standard queue and in part due to handling |
||
15 | the various combinations of bus locking and asynchronous submission in |
||
16 | interrupt context. |
||
17 | |||
18 | It is still suboptimal since it will still wake the message pump for each |
||
19 | transfer in order to schedule idling of the hardware and if multiple |
||
20 | contexts are using the controller simultaneously a caller may end up |
||
21 | pumping a message for some random other thread rather than for itself, |
||
22 | and if the thread ends up deferring due to another context idling the |
||
23 | hardware then it will just busy wait. It can, however, have the benefit |
||
24 | of aggregating power up and down of the hardware when a caller performs |
||
25 | a series of transfers back to back without any need for the use of |
||
26 | spi_async(). |
||
27 | |||
28 | Signed-off-by: Mark Brown <broonie@kernel.org> |
||
29 | --- |
||
30 | |||
31 | --- a/drivers/spi/spi.c |
||
32 | +++ b/drivers/spi/spi.c |
||
33 | @@ -882,6 +882,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_t |
||
34 | * needs processing and if so call out to the driver to initialize hardware |
||
35 | * and transfer each message. |
||
36 | * |
||
37 | + * Note that it is called both from the kthread itself and also from |
||
38 | + * inside spi_sync(); the queue extraction handling at the top of the |
||
39 | + * function should deal with this safely. |
||
40 | */ |
||
41 | static void spi_pump_messages(struct kthread_work *work) |
||
42 | { |
||
43 | @@ -900,6 +903,13 @@ static void spi_pump_messages(struct kth |
||
44 | return; |
||
45 | } |
||
46 | |||
47 | + /* If another context is idling the device then defer */ |
||
48 | + if (master->idling) { |
||
49 | + queue_kthread_work(&master->kworker, &master->pump_messages); |
||
50 | + spin_unlock_irqrestore(&master->queue_lock, flags); |
||
51 | + return; |
||
52 | + } |
||
53 | + |
||
54 | /* Check if the queue is idle */ |
||
55 | if (list_empty(&master->queue) || !master->running) { |
||
56 | if (!master->busy) { |
||
57 | @@ -907,7 +917,9 @@ static void spi_pump_messages(struct kth |
||
58 | return; |
||
59 | } |
||
60 | master->busy = false; |
||
61 | + master->idling = true; |
||
62 | spin_unlock_irqrestore(&master->queue_lock, flags); |
||
63 | + |
||
64 | kfree(master->dummy_rx); |
||
65 | master->dummy_rx = NULL; |
||
66 | kfree(master->dummy_tx); |
||
67 | @@ -921,6 +933,10 @@ static void spi_pump_messages(struct kth |
||
68 | pm_runtime_put_autosuspend(master->dev.parent); |
||
69 | } |
||
70 | trace_spi_master_idle(master); |
||
71 | + |
||
72 | + spin_lock_irqsave(&master->queue_lock, flags); |
||
73 | + master->idling = false; |
||
74 | + spin_unlock_irqrestore(&master->queue_lock, flags); |
||
75 | return; |
||
76 | } |
||
77 | |||
78 | @@ -1166,12 +1182,9 @@ static int spi_destroy_queue(struct spi_ |
||
79 | return 0; |
||
80 | } |
||
81 | |||
82 | -/** |
||
83 | - * spi_queued_transfer - transfer function for queued transfers |
||
84 | - * @spi: spi device which is requesting transfer |
||
85 | - * @msg: spi message which is to handled is queued to driver queue |
||
86 | - */ |
||
87 | -static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) |
||
88 | +static int __spi_queued_transfer(struct spi_device *spi, |
||
89 | + struct spi_message *msg, |
||
90 | + bool need_pump) |
||
91 | { |
||
92 | struct spi_master *master = spi->master; |
||
93 | unsigned long flags; |
||
94 | @@ -1186,13 +1199,23 @@ static int spi_queued_transfer(struct sp |
||
95 | msg->status = -EINPROGRESS; |
||
96 | |||
97 | list_add_tail(&msg->queue, &master->queue); |
||
98 | - if (!master->busy) |
||
99 | + if (!master->busy && need_pump) |
||
100 | queue_kthread_work(&master->kworker, &master->pump_messages); |
||
101 | |||
102 | spin_unlock_irqrestore(&master->queue_lock, flags); |
||
103 | return 0; |
||
104 | } |
||
105 | |||
106 | +/** |
||
107 | + * spi_queued_transfer - transfer function for queued transfers |
||
108 | + * @spi: spi device which is requesting transfer |
||
109 | + * @msg: spi message which is to handled is queued to driver queue |
||
110 | + */ |
||
111 | +static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) |
||
112 | +{ |
||
113 | + return __spi_queued_transfer(spi, msg, true); |
||
114 | +} |
||
115 | + |
||
116 | static int spi_master_initialize_queue(struct spi_master *master) |
||
117 | { |
||
118 | int ret; |
||
119 | @@ -2104,19 +2127,46 @@ static int __spi_sync(struct spi_device |
||
120 | DECLARE_COMPLETION_ONSTACK(done); |
||
121 | int status; |
||
122 | struct spi_master *master = spi->master; |
||
123 | + unsigned long flags; |
||
124 | + |
||
125 | + status = __spi_validate(spi, message); |
||
126 | + if (status != 0) |
||
127 | + return status; |
||
128 | |||
129 | message->complete = spi_complete; |
||
130 | message->context = &done; |
||
131 | + message->spi = spi; |
||
132 | |||
133 | if (!bus_locked) |
||
134 | mutex_lock(&master->bus_lock_mutex); |
||
135 | |||
136 | - status = spi_async_locked(spi, message); |
||
137 | + /* If we're not using the legacy transfer method then we will |
||
138 | + * try to transfer in the calling context so special case. |
||
139 | + * This code would be less tricky if we could remove the |
||
140 | + * support for driver implemented message queues. |
||
141 | + */ |
||
142 | + if (master->transfer == spi_queued_transfer) { |
||
143 | + spin_lock_irqsave(&master->bus_lock_spinlock, flags); |
||
144 | + |
||
145 | + trace_spi_message_submit(message); |
||
146 | + |
||
147 | + status = __spi_queued_transfer(spi, message, false); |
||
148 | + |
||
149 | + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); |
||
150 | + } else { |
||
151 | + status = spi_async_locked(spi, message); |
||
152 | + } |
||
153 | |||
154 | if (!bus_locked) |
||
155 | mutex_unlock(&master->bus_lock_mutex); |
||
156 | |||
157 | if (status == 0) { |
||
158 | + /* Push out the messages in the calling context if we |
||
159 | + * can. |
||
160 | + */ |
||
161 | + if (master->transfer == spi_queued_transfer) |
||
162 | + spi_pump_messages(&master->pump_messages); |
||
163 | + |
||
164 | wait_for_completion(&done); |
||
165 | status = message->status; |
||
166 | } |
||
167 | --- a/include/linux/spi/spi.h |
||
168 | +++ b/include/linux/spi/spi.h |
||
169 | @@ -260,6 +260,7 @@ static inline void spi_unregister_driver |
||
170 | * @pump_messages: work struct for scheduling work to the message pump |
||
171 | * @queue_lock: spinlock to syncronise access to message queue |
||
172 | * @queue: message queue |
||
173 | + * @idling: the device is entering idle state |
||
174 | * @cur_msg: the currently in-flight message |
||
175 | * @cur_msg_prepared: spi_prepare_message was called for the currently |
||
176 | * in-flight message |
||
177 | @@ -425,6 +426,7 @@ struct spi_master { |
||
178 | spinlock_t queue_lock; |
||
179 | struct list_head queue; |
||
180 | struct spi_message *cur_msg; |
||
181 | + bool idling; |
||
182 | bool busy; |
||
183 | bool running; |
||
184 | bool rt; |