OpenWrt – Blame information for rev 2

From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Mon, 12 Aug 2013 12:50:22 +0200
Subject: [PATCH] MIPS: partially inline dma ops

Several DMA ops are no-op on many platforms, and the indirection through
the mips_dma_map_ops function table is causing the compiler to emit
unnecessary code.

Inlining visibly improves network performance in my tests (on a 24Kc
based system), and also slightly reduces code size of a few drivers.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 arch/mips/Kconfig                   |   4 +
 arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
 arch/mips/mm/dma-default.c          | 163 ++--------------
 3 files changed, 373 insertions(+), 154 deletions(-)

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1453,6 +1453,7 @@ config CPU_CAVIUM_OCTEON
 	select CPU_SUPPORTS_HUGEPAGES
 	select USB_EHCI_BIG_ENDIAN_MMIO
 	select MIPS_L1_CACHE_SHIFT_7
+	select SYS_HAS_DMA_OPS
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
@@ -1708,6 +1709,9 @@ config MIPS_MALTA_PM
 	bool
 	default y
 
+config SYS_HAS_DMA_OPS
+	bool
+
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,9 +1,16 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
 #include <asm/scatterlist.h>
 #include <asm/dma-coherence.h>
 #include <asm/cache.h>
+#include <asm/cpu-type.h>
 #include <asm-generic/dma-coherent.h>
 
 #ifndef CONFIG_SGI_IP27	/* Kludge to fix 2.6.39 build for IP27 */
@@ -12,12 +19,48 @@
 
 extern struct dma_map_ops *mips_dma_map_ops;
 
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp,
+		struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs);
+
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#ifdef CONFIG_SYS_HAS_DMA_OPS
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	else
 		return mips_dma_map_ops;
+#else
+	return NULL;
+#endif
+}
+
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+
+static inline int cpu_needs_post_dma_flush(struct device *dev)
+{
+#ifndef CONFIG_SYS_HAS_CPU_R10000
+	return 0;
+#endif
+	return !plat_device_is_coherent(dev) &&
+	       (boot_cpu_type() == CPU_R10000 ||
+		boot_cpu_type() == CPU_R12000 ||
+		boot_cpu_type() == CPU_BMIPS5000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+		dma_addr_t dma_addr)
+{
+	return pfn_to_page(
+		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
 }
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -30,12 +73,304 @@ static inline bool dma_capable(struct de
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+		size_t size,
+		enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+	struct page *page = virt_to_page(ptr);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+	return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+		size_t size,
+		enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	} else {
+		if (cpu_needs_post_dma_flush(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	int i, ents;
+	struct scatterlist *s;
+
+	for_each_sg(sg, s, nents, i)
+		kmemcheck_mark_initialized(sg_virt(s), s->length);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	} else {
+		for_each_sg(sg, s, nents, i) {
+			struct page *page = sg_page(s);
+
+			if (!plat_device_is_coherent(dev))
+				__dma_sync(page, s->offset, s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+			s->dma_length = s->length;
+#endif
+			s->dma_address =
+				plat_map_dma_mem_page(dev, page) + s->offset;
+		}
+		ents = nents;
+	}
+	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (ops) {
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+		return;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+		plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+	}
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		size_t offset, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(page_address(page) + offset, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	} else {
+		if (cpu_needs_post_dma_flush(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size,
+		enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	else if (cpu_needs_post_dma_flush(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t addr,
+		unsigned long offset,
+		size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+	else if (cpu_needs_post_dma_flush(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			(addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t addr,
+		unsigned long offset,
+		size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr + offset, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			(addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		    int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (cpu_needs_post_dma_flush(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	else if (!plat_device_is_coherent(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops && ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops && ops->get_sgtable)
+		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+				attrs);
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
 
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
+	if (ops)
+		return ops->dma_supported(dev, mask);
+	return plat_dma_supported(dev, mask);
 }
 
 static inline int dma_mapping_error(struct device *dev, u64 mask)
@@ -43,7 +378,9 @@ static inline int dma_mapping_error(stru
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_mapping_error(dev, mask);
-	return ops->mapping_error(dev, mask);
+	if (ops)
+		return ops->mapping_error(dev, mask);
+	return 0;
 }
 
 static inline int
@@ -54,7 +391,7 @@ dma_set_mask(struct device *dev, u64 mas
 	if(!dev->dma_mask || !dma_supported(dev, mask))
 		return -EIO;
 
-	if (ops->set_dma_mask)
+	if (ops && ops->set_dma_mask)
 		return ops->set_dma_mask(dev, mask);
 
 	*dev->dma_mask = mask;
@@ -74,7 +411,11 @@ static inline void *dma_alloc_attrs(stru
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	if (ops)
+		ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	else
+		ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+				attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
@@ -89,7 +430,10 @@ static inline void dma_free_attrs(struct
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ops->free(dev, size, vaddr, dma_handle, attrs);
+	if (ops)
+		ops->free(dev, size, vaddr, dma_handle, attrs);
+	else
+		mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -26,7 +26,7 @@
 
 #ifdef CONFIG_DMA_MAYBE_COHERENT
 int coherentio = 0;	/* User defined DMA coherency from command line. */
-EXPORT_SYMBOL_GPL(coherentio);
+EXPORT_SYMBOL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
 
 static int __init setcoherentio(char *str)
@@ -46,30 +46,6 @@ static int __init setnocoherentio(char *
 early_param("nocoherentio", setnocoherentio);
 #endif
 
-static inline struct page *dma_addr_to_page(struct device *dev,
-		dma_addr_t dma_addr)
-{
-	return pfn_to_page(
-		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
-}
-
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
 	gfp_t dma_flag;
@@ -125,8 +101,9 @@ void *dma_alloc_noncoherent(struct devic
 }
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp,
+	struct dma_attrs *attrs)
 {
 	void *ret;
 	struct page *page = NULL;
@@ -157,6 +134,7 @@ static void *mips_dma_alloc_coherent(str
 
 	return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
 
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
@@ -167,8 +145,8 @@ void dma_free_noncoherent(struct device
 }
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -188,6 +166,7 @@ static void mips_dma_free_coherent(struc
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
 }
+EXPORT_SYMBOL(mips_dma_free_coherent);
 
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
@@ -216,8 +195,8 @@ static inline void __dma_sync_virtual(vo
  * If highmem is not configured then the bulk of this loop gets
  * optimized out.
  */
-static inline void __dma_sync(struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
 {
 	size_t left = size;
 
@@ -246,108 +225,7 @@ static inline void __dma_sync(struct pag
 		left -= len;
 	} while (left);
 }
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_addr),
-			dma_addr & ~PAGE_MASK, size, direction);
-
-	plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
-	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-		sg->dma_length = sg->length;
-#endif
-		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
-			sg->offset;
-	}
-
-	return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(page, offset, size, direction);
-
-	return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	int nhwentries, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nhwentries; i++, sg++) {
-		if (!plat_device_is_coherent(dev) &&
-			direction != DMA_TO_DEVICE)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
-	}
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	if (cpu_needs_post_dma_flush(dev))
-		for (i = 0; i < nelems; i++, sg++)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	if (!plat_device_is_coherent(dev))
-		for (i = 0; i < nelems; i++, sg++)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
-	return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
@@ -360,23 +238,10 @@ void dma_cache_sync(struct device *dev,
 
 EXPORT_SYMBOL(dma_cache_sync);
 
-static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc = mips_dma_alloc_coherent,
-	.free = mips_dma_free_coherent,
-	.map_page = mips_dma_map_page,
-	.unmap_page = mips_dma_unmap_page,
-	.map_sg = mips_dma_map_sg,
-	.unmap_sg = mips_dma_unmap_sg,
-	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
-	.sync_single_for_device = mips_dma_sync_single_for_device,
-	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = mips_dma_sync_sg_for_device,
-	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
 EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
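
The core idea of the patch is visible in the new get_dma_ops(): when CONFIG_SYS_HAS_DMA_OPS is not selected, it returns a compile-time NULL, so the "if (ops)" branch in every inline wrapper is dead code and the compiler emits a direct, inlinable call to the generic path instead of an indirect call through the ops table. The following standalone C sketch illustrates that pattern outside the kernel; struct ops, get_ops(), fallback_sync(), map_buffer() and the HAS_OPS switch are invented for illustration and are not part of the patch or of any kernel API.

/*
 * Minimal userspace sketch of the "partially inline ops" pattern.
 * With HAS_OPS undefined, get_ops() is a constant NULL, so map_buffer()
 * collapses to a direct call to fallback_sync() after optimization.
 */
#include <stddef.h>
#include <stdio.h>

struct ops {
	void (*sync)(void *buf, size_t len);
};

/* Stand-in for CONFIG_SYS_HAS_DMA_OPS; leave undefined to get the inlined fallback. */
/* #define HAS_OPS */

#ifdef HAS_OPS
static struct ops *platform_ops;	/* would be filled in by platform code */
#endif

static inline struct ops *get_ops(void)
{
#ifdef HAS_OPS
	return platform_ops;
#else
	return NULL;	/* compile-time constant: the indirect call below folds away */
#endif
}

static void fallback_sync(void *buf, size_t len)
{
	printf("generic sync of %zu bytes at %p\n", len, buf);
}

static inline void map_buffer(void *buf, size_t len)
{
	struct ops *ops = get_ops();

	if (ops)
		ops->sync(buf, len);		/* indirect call through the ops table */
	else
		fallback_sync(buf, len);	/* direct, inlinable path */
}

int main(void)
{
	char buf[64];

	map_buffer(buf, sizeof(buf));
	return 0;
}

Building this with optimization (for example, gcc -O2) and inspecting the generated code shows the indirect call disappearing when HAS_OPS is undefined, which is the same effect the patch relies on to shrink and speed up the MIPS DMA fast paths.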