OpenWrt – Blame information for rev 1
From 361974032ae1b0eec36c51a8f1cd9b447864fcbd Mon Sep 17 00:00:00 2001
From: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
Date: Fri, 5 Jan 2018 00:44:00 +0900
Subject: [PATCH 150/454] vcsm: Unify cache manipulating functions

Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
---
 drivers/char/broadcom/vc_sm/vmcs_sm.c | 309 +++++++++++---------------
 1 file changed, 132 insertions(+), 177 deletions(-)

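The patch collapses every user-facing cache ioctl onto a single walk: block_count blocks of block_size bytes, each block starting stride bytes after the previous one, with the plain 1D case expressed as block_count = 1 and stride = 0. As a rough, stand-alone illustration of that addressing pattern (not part of the patch: walk_2d is an invented name, and a printf stands in for the kernel's dmac_inv_range/dmac_clean_range/dmac_flush_range calls), the user-space sketch below compiles and runs as is.

#include <stddef.h>
#include <stdio.h>

/* Mirror the loop in clean_invalid_mem_2d(): apply one operation to
 * block_count blocks of block_size bytes, each block starting stride
 * bytes after the previous one.  The real helper calls a dmac_*_range
 * function here; this sketch only prints the ranges it would touch. */
static void walk_2d(const char *addr, size_t block_count,
                    size_t block_size, size_t stride)
{
        size_t i;

        for (i = 0; i < block_count; i++, addr += stride)
                printf("op on [%p, %p)\n", (const void *)addr,
                       (const void *)(addr + block_size));
}

int main(void)
{
        static char buf[4096];

        /* Three 64-byte blocks, 256 bytes apart (the 2D case). */
        walk_2d(buf, 3, 64, 256);

        /* One contiguous range, as clean_invalid_mem() issues it. */
        walk_2d(buf, 1, sizeof(buf), 0);

        return 0;
}
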
--- a/drivers/char/broadcom/vc_sm/vmcs_sm.c
+++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c
@@ -1256,61 +1256,106 @@ static const struct vm_operations_struct
         .fault = vcsm_vma_fault,
 };
 
-/* Walks a VMA and clean each valid page from the cache */
-static void vcsm_vma_cache_clean_page_range(unsigned long addr,
-                                            unsigned long end)
+static int clean_invalid_mem_2d(const void __user *addr,
+        const size_t block_count, const size_t block_size, const size_t stride,
+        const unsigned cache_op)
 {
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *pte;
-        unsigned long pgd_next, pud_next, pmd_next;
-
-        if (addr >= end)
-                return;
-
-        /* Walk PGD */
-        pgd = pgd_offset(current->mm, addr);
-        do {
-                pgd_next = pgd_addr_end(addr, end);
-
-                if (pgd_none(*pgd) || pgd_bad(*pgd))
-                        continue;
-
-                /* Walk PUD */
-                pud = pud_offset(pgd, addr);
-                do {
-                        pud_next = pud_addr_end(addr, pgd_next);
-                        if (pud_none(*pud) || pud_bad(*pud))
-                                continue;
-
-                        /* Walk PMD */
-                        pmd = pmd_offset(pud, addr);
-                        do {
-                                pmd_next = pmd_addr_end(addr, pud_next);
-                                if (pmd_none(*pmd) || pmd_bad(*pmd))
-                                        continue;
-
-                                /* Walk PTE */
-                                pte = pte_offset_map(pmd, addr);
-                                do {
-                                        if (pte_none(*pte)
-                                            || !pte_present(*pte))
-                                                continue;
-
-                                        /* Clean + invalidate */
-                                        dmac_flush_range((const void *) addr,
-                                                         (const void *)
-                                                         (addr + PAGE_SIZE));
-
-                                } while (pte++, addr +=
-                                         PAGE_SIZE, addr != pmd_next);
-                                pte_unmap(pte);
+        size_t i;
+        void (*op_fn)(const void*, const void*);
 
-                        } while (pmd++, addr = pmd_next, addr != pud_next);
+        if (block_size <= 0) {
+                pr_err("[%s]: size cannot be 0\n", __func__);
+                return -EINVAL;
+        }
+
+        switch (cache_op) {
+        case VCSM_CACHE_OP_INV:
+                op_fn = dmac_inv_range;
+                break;
+        case VCSM_CACHE_OP_CLEAN:
+                op_fn = dmac_clean_range;
+                break;
+        case VCSM_CACHE_OP_FLUSH:
+                op_fn = dmac_flush_range;
+                break;
+        default:
+                pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
+                return -EINVAL;
+        }
+
+        for (i = 0; i < block_count; i ++, addr += stride)
+                op_fn(addr, addr + block_size);
+
+        return 0;
+}
+
+static int clean_invalid_mem(const void __user *addr, const size_t size,
+        const unsigned cache_op)
+{
+        return clean_invalid_mem_2d(addr, 1, size, 0, cache_op);
+}
+
+static int clean_invalid_resource(const void __user *addr, const size_t size,
+        const unsigned cache_op, const int usr_hdl,
+        struct sm_resource_t *resource)
+{
+        int err;
+        enum sm_stats_t stat_attempt, stat_failure;
+        void __user *res_addr;
+
+        if (resource == NULL) {
+                pr_err("[%s]: resource is NULL\n", __func__);
+                return -EINVAL;
+        }
+        if (resource->res_cached != VMCS_SM_CACHE_HOST &&
+                resource->res_cached != VMCS_SM_CACHE_BOTH)
+                return 0;
+
+        switch (cache_op) {
+        case VCSM_CACHE_OP_INV:
+                stat_attempt = INVALID;
+                stat_failure = INVALID_FAIL;
+                break;
+        case VCSM_CACHE_OP_CLEAN:
+                /* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. */
+                stat_attempt = FLUSH;
+                stat_failure = FLUSH_FAIL;
+                break;
+        case VCSM_CACHE_OP_FLUSH:
+                stat_attempt = FLUSH;
+                stat_failure = FLUSH_FAIL;
+                break;
+        default:
+                pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
+                return -EINVAL;
+        }
+        resource->res_stats[stat_attempt]++;
 
-                } while (pud++, addr = pud_next, addr != pgd_next);
-        } while (pgd++, addr = pgd_next, addr != end);
+        if (size > resource->res_size) {
+                pr_err("[%s]: size (0x%08zu) is larger than res_size (0x%08zu)\n",
+                        __func__, size, resource->res_size);
+                return -EFAULT;
+        }
+        res_addr = (void __user*) vmcs_sm_usr_address_from_pid_and_usr_handle(
+                current->tgid, usr_hdl);
+        if (res_addr == NULL) {
+                pr_err("[%s]: Failed to get user address "
+                        "from pid (%d) and user handle (%d)\n", __func__, current->tgid,
+                        resource->res_handle);
+                return -EINVAL;
+        }
+        if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) {
+                pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n",
+                        __func__, addr, addr + size, res_addr,
+                        res_addr + resource->res_size);
+                return -EFAULT;
+        }
+
+        err = clean_invalid_mem(addr, size, cache_op);
+        if (err)
+                resource->res_stats[stat_failure]++;
+
+        return err;
 }
 
 /* Map an allocated data into something that the user space. */
@@ -1952,14 +1997,13 @@ static int vc_sm_ioctl_unlock(struct sm_
                 list_for_each_entry(map, &resource->map_list,
                                     resource_map_list) {
                         if (map->vma) {
-                                unsigned long start;
-                                unsigned long end;
-
-                                start = map->vma->vm_start;
-                                end = map->vma->vm_end;
+                                const unsigned long start = map->vma->vm_start;
+                                const unsigned long end = map->vma->vm_end;
 
-                                vcsm_vma_cache_clean_page_range(
-                                        start, end);
+                                ret = clean_invalid_mem((void __user*) start, end - start,
+                                                VCSM_CACHE_OP_FLUSH);
+                                if (ret)
+                                        goto error;
                         }
                 }
                 up_read(&current->mm->mmap_sem);
@@ -2833,41 +2877,17 @@ static long vc_sm_ioctl(struct file *fil
                         /* Locate resource from GUID. */
                         resource =
                             vmcs_sm_acquire_resource(file_data, ioparam.handle);
-
-                        if ((resource != NULL) && resource->res_cached) {
-                                dma_addr_t phys_addr = 0;
-
-                                resource->res_stats[FLUSH]++;
-
-                                phys_addr =
-                                    (dma_addr_t)((uint32_t)
-                                                 resource->res_base_mem &
-                                                 0x3FFFFFFF);
-                                phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
-
-                                /* L1 cache flush */
-                                down_read(&current->mm->mmap_sem);
-                                vcsm_vma_cache_clean_page_range((unsigned long)
-                                                                ioparam.addr,
-                                                                (unsigned long)
-                                                                ioparam.addr +
-                                                                ioparam.size);
-                                up_read(&current->mm->mmap_sem);
-
-                                /* L2 cache flush */
-                                outer_clean_range(phys_addr,
-                                                  phys_addr +
-                                                  (size_t) ioparam.size);
-                        } else if (resource == NULL) {
+                        if (resource == NULL) {
                                 ret = -EINVAL;
                                 goto out;
                         }
 
-                        if (resource)
-                                vmcs_sm_release_resource(resource, 0);
-
-                        /* Done. */
-                        goto out;
+                        ret = clean_invalid_resource((void __user*) ioparam.addr,
+                                        ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
+                                        resource);
+                        vmcs_sm_release_resource(resource, 0);
+                        if (ret)
+                                goto out;
                 }
                 break;
 
@@ -2888,41 +2908,16 @@ static long vc_sm_ioctl(struct file *fil
                         /* Locate resource from GUID. */
                         resource =
                             vmcs_sm_acquire_resource(file_data, ioparam.handle);
-
-                        if ((resource != NULL) && resource->res_cached) {
-                                dma_addr_t phys_addr = 0;
-
-                                resource->res_stats[INVALID]++;
-
-                                phys_addr =
-                                    (dma_addr_t)((uint32_t)
-                                                 resource->res_base_mem &
-                                                 0x3FFFFFFF);
-                                phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
-
-                                /* L2 cache invalidate */
-                                outer_inv_range(phys_addr,
-                                                phys_addr +
-                                                (size_t) ioparam.size);
-
-                                /* L1 cache invalidate */
-                                down_read(&current->mm->mmap_sem);
-                                vcsm_vma_cache_clean_page_range((unsigned long)
-                                                                ioparam.addr,
-                                                                (unsigned long)
-                                                                ioparam.addr +
-                                                                ioparam.size);
-                                up_read(&current->mm->mmap_sem);
-                        } else if (resource == NULL) {
+                        if (resource == NULL) {
                                 ret = -EINVAL;
                                 goto out;
                         }
 
-                        if (resource)
-                                vmcs_sm_release_resource(resource, 0);
-
-                        /* Done. */
-                        goto out;
+                        ret = clean_invalid_resource((void __user*) ioparam.addr,
+                                        ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
+                        vmcs_sm_release_resource(resource, 0);
+                        if (ret)
+                                goto out;
                 }
                 break;
 
@@ -2941,43 +2936,27 @@ static long vc_sm_ioctl(struct file *fil
                                 goto out;
                         }
                         for (i = 0; i < sizeof(ioparam.s) / sizeof(*ioparam.s); i++) {
-                                switch (ioparam.s[i].cmd) {
-                                case VCSM_CACHE_OP_INV: /* L1/L2 invalidate virtual range */
-                                case VCSM_CACHE_OP_FLUSH: /* L1/L2 clean physical range */
-                                case VCSM_CACHE_OP_CLEAN: /* L1/L2 clean+invalidate all */
-                                        /* Locate resource from GUID. */
-                                        resource =
-                                            vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
-
-                                        if ((resource != NULL) && resource->res_cached) {
-                                                unsigned long base = ioparam.s[i].addr & ~(PAGE_SIZE - 1);
-                                                unsigned long end = (ioparam.s[i].addr + ioparam.s[i].size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-
-                                                resource->res_stats[ioparam.s[i].cmd == 1 ? INVALID : FLUSH]++;
-
-                                                /* L1/L2 cache flush */
-                                                down_read(&current->mm->mmap_sem);
-                                                vcsm_vma_cache_clean_page_range(base, end);
-                                                up_read(&current->mm->mmap_sem);
-                                        } else if (resource == NULL) {
-                                                ret = -EINVAL;
-                                                goto out;
-                                        }
-
-                                        if (resource)
-                                                vmcs_sm_release_resource(resource, 0);
-
-                                        break;
-                                default:
-                                        break; /* NOOP */
+                                /* Locate resource from GUID. */
+                                resource =
+                                    vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
+                                if (resource == NULL) {
+                                        ret = -EINVAL;
+                                        goto out;
                                 }
+
+                                ret = clean_invalid_resource((void __user*) ioparam.s[i].addr,
+                                                ioparam.s[i].size, ioparam.s[i].cmd,
+                                                ioparam.s[i].handle, resource);
+                                vmcs_sm_release_resource(resource, 0);
+                                if (ret)
+                                        goto out;
                         }
                 }
                 break;
         /* Flush/Invalidate the cache for a given mapping. */
         case VMCS_SM_CMD_CLEAN_INVALID2:
                 {
-                        int i, j;
+                        int i;
                         struct vmcs_sm_ioctl_clean_invalid2 ioparam;
                         struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
 
@@ -3006,36 +2985,12 @@ static long vc_sm_ioctl(struct file *fil
 
                         for (i = 0; i < ioparam.op_count; i++) {
                                 const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
-                                void (*op_fn)(const void *, const void *);
 
-                                switch(op->invalidate_mode & 3) {
-                                case VCSM_CACHE_OP_INV:
-                                        op_fn = dmac_inv_range;
-                                        break;
-                                case VCSM_CACHE_OP_CLEAN:
-                                        op_fn = dmac_clean_range;
-                                        break;
-                                case VCSM_CACHE_OP_FLUSH:
-                                        op_fn = dmac_flush_range;
-                                        break;
-                                default:
-                                        op_fn = 0;
-                                        break;
-                                }
-
-                                if ((op->invalidate_mode & ~3) != 0) {
-                                        ret = -EINVAL;
-                                        break;
-                                }
-
-                                if (op_fn == 0)
-                                        continue;
-
-                                for (j = 0; j < op->block_count; ++j) {
-                                        const char * const base = (const char *)op->start_address + j * op->inter_block_stride;
-                                        const char * const end = base + op->block_size;
-                                        op_fn(base, end);
-                                }
+                                ret = clean_invalid_mem_2d((void __user*) op->start_address,
+                                                op->block_count, op->block_size,
+                                                op->inter_block_stride, op->invalidate_mode);
+                                if (ret)
+                                        goto out;
                         }
                         kfree(block);
                 }