OpenWrt – Blame information for rev 4
Rev | Author | Line No. | Line |
---|---|---|---|
4 | office | 1 | From 0a6c701f92e1aa368c44632fa0985e92703354ed Mon Sep 17 00:00:00 2001 |
2 | From: Yangbo Lu <yangbo.lu@nxp.com> |
3 | Date: Wed, 17 Jan 2018 15:35:48 +0800 |
4 | Subject: [PATCH 22/30] iommu: support layerscape |
5 | |
6 | This is an integrated patch for layerscape smmu support. |
7 | |
8 | Signed-off-by: Eric Auger <eric.auger@redhat.com> |
9 | Signed-off-by: Robin Murphy <robin.murphy@arm.com> |
10 | Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com> |
11 | Signed-off-by: Sunil Goutham <sgoutham@cavium.com> |
12 | Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> |
13 | --- |
14 | drivers/iommu/amd_iommu.c | 56 ++++++---- |
15 | drivers/iommu/arm-smmu-v3.c | 111 ++++++++++++++------ |
16 | drivers/iommu/arm-smmu.c | 100 +++++++++++++++--- |
17 | drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++------- |
18 | drivers/iommu/intel-iommu.c | 92 ++++++++++++---- |
19 | drivers/iommu/iommu.c | 219 ++++++++++++++++++++++++++++++++++++--- |
20 | drivers/iommu/mtk_iommu.c | 2 + |
21 | drivers/iommu/mtk_iommu_v1.c | 2 + |
22 | include/linux/dma-iommu.h | 11 ++ |
23 | include/linux/iommu.h | 55 +++++++--- |
24 | 10 files changed, 739 insertions(+), 151 deletions(-) |
25 | |
26 | --- a/drivers/iommu/amd_iommu.c |
||
27 | +++ b/drivers/iommu/amd_iommu.c |
||
28 | @@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic |
||
29 | |||
30 | if (!entry->group) |
||
31 | entry->group = generic_device_group(dev); |
||
32 | + else |
||
33 | + iommu_group_ref_get(entry->group); |
||
34 | |||
35 | return entry->group; |
||
36 | } |
||
37 | @@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu |
||
38 | return false; |
||
39 | } |
||
40 | |||
41 | -static void amd_iommu_get_dm_regions(struct device *dev, |
||
42 | - struct list_head *head) |
||
43 | +static void amd_iommu_get_resv_regions(struct device *dev, |
||
44 | + struct list_head *head) |
||
45 | { |
||
46 | + struct iommu_resv_region *region; |
||
47 | struct unity_map_entry *entry; |
||
48 | int devid; |
||
49 | |||
50 | @@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str |
||
51 | return; |
||
52 | |||
53 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { |
||
54 | - struct iommu_dm_region *region; |
||
55 | + size_t length; |
||
56 | + int prot = 0; |
||
57 | |||
58 | if (devid < entry->devid_start || devid > entry->devid_end) |
||
59 | continue; |
||
60 | |||
61 | - region = kzalloc(sizeof(*region), GFP_KERNEL); |
||
62 | + length = entry->address_end - entry->address_start; |
||
63 | + if (entry->prot & IOMMU_PROT_IR) |
||
64 | + prot |= IOMMU_READ; |
||
65 | + if (entry->prot & IOMMU_PROT_IW) |
||
66 | + prot |= IOMMU_WRITE; |
||
67 | + |
||
68 | + region = iommu_alloc_resv_region(entry->address_start, |
||
69 | + length, prot, |
||
70 | + IOMMU_RESV_DIRECT); |
||
71 | if (!region) { |
||
72 | pr_err("Out of memory allocating dm-regions for %s\n", |
||
73 | dev_name(dev)); |
||
74 | return; |
||
75 | } |
||
76 | - |
||
77 | - region->start = entry->address_start; |
||
78 | - region->length = entry->address_end - entry->address_start; |
||
79 | - if (entry->prot & IOMMU_PROT_IR) |
||
80 | - region->prot |= IOMMU_READ; |
||
81 | - if (entry->prot & IOMMU_PROT_IW) |
||
82 | - region->prot |= IOMMU_WRITE; |
||
83 | - |
||
84 | list_add_tail(®ion->list, head); |
||
85 | } |
||
86 | + |
||
87 | + region = iommu_alloc_resv_region(MSI_RANGE_START, |
||
88 | + MSI_RANGE_END - MSI_RANGE_START + 1, |
||
89 | + 0, IOMMU_RESV_MSI); |
||
90 | + if (!region) |
||
91 | + return; |
||
92 | + list_add_tail(®ion->list, head); |
||
93 | + |
||
94 | + region = iommu_alloc_resv_region(HT_RANGE_START, |
||
95 | + HT_RANGE_END - HT_RANGE_START + 1, |
||
96 | + 0, IOMMU_RESV_RESERVED); |
||
97 | + if (!region) |
||
98 | + return; |
||
99 | + list_add_tail(®ion->list, head); |
||
100 | } |
||
101 | |||
102 | -static void amd_iommu_put_dm_regions(struct device *dev, |
||
103 | +static void amd_iommu_put_resv_regions(struct device *dev, |
||
104 | struct list_head *head) |
||
105 | { |
||
106 | - struct iommu_dm_region *entry, *next; |
||
107 | + struct iommu_resv_region *entry, *next; |
||
108 | |||
109 | list_for_each_entry_safe(entry, next, head, list) |
||
110 | kfree(entry); |
||
111 | } |
||
112 | |||
113 | -static void amd_iommu_apply_dm_region(struct device *dev, |
||
114 | +static void amd_iommu_apply_resv_region(struct device *dev, |
||
115 | struct iommu_domain *domain, |
||
116 | - struct iommu_dm_region *region) |
||
117 | + struct iommu_resv_region *region) |
||
118 | { |
||
119 | struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); |
||
120 | unsigned long start, end; |
||
121 | @@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_ |
||
122 | .add_device = amd_iommu_add_device, |
||
123 | .remove_device = amd_iommu_remove_device, |
||
124 | .device_group = amd_iommu_device_group, |
||
125 | - .get_dm_regions = amd_iommu_get_dm_regions, |
||
126 | - .put_dm_regions = amd_iommu_put_dm_regions, |
||
127 | - .apply_dm_region = amd_iommu_apply_dm_region, |
||
128 | + .get_resv_regions = amd_iommu_get_resv_regions, |
||
129 | + .put_resv_regions = amd_iommu_put_resv_regions, |
||
130 | + .apply_resv_region = amd_iommu_apply_resv_region, |
||
131 | .pgsize_bitmap = AMD_IOMMU_PGSIZES, |
||
132 | }; |
||
133 | |||
134 | --- a/drivers/iommu/arm-smmu-v3.c |
||
135 | +++ b/drivers/iommu/arm-smmu-v3.c |
||
136 | @@ -410,6 +410,9 @@ |
||
137 | /* High-level queue structures */ |
||
138 | #define ARM_SMMU_POLL_TIMEOUT_US 100 |
||
139 | |||
140 | +#define MSI_IOVA_BASE 0x8000000 |
||
141 | +#define MSI_IOVA_LENGTH 0x100000 |
||
142 | + |
||
143 | static bool disable_bypass; |
||
144 | module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); |
||
145 | MODULE_PARM_DESC(disable_bypass, |
||
146 | @@ -552,9 +555,14 @@ struct arm_smmu_s2_cfg { |
||
147 | }; |
||
148 | |||
149 | struct arm_smmu_strtab_ent { |
||
150 | - bool valid; |
||
151 | - |
||
152 | - bool bypass; /* Overrides s1/s2 config */ |
||
153 | + /* |
||
154 | + * An STE is "assigned" if the master emitting the corresponding SID |
||
155 | + * is attached to a domain. The behaviour of an unassigned STE is |
||
156 | + * determined by the disable_bypass parameter, whereas an assigned |
||
157 | + * STE behaves according to s1_cfg/s2_cfg, which themselves are |
||
158 | + * configured according to the domain type. |
||
159 | + */ |
||
160 | + bool assigned; |
||
161 | struct arm_smmu_s1_cfg *s1_cfg; |
||
162 | struct arm_smmu_s2_cfg *s2_cfg; |
||
163 | }; |
||
164 | @@ -627,6 +635,7 @@ enum arm_smmu_domain_stage { |
||
165 | ARM_SMMU_DOMAIN_S1 = 0, |
||
166 | ARM_SMMU_DOMAIN_S2, |
||
167 | ARM_SMMU_DOMAIN_NESTED, |
||
168 | + ARM_SMMU_DOMAIN_BYPASS, |
||
169 | }; |
||
170 | |||
171 | struct arm_smmu_domain { |
||
172 | @@ -1000,9 +1009,9 @@ static void arm_smmu_write_strtab_ent(st |
||
173 | * This is hideously complicated, but we only really care about |
||
174 | * three cases at the moment: |
||
175 | * |
||
176 | - * 1. Invalid (all zero) -> bypass (init) |
||
177 | - * 2. Bypass -> translation (attach) |
||
178 | - * 3. Translation -> bypass (detach) |
||
179 | + * 1. Invalid (all zero) -> bypass/fault (init) |
||
180 | + * 2. Bypass/fault -> translation/bypass (attach) |
||
181 | + * 3. Translation/bypass -> bypass/fault (detach) |
||
182 | * |
||
183 | * Given that we can't update the STE atomically and the SMMU |
||
184 | * doesn't read the thing in a defined order, that leaves us |
||
185 | @@ -1041,11 +1050,15 @@ static void arm_smmu_write_strtab_ent(st |
||
186 | } |
||
187 | |||
188 | /* Nuke the existing STE_0 value, as we're going to rewrite it */ |
||
189 | - val = ste->valid ? STRTAB_STE_0_V : 0; |
||
190 | + val = STRTAB_STE_0_V; |
||
191 | + |
||
192 | + /* Bypass/fault */ |
||
193 | + if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) { |
||
194 | + if (!ste->assigned && disable_bypass) |
||
195 | + val |= STRTAB_STE_0_CFG_ABORT; |
||
196 | + else |
||
197 | + val |= STRTAB_STE_0_CFG_BYPASS; |
||
198 | |||
199 | - if (ste->bypass) { |
||
200 | - val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT |
||
201 | - : STRTAB_STE_0_CFG_BYPASS; |
||
202 | dst[0] = cpu_to_le64(val); |
||
203 | dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING |
||
204 | << STRTAB_STE_1_SHCFG_SHIFT); |
||
205 | @@ -1108,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(st |
||
206 | static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) |
||
207 | { |
||
208 | unsigned int i; |
||
209 | - struct arm_smmu_strtab_ent ste = { |
||
210 | - .valid = true, |
||
211 | - .bypass = true, |
||
212 | - }; |
||
213 | + struct arm_smmu_strtab_ent ste = { .assigned = false }; |
||
214 | |||
215 | for (i = 0; i < nent; ++i) { |
||
216 | arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste); |
||
217 | @@ -1364,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_ |
||
218 | switch (cap) { |
||
219 | case IOMMU_CAP_CACHE_COHERENCY: |
||
220 | return true; |
||
221 | - case IOMMU_CAP_INTR_REMAP: |
||
222 | - return true; /* MSIs are just memory writes */ |
||
223 | case IOMMU_CAP_NOEXEC: |
||
224 | return true; |
||
225 | default: |
||
226 | @@ -1377,7 +1385,9 @@ static struct iommu_domain *arm_smmu_dom |
||
227 | { |
||
228 | struct arm_smmu_domain *smmu_domain; |
||
229 | |||
230 | - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) |
||
231 | + if (type != IOMMU_DOMAIN_UNMANAGED && |
||
232 | + type != IOMMU_DOMAIN_DMA && |
||
233 | + type != IOMMU_DOMAIN_IDENTITY) |
||
234 | return NULL; |
||
235 | |||
236 | /* |
||
237 | @@ -1508,6 +1518,11 @@ static int arm_smmu_domain_finalise(stru |
||
238 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
239 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
||
240 | |||
241 | + if (domain->type == IOMMU_DOMAIN_IDENTITY) { |
||
242 | + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; |
||
243 | + return 0; |
||
244 | + } |
||
245 | + |
||
246 | /* Restrict the stage to what we can actually support */ |
||
247 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) |
||
248 | smmu_domain->stage = ARM_SMMU_DOMAIN_S2; |
||
249 | @@ -1580,7 +1595,7 @@ static __le64 *arm_smmu_get_step_for_sid |
||
250 | return step; |
||
251 | } |
||
252 | |||
253 | -static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) |
||
254 | +static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) |
||
255 | { |
||
256 | int i, j; |
||
257 | struct arm_smmu_master_data *master = fwspec->iommu_priv; |
||
258 | @@ -1599,17 +1614,14 @@ static int arm_smmu_install_ste_for_dev( |
||
259 | |||
260 | arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); |
||
261 | } |
||
262 | - |
||
263 | - return 0; |
||
264 | } |
||
265 | |||
266 | static void arm_smmu_detach_dev(struct device *dev) |
||
267 | { |
||
268 | struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; |
||
269 | |||
270 | - master->ste.bypass = true; |
||
271 | - if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0) |
||
272 | - dev_warn(dev, "failed to install bypass STE\n"); |
||
273 | + master->ste.assigned = false; |
||
274 | + arm_smmu_install_ste_for_dev(dev->iommu_fwspec); |
||
275 | } |
||
276 | |||
277 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
||
278 | @@ -1628,7 +1640,7 @@ static int arm_smmu_attach_dev(struct io |
||
279 | ste = &master->ste; |
||
280 | |||
281 | /* Already attached to a different domain? */ |
||
282 | - if (!ste->bypass) |
||
283 | + if (ste->assigned) |
||
284 | arm_smmu_detach_dev(dev); |
||
285 | |||
286 | mutex_lock(&smmu_domain->init_mutex); |
||
287 | @@ -1649,10 +1661,12 @@ static int arm_smmu_attach_dev(struct io |
||
288 | goto out_unlock; |
||
289 | } |
||
290 | |||
291 | - ste->bypass = false; |
||
292 | - ste->valid = true; |
||
293 | + ste->assigned = true; |
||
294 | |||
295 | - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { |
||
296 | + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) { |
||
297 | + ste->s1_cfg = NULL; |
||
298 | + ste->s2_cfg = NULL; |
||
299 | + } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { |
||
300 | ste->s1_cfg = &smmu_domain->s1_cfg; |
||
301 | ste->s2_cfg = NULL; |
||
302 | arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); |
||
303 | @@ -1661,10 +1675,7 @@ static int arm_smmu_attach_dev(struct io |
||
304 | ste->s2_cfg = &smmu_domain->s2_cfg; |
||
305 | } |
||
306 | |||
307 | - ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec); |
||
308 | - if (ret < 0) |
||
309 | - ste->valid = false; |
||
310 | - |
||
311 | + arm_smmu_install_ste_for_dev(dev->iommu_fwspec); |
||
312 | out_unlock: |
||
313 | mutex_unlock(&smmu_domain->init_mutex); |
||
314 | return ret; |
||
315 | @@ -1712,6 +1723,9 @@ arm_smmu_iova_to_phys(struct iommu_domai |
||
316 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
317 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; |
||
318 | |||
319 | + if (domain->type == IOMMU_DOMAIN_IDENTITY) |
||
320 | + return iova; |
||
321 | + |
||
322 | if (!ops) |
||
323 | return 0; |
||
324 | |||
325 | @@ -1810,7 +1824,7 @@ static void arm_smmu_remove_device(struc |
||
326 | return; |
||
327 | |||
328 | master = fwspec->iommu_priv; |
||
329 | - if (master && master->ste.valid) |
||
330 | + if (master && master->ste.assigned) |
||
331 | arm_smmu_detach_dev(dev); |
||
332 | iommu_group_remove_device(dev); |
||
333 | kfree(master); |
||
334 | @@ -1839,6 +1853,9 @@ static int arm_smmu_domain_get_attr(stru |
||
335 | { |
||
336 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
337 | |||
338 | + if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
||
339 | + return -EINVAL; |
||
340 | + |
||
341 | switch (attr) { |
||
342 | case DOMAIN_ATTR_NESTING: |
||
343 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); |
||
344 | @@ -1854,6 +1871,9 @@ static int arm_smmu_domain_set_attr(stru |
||
345 | int ret = 0; |
||
346 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
347 | |||
348 | + if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
||
349 | + return -EINVAL; |
||
350 | + |
||
351 | mutex_lock(&smmu_domain->init_mutex); |
||
352 | |||
353 | switch (attr) { |
||
354 | @@ -1883,6 +1903,31 @@ static int arm_smmu_of_xlate(struct devi |
||
355 | return iommu_fwspec_add_ids(dev, args->args, 1); |
||
356 | } |
||
357 | |||
358 | +static void arm_smmu_get_resv_regions(struct device *dev, |
||
359 | + struct list_head *head) |
||
360 | +{ |
||
361 | + struct iommu_resv_region *region; |
||
362 | + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
||
363 | + |
||
364 | + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, |
||
365 | + prot, IOMMU_RESV_SW_MSI); |
||
366 | + if (!region) |
||
367 | + return; |
||
368 | + |
||
369 | + list_add_tail(®ion->list, head); |
||
370 | + |
||
371 | + iommu_dma_get_resv_regions(dev, head); |
||
372 | +} |
||
373 | + |
||
374 | +static void arm_smmu_put_resv_regions(struct device *dev, |
||
375 | + struct list_head *head) |
||
376 | +{ |
||
377 | + struct iommu_resv_region *entry, *next; |
||
378 | + |
||
379 | + list_for_each_entry_safe(entry, next, head, list) |
||
380 | + kfree(entry); |
||
381 | +} |
||
382 | + |
||
383 | static struct iommu_ops arm_smmu_ops = { |
||
384 | .capable = arm_smmu_capable, |
||
385 | .domain_alloc = arm_smmu_domain_alloc, |
||
386 | @@ -1898,6 +1943,8 @@ static struct iommu_ops arm_smmu_ops = { |
||
387 | .domain_get_attr = arm_smmu_domain_get_attr, |
||
388 | .domain_set_attr = arm_smmu_domain_set_attr, |
||
389 | .of_xlate = arm_smmu_of_xlate, |
||
390 | + .get_resv_regions = arm_smmu_get_resv_regions, |
||
391 | + .put_resv_regions = arm_smmu_put_resv_regions, |
||
392 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
||
393 | }; |
||
394 | |||
395 | --- a/drivers/iommu/arm-smmu.c |
||
396 | +++ b/drivers/iommu/arm-smmu.c |
||
397 | @@ -49,6 +49,7 @@ |
||
398 | #include <linux/spinlock.h> |
||
399 | |||
400 | #include <linux/amba/bus.h> |
||
401 | +#include "../staging/fsl-mc/include/mc-bus.h" |
||
402 | |||
403 | #include "io-pgtable.h" |
||
404 | |||
405 | @@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg { |
||
406 | #define ARM_MMU500_ACTLR_CPRE (1 << 1) |
||
407 | |||
408 | #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) |
||
409 | +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) |
||
410 | |||
411 | #define CB_PAR_F (1 << 0) |
||
412 | |||
413 | @@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg { |
||
414 | |||
415 | #define FSYNR0_WNR (1 << 4) |
||
416 | |||
417 | +#define MSI_IOVA_BASE 0x8000000 |
||
418 | +#define MSI_IOVA_LENGTH 0x100000 |
||
419 | + |
||
420 | static int force_stage; |
||
421 | module_param(force_stage, int, S_IRUGO); |
||
422 | MODULE_PARM_DESC(force_stage, |
||
423 | @@ -401,6 +406,7 @@ enum arm_smmu_domain_stage { |
||
424 | ARM_SMMU_DOMAIN_S1 = 0, |
||
425 | ARM_SMMU_DOMAIN_S2, |
||
426 | ARM_SMMU_DOMAIN_NESTED, |
||
427 | + ARM_SMMU_DOMAIN_BYPASS, |
||
428 | }; |
||
429 | |||
430 | struct arm_smmu_domain { |
||
431 | @@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context( |
||
432 | if (smmu_domain->smmu) |
||
433 | goto out_unlock; |
||
434 | |||
435 | + if (domain->type == IOMMU_DOMAIN_IDENTITY) { |
||
436 | + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; |
||
437 | + smmu_domain->smmu = smmu; |
||
438 | + goto out_unlock; |
||
439 | + } |
||
440 | + |
||
441 | /* |
||
442 | * Mapping the requested stage onto what we support is surprisingly |
||
443 | * complicated, mainly because the spec allows S1+S2 SMMUs without |
||
444 | @@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_cont |
||
445 | void __iomem *cb_base; |
||
446 | int irq; |
||
447 | |||
448 | - if (!smmu) |
||
449 | + if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) |
||
450 | return; |
||
451 | |||
452 | /* |
||
453 | @@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_dom |
||
454 | { |
||
455 | struct arm_smmu_domain *smmu_domain; |
||
456 | |||
457 | - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) |
||
458 | + if (type != IOMMU_DOMAIN_UNMANAGED && |
||
459 | + type != IOMMU_DOMAIN_DMA && |
||
460 | + type != IOMMU_DOMAIN_IDENTITY) |
||
461 | return NULL; |
||
462 | /* |
||
463 | * Allocate the domain and initialise some of its data structures. |
||
464 | @@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(st |
||
465 | { |
||
466 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
||
467 | struct arm_smmu_s2cr *s2cr = smmu->s2crs; |
||
468 | - enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; |
||
469 | u8 cbndx = smmu_domain->cfg.cbndx; |
||
470 | + enum arm_smmu_s2cr_type type; |
||
471 | int i, idx; |
||
472 | |||
473 | + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) |
||
474 | + type = S2CR_TYPE_BYPASS; |
||
475 | + else |
||
476 | + type = S2CR_TYPE_TRANS; |
||
477 | + |
||
478 | for_each_cfg_sme(fwspec, i, idx) { |
||
479 | if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) |
||
480 | continue; |
||
481 | @@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys |
||
482 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
483 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; |
||
484 | |||
485 | + if (domain->type == IOMMU_DOMAIN_IDENTITY) |
||
486 | + return iova; |
||
487 | + |
||
488 | if (!ops) |
||
489 | return 0; |
||
490 | |||
491 | @@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_ |
||
492 | * requests. |
||
493 | */ |
||
494 | return true; |
||
495 | - case IOMMU_CAP_INTR_REMAP: |
||
496 | - return true; /* MSIs are just memory writes */ |
||
497 | case IOMMU_CAP_NOEXEC: |
||
498 | return true; |
||
499 | default: |
||
500 | @@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_devi |
||
501 | } |
||
502 | |||
503 | if (group) |
||
504 | - return group; |
||
505 | + return iommu_group_ref_get(group); |
||
506 | |||
507 | if (dev_is_pci(dev)) |
||
508 | group = pci_device_group(dev); |
||
509 | + else if (dev_is_fsl_mc(dev)) |
||
510 | + group = fsl_mc_device_group(dev); |
||
511 | else |
||
512 | group = generic_device_group(dev); |
||
513 | |||
514 | @@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(stru |
||
515 | { |
||
516 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
517 | |||
518 | + if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
||
519 | + return -EINVAL; |
||
520 | + |
||
521 | switch (attr) { |
||
522 | case DOMAIN_ATTR_NESTING: |
||
523 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); |
||
524 | @@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(stru |
||
525 | int ret = 0; |
||
526 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
||
527 | |||
528 | + if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
||
529 | + return -EINVAL; |
||
530 | + |
||
531 | mutex_lock(&smmu_domain->init_mutex); |
||
532 | |||
533 | switch (attr) { |
||
534 | @@ -1534,17 +1562,44 @@ out_unlock: |
||
535 | |||
536 | static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) |
||
537 | { |
||
538 | - u32 fwid = 0; |
||
539 | + u32 mask, fwid = 0; |
||
540 | |||
541 | if (args->args_count > 0) |
||
542 | fwid |= (u16)args->args[0]; |
||
543 | |||
544 | if (args->args_count > 1) |
||
545 | fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; |
||
546 | + else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) |
||
547 | + fwid |= (u16)mask << SMR_MASK_SHIFT; |
||
548 | |||
549 | return iommu_fwspec_add_ids(dev, &fwid, 1); |
||
550 | } |
||
551 | |||
552 | +static void arm_smmu_get_resv_regions(struct device *dev, |
||
553 | + struct list_head *head) |
||
554 | +{ |
||
555 | + struct iommu_resv_region *region; |
||
556 | + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
||
557 | + |
||
558 | + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, |
||
559 | + prot, IOMMU_RESV_SW_MSI); |
||
560 | + if (!region) |
||
561 | + return; |
||
562 | + |
||
563 | + list_add_tail(®ion->list, head); |
||
564 | + |
||
565 | + iommu_dma_get_resv_regions(dev, head); |
||
566 | +} |
||
567 | + |
||
568 | +static void arm_smmu_put_resv_regions(struct device *dev, |
||
569 | + struct list_head *head) |
||
570 | +{ |
||
571 | + struct iommu_resv_region *entry, *next; |
||
572 | + |
||
573 | + list_for_each_entry_safe(entry, next, head, list) |
||
574 | + kfree(entry); |
||
575 | +} |
||
576 | + |
||
577 | static struct iommu_ops arm_smmu_ops = { |
||
578 | .capable = arm_smmu_capable, |
||
579 | .domain_alloc = arm_smmu_domain_alloc, |
||
580 | @@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = { |
||
581 | .domain_get_attr = arm_smmu_domain_get_attr, |
||
582 | .domain_set_attr = arm_smmu_domain_set_attr, |
||
583 | .of_xlate = arm_smmu_of_xlate, |
||
584 | + .get_resv_regions = arm_smmu_get_resv_regions, |
||
585 | + .put_resv_regions = arm_smmu_put_resv_regions, |
||
586 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
||
587 | }; |
||
588 | |||
589 | @@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct |
||
590 | for (i = 0; i < smmu->num_mapping_groups; ++i) |
||
591 | arm_smmu_write_sme(smmu, i); |
||
592 | |||
593 | - /* |
||
594 | - * Before clearing ARM_MMU500_ACTLR_CPRE, need to |
||
595 | - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK |
||
596 | - * bit is only present in MMU-500r2 onwards. |
||
597 | - */ |
||
598 | - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); |
||
599 | - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK; |
||
600 | - if ((smmu->model == ARM_MMU500) && (major >= 2)) { |
||
601 | + if (smmu->model == ARM_MMU500) { |
||
602 | + /* |
||
603 | + * Before clearing ARM_MMU500_ACTLR_CPRE, need to |
||
604 | + * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK |
||
605 | + * bit is only present in MMU-500r2 onwards. |
||
606 | + */ |
||
607 | + reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); |
||
608 | + major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK; |
||
609 | reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR); |
||
610 | - reg &= ~ARM_MMU500_ACR_CACHE_LOCK; |
||
611 | + if (major >= 2) |
||
612 | + reg &= ~ARM_MMU500_ACR_CACHE_LOCK; |
||
613 | + /* |
||
614 | + * Allow unmatched Stream IDs to allocate bypass |
||
615 | + * TLB entries for reduced latency. |
||
616 | + */ |
||
617 | + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN; |
||
618 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR); |
||
619 | } |
||
620 | |||
621 | @@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(stru |
||
622 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); |
||
623 | } |
||
624 | #endif |
||
625 | +#ifdef CONFIG_FSL_MC_BUS |
||
626 | + if (!iommu_present(&fsl_mc_bus_type)) |
||
627 | + bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops); |
||
628 | +#endif |
||
629 | + |
||
630 | return 0; |
||
631 | } |
||
632 | |||
633 | --- a/drivers/iommu/dma-iommu.c |
||
634 | +++ b/drivers/iommu/dma-iommu.c |
||
635 | @@ -37,15 +37,50 @@ struct iommu_dma_msi_page { |
||
636 | phys_addr_t phys; |
||
637 | }; |
||
638 | |||
639 | +enum iommu_dma_cookie_type { |
||
640 | + IOMMU_DMA_IOVA_COOKIE, |
||
641 | + IOMMU_DMA_MSI_COOKIE, |
||
642 | +}; |
||
643 | + |
||
644 | struct iommu_dma_cookie { |
||
645 | - struct iova_domain iovad; |
||
646 | - struct list_head msi_page_list; |
||
647 | - spinlock_t msi_lock; |
||
648 | + enum iommu_dma_cookie_type type; |
||
649 | + union { |
||
650 | + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ |
||
651 | + struct iova_domain iovad; |
||
652 | + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ |
||
653 | + dma_addr_t msi_iova; |
||
654 | + }; |
||
655 | + struct list_head msi_page_list; |
||
656 | + spinlock_t msi_lock; |
||
657 | }; |
||
658 | |||
659 | +static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) |
||
660 | +{ |
||
661 | + if (cookie->type == IOMMU_DMA_IOVA_COOKIE) |
||
662 | + return cookie->iovad.granule; |
||
663 | + return PAGE_SIZE; |
||
664 | +} |
||
665 | + |
||
666 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) |
||
667 | { |
||
668 | - return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; |
||
669 | + struct iommu_dma_cookie *cookie = domain->iova_cookie; |
||
670 | + |
||
671 | + if (cookie->type == IOMMU_DMA_IOVA_COOKIE) |
||
672 | + return &cookie->iovad; |
||
673 | + return NULL; |
||
674 | +} |
||
675 | + |
||
676 | +static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) |
||
677 | +{ |
||
678 | + struct iommu_dma_cookie *cookie; |
||
679 | + |
||
680 | + cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
||
681 | + if (cookie) { |
||
682 | + spin_lock_init(&cookie->msi_lock); |
||
683 | + INIT_LIST_HEAD(&cookie->msi_page_list); |
||
684 | + cookie->type = type; |
||
685 | + } |
||
686 | + return cookie; |
||
687 | } |
||
688 | |||
689 | int iommu_dma_init(void) |
||
690 | @@ -62,25 +97,53 @@ int iommu_dma_init(void) |
||
691 | */ |
||
692 | int iommu_get_dma_cookie(struct iommu_domain *domain) |
||
693 | { |
||
694 | + if (domain->iova_cookie) |
||
695 | + return -EEXIST; |
||
696 | + |
||
697 | + domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); |
||
698 | + if (!domain->iova_cookie) |
||
699 | + return -ENOMEM; |
||
700 | + |
||
701 | + return 0; |
||
702 | +} |
||
703 | +EXPORT_SYMBOL(iommu_get_dma_cookie); |
||
704 | + |
||
705 | +/** |
||
706 | + * iommu_get_msi_cookie - Acquire just MSI remapping resources |
||
707 | + * @domain: IOMMU domain to prepare |
||
708 | + * @base: Start address of IOVA region for MSI mappings |
||
709 | + * |
||
710 | + * Users who manage their own IOVA allocation and do not want DMA API support, |
||
711 | + * but would still like to take advantage of automatic MSI remapping, can use |
||
712 | + * this to initialise their own domain appropriately. Users should reserve a |
||
713 | + * contiguous IOVA region, starting at @base, large enough to accommodate the |
||
714 | + * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address |
||
715 | + * used by the devices attached to @domain. |
||
716 | + */ |
||
717 | +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) |
||
718 | +{ |
||
719 | struct iommu_dma_cookie *cookie; |
||
720 | |||
721 | + if (domain->type != IOMMU_DOMAIN_UNMANAGED) |
||
722 | + return -EINVAL; |
||
723 | + |
||
724 | if (domain->iova_cookie) |
||
725 | return -EEXIST; |
||
726 | |||
727 | - cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
||
728 | + cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE); |
||
729 | if (!cookie) |
||
730 | return -ENOMEM; |
||
731 | |||
732 | - spin_lock_init(&cookie->msi_lock); |
||
733 | - INIT_LIST_HEAD(&cookie->msi_page_list); |
||
734 | + cookie->msi_iova = base; |
||
735 | domain->iova_cookie = cookie; |
||
736 | return 0; |
||
737 | } |
||
738 | -EXPORT_SYMBOL(iommu_get_dma_cookie); |
||
739 | +EXPORT_SYMBOL(iommu_get_msi_cookie); |
||
740 | |||
741 | /** |
||
742 | * iommu_put_dma_cookie - Release a domain's DMA mapping resources |
||
743 | - * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() |
||
744 | + * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or |
||
745 | + * iommu_get_msi_cookie() |
||
746 | * |
||
747 | * IOMMU drivers should normally call this from their domain_free callback. |
||
748 | */ |
||
749 | @@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d |
||
750 | if (!cookie) |
||
751 | return; |
||
752 | |||
753 | - if (cookie->iovad.granule) |
||
754 | + if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) |
||
755 | put_iova_domain(&cookie->iovad); |
||
756 | |||
757 | list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { |
||
758 | @@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d |
||
759 | } |
||
760 | EXPORT_SYMBOL(iommu_put_dma_cookie); |
||
761 | |||
762 | -static void iova_reserve_pci_windows(struct pci_dev *dev, |
||
763 | - struct iova_domain *iovad) |
||
764 | +/** |
||
765 | + * iommu_dma_get_resv_regions - Reserved region driver helper |
||
766 | + * @dev: Device from iommu_get_resv_regions() |
||
767 | + * @list: Reserved region list from iommu_get_resv_regions() |
||
768 | + * |
||
769 | + * IOMMU drivers can use this to implement their .get_resv_regions callback |
||
770 | + * for general non-IOMMU-specific reservations. Currently, this covers host |
||
771 | + * bridge windows for PCI devices. |
||
772 | + */ |
||
773 | +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) |
||
774 | { |
||
775 | - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); |
||
776 | + struct pci_host_bridge *bridge; |
||
777 | struct resource_entry *window; |
||
778 | - unsigned long lo, hi; |
||
779 | |||
780 | + if (!dev_is_pci(dev)) |
||
781 | + return; |
||
782 | + |
||
783 | + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); |
||
784 | resource_list_for_each_entry(window, &bridge->windows) { |
||
785 | + struct iommu_resv_region *region; |
||
786 | + phys_addr_t start; |
||
787 | + size_t length; |
||
788 | + |
||
789 | if (resource_type(window->res) != IORESOURCE_MEM) |
||
790 | continue; |
||
791 | |||
792 | - lo = iova_pfn(iovad, window->res->start - window->offset); |
||
793 | - hi = iova_pfn(iovad, window->res->end - window->offset); |
||
794 | + start = window->res->start - window->offset; |
||
795 | + length = window->res->end - window->res->start + 1; |
||
796 | + region = iommu_alloc_resv_region(start, length, 0, |
||
797 | + IOMMU_RESV_RESERVED); |
||
798 | + if (!region) |
||
799 | + return; |
||
800 | + |
||
801 | + list_add_tail(®ion->list, list); |
||
802 | + } |
||
803 | +} |
||
804 | +EXPORT_SYMBOL(iommu_dma_get_resv_regions); |
||
805 | + |
||
806 | +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, |
||
807 | + phys_addr_t start, phys_addr_t end) |
||
808 | +{ |
||
809 | + struct iova_domain *iovad = &cookie->iovad; |
||
810 | + struct iommu_dma_msi_page *msi_page; |
||
811 | + int i, num_pages; |
||
812 | + |
||
813 | + start -= iova_offset(iovad, start); |
||
814 | + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); |
||
815 | + |
||
816 | + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); |
||
817 | + if (!msi_page) |
||
818 | + return -ENOMEM; |
||
819 | + |
||
820 | + for (i = 0; i < num_pages; i++) { |
||
821 | + msi_page[i].phys = start; |
||
822 | + msi_page[i].iova = start; |
||
823 | + INIT_LIST_HEAD(&msi_page[i].list); |
||
824 | + list_add(&msi_page[i].list, &cookie->msi_page_list); |
||
825 | + start += iovad->granule; |
||
826 | + } |
||
827 | + |
||
828 | + return 0; |
||
829 | +} |
||
830 | + |
||
831 | +static int iova_reserve_iommu_regions(struct device *dev, |
||
832 | + struct iommu_domain *domain) |
||
833 | +{ |
||
834 | + struct iommu_dma_cookie *cookie = domain->iova_cookie; |
||
835 | + struct iova_domain *iovad = &cookie->iovad; |
||
836 | + struct iommu_resv_region *region; |
||
837 | + LIST_HEAD(resv_regions); |
||
838 | + int ret = 0; |
||
839 | + |
||
840 | + iommu_get_resv_regions(dev, &resv_regions); |
||
841 | + list_for_each_entry(region, &resv_regions, list) { |
||
842 | + unsigned long lo, hi; |
||
843 | + |
||
844 | + /* We ARE the software that manages these! */ |
||
845 | + if (region->type == IOMMU_RESV_SW_MSI) |
||
846 | + continue; |
||
847 | + |
||
848 | + lo = iova_pfn(iovad, region->start); |
||
849 | + hi = iova_pfn(iovad, region->start + region->length - 1); |
||
850 | reserve_iova(iovad, lo, hi); |
||
851 | + |
||
852 | + if (region->type == IOMMU_RESV_MSI) |
||
853 | + ret = cookie_init_hw_msi_region(cookie, region->start, |
||
854 | + region->start + region->length); |
||
855 | + if (ret) |
||
856 | + break; |
||
857 | } |
||
858 | + iommu_put_resv_regions(dev, &resv_regions); |
||
859 | + |
||
860 | + return ret; |
||
861 | } |
||
862 | |||
863 | /** |
||
864 | @@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str |
||
865 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
||
866 | u64 size, struct device *dev) |
||
867 | { |
||
868 | - struct iova_domain *iovad = cookie_iovad(domain); |
||
869 | + struct iommu_dma_cookie *cookie = domain->iova_cookie; |
||
870 | + struct iova_domain *iovad = &cookie->iovad; |
||
871 | unsigned long order, base_pfn, end_pfn; |
||
872 | |||
873 | - if (!iovad) |
||
874 | - return -ENODEV; |
||
875 | + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) |
||
876 | + return -EINVAL; |
||
877 | |||
878 | /* Use the smallest supported page size for IOVA granularity */ |
||
879 | order = __ffs(domain->pgsize_bitmap); |
||
880 | @@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d |
||
881 | end_pfn = min_t(unsigned long, end_pfn, |
||
882 | domain->geometry.aperture_end >> order); |
||
883 | } |
||
884 | + /* |
||
885 | + * PCI devices may have larger DMA masks, but still prefer allocating |
||
886 | + * within a 32-bit mask to avoid DAC addressing. Such limitations don't |
||
887 | + * apply to the typical platform device, so for those we may as well |
||
888 | + * leave the cache limit at the top of their range to save an rb_last() |
||
889 | + * traversal on every allocation. |
||
890 | + */ |
||
891 | + if (dev && dev_is_pci(dev)) |
||
892 | + end_pfn &= DMA_BIT_MASK(32) >> order; |
||
893 | |||
894 | - /* All we can safely do with an existing domain is enlarge it */ |
||
895 | + /* start_pfn is always nonzero for an already-initialised domain */ |
||
896 | if (iovad->start_pfn) { |
||
897 | if (1UL << order != iovad->granule || |
||
898 | - base_pfn != iovad->start_pfn || |
||
899 | - end_pfn < iovad->dma_32bit_pfn) { |
||
900 | + base_pfn != iovad->start_pfn) { |
||
901 | pr_warn("Incompatible range for DMA domain\n"); |
||
902 | return -EFAULT; |
||
903 | } |
||
904 | - iovad->dma_32bit_pfn = end_pfn; |
||
905 | - } else { |
||
906 | - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); |
||
907 | - if (dev && dev_is_pci(dev)) |
||
908 | - iova_reserve_pci_windows(to_pci_dev(dev), iovad); |
||
909 | + /* |
||
910 | + * If we have devices with different DMA masks, move the free |
||
911 | + * area cache limit down for the benefit of the smaller one. |
||
912 | + */ |
||
913 | + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); |
||
914 | + |
||
915 | + return 0; |
||
916 | } |
||
917 | - return 0; |
||
918 | + |
||
919 | + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); |
||
920 | + if (!dev) |
||
921 | + return 0; |
||
922 | + |
||
923 | + return iova_reserve_iommu_regions(dev, domain); |
||
924 | } |
||
925 | EXPORT_SYMBOL(iommu_dma_init_domain); |
||
926 | |||
927 | @@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_ |
||
928 | { |
||
929 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
||
930 | struct iommu_dma_msi_page *msi_page; |
||
931 | - struct iova_domain *iovad = &cookie->iovad; |
||
932 | + struct iova_domain *iovad = cookie_iovad(domain); |
||
933 | struct iova *iova; |
||
934 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
||
935 | + size_t size = cookie_msi_granule(cookie); |
||
936 | |||
937 | - msi_addr &= ~(phys_addr_t)iova_mask(iovad); |
||
938 | + msi_addr &= ~(phys_addr_t)(size - 1); |
||
939 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
||
940 | if (msi_page->phys == msi_addr) |
||
941 | return msi_page; |
||
942 | @@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_ |
||
943 | if (!msi_page) |
||
944 | return NULL; |
||
945 | |||
946 | - iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); |
||
947 | - if (!iova) |
||
948 | - goto out_free_page; |
||
949 | - |
||
950 | msi_page->phys = msi_addr; |
||
951 | - msi_page->iova = iova_dma_addr(iovad, iova); |
||
952 | - if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) |
||
953 | + if (iovad) { |
||
954 | + iova = __alloc_iova(domain, size, dma_get_mask(dev)); |
||
955 | + if (!iova) |
||
956 | + goto out_free_page; |
||
957 | + msi_page->iova = iova_dma_addr(iovad, iova); |
||
958 | + } else { |
||
959 | + msi_page->iova = cookie->msi_iova; |
||
960 | + cookie->msi_iova += size; |
||
961 | + } |
||
962 | + |
||
963 | + if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) |
||
964 | goto out_free_iova; |
||
965 | |||
966 | INIT_LIST_HEAD(&msi_page->list); |
||
967 | @@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_ |
||
968 | return msi_page; |
||
969 | |||
970 | out_free_iova: |
||
971 | - __free_iova(iovad, iova); |
||
972 | + if (iovad) |
||
973 | + __free_iova(iovad, iova); |
||
974 | + else |
||
975 | + cookie->msi_iova -= size; |
||
976 | out_free_page: |
||
977 | kfree(msi_page); |
||
978 | return NULL; |
||
979 | @@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru |
||
980 | msg->data = ~0U; |
||
981 | } else { |
||
982 | msg->address_hi = upper_32_bits(msi_page->iova); |
||
983 | - msg->address_lo &= iova_mask(&cookie->iovad); |
||
984 | + msg->address_lo &= cookie_msi_granule(cookie) - 1; |
||
985 | msg->address_lo += lower_32_bits(msi_page->iova); |
||
986 | } |
||
987 | } |
||
988 | --- a/drivers/iommu/intel-iommu.c |
||
989 | +++ b/drivers/iommu/intel-iommu.c |
||
990 | @@ -440,6 +440,7 @@ struct dmar_rmrr_unit { |
||
991 | u64 end_address; /* reserved end address */ |
||
992 | struct dmar_dev_scope *devices; /* target devices */ |
||
993 | int devices_cnt; /* target device count */ |
||
994 | + struct iommu_resv_region *resv; /* reserved region handle */ |
||
995 | }; |
||
996 | |||
997 | struct dmar_atsr_unit { |
||
998 | @@ -4251,27 +4252,40 @@ static inline void init_iommu_pm_ops(voi |
||
999 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) |
||
1000 | { |
||
1001 | struct acpi_dmar_reserved_memory *rmrr; |
||
1002 | + int prot = DMA_PTE_READ|DMA_PTE_WRITE; |
||
1003 | struct dmar_rmrr_unit *rmrru; |
||
1004 | + size_t length; |
||
1005 | |||
1006 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); |
||
1007 | if (!rmrru) |
||
1008 | - return -ENOMEM; |
||
1009 | + goto out; |
||
1010 | |||
1011 | rmrru->hdr = header; |
||
1012 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
||
1013 | rmrru->base_address = rmrr->base_address; |
||
1014 | rmrru->end_address = rmrr->end_address; |
||
1015 | + |
||
1016 | + length = rmrr->end_address - rmrr->base_address + 1; |
||
1017 | + rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot, |
||
1018 | + IOMMU_RESV_DIRECT); |
||
1019 | + if (!rmrru->resv) |
||
1020 | + goto free_rmrru; |
||
1021 | + |
||
1022 | rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), |
||
1023 | ((void *)rmrr) + rmrr->header.length, |
||
1024 | &rmrru->devices_cnt); |
||
1025 | - if (rmrru->devices_cnt && rmrru->devices == NULL) { |
||
1026 | - kfree(rmrru); |
||
1027 | - return -ENOMEM; |
||
1028 | - } |
||
1029 | + if (rmrru->devices_cnt && rmrru->devices == NULL) |
||
1030 | + goto free_all; |
||
1031 | |||
1032 | list_add(&rmrru->list, &dmar_rmrr_units); |
||
1033 | |||
1034 | return 0; |
||
1035 | +free_all: |
||
1036 | + kfree(rmrru->resv); |
||
1037 | +free_rmrru: |
||
1038 | + kfree(rmrru); |
||
1039 | +out: |
||
1040 | + return -ENOMEM; |
||
1041 | } |
||
1042 | |||
1043 | static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) |
||
1044 | @@ -4485,6 +4499,7 @@ static void intel_iommu_free_dmars(void) |
||
1045 | list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { |
||
1046 | list_del(&rmrru->list); |
||
1047 | dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); |
||
1048 | + kfree(rmrru->resv); |
||
1049 | kfree(rmrru); |
||
1050 | } |
||
1051 | |||
1052 | @@ -5220,6 +5235,45 @@ static void intel_iommu_remove_device(st |
||
1053 | iommu_device_unlink(iommu->iommu_dev, dev); |
||
1054 | } |
||
1055 | |||
1056 | +static void intel_iommu_get_resv_regions(struct device *device, |
||
1057 | + struct list_head *head) |
||
1058 | +{ |
||
1059 | + struct iommu_resv_region *reg; |
||
1060 | + struct dmar_rmrr_unit *rmrr; |
||
1061 | + struct device *i_dev; |
||
1062 | + int i; |
||
1063 | + |
||
1064 | + rcu_read_lock(); |
||
1065 | + for_each_rmrr_units(rmrr) { |
||
1066 | + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, |
||
1067 | + i, i_dev) { |
||
1068 | + if (i_dev != device) |
||
1069 | + continue; |
||
1070 | + |
||
1071 | + list_add_tail(&rmrr->resv->list, head); |
||
1072 | + } |
||
1073 | + } |
||
1074 | + rcu_read_unlock(); |
||
1075 | + |
||
1076 | + reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, |
||
1077 | + IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, |
||
1078 | + 0, IOMMU_RESV_MSI); |
||
1079 | + if (!reg) |
||
1080 | + return; |
||
1081 | + list_add_tail(®->list, head); |
||
1082 | +} |
||
1083 | + |
||
1084 | +static void intel_iommu_put_resv_regions(struct device *dev, |
||
1085 | + struct list_head *head) |
||
1086 | +{ |
||
1087 | + struct iommu_resv_region *entry, *next; |
||
1088 | + |
||
1089 | + list_for_each_entry_safe(entry, next, head, list) { |
||
1090 | + if (entry->type == IOMMU_RESV_RESERVED) |
||
1091 | + kfree(entry); |
||
1092 | + } |
||
1093 | +} |
||
1094 | + |
||
1095 | #ifdef CONFIG_INTEL_IOMMU_SVM |
||
1096 | #define MAX_NR_PASID_BITS (20) |
||
1097 | static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu) |
||
1098 | @@ -5350,19 +5404,21 @@ struct intel_iommu *intel_svm_device_to_ |
||
1099 | #endif /* CONFIG_INTEL_IOMMU_SVM */ |
||
1100 | |||
1101 | static const struct iommu_ops intel_iommu_ops = { |
||
1102 | - .capable = intel_iommu_capable, |
||
1103 | - .domain_alloc = intel_iommu_domain_alloc, |
||
1104 | - .domain_free = intel_iommu_domain_free, |
||
1105 | - .attach_dev = intel_iommu_attach_device, |
||
1106 | - .detach_dev = intel_iommu_detach_device, |
||
1107 | - .map = intel_iommu_map, |
||
1108 | - .unmap = intel_iommu_unmap, |
||
1109 | - .map_sg = default_iommu_map_sg, |
||
1110 | - .iova_to_phys = intel_iommu_iova_to_phys, |
||
1111 | - .add_device = intel_iommu_add_device, |
||
1112 | - .remove_device = intel_iommu_remove_device, |
||
1113 | - .device_group = pci_device_group, |
||
1114 | - .pgsize_bitmap = INTEL_IOMMU_PGSIZES, |
||
1115 | + .capable = intel_iommu_capable, |
||
1116 | + .domain_alloc = intel_iommu_domain_alloc, |
||
1117 | + .domain_free = intel_iommu_domain_free, |
||
1118 | + .attach_dev = intel_iommu_attach_device, |
||
1119 | + .detach_dev = intel_iommu_detach_device, |
||
1120 | + .map = intel_iommu_map, |
||
1121 | + .unmap = intel_iommu_unmap, |
||
1122 | + .map_sg = default_iommu_map_sg, |
||
1123 | + .iova_to_phys = intel_iommu_iova_to_phys, |
||
1124 | + .add_device = intel_iommu_add_device, |
||
1125 | + .remove_device = intel_iommu_remove_device, |
||
1126 | + .get_resv_regions = intel_iommu_get_resv_regions, |
||
1127 | + .put_resv_regions = intel_iommu_put_resv_regions, |
||
1128 | + .device_group = pci_device_group, |
||
1129 | + .pgsize_bitmap = INTEL_IOMMU_PGSIZES, |
||
1130 | }; |
||
1131 | |||
1132 | static void quirk_iommu_g4x_gfx(struct pci_dev *dev) |
||
1133 | --- a/drivers/iommu/iommu.c |
||
1134 | +++ b/drivers/iommu/iommu.c |
||
1135 | @@ -36,6 +36,7 @@ |
||
1136 | |||
1137 | static struct kset *iommu_group_kset; |
||
1138 | static DEFINE_IDA(iommu_group_ida); |
||
1139 | +static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; |
||
1140 | |||
1141 | struct iommu_callback_data { |
||
1142 | const struct iommu_ops *ops; |
||
1143 | @@ -68,6 +69,13 @@ struct iommu_group_attribute { |
||
1144 | const char *buf, size_t count); |
||
1145 | }; |
||
1146 | |||
1147 | +static const char * const iommu_group_resv_type_string[] = { |
||
1148 | + [IOMMU_RESV_DIRECT] = "direct", |
||
1149 | + [IOMMU_RESV_RESERVED] = "reserved", |
||
1150 | + [IOMMU_RESV_MSI] = "msi", |
||
1151 | + [IOMMU_RESV_SW_MSI] = "msi", |
||
1152 | +}; |
||
1153 | + |
||
1154 | #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ |
||
1155 | struct iommu_group_attribute iommu_group_attr_##_name = \ |
||
1156 | __ATTR(_name, _mode, _show, _store) |
||
1157 | @@ -86,6 +94,18 @@ static int __iommu_attach_group(struct i |
||
1158 | static void __iommu_detach_group(struct iommu_domain *domain, |
||
1159 | struct iommu_group *group); |
||
1160 | |||
1161 | +static int __init iommu_set_def_domain_type(char *str) |
||
1162 | +{ |
||
1163 | + bool pt; |
||
1164 | + |
||
1165 | + if (!str || strtobool(str, &pt)) |
||
1166 | + return -EINVAL; |
||
1167 | + |
||
1168 | + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; |
||
1169 | + return 0; |
||
1170 | +} |
||
1171 | +early_param("iommu.passthrough", iommu_set_def_domain_type); |
||
1172 | + |
||
1173 | static ssize_t iommu_group_attr_show(struct kobject *kobj, |
||
1174 | struct attribute *__attr, char *buf) |
||
1175 | { |
||
1176 | @@ -133,8 +153,131 @@ static ssize_t iommu_group_show_name(str |
||
1177 | return sprintf(buf, "%s\n", group->name); |
||
1178 | } |
||
1179 | |||
1180 | +/** |
||
1181 | + * iommu_insert_resv_region - Insert a new region in the |
||
1182 | + * list of reserved regions. |
||
1183 | + * @new: new region to insert |
||
1184 | + * @regions: list of regions |
||
1185 | + * |
||
1186 | + * The new element is sorted by address with respect to the other |
||
1187 | + * regions of the same type. In case it overlaps with another |
||
1188 | + * region of the same type, regions are merged. In case it |
||
1189 | + * overlaps with another region of different type, regions are |
||
1190 | + * not merged. |
||
1191 | + */ |
||
1192 | +static int iommu_insert_resv_region(struct iommu_resv_region *new, |
||
1193 | + struct list_head *regions) |
||
1194 | +{ |
||
1195 | + struct iommu_resv_region *region; |
||
1196 | + phys_addr_t start = new->start; |
||
1197 | + phys_addr_t end = new->start + new->length - 1; |
||
1198 | + struct list_head *pos = regions->next; |
||
1199 | + |
||
1200 | + while (pos != regions) { |
||
1201 | + struct iommu_resv_region *entry = |
||
1202 | + list_entry(pos, struct iommu_resv_region, list); |
||
1203 | + phys_addr_t a = entry->start; |
||
1204 | + phys_addr_t b = entry->start + entry->length - 1; |
||
1205 | + int type = entry->type; |
||
1206 | + |
||
1207 | + if (end < a) { |
||
1208 | + goto insert; |
||
1209 | + } else if (start > b) { |
||
1210 | + pos = pos->next; |
||
1211 | + } else if ((start >= a) && (end <= b)) { |
||
1212 | + if (new->type == type) |
||
1213 | + goto done; |
||
1214 | + else |
||
1215 | + pos = pos->next; |
||
1216 | + } else { |
||
1217 | + if (new->type == type) { |
||
1218 | + phys_addr_t new_start = min(a, start); |
||
1219 | + phys_addr_t new_end = max(b, end); |
||
1220 | + |
||
1221 | + list_del(&entry->list); |
||
1222 | + entry->start = new_start; |
||
1223 | + entry->length = new_end - new_start + 1; |
||
1224 | + iommu_insert_resv_region(entry, regions); |
||
1225 | + } else { |
||
1226 | + pos = pos->next; |
||
1227 | + } |
||
1228 | + } |
||
1229 | + } |
||
1230 | +insert: |
||
1231 | + region = iommu_alloc_resv_region(new->start, new->length, |
||
1232 | + new->prot, new->type); |
||
1233 | + if (!region) |
||
1234 | + return -ENOMEM; |
||
1235 | + |
||
1236 | + list_add_tail(®ion->list, pos); |
||
1237 | +done: |
||
1238 | + return 0; |
||
1239 | +} |
||
1240 | + |
||
1241 | +static int |
||
1242 | +iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, |
||
1243 | + struct list_head *group_resv_regions) |
||
1244 | +{ |
||
1245 | + struct iommu_resv_region *entry; |
||
1246 | + int ret; |
||
1247 | + |
||
1248 | + list_for_each_entry(entry, dev_resv_regions, list) { |
||
1249 | + ret = iommu_insert_resv_region(entry, group_resv_regions); |
||
1250 | + if (ret) |
||
1251 | + break; |
||
1252 | + } |
||
1253 | + return ret; |
||
1254 | +} |
||
1255 | + |
||
1256 | +int iommu_get_group_resv_regions(struct iommu_group *group, |
||
1257 | + struct list_head *head) |
||
1258 | +{ |
||
1259 | + struct iommu_device *device; |
||
1260 | + int ret = 0; |
||
1261 | + |
||
1262 | + mutex_lock(&group->mutex); |
||
1263 | + list_for_each_entry(device, &group->devices, list) { |
||
1264 | + struct list_head dev_resv_regions; |
||
1265 | + |
||
1266 | + INIT_LIST_HEAD(&dev_resv_regions); |
||
1267 | + iommu_get_resv_regions(device->dev, &dev_resv_regions); |
||
1268 | + ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); |
||
1269 | + iommu_put_resv_regions(device->dev, &dev_resv_regions); |
||
1270 | + if (ret) |
||
1271 | + break; |
||
1272 | + } |
||
1273 | + mutex_unlock(&group->mutex); |
||
1274 | + return ret; |
||
1275 | +} |
||
1276 | +EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); |
||
1277 | + |
||
1278 | +static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, |
||
1279 | + char *buf) |
||
1280 | +{ |
||
1281 | + struct iommu_resv_region *region, *next; |
||
1282 | + struct list_head group_resv_regions; |
||
1283 | + char *str = buf; |
||
1284 | + |
||
1285 | + INIT_LIST_HEAD(&group_resv_regions); |
||
1286 | + iommu_get_group_resv_regions(group, &group_resv_regions); |
||
1287 | + |
||
1288 | + list_for_each_entry_safe(region, next, &group_resv_regions, list) { |
||
1289 | + str += sprintf(str, "0x%016llx 0x%016llx %s\n", |
||
1290 | + (long long int)region->start, |
||
1291 | + (long long int)(region->start + |
||
1292 | + region->length - 1), |
||
1293 | + iommu_group_resv_type_string[region->type]); |
||
1294 | + kfree(region); |
||
1295 | + } |
||
1296 | + |
||
1297 | + return (str - buf); |
||
1298 | +} |
||
1299 | + |
||
1300 | static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); |
||
1301 | |||
1302 | +static IOMMU_GROUP_ATTR(reserved_regions, 0444, |
||
1303 | + iommu_group_show_resv_regions, NULL); |
||
1304 | + |
||
1305 | static void iommu_group_release(struct kobject *kobj) |
||
1306 | { |
||
1307 | struct iommu_group *group = to_iommu_group(kobj); |
||
1308 | @@ -212,6 +355,11 @@ struct iommu_group *iommu_group_alloc(vo |
||
1309 | */ |
||
1310 | kobject_put(&group->kobj); |
||
1311 | |||
1312 | + ret = iommu_group_create_file(group, |
||
1313 | + &iommu_group_attr_reserved_regions); |
||
1314 | + if (ret) |
||
1315 | + return ERR_PTR(ret); |
||
1316 | + |
||
1317 | pr_debug("Allocated group %d\n", group->id); |
||
1318 | |||
1319 | return group; |
||
1320 | @@ -318,7 +466,7 @@ static int iommu_group_create_direct_map |
||
1321 | struct device *dev) |
||
1322 | { |
||
1323 | struct iommu_domain *domain = group->default_domain; |
||
1324 | - struct iommu_dm_region *entry; |
||
1325 | + struct iommu_resv_region *entry; |
||
1326 | struct list_head mappings; |
||
1327 | unsigned long pg_size; |
||
1328 | int ret = 0; |
||
1329 | @@ -331,18 +479,21 @@ static int iommu_group_create_direct_map |
||
1330 | pg_size = 1UL << __ffs(domain->pgsize_bitmap); |
||
1331 | INIT_LIST_HEAD(&mappings); |
||
1332 | |||
1333 | - iommu_get_dm_regions(dev, &mappings); |
||
1334 | + iommu_get_resv_regions(dev, &mappings); |
||
1335 | |||
1336 | /* We need to consider overlapping regions for different devices */ |
||
1337 | list_for_each_entry(entry, &mappings, list) { |
||
1338 | dma_addr_t start, end, addr; |
||
1339 | |||
1340 | - if (domain->ops->apply_dm_region) |
||
1341 | - domain->ops->apply_dm_region(dev, domain, entry); |
||
1342 | + if (domain->ops->apply_resv_region) |
||
1343 | + domain->ops->apply_resv_region(dev, domain, entry); |
||
1344 | |||
1345 | start = ALIGN(entry->start, pg_size); |
||
1346 | end = ALIGN(entry->start + entry->length, pg_size); |
||
1347 | |||
1348 | + if (entry->type != IOMMU_RESV_DIRECT) |
||
1349 | + continue; |
||
1350 | + |
||
1351 | for (addr = start; addr < end; addr += pg_size) { |
||
1352 | phys_addr_t phys_addr; |
||
1353 | |||
1354 | @@ -358,7 +509,7 @@ static int iommu_group_create_direct_map |
||
1355 | } |
||
1356 | |||
1357 | out: |
||
1358 | - iommu_put_dm_regions(dev, &mappings); |
||
1359 | + iommu_put_resv_regions(dev, &mappings); |
||
1360 | |||
1361 | return ret; |
||
1362 | } |
||
1363 | @@ -563,6 +714,19 @@ struct iommu_group *iommu_group_get(stru |
||
1364 | EXPORT_SYMBOL_GPL(iommu_group_get); |
||
1365 | |||
1366 | /** |
||
1367 | + * iommu_group_ref_get - Increment reference on a group |
||
1368 | + * @group: the group to use, must not be NULL |
||
1369 | + * |
||
1370 | + * This function is called by iommu drivers to take additional references on an |
||
1371 | + * existing group. Returns the given group for convenience. |
||
1372 | + */ |
||
1373 | +struct iommu_group *iommu_group_ref_get(struct iommu_group *group) |
||
1374 | +{ |
||
1375 | + kobject_get(group->devices_kobj); |
||
1376 | + return group; |
||
1377 | +} |
||
1378 | + |
||
1379 | +/** |
||
1380 | * iommu_group_put - Decrement group reference |
||
1381 | * @group: the group to use |
||
1382 | * |
||
1383 | @@ -845,10 +1009,19 @@ struct iommu_group *iommu_group_get_for_ |
||
1384 | * IOMMU driver. |
||
1385 | */ |
||
1386 | if (!group->default_domain) { |
||
1387 | - group->default_domain = __iommu_domain_alloc(dev->bus, |
||
1388 | - IOMMU_DOMAIN_DMA); |
||
1389 | + struct iommu_domain *dom; |
||
1390 | + |
||
1391 | + dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); |
||
1392 | + if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { |
||
1393 | + dev_warn(dev, |
||
1394 | + "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", |
||
1395 | + iommu_def_domain_type); |
||
1396 | + dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); |
||
1397 | + } |
||
1398 | + |
||
1399 | + group->default_domain = dom; |
||
1400 | if (!group->domain) |
||
1401 | - group->domain = group->default_domain; |
||
1402 | + group->domain = dom; |
||
1403 | } |
||
1404 | |||
1405 | ret = iommu_group_add_device(group, dev); |
||
1406 | @@ -1557,20 +1730,38 @@ int iommu_domain_set_attr(struct iommu_d |
||
1407 | } |
||
1408 | EXPORT_SYMBOL_GPL(iommu_domain_set_attr); |
||
1409 | |||
1410 | -void iommu_get_dm_regions(struct device *dev, struct list_head *list) |
||
1411 | +void iommu_get_resv_regions(struct device *dev, struct list_head *list) |
||
1412 | { |
||
1413 | const struct iommu_ops *ops = dev->bus->iommu_ops; |
||
1414 | |||
1415 | - if (ops && ops->get_dm_regions) |
||
1416 | - ops->get_dm_regions(dev, list); |
||
1417 | + if (ops && ops->get_resv_regions) |
||
1418 | + ops->get_resv_regions(dev, list); |
||
1419 | } |
||
1420 | |||
1421 | -void iommu_put_dm_regions(struct device *dev, struct list_head *list) |
||
1422 | +void iommu_put_resv_regions(struct device *dev, struct list_head *list) |
||
1423 | { |
||
1424 | const struct iommu_ops *ops = dev->bus->iommu_ops; |
||
1425 | |||
1426 | - if (ops && ops->put_dm_regions) |
||
1427 | - ops->put_dm_regions(dev, list); |
||
1428 | + if (ops && ops->put_resv_regions) |
||
1429 | + ops->put_resv_regions(dev, list); |
||
1430 | +} |
||
1431 | + |
||
1432 | +struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, |
||
1433 | + size_t length, int prot, |
||
1434 | + enum iommu_resv_type type) |
||
1435 | +{ |
||
1436 | + struct iommu_resv_region *region; |
||
1437 | + |
||
1438 | + region = kzalloc(sizeof(*region), GFP_KERNEL); |
||
1439 | + if (!region) |
||
1440 | + return NULL; |
||
1441 | + |
||
1442 | + INIT_LIST_HEAD(®ion->list); |
||
1443 | + region->start = start; |
||
1444 | + region->length = length; |
||
1445 | + region->prot = prot; |
||
1446 | + region->type = type; |
||
1447 | + return region; |
||
1448 | } |
||
1449 | |||
1450 | /* Request that a device is direct mapped by the IOMMU */ |
||
1451 | --- a/drivers/iommu/mtk_iommu.c |
||
1452 | +++ b/drivers/iommu/mtk_iommu.c |
||
1453 | @@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev |
||
1454 | data->m4u_group = iommu_group_alloc(); |
||
1455 | if (IS_ERR(data->m4u_group)) |
||
1456 | dev_err(dev, "Failed to allocate M4U IOMMU group\n"); |
||
1457 | + } else { |
||
1458 | + iommu_group_ref_get(data->m4u_group); |
||
1459 | } |
||
1460 | return data->m4u_group; |
||
1461 | } |
||
1462 | --- a/drivers/iommu/mtk_iommu_v1.c |
||
1463 | +++ b/drivers/iommu/mtk_iommu_v1.c |
||
1464 | @@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev |
||
1465 | data->m4u_group = iommu_group_alloc(); |
||
1466 | if (IS_ERR(data->m4u_group)) |
||
1467 | dev_err(dev, "Failed to allocate M4U IOMMU group\n"); |
||
1468 | + } else { |
||
1469 | + iommu_group_ref_get(data->m4u_group); |
||
1470 | } |
||
1471 | return data->m4u_group; |
||
1472 | } |
||
1473 | --- a/include/linux/dma-iommu.h |
||
1474 | +++ b/include/linux/dma-iommu.h |
||
1475 | @@ -28,6 +28,7 @@ int iommu_dma_init(void); |
||
1476 | |||
1477 | /* Domain management interface for IOMMU drivers */ |
||
1478 | int iommu_get_dma_cookie(struct iommu_domain *domain); |
||
1479 | +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); |
||
1480 | void iommu_put_dma_cookie(struct iommu_domain *domain); |
||
1481 | |||
1482 | /* Setup call for arch DMA mapping code */ |
||
1483 | @@ -67,6 +68,7 @@ int iommu_dma_mapping_error(struct devic |
||
1484 | |||
1485 | /* The DMA API isn't _quite_ the whole story, though... */ |
||
1486 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); |
||
1487 | +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); |
||
1488 | |||
1489 | #else |
||
1490 | |||
1491 | @@ -83,6 +85,11 @@ static inline int iommu_get_dma_cookie(s |
||
1492 | return -ENODEV; |
||
1493 | } |
||
1494 | |||
1495 | +static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) |
||
1496 | +{ |
||
1497 | + return -ENODEV; |
||
1498 | +} |
||
1499 | + |
||
1500 | static inline void iommu_put_dma_cookie(struct iommu_domain *domain) |
||
1501 | { |
||
1502 | } |
||
1503 | @@ -91,6 +98,10 @@ static inline void iommu_dma_map_msi_msg |
||
1504 | { |
||
1505 | } |
||
1506 | |||
1507 | +static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) |
||
1508 | +{ |
||
1509 | +} |
||
1510 | + |
||
1511 | #endif /* CONFIG_IOMMU_DMA */ |
||
1512 | #endif /* __KERNEL__ */ |
||
1513 | #endif /* __DMA_IOMMU_H */ |
||
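iommu_get_msi_cookie() is the counterpart to the full DMA cookie for callers that manage a domain themselves but still need an IOVA window where MSI doorbells can be mapped by iommu_dma_map_msi_msg(). A hedged sketch of the expected call sequence, where msi_base would normally come from the device's IOMMU_RESV_SW_MSI region and the function name is illustrative:

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/*
 * Sketch: set up an unmanaged domain with a software MSI window at
 * msi_base, and tear it down again on error. Error handling is
 * abbreviated to the essentials.
 */
static int my_setup_domain(struct bus_type *bus, struct device *dev,
			   dma_addr_t msi_base)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(bus);
	if (!domain)
		return -ENOMEM;

	/* Describe the IOVA window used for remapping MSI doorbells */
	ret = iommu_get_msi_cookie(domain, msi_base);
	if (ret)
		goto out_free;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_put_cookie;

	return 0;

out_put_cookie:
	iommu_put_dma_cookie(domain);
out_free:
	iommu_domain_free(domain);
	return ret;
}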
1514 | --- a/include/linux/iommu.h |
||
1515 | +++ b/include/linux/iommu.h |
||
1516 | @@ -117,18 +117,32 @@ enum iommu_attr { |
||
1517 | DOMAIN_ATTR_MAX, |
||
1518 | }; |
||
1519 | |||
1520 | +/* These are the possible reserved region types */ |
||
1521 | +enum iommu_resv_type { |
||
1522 | + /* Memory regions which must be mapped 1:1 at all times */ |
||
1523 | + IOMMU_RESV_DIRECT, |
||
1524 | + /* Arbitrary "never map this or give it to a device" address ranges */ |
||
1525 | + IOMMU_RESV_RESERVED, |
||
1526 | + /* Hardware MSI region (untranslated) */ |
||
1527 | + IOMMU_RESV_MSI, |
||
1528 | + /* Software-managed MSI translation window */ |
||
1529 | + IOMMU_RESV_SW_MSI, |
||
1530 | +}; |
||
1531 | + |
||
1532 | /** |
||
1533 | - * struct iommu_dm_region - descriptor for a direct mapped memory region |
||
1534 | + * struct iommu_resv_region - descriptor for a reserved memory region |
||
1535 | * @list: Linked list pointers |
||
1536 | * @start: System physical start address of the region |
||
1537 | * @length: Length of the region in bytes |
||
1538 | * @prot: IOMMU Protection flags (READ/WRITE/...) |
||
1539 | + * @type: Type of the reserved region |
||
1540 | */ |
||
1541 | -struct iommu_dm_region { |
||
1542 | +struct iommu_resv_region { |
||
1543 | struct list_head list; |
||
1544 | phys_addr_t start; |
||
1545 | size_t length; |
||
1546 | int prot; |
||
1547 | + enum iommu_resv_type type; |
||
1548 | }; |
||
1549 | |||
1550 | #ifdef CONFIG_IOMMU_API |
||
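The four region types above call for different handling by whoever consumes the list. A short sketch of that dispatch, assuming the ranges are already aligned to the domain's page sizes (my_handle_resv_region is a placeholder name):

#include <linux/iommu.h>

/*
 * Sketch: a consumer of the reserved-region list identity-maps
 * IOMMU_RESV_DIRECT ranges and merely keeps its own IOVA allocator
 * away from everything else.
 */
static int my_handle_resv_region(struct iommu_domain *domain,
				 struct iommu_resv_region *region)
{
	switch (region->type) {
	case IOMMU_RESV_DIRECT:
		/* Must stay reachable 1:1, so map it right away */
		return iommu_map(domain, region->start, region->start,
				 region->length, region->prot);
	case IOMMU_RESV_RESERVED:
	case IOMMU_RESV_MSI:
	case IOMMU_RESV_SW_MSI:
		/*
		 * Nothing to map; the caller just has to avoid handing
		 * out IOVAs in [start, start + length).
		 */
		return 0;
	}

	return 0;
}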
1551 | @@ -150,9 +164,9 @@ struct iommu_dm_region { |
||
1552 | * @device_group: find iommu group for a particular device |
||
1553 | * @domain_get_attr: Query domain attributes |
||
1554 | * @domain_set_attr: Change domain attributes |
||
1555 | - * @get_dm_regions: Request list of direct mapping requirements for a device |
||
1556 | - * @put_dm_regions: Free list of direct mapping requirements for a device |
||
1557 | - * @apply_dm_region: Temporary helper call-back for iova reserved ranges |
||
1558 | + * @get_resv_regions: Request list of reserved regions for a device |
||
1559 | + * @put_resv_regions: Free list of reserved regions for a device |
||
1560 | + * @apply_resv_region: Temporary helper call-back for iova reserved ranges |
||
1561 | * @domain_window_enable: Configure and enable a particular window for a domain |
||
1562 | * @domain_window_disable: Disable a particular window for a domain |
||
1563 | * @domain_set_windows: Set the number of windows for a domain |
||
1564 | @@ -184,11 +198,12 @@ struct iommu_ops { |
||
1565 | int (*domain_set_attr)(struct iommu_domain *domain, |
||
1566 | enum iommu_attr attr, void *data); |
||
1567 | |||
1568 | - /* Request/Free a list of direct mapping requirements for a device */ |
||
1569 | - void (*get_dm_regions)(struct device *dev, struct list_head *list); |
||
1570 | - void (*put_dm_regions)(struct device *dev, struct list_head *list); |
||
1571 | - void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, |
||
1572 | - struct iommu_dm_region *region); |
||
1573 | + /* Request/Free a list of reserved regions for a device */ |
||
1574 | + void (*get_resv_regions)(struct device *dev, struct list_head *list); |
||
1575 | + void (*put_resv_regions)(struct device *dev, struct list_head *list); |
||
1576 | + void (*apply_resv_region)(struct device *dev, |
||
1577 | + struct iommu_domain *domain, |
||
1578 | + struct iommu_resv_region *region); |
||
1579 | |||
1580 | /* Window handling functions */ |
||
1581 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, |
||
1582 | @@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st |
||
1583 | extern void iommu_set_fault_handler(struct iommu_domain *domain, |
||
1584 | iommu_fault_handler_t handler, void *token); |
||
1585 | |||
1586 | -extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); |
||
1587 | -extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); |
||
1588 | +extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); |
||
1589 | +extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); |
||
1590 | extern int iommu_request_dm_for_dev(struct device *dev); |
||
1591 | +extern struct iommu_resv_region * |
||
1592 | +iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, |
||
1593 | + enum iommu_resv_type type); |
||
1594 | +extern int iommu_get_group_resv_regions(struct iommu_group *group, |
||
1595 | + struct list_head *head); |
||
1596 | |||
1597 | extern int iommu_attach_group(struct iommu_domain *domain, |
||
1598 | struct iommu_group *group); |
||
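iommu_get_group_resv_regions() is only declared in this hunk; a hedged sketch of how a caller might use it to inspect a device's reserved ranges, assuming the helper fills the list with caller-owned entries that a plain kfree() per entry releases (my_dump_resv_regions is illustrative):

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/slab.h>

static int my_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region, *next;
	struct iommu_group *group;
	LIST_HEAD(regions);
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	ret = iommu_get_group_resv_regions(group, &regions);
	if (!ret) {
		list_for_each_entry(region, &regions, list)
			dev_info(dev, "resv region [%pa + %zx] type %d\n",
				 &region->start, region->length,
				 region->type);
	}

	/* Free the snapshot; this assumes the entries belong to the caller */
	list_for_each_entry_safe(region, next, &regions, list)
		kfree(region);

	iommu_group_put(group);
	return ret;
}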
1599 | @@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st |
||
1600 | extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, |
||
1601 | int (*fn)(struct device *, void *)); |
||
1602 | extern struct iommu_group *iommu_group_get(struct device *dev); |
||
1603 | +extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); |
||
1604 | extern void iommu_group_put(struct iommu_group *group); |
||
1605 | extern int iommu_group_register_notifier(struct iommu_group *group, |
||
1606 | struct notifier_block *nb); |
||
1607 | @@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl |
||
1608 | { |
||
1609 | } |
||
1610 | |||
1611 | -static inline void iommu_get_dm_regions(struct device *dev, |
||
1612 | +static inline void iommu_get_resv_regions(struct device *dev, |
||
1613 | struct list_head *list) |
||
1614 | { |
||
1615 | } |
||
1616 | |||
1617 | -static inline void iommu_put_dm_regions(struct device *dev, |
||
1618 | +static inline void iommu_put_resv_regions(struct device *dev, |
||
1619 | struct list_head *list) |
||
1620 | { |
||
1621 | } |
||
1622 | |||
1623 | +static inline int iommu_get_group_resv_regions(struct iommu_group *group, |
||
1624 | + struct list_head *head) |
||
1625 | +{ |
||
1626 | + return -ENODEV; |
||
1627 | +} |
||
1628 | + |
||
1629 | static inline int iommu_request_dm_for_dev(struct device *dev) |
||
1630 | { |
||
1631 | return -ENODEV; |