From 45e934873f9147f692dddbb61abc088f4c8059d7 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Wed, 17 Jan 2018 14:51:29 +0800
Subject: [PATCH 03/30] arch: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is an integrated patch for layerscape arch support.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Zhao Qiang <B45475@freescale.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Po Liu <po.liu@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Jianhua Xie <jianhua.xie@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 arch/arm/include/asm/delay.h          | 16 +++++++++
 arch/arm/include/asm/io.h             | 31 ++++++++++++++++++
 arch/arm/include/asm/mach/map.h       |  4 +--
 arch/arm/include/asm/pgtable.h        |  7 ++++
 arch/arm/kernel/bios32.c              | 43 ++++++++++++++++++++++++
 arch/arm/mm/dma-mapping.c             |  1 +
 arch/arm/mm/ioremap.c                 |  7 ++++
 arch/arm/mm/mmu.c                     |  9 +++++
 arch/arm64/include/asm/cache.h        |  2 +-
 arch/arm64/include/asm/io.h           | 30 +++++++++++++++++
 arch/arm64/include/asm/pci.h          |  4 +++
 arch/arm64/include/asm/pgtable-prot.h |  1 +
 arch/arm64/include/asm/pgtable.h      |  5 +++
 arch/arm64/kernel/pci.c               | 62 +++++++++++++++++++++++++++++++++++
 arch/arm64/mm/dma-mapping.c           |  6 ++++
 15 files changed, 225 insertions(+), 3 deletions(-)

--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -57,6 +57,22 @@ extern void __bad_udelay(void);
 			__const_udelay((n) * UDELAY_MULT)) :	\
 	  __udelay(n))
 
+#define spin_event_timeout(condition, timeout, delay)		\
+({								\
+	typeof(condition) __ret;				\
+	int i = 0;						\
+	while (!(__ret = (condition)) && (i++ < timeout)) {	\
+		if (delay)					\
+			udelay(delay);				\
+		else						\
+			cpu_relax();				\
+		udelay(1);					\
+	}							\
+	if (!__ret)						\
+		__ret = (condition);				\
+	__ret;							\
+})
+
 /* Loop-based definitions for assembly code. */
 extern void __loop_delay(unsigned long loops);
 extern void __loop_udelay(unsigned long usecs);
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola
 #define MT_DEVICE_NONSHARED	1
 #define MT_DEVICE_CACHED	2
 #define MT_DEVICE_WC		3
+#define MT_MEMORY_RW_NS		4
 /*
  * types 4 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int o
 #endif
 #endif
 
+/* access ports */
+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits_be32(addr, clear, set) \
+	iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le32(addr, clear, set) \
+	iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_be16(addr, clear, set) \
+	iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le16(addr, clear, set) \
+	iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_8(addr, clear, set) \
+	iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
 /*
  * IO port access primitives
  * -------------------------
@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
 
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -21,9 +21,9 @@ struct map_desc {
 	unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
 enum {
-	MT_UNCACHED = 4,
+	MT_UNCACHED = 5,
 	MT_CACHECLEAN,
 	MT_MINICLEAN,
 	MT_LOW_VECTORS,
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -118,6 +118,13 @@ extern pgprot_t pgprot_s2_device;
 #define pgprot_noncached(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 
+#define pgprot_cached(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
+
+#define pgprot_cached_ns(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
+			L_PTE_MT_DEV_NONSHARED)
+
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
 
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -11,6 +11,8 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/pcieport_if.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -64,6 +66,47 @@ void pcibios_report_status(u_int status_
 }
 
 /*
+ * Check device tree if the service interrupts are there
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+	int ret, count = 0;
+	struct device_node *np = NULL;
+
+	if (dev->bus->dev.of_node)
+		np = dev->bus->dev.of_node;
+
+	if (np == NULL)
+		return 0;
+
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return 0;
+
+	/* If root port doesn't support MSI/MSI-X/INTx in RC mode,
+	 * request irq for aer
+	 */
+	if (mask & PCIE_PORT_SERVICE_AER) {
+		ret = of_irq_get_byname(np, "aer");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	if (mask & PCIE_PORT_SERVICE_PME) {
+		ret = of_irq_get_byname(np, "pme");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	/* TODO: add more service interrupts if there it is in the device tree*/
+
+	return count;
+}
+
+/*
 * We don't use this to fix the device, but initialisation of it.
 * It's not the correct use for this, but it works.
 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2410,6 +2410,7 @@ void arch_setup_dma_ops(struct device *d
 
 	set_dma_ops(dev, dma_ops);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
 
 void arch_teardown_dma_ops(struct device *dev)
 {
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
 /*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW_NS] = {
+		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			    L_PTE_XN,
+		.prot_l1 = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+		.domain = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain = DOMAIN_KERNEL,
@@ -644,6 +651,7 @@ static void __init build_mem_type_table(
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
+	mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
 
 	/*
 	 * Set PXN for user mappings
@@ -672,6 +680,7 @@ static void __init build_mem_type_table(
 	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -18,7 +18,7 @@
 
 #include <asm/cachetype.h>
 
-#define L1_CACHE_SHIFT		7
+#define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_cache_ns(addr, size)	__ioremap((addr), (size), \
+					__pgprot(PROT_NORMAL_NS))
 #define iounmap				__iounmap
 
 /*
@@ -184,6 +186,34 @@ extern void __iomem *ioremap_cache(phys_
 #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
 #define iowrite64be(v,p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
 
+/* access ports */
+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits_be32(addr, clear, set) \
+	iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le32(addr, clear, set) \
+	iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_be16(addr, clear, set) \
+	iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le16(addr, clear, set) \
+	iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_8(addr, clear, set) \
+	iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
 #include <asm-generic/io.h>
 
 /*
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq
 	return -ENODEV;
 }
 
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state,
+			       int write_combine);
 static inline int pci_proc_domain(struct pci_bus *bus)
 {
 	return 1;
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -48,6 +48,7 @@
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
 #define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_NS		(PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -370,6 +370,11 @@ static inline int pmd_protnone(pmd_t pmd
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_cached(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
+			PTE_PXN | PTE_UXN)
+#define pgprot_cached_ns(prot) \
+	__pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -17,6 +17,8 @@
 #include <linux/mm.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pcieport_if.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
 #include <linux/pci-ecam.h>
@@ -53,6 +55,66 @@ int pcibios_alloc_irq(struct pci_dev *de
 
 	return 0;
 }
+
+/*
+ * Check device tree if the service interrupts are there
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+	int ret, count = 0;
+	struct device_node *np = NULL;
+
+	if (dev->bus->dev.of_node)
+		np = dev->bus->dev.of_node;
+
+	if (np == NULL)
+		return 0;
+
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return 0;
+
+	/* If root port doesn't support MSI/MSI-X/INTx in RC mode,
+	 * request irq for aer
+	 */
+	if (mask & PCIE_PORT_SERVICE_AER) {
+		ret = of_irq_get_byname(np, "aer");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	if (mask & PCIE_PORT_SERVICE_PME) {
+		ret = of_irq_get_byname(np, "pme");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	/* TODO: add more service interrupts if there it is in the device tree*/
+
+	return count;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
+{
+	if (mmap_state == pci_mmap_io)
+		return -EINVAL;
+
+	/*
+	 * Mark this as IO
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			    vma->vm_end - vma->vm_start,
+			    vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
 
 /*
 * raw_pci_read/write - Platform-specific PCI config space access.
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -30,6 +30,7 @@
 #include <linux/swiotlb.h>
 
 #include <asm/cacheflush.h>
+#include <../../../drivers/staging/fsl-mc/include/mc-bus.h>
 
 static int swiotlb __ro_after_init;
 
@@ -925,6 +926,10 @@ static int __init __iommu_dma_init(void)
 	if (!ret)
 		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
 #endif
+#ifdef CONFIG_FSL_MC_BUS
+	if (!ret)
+		ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type);
+#endif
 	return ret;
 }
 arch_initcall(__iommu_dma_init);
@@ -978,3 +983,4 @@ void arch_setup_dma_ops(struct device *d
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
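
The register-access helpers introduced above (setbits32/clrbits32, clrsetbits_*, and the spin_event_timeout polling macro) are typically used by Layerscape SoC drivers to update and poll big-endian memory-mapped registers. The sketch below is illustrative only and is not part of the patch: the device, register offsets, and bit masks are hypothetical, and it assumes a 32-bit ARM build with the asm/delay.h and asm/io.h additions applied (spin_event_timeout is only added for arm here, not arm64).

/* Minimal sketch, assuming the asm/io.h and asm/delay.h helpers above.
 * DEV_CTRL/DEV_STAT and their bit layout are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/delay.h>	/* spin_event_timeout() via asm/delay.h */
#include <linux/errno.h>
#include <linux/io.h>		/* setbits32(), clrsetbits_be32(), ioread32be() */

#define DEV_CTRL	0x00		/* hypothetical control register */
#define DEV_STAT	0x04		/* hypothetical status register */
#define DEV_CTRL_EN	BIT(0)
#define DEV_CTRL_MODE	GENMASK(3, 1)
#define DEV_STAT_READY	BIT(31)

static int example_dev_enable(void __iomem *base)
{
	u32 ready;

	/* Clear the whole mode field and set mode 1 in one read-modify-write. */
	clrsetbits_be32(base + DEV_CTRL, DEV_CTRL_MODE, BIT(1));

	/* Turn the block on. */
	setbits32(base + DEV_CTRL, DEV_CTRL_EN);

	/* Poll the ready bit for up to 1000 iterations, 10 us per loop. */
	ready = spin_event_timeout(ioread32be(base + DEV_STAT) & DEV_STAT_READY,
				   1000, 10);

	return ready ? 0 : -ETIMEDOUT;
}

The clrsetbits_* form is used for the mode field because it collapses the mask-and-set sequence into a single read-modify-write of the register, rather than separate clrbits32()/setbits32() calls that would each re-read it.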