From 621f2c4753b03170213e178cdafd66e78b212b3c Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Tue, 30 Oct 2018 18:26:41 +0800
Subject: [PATCH 27/40] kvm: support layerscape

This is an integrated patch of kvm for layerscape
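
The central change below is that kvm_phys_addr_ioremap() gains an explicit
stage-2 pgprot_t argument, and user_mem_abort()/kvm_arch_prepare_memory_region()
derive it from the stage-1 attributes of the backing userspace pte via the new
stage1_to_stage2_pgprot() helper. The following stand-alone sketch is not part
of the patch and uses invented enum names; it only models that attribute
decision:

#include <stdio.h>

/* Invented stand-ins for the arm64 stage-1 memory types and for the
 * stage-2 protections used by the patch (PAGE_S2_DEVICE, PAGE_S2,
 * PAGE_S2_NS). */
enum s1_type { S1_DEVICE, S1_NORMAL, S1_NORMAL_SHARED };
enum s2_prot { S2_DEVICE, S2_NS, S2_NORMAL };

/* Mirrors the decision made by stage1_to_stage2_pgprot(): device (and
 * unrecognised) attributes keep a device mapping, normal memory gets a
 * normal stage-2 mapping, with shared vs. non-shared picking PAGE_S2
 * vs. PAGE_S2_NS. */
static enum s2_prot stage1_to_stage2(enum s1_type t)
{
	switch (t) {
	case S1_NORMAL_SHARED:
		return S2_NORMAL;
	case S1_NORMAL:
		return S2_NS;
	default:
		return S2_DEVICE;
	}
}

int main(void)
{
	printf("device         -> %d\n", stage1_to_stage2(S1_DEVICE));
	printf("normal         -> %d\n", stage1_to_stage2(S1_NORMAL));
	printf("normal, shared -> %d\n", stage1_to_stage2(S1_NORMAL_SHARED));
	return 0;
}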

Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 arch/powerpc/kvm/booke.c         |  5 +++
 virt/kvm/arm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 virt/kvm/arm/vgic/vgic-its.c     |  2 +-
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 6 files changed, 74 insertions(+), 9 deletions(-)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -270,8 +271,15 @@ static inline void __coherent_cache_gues
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -305,6 +305,11 @@ void kvmppc_core_queue_fpunavail(struct
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+}
+
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1027,9 +1027,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1040,7 +1042,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1064,6 +1066,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1334,6 +1360,18 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+		pgprot_t prot;
+
+		pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+		prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+		pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+		if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+			mem_type = PAGE_S2_NS;
+#endif
+
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1375,6 +1413,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1911,6 +1954,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -1921,9 +1967,13 @@ int kvm_arch_prepare_memory_region(struc
 				goto out;
 			}
 
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, prot);
 			if (ret)
 				break;
 		}
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -243,7 +243,7 @@ static struct its_ite *find_ite(struct v
 #define GIC_LPI_OFFSET 8192
 
 #define VITS_TYPER_IDBITS 16
-#define VITS_TYPER_DEVBITS 16
+#define VITS_TYPER_DEVBITS 17
 #define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
 #define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
 
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -304,7 +304,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;