From fe22151c95c02c6bb145ea6c3685941e8fb09d60 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Thu, 5 Jul 2018 17:43:16 +0800
Subject: [PATCH 32/32] kvm: support layerscape

This is an integrated patch for Layerscape KVM support.

It lets the stage-2 fault and ioremap paths derive memory attributes from the
backing stage-1 PTE: kvm_phys_addr_ioremap() gains a pgprot_t argument,
user_mem_abort() can map non-shared normal memory with PAGE_S2_NS,
__kvm_flush_dcache_pte() handles PTEs that are not backed by a struct page,
and the emulated ITS advertises 17 DevBits and range-checks device and
collection IDs in vgic_its_check_id().
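
For reference, the resulting call pattern for device mappings looks roughly as
follows. This is only an illustrative sketch mirroring the
kvm_arch_prepare_memory_region() hunk below, not an additional change:

	pte_t *pte;
	spinlock_t *ptl;
	pgprot_t prot;

	/* Look up the stage-1 PTE backing the userspace mapping ... */
	pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
	/* ... translate its attributes into stage-2 attributes ... */
	prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
	pte_unmap_unlock(pte, ptl);

	/* ... and pass them on instead of hard-coding PAGE_S2_DEVICE. */
	ret = kvm_phys_addr_ioremap(kvm, gpa, pa, vm_end - vm_start,
				    writable, prot);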

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm/kvm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 virt/kvm/arm/vgic/vgic-its.c     | 24 +++++++++++---
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 5 files changed, 88 insertions(+), 12 deletions(-)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1020,9 +1020,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1033,7 +1035,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1057,6 +1059,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+		case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+		case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+		case PTE_ATTRINDX(MT_DEVICE_GRE):
+			return PAGE_S2_DEVICE;
+		case PTE_ATTRINDX(MT_NORMAL_NC):
+		case PTE_ATTRINDX(MT_NORMAL):
+			return (pgprot_val(prot) & PTE_SHARED)
+				? PAGE_S2
+				: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1308,6 +1334,19 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		if (!is_vm_hugetlb_page(vma)) {
+			pte_t *pte;
+			spinlock_t *ptl;
+			pgprot_t prot;
+
+			pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+			if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+				mem_type = PAGE_S2_NS;
+#endif
+		}
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1345,6 +1384,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1882,6 +1926,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -1891,10 +1938,13 @@ int kvm_arch_prepare_memory_region(struc
 				ret = -EINVAL;
 				goto out;
 			}
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, prot);
 			if (ret)
 				break;
 		}
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -274,8 +275,15 @@ static inline void __coherent_cache_gues
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -176,6 +176,8 @@ static struct its_itte *find_itte(struct
 
 #define GIC_LPI_OFFSET 8192
 
+#define VITS_TYPER_DEVBITS 17
+
 /*
  * Finds and returns a collection in the ITS collection table.
  * Must be called with the its_lock mutex held.
@@ -375,7 +377,7 @@ static unsigned long vgic_mmio_read_its_
 	 * To avoid memory waste in the guest, we keep the number of IDBits and
 	 * DevBits low - as least for the time being.
 	 */
-	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
+	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
 	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
 
 	return extract_bytes(reg, addr & 7, len);
@@ -601,16 +603,30 @@ static int vgic_its_cmd_handle_movi(stru
  * Check whether an ID can be stored into the corresponding guest table.
  * For a direct table this is pretty easy, but gets a bit nasty for
  * indirect tables. We check whether the resulting guest physical address
- * is actually valid (covered by a memslot and guest accessbible).
+ * is actually valid (covered by a memslot and guest accessible).
  * For this we have to read the respective first level entry.
  */
-static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id)
 {
 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 	int index;
-	u64 indirect_ptr;
 	gfn_t gfn;
 
+	switch (type) {
+	case GITS_BASER_TYPE_DEVICE:
+		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
+			return false;
+		break;
+	case GITS_BASER_TYPE_COLLECTION:
+		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
+		if (id >= BIT_ULL(16))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
 	if (!(baser & GITS_BASER_INDIRECT)) {
 		phys_addr_t addr;
 
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -290,7 +290,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;