From 621f2c4753b03170213e178cdafd66e78b212b3c Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Tue, 30 Oct 2018 18:26:41 +0800
Subject: [PATCH 27/40] kvm: support layerscape

This is an integrated patch of kvm for layerscape.

It extends kvm_phys_addr_ioremap() with a pgprot_t argument so that
stage-2 device mappings are no longer hard-wired to PAGE_S2_DEVICE:
the new stage1_to_stage2_pgprot() helper derives the stage-2
attributes from the stage-1 (userspace) mapping, keeping
PAGE_S2_DEVICE for the device memory types and selecting PAGE_S2 or
PAGE_S2_NS for normal memory depending on whether the stage-1 pte is
shared. It also teaches __kvm_flush_dcache_pte() to handle pages
without a valid pfn by flushing through a temporary
ioremap_cache_ns() mapping, adds a kvmppc_core_queue_vec_unavail()
helper for booke to queue the AltiVec-unavailable interrupt, and
raises VITS_TYPER_DEVBITS from 16 to 17.
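
For illustration, a minimal sketch of the intended calling pattern
(condensed from the mmu.c hunk below; "size" stands in for the
vm_end - vm_start computation done there):

	pte_t *pte;
	spinlock_t *ptl;
	pgprot_t prot;

	/* Read the stage-1 attributes of the userspace mapping. */
	pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
	prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
	pte_unmap_unlock(pte, ptl);

	/* Install the stage-2 mapping with the derived attributes. */
	ret = kvm_phys_addr_ioremap(kvm, gpa, pa, size, writable, prot);

Callers that want the previous behaviour simply pass PAGE_S2_DEVICE
explicitly, as the vgic-v2 hunk below does.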

Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 arch/powerpc/kvm/booke.c         |  5 +++
 virt/kvm/arm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 virt/kvm/arm/vgic/vgic-its.c     |  2 +-
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 6 files changed, 74 insertions(+), 9 deletions(-)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -270,8 +271,15 @@ static inline void __coherent_cache_gues
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -305,6 +305,11 @@ void kvmppc_core_queue_fpunavail(struct
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+}
+
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1027,9 +1027,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1040,7 +1042,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1064,6 +1066,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+		case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+		case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+		case PTE_ATTRINDX(MT_DEVICE_GRE):
+			return PAGE_S2_DEVICE;
+		case PTE_ATTRINDX(MT_NORMAL_NC):
+		case PTE_ATTRINDX(MT_NORMAL):
+			return (pgprot_val(prot) & PTE_SHARED)
+				? PAGE_S2
+				: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1340,6 +1366,18 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+		pgprot_t prot;
+
+		pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+		prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+		pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+		if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+			mem_type = PAGE_S2_NS;
+#endif
+
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1381,6 +1419,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1917,6 +1960,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -1927,9 +1973,13 @@ int kvm_arch_prepare_memory_region(struc
 				goto out;
 			}
 
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, prot);
 			if (ret)
 				break;
 		}
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -243,7 +243,7 @@ static struct its_ite *find_ite(struct v
 #define GIC_LPI_OFFSET 8192
 
 #define VITS_TYPER_IDBITS 16
-#define VITS_TYPER_DEVBITS 16
+#define VITS_TYPER_DEVBITS 17
 #define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
 #define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
 
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -304,7 +304,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;