/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/kvm_hyp.h>

static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
{
	u64 val;

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 */
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
{
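	/*
	 * Non-VHE: TLB maintenance targeting the EL1&0 regime issued
	 * from EL2 is already tagged with the VMID held in VTTBR_EL2,
	 * so loading the guest's VTTBR (and synchronising with an ISB)
	 * is all that is needed here.
	 */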
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();
}

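/*
 * Select the VHE or non-VHE variant at runtime, depending on whether
 * the CPU implements the Virtualization Host Extensions.
 */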
static hyp_alternate_select(__tlb_switch_to_guest,
			    __tlb_switch_to_guest_nvhe,
			    __tlb_switch_to_guest_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
}

static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
{
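	/* Back to the host: clear VTTBR_EL2 so no guest VMID is left live */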
	write_sysreg(0, vttbr_el2);
}

static hyp_alternate_select(__tlb_switch_to_host,
			    __tlb_switch_to_host_nvhe,
			    __tlb_switch_to_host_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
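	/* Make sure prior stage-2 page-table updates are visible */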
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest()(kvm);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
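	/* The TLBI IPAS2E1IS argument encodes the IPA shifted right by 12 */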
	ipa >>= 12;
	asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	asm volatile("tlbi vmalle1is" : : );
	dsb(ish);
	isb();

	__tlb_switch_to_host()(kvm);
}

void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest()(kvm);

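	/* Flush all S1 and S2 entries for this VMID, inner shareable */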
	asm volatile("tlbi vmalls12e1is" : : );
	dsb(ish);
	isb();

	__tlb_switch_to_host()(kvm);
}

void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* Switch to requested VMID */
	__tlb_switch_to_guest()(kvm);

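	/*
	 * Local invalidation only: flush this CPU's stage-1 entries for
	 * the current VMID; a non-shareable barrier is sufficient.
	 */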
	asm volatile("tlbi vmalle1" : : );
	dsb(nsh);
	isb();

	__tlb_switch_to_host()(kvm);
}

void __hyp_text __kvm_flush_vm_context(void)
{
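	/*
	 * Nuke all guest TLB entries (all VMIDs, both stages) and the
	 * whole instruction cache across the Inner Shareable domain.
	 */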
	dsb(ishst);
	asm volatile("tlbi alle1is	\n"
		     "ic ialluis	  ": : );
	dsb(ish);
}