Skip to content

Commit a09e359

Browse files
committed
Merge tag 'kvmarm-fixes-6.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.16, take #3

- Fix another set of FP/SIMD/SVE bugs affecting NV, and plug some missing synchronisation

- A small fix for the irqbypass hook fixes, tightening the check and ensuring that we only deal with MSI for both the old and the new route entry

- Rework the way the shadow LRs are addressed in a nesting configuration, plugging an embarrassing bug as well as simplifying the whole process

- Add yet another fix for the dreaded arch_timer_edge_cases selftest
2 parents e04c78d + 04c5355 commit a09e359

File tree

9 files changed

+215
-271
lines changed

9 files changed

+215
-271
lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -561,68 +561,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
561561
vcpu_set_flag((v), e); \
562562
} while (0)
563563

564-
#define __build_check_all_or_none(r, bits) \
565-
BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
566-
567-
#define __cpacr_to_cptr_clr(clr, set) \
568-
({ \
569-
u64 cptr = 0; \
570-
\
571-
if ((set) & CPACR_EL1_FPEN) \
572-
cptr |= CPTR_EL2_TFP; \
573-
if ((set) & CPACR_EL1_ZEN) \
574-
cptr |= CPTR_EL2_TZ; \
575-
if ((set) & CPACR_EL1_SMEN) \
576-
cptr |= CPTR_EL2_TSM; \
577-
if ((clr) & CPACR_EL1_TTA) \
578-
cptr |= CPTR_EL2_TTA; \
579-
if ((clr) & CPTR_EL2_TAM) \
580-
cptr |= CPTR_EL2_TAM; \
581-
if ((clr) & CPTR_EL2_TCPAC) \
582-
cptr |= CPTR_EL2_TCPAC; \
583-
\
584-
cptr; \
585-
})
586-
587-
#define __cpacr_to_cptr_set(clr, set) \
588-
({ \
589-
u64 cptr = 0; \
590-
\
591-
if ((clr) & CPACR_EL1_FPEN) \
592-
cptr |= CPTR_EL2_TFP; \
593-
if ((clr) & CPACR_EL1_ZEN) \
594-
cptr |= CPTR_EL2_TZ; \
595-
if ((clr) & CPACR_EL1_SMEN) \
596-
cptr |= CPTR_EL2_TSM; \
597-
if ((set) & CPACR_EL1_TTA) \
598-
cptr |= CPTR_EL2_TTA; \
599-
if ((set) & CPTR_EL2_TAM) \
600-
cptr |= CPTR_EL2_TAM; \
601-
if ((set) & CPTR_EL2_TCPAC) \
602-
cptr |= CPTR_EL2_TCPAC; \
603-
\
604-
cptr; \
605-
})
606-
607-
#define cpacr_clear_set(clr, set) \
608-
do { \
609-
BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
610-
BUILD_BUG_ON((clr) & CPACR_EL1_E0POE); \
611-
__build_check_all_or_none((clr), CPACR_EL1_FPEN); \
612-
__build_check_all_or_none((set), CPACR_EL1_FPEN); \
613-
__build_check_all_or_none((clr), CPACR_EL1_ZEN); \
614-
__build_check_all_or_none((set), CPACR_EL1_ZEN); \
615-
__build_check_all_or_none((clr), CPACR_EL1_SMEN); \
616-
__build_check_all_or_none((set), CPACR_EL1_SMEN); \
617-
\
618-
if (has_vhe() || has_hvhe()) \
619-
sysreg_clear_set(cpacr_el1, clr, set); \
620-
else \
621-
sysreg_clear_set(cptr_el2, \
622-
__cpacr_to_cptr_clr(clr, set), \
623-
__cpacr_to_cptr_set(clr, set));\
624-
} while (0)
625-
626564
/*
627565
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
628566
* format if E2H isn't set.

arch/arm64/include/asm/kvm_host.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1289,9 +1289,8 @@ void kvm_arm_resume_guest(struct kvm *kvm);
12891289
})
12901290

12911291
/*
1292-
* The couple of isb() below are there to guarantee the same behaviour
1293-
* on VHE as on !VHE, where the eret to EL1 acts as a context
1294-
* synchronization event.
1292+
* The isb() below is there to guarantee the same behaviour on VHE as on !VHE,
1293+
* where the eret to EL1 acts as a context synchronization event.
12951294
*/
12961295
#define kvm_call_hyp(f, ...) \
12971296
do { \
@@ -1309,7 +1308,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
13091308
\
13101309
if (has_vhe()) { \
13111310
ret = f(__VA_ARGS__); \
1312-
isb(); \
13131311
} else { \
13141312
ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
13151313
} \

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2764,7 +2764,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
27642764
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
27652765
struct kvm_kernel_irq_routing_entry *new)
27662766
{
2767-
if (new->type != KVM_IRQ_ROUTING_MSI)
2767+
if (old->type != KVM_IRQ_ROUTING_MSI ||
2768+
new->type != KVM_IRQ_ROUTING_MSI)
27682769
return true;
27692770

27702771
return memcmp(&old->msi, &new->msi, sizeof(new->msi));

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 138 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,136 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
6565
}
6666
}
6767

68+
static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
69+
{
70+
u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;
71+
72+
/*
73+
* Always trap SME since it's not supported in KVM.
74+
* TSM is RES1 if SME isn't implemented.
75+
*/
76+
val |= CPTR_EL2_TSM;
77+
78+
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
79+
val |= CPTR_EL2_TZ;
80+
81+
if (!guest_owns_fp_regs())
82+
val |= CPTR_EL2_TFP;
83+
84+
write_sysreg(val, cptr_el2);
85+
}
86+
87+
static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
88+
{
89+
/*
90+
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
91+
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
92+
* except for some missing controls, such as TAM.
93+
* In this case, CPTR_EL2.TAM has the same position with or without
94+
* VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
95+
* shift value for trapping the AMU accesses.
96+
*/
97+
u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
98+
u64 cptr;
99+
100+
if (guest_owns_fp_regs()) {
101+
val |= CPACR_EL1_FPEN;
102+
if (vcpu_has_sve(vcpu))
103+
val |= CPACR_EL1_ZEN;
104+
}
105+
106+
if (!vcpu_has_nv(vcpu))
107+
goto write;
108+
109+
/*
110+
* The architecture is a bit crap (what a surprise): an EL2 guest
111+
* writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
112+
* as they are RES0 in the guest's view. To work around it, trap the
113+
* sucker using the very same bit it can't set...
114+
*/
115+
if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
116+
val |= CPTR_EL2_TCPAC;
117+
118+
/*
119+
* Layer the guest hypervisor's trap configuration on top of our own if
120+
* we're in a nested context.
121+
*/
122+
if (is_hyp_ctxt(vcpu))
123+
goto write;
124+
125+
cptr = vcpu_sanitised_cptr_el2(vcpu);
126+
127+
/*
128+
* Pay attention, there's some interesting detail here.
129+
*
130+
* The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
131+
* meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
132+
*
133+
* - CPTR_EL2.xEN = x0, traps are enabled
134+
* - CPTR_EL2.xEN = x1, traps are disabled
135+
*
136+
* In other words, bit[0] determines if guest accesses trap or not. In
137+
* the interest of simplicity, clear the entire field if the guest
138+
* hypervisor has traps enabled to dispel any illusion of something more
139+
* complicated taking place.
140+
*/
141+
if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
142+
val &= ~CPACR_EL1_FPEN;
143+
if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
144+
val &= ~CPACR_EL1_ZEN;
145+
146+
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
147+
val |= cptr & CPACR_EL1_E0POE;
148+
149+
val |= cptr & CPTR_EL2_TCPAC;
150+
151+
write:
152+
write_sysreg(val, cpacr_el1);
153+
}
154+
155+
static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
156+
{
157+
if (!guest_owns_fp_regs())
158+
__activate_traps_fpsimd32(vcpu);
159+
160+
if (has_vhe() || has_hvhe())
161+
__activate_cptr_traps_vhe(vcpu);
162+
else
163+
__activate_cptr_traps_nvhe(vcpu);
164+
}
165+
166+
static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
167+
{
168+
u64 val = CPTR_NVHE_EL2_RES1;
169+
170+
if (!cpus_have_final_cap(ARM64_SVE))
171+
val |= CPTR_EL2_TZ;
172+
if (!cpus_have_final_cap(ARM64_SME))
173+
val |= CPTR_EL2_TSM;
174+
175+
write_sysreg(val, cptr_el2);
176+
}
177+
178+
static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
179+
{
180+
u64 val = CPACR_EL1_FPEN;
181+
182+
if (cpus_have_final_cap(ARM64_SVE))
183+
val |= CPACR_EL1_ZEN;
184+
if (cpus_have_final_cap(ARM64_SME))
185+
val |= CPACR_EL1_SMEN;
186+
187+
write_sysreg(val, cpacr_el1);
188+
}
189+
190+
static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
191+
{
192+
if (has_vhe() || has_hvhe())
193+
__deactivate_cptr_traps_vhe(vcpu);
194+
else
195+
__deactivate_cptr_traps_nvhe(vcpu);
196+
}
197+
68198
#define reg_to_fgt_masks(reg) \
69199
({ \
70200
struct fgt_masks *m; \
@@ -486,11 +616,6 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
486616
*/
487617
if (system_supports_sve()) {
488618
__hyp_sve_save_host();
489-
490-
/* Re-enable SVE traps if not supported for the guest vcpu. */
491-
if (!vcpu_has_sve(vcpu))
492-
cpacr_clear_set(CPACR_EL1_ZEN, 0);
493-
494619
} else {
495620
__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
496621
}
@@ -541,10 +666,7 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
541666
/* Valid trap. Switch the context: */
542667

543668
/* First disable enough traps to allow us to update the registers */
544-
if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
545-
cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
546-
else
547-
cpacr_clear_set(0, CPACR_EL1_FPEN);
669+
__deactivate_cptr_traps(vcpu);
548670
isb();
549671

550672
/* Write out the host state if it's in the registers */
@@ -566,6 +688,13 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
566688

567689
*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
568690

691+
/*
692+
* Re-enable traps necessary for the current state of the guest, e.g.
693+
* those enabled by a guest hypervisor. The ERET to the guest will
694+
* provide the necessary context synchronization.
695+
*/
696+
__activate_cptr_traps(vcpu);
697+
569698
return true;
570699
}
571700

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,10 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
6969
if (!guest_owns_fp_regs())
7070
return;
7171

72-
cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
72+
/*
73+
* Traps have been disabled by __deactivate_cptr_traps(), but there
74+
* hasn't necessarily been a context synchronization event yet.
75+
*/
7376
isb();
7477

7578
if (vcpu_has_sve(vcpu))

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 0 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -47,65 +47,6 @@ struct fgt_masks hdfgwtr2_masks;
4747

4848
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
4949

50-
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
51-
{
52-
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
53-
54-
if (!guest_owns_fp_regs())
55-
__activate_traps_fpsimd32(vcpu);
56-
57-
if (has_hvhe()) {
58-
val |= CPACR_EL1_TTA;
59-
60-
if (guest_owns_fp_regs()) {
61-
val |= CPACR_EL1_FPEN;
62-
if (vcpu_has_sve(vcpu))
63-
val |= CPACR_EL1_ZEN;
64-
}
65-
66-
write_sysreg(val, cpacr_el1);
67-
} else {
68-
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
69-
70-
/*
71-
* Always trap SME since it's not supported in KVM.
72-
* TSM is RES1 if SME isn't implemented.
73-
*/
74-
val |= CPTR_EL2_TSM;
75-
76-
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
77-
val |= CPTR_EL2_TZ;
78-
79-
if (!guest_owns_fp_regs())
80-
val |= CPTR_EL2_TFP;
81-
82-
write_sysreg(val, cptr_el2);
83-
}
84-
}
85-
86-
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
87-
{
88-
if (has_hvhe()) {
89-
u64 val = CPACR_EL1_FPEN;
90-
91-
if (cpus_have_final_cap(ARM64_SVE))
92-
val |= CPACR_EL1_ZEN;
93-
if (cpus_have_final_cap(ARM64_SME))
94-
val |= CPACR_EL1_SMEN;
95-
96-
write_sysreg(val, cpacr_el1);
97-
} else {
98-
u64 val = CPTR_NVHE_EL2_RES1;
99-
100-
if (!cpus_have_final_cap(ARM64_SVE))
101-
val |= CPTR_EL2_TZ;
102-
if (!cpus_have_final_cap(ARM64_SME))
103-
val |= CPTR_EL2_TSM;
104-
105-
write_sysreg(val, cptr_el2);
106-
}
107-
}
108-
10950
static void __activate_traps(struct kvm_vcpu *vcpu)
11051
{
11152
___activate_traps(vcpu, vcpu->arch.hcr_el2);

0 commit comments

Comments (0)