KVM: x86: compile out vendor-specific code if SMM is disabled
Vendor-specific code that deals with SMI injection and saving/restoring
SMM state is not needed if CONFIG_KVM_SMM is disabled, so remove the
four callbacks smi_allowed, enter_smm, leave_smm and enable_smi_window.
The users in svm/nested.c and x86.c also have to be compiled out; the
amount of #ifdef'ed code is small and it's not worth moving it to smm.c.

enter_smm is now used only within #ifdef CONFIG_KVM_SMM, and the stub
can therefore be removed.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-7-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 31e83e21cf (parent 4b8e1b3201)
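The change relies on the usual kernel compile-out idiom, which the diff
below applies to the SMM callbacks: when the config option is off, the
function pointers disappear from the ops table entirely, and generic
code either sits inside the same #ifdef or goes through static inline
stubs provided by the header. Here is a minimal, self-contained sketch
of that idiom; the names (CONFIG_FEATURE_X, struct x_ops,
feature_x_pending, process_x) are hypothetical stand-ins for
illustration, not the actual KVM symbols:

    /* feature_x.h - sketch of the compile-out idiom, not KVM code */
    struct x_ops {
            void (*setup)(void);            /* unconditional callback */
    #ifdef CONFIG_FEATURE_X
            int  (*x_allowed)(void);        /* compiled out with the feature */
            void (*do_x)(void);
    #endif
    };

    #ifdef CONFIG_FEATURE_X
    int feature_x_pending(void);
    void process_x(void);
    #else
    /* Stubs keep callers that live outside any #ifdef compiling. */
    static inline int feature_x_pending(void) { return 0; }
    static inline void process_x(void) { }
    #endif

A stub only earns its keep while some caller remains compiled in; once
every user of a function is itself inside #ifdef CONFIG_FEATURE_X, the
stub is dead code. That is exactly why the enter_smm stub is dropped
below.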
arch/x86/include/asm/kvm-x86-ops.h
@@ -110,10 +110,12 @@ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
 KVM_X86_OP_OPTIONAL(set_hv_timer)
 KVM_X86_OP_OPTIONAL(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
+#ifdef CONFIG_KVM_SMM
 KVM_X86_OP(smi_allowed)
 KVM_X86_OP(enter_smm)
 KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
+#endif
 KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
 KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)

arch/x86/include/asm/kvm_host.h
@@ -1612,10 +1612,12 @@ struct kvm_x86_ops {
 
         void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_KVM_SMM
         int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
         int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
         int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
         void (*enable_smi_window)(struct kvm_vcpu *vcpu);
+#endif
 
         int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
         int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);

arch/x86/kvm/smm.h
@@ -27,7 +27,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 #else
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
-static inline void enter_smm(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 
 /*

arch/x86/kvm/svm/nested.c
@@ -1378,6 +1378,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
                 return 0;
         }
 
+#ifdef CONFIG_KVM_SMM
         if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                 if (block_nested_events)
                         return -EBUSY;

@@ -1386,6 +1387,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
                 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
                 return 0;
         }
+#endif
 
         if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                 if (block_nested_events)

arch/x86/kvm/svm/svm.c
@@ -4373,6 +4373,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
         vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+#ifdef CONFIG_KVM_SMM
 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);

@@ -4522,6 +4523,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
                 /* We must be in SMM; RSM will cause a vmexit anyway.  */
         }
 }
+#endif
 
 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
                                         void *insn, int insn_len)

@@ -4797,10 +4799,12 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
         .pi_update_irte = avic_pi_update_irte,
         .setup_mce = svm_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
         .smi_allowed = svm_smi_allowed,
         .enter_smm = svm_enter_smm,
         .leave_smm = svm_leave_smm,
         .enable_smi_window = svm_enable_smi_window,
+#endif
 
         .mem_enc_ioctl = sev_mem_enc_ioctl,
         .mem_enc_register_region = sev_mem_enc_register_region,

arch/x86/kvm/vmx/vmx.c
@@ -7932,6 +7932,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
                         ~FEAT_CTL_LMCE_ENABLED;
 }
 
+#ifdef CONFIG_KVM_SMM
 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
         /* we need a nested vmexit to enter SMM, postpone if run is pending */

@@ -7986,6 +7987,7 @@ static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
 {
         /* RSM will cause a vmexit anyway.  */
 }
+#endif
 
 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 {

@@ -8153,10 +8155,12 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
         .setup_mce = vmx_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
         .smi_allowed = vmx_smi_allowed,
         .enter_smm = vmx_enter_smm,
         .leave_smm = vmx_leave_smm,
         .enable_smi_window = vmx_enable_smi_window,
+#endif
 
         .can_emulate_instruction = vmx_can_emulate_instruction,
         .apic_init_signal_blocked = vmx_apic_init_signal_blocked,

arch/x86/kvm/x86.c
@@ -9919,6 +9919,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
          * in order to make progress and get back here for another iteration.
          * The kvm_x86_ops hooks communicate this by returning -EBUSY.
          */
+#ifdef CONFIG_KVM_SMM
         if (vcpu->arch.smi_pending) {
                 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
                 if (r < 0)

@@ -9931,6 +9932,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
                 } else
                         static_call(kvm_x86_enable_smi_window)(vcpu);
         }
+#endif
 
         if (vcpu->arch.nmi_pending) {
                 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;

@@ -12580,10 +12582,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
              static_call(kvm_x86_nmi_allowed)(vcpu, false)))
                 return true;
 
+#ifdef CONFIG_KVM_SMM
         if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
             (vcpu->arch.smi_pending &&
              static_call(kvm_x86_smi_allowed)(vcpu, false)))
                 return true;
+#endif
 
         if (kvm_arch_interrupt_allowed(vcpu) &&
             (kvm_cpu_has_interrupt(vcpu) ||