Merge tag 'x86-urgent-2023-09-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:

 "Fix preemption delays in the SGX code, remove unnecessarily
  UAPI-exported code, fix a ld.lld linker (in)compatibility quirk and
  make the x86 SMP init code a bit more conservative to fix kexec()
  lockups"

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'x86-urgent-2023-09-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sgx: Break up long non-preemptible delays in sgx_vepc_release()
  x86: Remove the arch_calc_vm_prot_bits() macro from the UAPI
  x86/build: Fix linker fill bytes quirk/incompatibility for ld.lld
  x86/smp: Don't send INIT to non-present and non-booted CPUs
commit e56b2b6057
arch/x86/include/asm/mman.h (new file)
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+#define arch_calc_vm_prot_bits(prot, key) (		\
+		((key) & 0x1 ? VM_PKEY_BIT0 : 0) |	\
+		((key) & 0x2 ? VM_PKEY_BIT1 : 0) |	\
+		((key) & 0x4 ? VM_PKEY_BIT2 : 0) |	\
+		((key) & 0x8 ? VM_PKEY_BIT3 : 0))
+#endif
+
+#include <uapi/asm/mman.h>
+
+#endif /* __ASM_MMAN_H__ */
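
For context on why this macro belongs in a kernel-internal header: it is only ever expanded by the generic mmap path, which folds PROT_* bits and a protection key into VM_* vma flags. A simplified sketch of that consumer, based on include/linux/mman.h (not part of this diff, trimmed for illustration):

	/*
	 * Simplified from include/linux/mman.h: the pkey argument is turned
	 * into vma flag bits via the arch hook defined in the new header
	 * above.  VM_PKEY_BIT* are kernel-internal vma flags, which is why
	 * the macro has no business being visible to userspace.
	 */
	static inline unsigned long calc_vm_prot_bits(unsigned long prot,
						      unsigned long pkey)
	{
		return _calc_vm_trans(prot, PROT_READ,  VM_READ)  |
		       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
		       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC)  |
		       arch_calc_vm_prot_bits(prot, pkey);
	}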
arch/x86/include/uapi/asm/mman.h
@@ -5,14 +5,6 @@
 #define MAP_32BIT	0x40		/* only give out 32bit addresses */
 #define MAP_ABOVE4G	0x80		/* only map above 4GB */
 
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-#define arch_calc_vm_prot_bits(prot, key) (		\
-		((key) & 0x1 ? VM_PKEY_BIT0 : 0) |	\
-		((key) & 0x2 ? VM_PKEY_BIT1 : 0) |	\
-		((key) & 0x4 ? VM_PKEY_BIT2 : 0) |	\
-		((key) & 0x8 ? VM_PKEY_BIT3 : 0))
-#endif
-
 /* Flags for map_shadow_stack(2) */
 #define SHADOW_STACK_SET_TOKEN	(1ULL << 0)	/* Set up a restore token in the shadow stack */
 
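
Nothing in userspace consumes arch_calc_vm_prot_bits(): applications reach protection keys through the pkey_alloc()/pkey_mprotect() syscalls, so dropping the macro from the exported header changes no ABI. A minimal userspace sketch of that path (hypothetical example, assuming the glibc 2.27+ wrappers and an x86 CPU with PKU):

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		/* Allocate a protection key that forbids writes. */
		int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
		if (pkey < 0) {
			perror("pkey_alloc");	/* e.g. CPU without PKU */
			return 1;
		}

		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/*
		 * Associate the mapping with the key; the kernel, not
		 * userspace, translates the key into VM_PKEY_BIT* flags.
		 */
		if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey))
			perror("pkey_mprotect");

		pkey_free(pkey);
		return 0;
	}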
arch/x86/kernel/cpu/sgx/virt.c
@@ -204,6 +204,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
 			continue;
 
 		xa_erase(&vepc->page_array, index);
+		cond_resched();
 	}
 
 	/*
@@ -222,6 +223,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
 			list_add_tail(&epc_page->list, &secs_pages);
 
 		xa_erase(&vepc->page_array, index);
+		cond_resched();
 	}
 
 	/*
@@ -243,6 +245,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
 
 		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);
+		cond_resched();
 	}
 
 	if (!list_empty(&secs_pages))
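
The pattern is the same in all three hunks: sgx_vepc_release() can walk a very large number of EPC pages, each release involving a slow EREMOVE, so without a reschedule point a non-preemptible kernel can stall for long stretches. A generic sketch of the idiom (hypothetical helper name, not the actual SGX code):

	/*
	 * Hypothetical helper illustrating the idiom: tear down a large
	 * xarray while giving the scheduler (and RCU) a chance to run after
	 * every entry, so the soft-lockup watchdog stays quiet on
	 * !CONFIG_PREEMPT kernels.
	 */
	static void teardown_large_xarray(struct xarray *xa)
	{
		unsigned long index;
		void *entry;

		xa_for_each(xa, index, entry) {
			xa_erase(xa, index);
			cond_resched();		/* the line added above */
		}
	}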
arch/x86/kernel/smpboot.c
@@ -1250,7 +1250,7 @@ bool smp_park_other_cpus_in_init(void)
 	if (this_cpu)
 		return false;
 
-	for_each_present_cpu(cpu) {
+	for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
 		if (cpu == this_cpu)
 			continue;
 		apicid = apic->cpu_present_to_apicid(cpu);
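
Conceptually, the new iterator restricts the INIT IPIs to CPUs that are both physically present and have actually been brought up at least once; CPUs that never booted get nothing, which is what avoids the kexec() lockups named in the pull summary. A rough open-coded equivalent of the iterator (sketch, not the real macro expansion):

	/*
	 * Roughly what
	 *   for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask)
	 * means: iterate the intersection of the two masks.
	 */
	for_each_cpu(cpu, cpu_present_mask) {
		if (!cpumask_test_cpu(cpu, &cpus_booted_once_mask))
			continue;	/* never booted: do not send INIT */
		/* ... loop body: park this CPU ... */
	}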
arch/x86/kernel/vmlinux.lds.S
@@ -156,7 +156,7 @@ SECTIONS
 		ALIGN_ENTRY_TEXT_END
 		*(.gnu.warning)
 
-	} :text =0xcccc
+	} :text = 0xcccccccc
 
 	/* End of text section, which should occupy whole number of pages */
 	_etext = .;
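
Background on the fill value: 0xcc is the single-byte INT3 opcode on x86, so padding the text section with it turns any stray jump into an immediate trap. GNU ld and ld.lld interpret a short fill expression like =0xcccc differently, so the fix spells out the full 32-bit value, which both linkers fill as repeated 0xcc bytes. A hypothetical x86-only userspace demo of why 0xcc is the desirable filler:

	#include <signal.h>
	#include <unistd.h>

	/*
	 * Hypothetical demo: executing a 0xcc byte raises SIGTRAP, so a CPU
	 * that wanders into 0xcc-filled padding traps immediately instead of
	 * sliding into unrelated code.
	 */
	static void on_trap(int sig)
	{
		static const char msg[] = "hit an int3 (0xcc) byte\n";
		write(STDOUT_FILENO, msg, sizeof(msg) - 1);
		_exit(0);
	}

	int main(void)
	{
		signal(SIGTRAP, on_trap);
		__asm__ volatile(".byte 0xcc");	/* same byte as the fill */
		return 1;			/* not reached */
	}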
scripts/headers_install.sh
@@ -81,7 +81,6 @@ arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_NO
 arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_SUPPORT
 arch/x86/include/uapi/asm/auxvec.h:CONFIG_IA32_EMULATION
 arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64
-arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 "
 
 for c in $configs