integrity: Move integrity_kernel_module_request() to IMA
In preparation for removing the 'integrity' LSM, move
integrity_kernel_module_request() to IMA and rename it to
ima_kernel_module_request(). Rewrite the function documentation to better
explain the problem it addresses.
Compile it only if CONFIG_INTEGRITY_ASYMMETRIC_KEYS is enabled, and call it
from security.c (the call will be removed later, once IMA is moved to the
LSM infrastructure).
Adding this hook cannot be avoided, since IMA has no control over the flags
passed to crypto_alloc_sig() in public_key_verify_signature(), and thus
cannot pass CRYPTO_NOLOAD, which is what solved the equivalent problem for
EVM hashing in commit e2861fa716 ("evm: Don't deadlock if a crypto algorithm
is unavailable").
EVM alone would not need to implement this hook: first, there is no mutex to
deadlock on; second, even if there were, a deadlock would require a recursive
call. Since verification from EVM can be initiated only by setting inode
metadata, a deadlock could occur only if modprobe itself set inode metadata
while loading a kernel module (which is unlikely).
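To make the modprobe scenario concrete, a rough illustration of the call
chain the new hook breaks (the exact functions involved are an assumption,
not taken verbatim from the patch):

	/*
	 * process_measurement(modprobe binary)    takes iint->mutex
	 *   ima_appraise_measurement()
	 *     public_key_verify_signature()       needs "pkcs1pad(rsa,*)"
	 *       crypto_larval_lookup()            algorithm not available
	 *         request_module("crypto-pkcs1pad(rsa,*)")
	 *           usermode helper executes modprobe again
	 *             process_measurement(modprobe binary)
	 *               -> blocks on the same iint->mutex: deadlock
	 */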
Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
Reviewed-by: Mimi Zohar <zohar@linux.ibm.com>
Acked-by: Mimi Zohar <zohar@linux.ibm.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
parent b8d997032a
commit 06cca51107
@@ -256,4 +256,14 @@ static inline bool ima_appraise_signature(enum kernel_read_file_id func)
 	return false;
 }
 #endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
+
+#if defined(CONFIG_IMA) && defined(CONFIG_INTEGRITY_ASYMMETRIC_KEYS)
+extern int ima_kernel_module_request(char *kmod_name);
+#else
+static inline int ima_kernel_module_request(char *kmod_name)
+{
+	return 0;
+}
+
+#endif
 #endif /* _LINUX_IMA_H */
@@ -42,17 +42,4 @@ static inline void integrity_load_keys(void)
 }
 #endif /* CONFIG_INTEGRITY */
 
-#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
-
-extern int integrity_kernel_module_request(char *kmod_name);
-
-#else
-
-static inline int integrity_kernel_module_request(char *kmod_name)
-{
-	return 0;
-}
-
-#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
-
 #endif /* _LINUX_INTEGRITY_H */
@@ -132,26 +132,3 @@ out:
 	pr_debug("%s() = %d\n", __func__, ret);
 	return ret;
 }
-
-/**
- * integrity_kernel_module_request - prevent crypto-pkcs1pad(rsa,*) requests
- * @kmod_name: kernel module name
- *
- * We have situation, when public_key_verify_signature() in case of RSA
- * algorithm use alg_name to store internal information in order to
- * construct an algorithm on the fly, but crypto_larval_lookup() will try
- * to use alg_name in order to load kernel module with same name.
- * Since we don't have any real "crypto-pkcs1pad(rsa,*)" kernel modules,
- * we are safe to fail such module request from crypto_larval_lookup().
- *
- * In this way we prevent modprobe execution during digsig verification
- * and avoid possible deadlock if modprobe and/or it's dependencies
- * also signed with digsig.
- */
-int integrity_kernel_module_request(char *kmod_name)
-{
-	if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0)
-		return -EINVAL;
-
-	return 0;
-}
@@ -1091,6 +1091,39 @@ int ima_measure_critical_data(const char *event_label,
 }
 EXPORT_SYMBOL_GPL(ima_measure_critical_data);
 
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+/**
+ * ima_kernel_module_request - Prevent crypto-pkcs1pad(rsa,*) requests
+ * @kmod_name: kernel module name
+ *
+ * Avoid a verification loop where verifying the signature of the modprobe
+ * binary requires executing modprobe itself. Since the modprobe iint->mutex
+ * is already held when the signature verification is performed, a deadlock
+ * occurs as soon as modprobe is executed within the critical region, since
+ * the same lock cannot be taken again.
+ *
+ * This happens when public_key_verify_signature(), in case of RSA algorithm,
+ * use alg_name to store internal information in order to construct an
+ * algorithm on the fly, but crypto_larval_lookup() will try to use alg_name
+ * in order to load a kernel module with same name.
+ *
+ * Since we don't have any real "crypto-pkcs1pad(rsa,*)" kernel modules,
+ * we are safe to fail such module request from crypto_larval_lookup(), and
+ * avoid the verification loop.
+ *
+ * Return: Zero if it is safe to load the kernel module, -EINVAL otherwise.
+ */
+int ima_kernel_module_request(char *kmod_name)
+{
+	if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
 static int __init init_ima(void)
 {
 	int error;
@@ -3249,7 +3249,7 @@ int security_kernel_module_request(char *kmod_name)
 	ret = call_int_hook(kernel_module_request, 0, kmod_name);
 	if (ret)
 		return ret;
-	return integrity_kernel_module_request(kmod_name);
+	return ima_kernel_module_request(kmod_name);
 }
 
 /**