Merge pull request #4518 from douzzer/nestable-save-vector-registers

linuxkm: fixes for {save,restore}_vector_registers_x86().
This commit is contained in:
David Garske 2021-10-29 09:14:32 -07:00 committed by GitHub
commit a6415493eb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 116 additions and 120 deletions

View File

@@ -1140,26 +1140,23 @@ void __attribute__((no_instrument_function))
}
#endif
#ifdef WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED
union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
#endif
#if defined(WOLFSSL_LINUXKM_SIMD_X86) && defined(WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED)
#if defined(WOLFSSL_LINUXKM_SIMD_X86)
static union fpregs_state **wolfcrypt_linuxkm_fpu_states = NULL;
static WARN_UNUSED_RESULT inline int am_in_hard_interrupt_handler(void)
{
return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0;
}
WARN_UNUSED_RESULT int allocate_wolfcrypt_irq_fpu_states(void)
WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
{
wolfcrypt_irq_fpu_states =
wolfcrypt_linuxkm_fpu_states =
(union fpregs_state **)kzalloc(nr_cpu_ids
* sizeof(struct fpu_state *),
GFP_KERNEL);
if (! wolfcrypt_irq_fpu_states) {
if (! wolfcrypt_linuxkm_fpu_states) {
pr_err("warning, allocation of %lu bytes for "
"wolfcrypt_irq_fpu_states failed.\n",
"wolfcrypt_linuxkm_fpu_states failed.\n",
nr_cpu_ids * sizeof(struct fpu_state *));
return MEMORY_E;
}
@@ -1168,27 +1165,27 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
for (i=0; i<nr_cpu_ids; ++i) {
_Static_assert(sizeof(union fpregs_state) <= PAGE_SIZE,
"union fpregs_state is larger than expected.");
wolfcrypt_irq_fpu_states[i] =
wolfcrypt_linuxkm_fpu_states[i] =
(union fpregs_state *)kzalloc(PAGE_SIZE
/* sizeof(union fpregs_state) */,
GFP_KERNEL);
if (! wolfcrypt_irq_fpu_states[i])
if (! wolfcrypt_linuxkm_fpu_states[i])
break;
/* double-check that the allocation is 64-byte-aligned as needed
* for xsave.
*/
if ((unsigned long)wolfcrypt_irq_fpu_states[i] & 63UL) {
pr_err("warning, allocation for wolfcrypt_irq_fpu_states "
if ((unsigned long)wolfcrypt_linuxkm_fpu_states[i] & 63UL) {
pr_err("warning, allocation for wolfcrypt_linuxkm_fpu_states "
"was not properly aligned (%px).\n",
wolfcrypt_irq_fpu_states[i]);
kfree(wolfcrypt_irq_fpu_states[i]);
wolfcrypt_irq_fpu_states[i] = 0;
wolfcrypt_linuxkm_fpu_states[i]);
kfree(wolfcrypt_linuxkm_fpu_states[i]);
wolfcrypt_linuxkm_fpu_states[i] = 0;
break;
}
}
if (i < nr_cpu_ids) {
pr_err("warning, only %u/%u allocations succeeded for "
"wolfcrypt_irq_fpu_states.\n",
"wolfcrypt_linuxkm_fpu_states.\n",
i, nr_cpu_ids);
return MEMORY_E;
}
@@ -1196,60 +1193,54 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
return 0;
}
void free_wolfcrypt_irq_fpu_states(void)
void free_wolfcrypt_linuxkm_fpu_states(void)
{
if (wolfcrypt_irq_fpu_states) {
if (wolfcrypt_linuxkm_fpu_states) {
typeof(nr_cpu_ids) i;
for (i=0; i<nr_cpu_ids; ++i) {
if (wolfcrypt_irq_fpu_states[i])
kfree(wolfcrypt_irq_fpu_states[i]);
if (wolfcrypt_linuxkm_fpu_states[i])
kfree(wolfcrypt_linuxkm_fpu_states[i]);
}
kfree(wolfcrypt_irq_fpu_states);
wolfcrypt_irq_fpu_states = 0;
kfree(wolfcrypt_linuxkm_fpu_states);
wolfcrypt_linuxkm_fpu_states = 0;
}
}
WARN_UNUSED_RESULT int save_vector_registers_x86(void)
{
int processor_id;
preempt_disable();
processor_id = smp_processor_id();
{
static int _warned_on_null = -1;
if ((wolfcrypt_linuxkm_fpu_states == NULL) ||
(wolfcrypt_linuxkm_fpu_states[processor_id] == NULL))
{
preempt_enable();
if (_warned_on_null < processor_id) {
_warned_on_null = processor_id;
pr_err("save_vector_registers_x86 called for cpu id %d "
"with null context buffer.\n", processor_id);
}
return BAD_STATE_E;
}
}
if (! irq_fpu_usable()) {
if (am_in_hard_interrupt_handler()) {
int processor_id;
if (! wolfcrypt_irq_fpu_states) {
static int warned_on_null_wolfcrypt_irq_fpu_states = 0;
preempt_enable();
if (! warned_on_null_wolfcrypt_irq_fpu_states) {
warned_on_null_wolfcrypt_irq_fpu_states = 1;
pr_err("save_vector_registers_x86 with null "
"wolfcrypt_irq_fpu_states.\n");
}
return BAD_STATE_E;
}
processor_id = __smp_processor_id();
if (! wolfcrypt_irq_fpu_states[processor_id]) {
static int _warned_on_null = -1;
preempt_enable();
if (_warned_on_null < processor_id) {
_warned_on_null = processor_id;
pr_err("save_vector_registers_x86 for cpu id %d with "
"null wolfcrypt_irq_fpu_states[id].\n",
processor_id);
}
return BAD_STATE_E;
}
/* allow for recursive calls (some crypto calls are recursive) */
if (((unsigned char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
if (((unsigned char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
/* allow for nested calls */
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
preempt_enable();
pr_err("save_vector_registers_x86 recursion register overflow for "
"cpu id %d.\n", processor_id);
return BAD_STATE_E;
} else {
++((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1];
++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
return 0;
}
}
@@ -1263,7 +1254,7 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
* around this.
*/
struct fpu *fake_fpu_pointer =
(struct fpu *)(((char *)wolfcrypt_irq_fpu_states[processor_id])
(struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
- offsetof(struct fpu, state));
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
copy_fpregs_to_fpstate(fake_fpu_pointer);
@@ -1272,48 +1263,74 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
#endif
}
/* mark the slot as used. */
((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
/* note, not preempt_enable()ing, mirroring kernel_fpu_begin()
* semantics.
* semantics, even though routine will have been entered already
* non-preemptable.
*/
return 0;
} else {
preempt_enable();
return BAD_STATE_E;
}
preempt_enable();
return BAD_STATE_E;
} else {
/* allow for nested calls */
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
preempt_enable();
pr_err("save_vector_registers_x86 recursion register overflow for "
"cpu id %d.\n", processor_id);
return BAD_STATE_E;
} else {
++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
return 0;
}
}
kernel_fpu_begin();
preempt_enable(); /* kernel_fpu_begin() does its own
* preempt_disable(). decrement ours.
*/
((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
return 0;
}
}
void restore_vector_registers_x86(void)
{
if (am_in_hard_interrupt_handler()) {
int processor_id = __smp_processor_id();
if ((wolfcrypt_irq_fpu_states == NULL) ||
(wolfcrypt_irq_fpu_states[processor_id] == NULL) ||
(((unsigned char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] == 0))
{
int processor_id = smp_processor_id();
if ((wolfcrypt_linuxkm_fpu_states == NULL) ||
(wolfcrypt_linuxkm_fpu_states[processor_id] == NULL))
{
pr_err("restore_vector_registers_x86 called for cpu id %d "
"without saved context.\n", processor_id);
preempt_enable(); /* just in case */
"without null context buffer.\n", processor_id);
return;
} else if (--((unsigned char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] > 0) {
return;
} else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
copy_kernel_to_fpregs(wolfcrypt_irq_fpu_states[processor_id]);
#else
__restore_fpregs_from_fpstate(wolfcrypt_irq_fpu_states[processor_id],
xfeatures_mask_all);
#endif
((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 0;
preempt_enable();
return;
}
}
kernel_fpu_end();
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 0)
{
pr_err("restore_vector_registers_x86 called for cpu id %d "
"without saved context.\n", processor_id);
return;
}
if (--((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] > 0) {
preempt_enable(); /* preempt_disable count will still be nonzero after this decrement. */
return;
}
if (am_in_hard_interrupt_handler()) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
copy_kernel_to_fpregs(wolfcrypt_linuxkm_fpu_states[processor_id]);
#else
__restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
xfeatures_mask_all);
#endif
preempt_enable();
} else {
kernel_fpu_end();
}
return;
}
#endif /* WOLFSSL_LINUXKM_SIMD_X86 && WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */

View File

@@ -162,11 +162,10 @@ int wolfCrypt_Init(void)
}
#endif
#if defined(WOLFSSL_LINUXKM_SIMD_X86) \
&& defined(WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED)
ret = allocate_wolfcrypt_irq_fpu_states();
#if defined(WOLFSSL_LINUXKM_SIMD_X86)
ret = allocate_wolfcrypt_linuxkm_fpu_states();
if (ret != 0) {
WOLFSSL_MSG("allocate_wolfcrypt_irq_fpu_states failed");
WOLFSSL_MSG("allocate_wolfcrypt_linuxkm_fpu_states failed");
return ret;
}
#endif
@@ -374,9 +373,8 @@ int wolfCrypt_Cleanup(void)
rpcmem_deinit();
wolfSSL_CleanupHandle();
#endif
#if defined(WOLFSSL_LINUXKM_SIMD_X86) \
&& defined(WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED)
free_wolfcrypt_irq_fpu_states();
#if defined(WOLFSSL_LINUXKM_SIMD_X86)
free_wolfcrypt_linuxkm_fpu_states();
#endif
}

View File

@@ -282,7 +282,6 @@
#endif
typeof(kernel_fpu_end) *kernel_fpu_end;
#ifdef WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
typeof(copy_fpregs_to_fpstate) *copy_fpregs_to_fpstate;
typeof(copy_kernel_to_fpregs) *copy_kernel_to_fpregs;
@@ -293,7 +292,6 @@
#endif
typeof(cpu_number) *cpu_number;
typeof(nr_cpu_ids) *nr_cpu_ids;
#endif /* WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */
#endif /* WOLFSSL_LINUXKM_SIMD_X86 */
@@ -399,18 +397,16 @@
#define kernel_fpu_begin (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin)
#endif
#define kernel_fpu_end (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_end)
#ifdef WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
#define copy_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->copy_fpregs_to_fpstate)
#define copy_kernel_to_fpregs (wolfssl_linuxkm_get_pie_redirect_table()->copy_kernel_to_fpregs)
#else
#define save_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->save_fpregs_to_fpstate)
#define __restore_fpregs_from_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->__restore_fpregs_from_fpstate)
#define xfeatures_mask_all (*(wolfssl_linuxkm_get_pie_redirect_table()->xfeatures_mask_all))
#endif
#define cpu_number (*(wolfssl_linuxkm_get_pie_redirect_table()->cpu_number))
#define nr_cpu_ids (*(wolfssl_linuxkm_get_pie_redirect_table()->nr_cpu_ids))
#endif /* WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
#define copy_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->copy_fpregs_to_fpstate)
#define copy_kernel_to_fpregs (wolfssl_linuxkm_get_pie_redirect_table()->copy_kernel_to_fpregs)
#else
#define save_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->save_fpregs_to_fpstate)
#define __restore_fpregs_from_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->__restore_fpregs_from_fpstate)
#define xfeatures_mask_all (*(wolfssl_linuxkm_get_pie_redirect_table()->xfeatures_mask_all))
#endif
#define cpu_number (*(wolfssl_linuxkm_get_pie_redirect_table()->cpu_number))
#define nr_cpu_ids (*(wolfssl_linuxkm_get_pie_redirect_table()->nr_cpu_ids))
#endif
#define __mutex_init (wolfssl_linuxkm_get_pie_redirect_table()->__mutex_init)
@@ -448,30 +444,15 @@
#ifdef WOLFSSL_LINUXKM_SIMD_X86
#ifdef WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED
extern __must_check int allocate_wolfcrypt_irq_fpu_states(void);
extern void free_wolfcrypt_irq_fpu_states(void);
extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states(void);
extern void free_wolfcrypt_linuxkm_fpu_states(void);
extern __must_check int save_vector_registers_x86(void);
extern void restore_vector_registers_x86(void);
#else /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */
#define save_vector_registers_x86() ({ \
int _ret; \
preempt_disable(); \
if (! irq_fpu_usable()) { \
preempt_enable(); \
_ret = BAD_STATE_E; \
} else { \
kernel_fpu_begin(); \
preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ \
_ret = 0; \
} \
_ret; \
})
#define restore_vector_registers_x86() kernel_fpu_end()
#endif /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#error kernel module ARM SIMD is not yet tested or usable.
static WARN_UNUSED_RESULT inline int save_vector_registers_arm(void)
{
preempt_disable();