Make this preemption safe.

ad 2008-04-28 18:13:58 +00:00
parent bb588cd930
commit 64bcc9bd8b
1 changed file with 79 additions and 30 deletions


@@ -1,4 +1,4 @@
/* $NetBSD: copy.S,v 1.14 2008/04/25 16:34:25 ad Exp $ */
/* $NetBSD: copy.S,v 1.15 2008/04/28 18:13:58 ad Exp $ */
/* NetBSD: locore.S,v 1.34 2005/04/01 11:59:31 yamt Exp $ */
/*-
@@ -72,7 +72,7 @@
*/
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.14 2008/04/25 16:34:25 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.15 2008/04/28 18:13:58 ad Exp $");
#include "assym.h"
@@ -85,9 +85,56 @@ __KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.14 2008/04/25 16:34:25 ad Exp $");
movl CPUVAR(CURLWP), reg; \
movl L_ADDR(reg), reg
/*
* These are arranged so that the abnormal case is a forwards
* conditional branch - which will be predicted not-taken by
* both Intel and AMD processors.
*/
#define DEFERRED_SWITCH_CHECK \
CHECK_DEFERRED_SWITCH ; \
jnz 99f ; \
98:
#define DEFERRED_SWITCH_CALL \
99: ; \
call _C_LABEL(do_pmap_load) ; \
jmp 98b
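
For orientation, here is a hedged C rendering of the control-flow shape these two macros give each copy primitive. The ci_want_pmapload test, the out-of-line do_pmap_load() call and the resume point are taken from the assembly above; the wrapper function and the use of __predict_false() are only illustrative:

	/*
	 * Sketch only: the shape DEFERRED_SWITCH_CHECK / DEFERRED_SWITCH_CALL
	 * create around a copy primitive.  The pending-switch case is the
	 * rare, forward-branch path; the fast path falls straight through.
	 */
	int
	copy_primitive_sketch(void *dst, const void *src, size_t len)
	{
		if (__predict_false(curcpu()->ci_want_pmapload))
			do_pmap_load();	/* 99: out-of-line; loops until clear */
		/* 98: fast path - the actual user copy goes here */
		return 0;
	}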
/*
* The following primitives are to copy regions of memory.
*/
.text
x86_copyfunc_start: .globl x86_copyfunc_start
/*
* Handle deferred pmap switch. We must re-enable preemption without
* making a function call, so that the program counter is visible to
* cpu_kpreempt_exit(). It can then know if it needs to restore the
* pmap on returning, because a preemption occurred within one of the
* copy functions.
*/
NENTRY(do_pmap_load)
pushl %ebp
movl %esp, %ebp
pushl %ebx
movl CPUVAR(CURLWP), %ebx
1:
incl L_NOPREEMPT(%ebx)
call _C_LABEL(pmap_load)
decl L_NOPREEMPT(%ebx)
jnz 2f
cmpl $0, L_DOPREEMPT(%ebx)
jz 2f
pushl $0
call _C_LABEL(kpreempt)
addl $4, %esp
2:
cmpl $0, CPUVAR(WANT_PMAPLOAD)
jnz 1b
popl %ebx
leave
ret
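
Read as C, the routine above is roughly the loop below. This is a hedged sketch only: the whole point of the assembly version is that the preemption counter is adjusted inline, with no call frame in the way, so cpu_kpreempt_exit() can tell from the interrupted program counter that a copy function needs its pmap restored. The field and function names mirror the symbols used in the assembly:

	/* Hedged C equivalent of do_pmap_load; illustrative, not a drop-in. */
	void
	do_pmap_load_sketch(void)
	{
		struct lwp *l = curlwp;

		do {
			l->l_nopreempt++;		/* incl L_NOPREEMPT */
			pmap_load();			/* switch to the deferred pmap */
			l->l_nopreempt--;		/* decl L_NOPREEMPT */
			if (l->l_nopreempt == 0 && l->l_dopreempt)
				kpreempt(0);		/* take the deferred preemption */
		} while (curcpu()->ci_want_pmapload);	/* may have been set again */
	}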
/*
* int kcopy(const void *from, void *to, size_t len);
@@ -97,10 +144,10 @@ __KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.14 2008/04/25 16:34:25 ad Exp $");
ENTRY(kcopy)
pushl %esi
pushl %edi
.Lkcopy_start:
movl 12(%esp),%esi
movl 16(%esp),%edi
movl 20(%esp),%ecx
.Lkcopy_start:
movl %edi,%eax
subl %esi,%eax
cmpl %ecx,%eax # overlapping?
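
The "# overlapping?" test relies on unsigned wrap-around: if (dst - src) is smaller than len when treated as an unsigned value, dst lies inside the source region and the copy must run backwards. A hedged C sketch with illustrative names:

	/* Sketch of the overlap test kcopy performs before choosing a direction. */
	if ((uintptr_t)dst - (uintptr_t)src < len) {
		/* regions overlap with dst above src: copy backwards */
	} else {
		/* safe to copy forwards */
	}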
@@ -159,16 +206,13 @@ ENTRY(kcopy)
/* LINTSTUB: Func: int copyout(const void *kaddr, void *uaddr, size_t len) */
ENTRY(copyout)
DEFERRED_SWITCH_CHECK
pushl %esi
pushl %edi
.Lcopyout_start:
DO_DEFERRED_SWITCH_RETRY
movl 12(%esp),%esi
movl 16(%esp),%edi
movl 20(%esp),%eax
.Lcopyout_start:
/*
* We check that the end of the destination buffer is not past the end
* of the user's address space.
@@ -193,6 +237,7 @@ ENTRY(copyout)
popl %esi
xorl %eax,%eax
ret
DEFERRED_SWITCH_CALL
/*
* int copyin(const void *from, void *to, size_t len);
@@ -202,20 +247,18 @@ ENTRY(copyout)
/* LINTSTUB: Func: int copyin(const void *uaddr, void *kaddr, size_t len) */
ENTRY(copyin)
DEFERRED_SWITCH_CHECK
pushl %esi
pushl %edi
.Lcopyin_start:
DO_DEFERRED_SWITCH_RETRY
movl 12(%esp),%esi
movl 16(%esp),%edi
movl 20(%esp),%eax
/*
* We check that the end of the destination buffer is not past the end
* of the user's address space. If it's not, then we only need to
* check that each page is readable, and the CPU will do that for us.
*/
.Lcopyin_start:
movl %esi,%edx
addl %eax,%edx
jc _C_LABEL(copy_efault)
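
The instructions above are the first half of the range check the comment describes; in rough C (a sketch, variable names invented, with copy_efault ultimately yielding EFAULT):

	/* Sketch of the user-range check: reject a wrapped or out-of-range end. */
	uintptr_t end = (uintptr_t)uaddr + len;
	if (end < (uintptr_t)uaddr || end > VM_MAXUSER_ADDRESS)
		return EFAULT;		/* copy_efault path */
	/* otherwise each page is simply touched and the CPU checks access */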
@@ -236,6 +279,7 @@ ENTRY(copyin)
popl %esi
xorl %eax,%eax
ret
DEFERRED_SWITCH_CALL
/* LINTSTUB: Ignore */
NENTRY(copy_efault)
@@ -268,16 +312,13 @@ NENTRY(copy_fault)
*/
/* LINTSTUB: Func: int copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) */
ENTRY(copyoutstr)
DEFERRED_SWITCH_CHECK
pushl %esi
pushl %edi
.Lcopyoutstr_start:
DO_DEFERRED_SWITCH_RETRY
movl 12(%esp),%esi # esi = from
movl 16(%esp),%edi # edi = to
movl 20(%esp),%edx # edx = maxlen
.Lcopyoutstr_start:
5:
/*
* Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
@@ -310,6 +351,7 @@ ENTRY(copyoutstr)
jae _C_LABEL(copystr_efault)
movl $ENAMETOOLONG,%eax
jmp copystr_return
DEFERRED_SWITCH_CALL
/*
* int copyinstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
@@ -321,12 +363,9 @@ ENTRY(copyoutstr)
*/
/* LINTSTUB: Func: int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) */
ENTRY(copyinstr)
DEFERRED_SWITCH_CHECK
pushl %esi
pushl %edi
.Lcopyinstr_start:
DO_DEFERRED_SWITCH_RETRY
movl 12(%esp),%esi # %esi = from
movl 16(%esp),%edi # %edi = to
movl 20(%esp),%edx # %edx = maxlen
@@ -334,6 +373,7 @@ ENTRY(copyinstr)
/*
* Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
*/
.Lcopyinstr_start:
movl $VM_MAXUSER_ADDRESS,%eax
subl %esi,%eax
jc _C_LABEL(copystr_efault)
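
The min() computation mentioned in the comment clamps the loop count so the string walk can never leave user space; roughly, as a hedged C sketch with illustrative names:

	/* Sketch of the clamp: copy at most min(maxlen, VM_MAXUSER_ADDRESS - uaddr). */
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS)
		return EFAULT;				/* copystr_efault path */
	size_t room = VM_MAXUSER_ADDRESS - (uintptr_t)uaddr;
	size_t count = (maxlen < room) ? maxlen : room;
	/* if no NUL turns up within 'count' bytes, the code below decides
	 * between EFAULT (ran into the address-space limit) and ENAMETOOLONG */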
@@ -362,6 +402,7 @@ ENTRY(copyinstr)
jae _C_LABEL(copystr_efault)
movl $ENAMETOOLONG,%eax
jmp copystr_return
DEFERRED_SWITCH_CALL
/* LINTSTUB: Ignore */
NENTRY(copystr_efault)
@@ -433,7 +474,7 @@ ENTRY(copystr)
*/
/* LINTSTUB: Func: long fuword(const void *base) */
ENTRY(fuword)
DO_DEFERRED_SWITCH
DEFERRED_SWITCH_CHECK
movl 4(%esp),%edx
cmpl $VM_MAXUSER_ADDRESS-4,%edx
ja _C_LABEL(fusuaddrfault)
@@ -442,6 +483,7 @@ ENTRY(fuword)
movl (%edx),%eax
movl $0,PCB_ONFAULT(%ecx)
ret
DEFERRED_SWITCH_CALL
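
fuword() and the other fetch/store primitives below share one pattern: bounds-check the address, arm pcb_onfault so a fault during the single access returns -1, perform the access, and disarm. A hedged C outline follows; the fault-handler label is not visible in this hunk, so the name used here is hypothetical:

	/* Outline of the fuword() pattern (sketch; the real code stays in
	 * assembly so the onfault window covers exactly one access). */
	long
	fuword_sketch(const void *uaddr)
	{
		struct pcb *pcb = &curlwp->l_addr->u_pcb;	/* GET_CURPCB in C terms */
		long val;

		if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS - 4)
			return -1;				/* fusuaddrfault */
		pcb->pcb_onfault = fusu_fault_handler;		/* hypothetical name */
		val = *(const long *)uaddr;			/* may fault */
		pcb->pcb_onfault = NULL;			/* movl $0,PCB_ONFAULT */
		return val;
	}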
/*
* int fusword(const void *uaddr);
@@ -450,7 +492,7 @@ ENTRY(fuword)
*/
/* LINTSTUB: Func: int fusword(const void *base) */
ENTRY(fusword)
DO_DEFERRED_SWITCH
DEFERRED_SWITCH_CHECK
movl 4(%esp),%edx
cmpl $VM_MAXUSER_ADDRESS-2,%edx
ja _C_LABEL(fusuaddrfault)
@@ -459,6 +501,7 @@ ENTRY(fusword)
movzwl (%edx),%eax
movl $0,PCB_ONFAULT(%ecx)
ret
DEFERRED_SWITCH_CALL
/*
* int fuswintr(const void *uaddr);
@@ -487,7 +530,7 @@ ENTRY(fuswintr)
*/
/* LINTSTUB: Func: int fubyte(const void *base) */
ENTRY(fubyte)
DO_DEFERRED_SWITCH
DEFERRED_SWITCH_CHECK
movl 4(%esp),%edx
cmpl $VM_MAXUSER_ADDRESS-1,%edx
ja _C_LABEL(fusuaddrfault)
@@ -496,6 +539,7 @@ ENTRY(fubyte)
movzbl (%edx),%eax
movl $0,PCB_ONFAULT(%ecx)
ret
DEFERRED_SWITCH_CALL
/*
* Handle faults from [fs]u*(). Clean up and return -1.
@@ -532,7 +576,7 @@ NENTRY(fusuaddrfault)
*/
/* LINTSTUB: Func: int suword(void *base, long c) */
ENTRY(suword)
DO_DEFERRED_SWITCH
DEFERRED_SWITCH_CHECK
movl 4(%esp),%edx
cmpl $VM_MAXUSER_ADDRESS-4,%edx
ja _C_LABEL(fusuaddrfault)
@@ -543,6 +587,7 @@ ENTRY(suword)
xorl %eax,%eax
movl %eax,PCB_ONFAULT(%ecx)
ret
DEFERRED_SWITCH_CALL
/*
* int susword(void *uaddr, short x);
@@ -551,7 +596,7 @@ ENTRY(suword)
*/
/* LINTSTUB: Func: int susword(void *base, short c) */
ENTRY(susword)
DO_DEFERRED_SWITCH
DEFERRED_SWITCH_CHECK
movl 4(%esp),%edx
cmpl $VM_MAXUSER_ADDRESS-2,%edx
ja _C_LABEL(fusuaddrfault)
@@ -562,6 +607,7 @@ ENTRY(susword)
xorl %eax,%eax
movl %eax,PCB_ONFAULT(%ecx)
ret
DEFERRED_SWITCH_CALL
/*
* int suswintr(void *uaddr, short x);
@@ -592,7 +638,7 @@ ENTRY(suswintr)
*/
/* LINTSTUB: Func: int subyte(void *base, int c) */
ENTRY(subyte)
DO_DEFERRED_SWITCH
DEFERRED_SWITCH_CHECK
movl 4(%esp),%edx
cmpl $VM_MAXUSER_ADDRESS-1,%edx
ja _C_LABEL(fusuaddrfault)
@@ -603,24 +649,24 @@ ENTRY(subyte)
xorl %eax,%eax
movl %eax,PCB_ONFAULT(%ecx)
ret
DEFERRED_SWITCH_CALL
/*
* copyin() optimised for bringing in syscall arguments.
*/
ENTRY(x86_copyargs)
DEFERRED_SWITCH_CHECK
pushl %esi
movl 8(%esp),%esi
movl 12(%esp),%edx
movl 16(%esp),%ecx
.Lx86_copyargs_start:
DO_DEFERRED_SWITCH_RETRY
/*
* We check that the end of the destination buffer is not past the end
* of the user's address space. If it's not, then we only need to
* check that each page is readable, and the CPU will do that for us.
*/
.Lx86_copyargs_start:
movl %esi,%eax
addl %ecx,%eax
jc _C_LABEL(x86_copyargs_efault)
@@ -665,6 +711,9 @@ NENTRY(x86_copyargs_efault)
NENTRY(x86_copyargs_fault)
popl %esi
ret
DEFERRED_SWITCH_CALL
x86_copyfunc_end: .globl x86_copyfunc_end
.section ".rodata"
.globl _C_LABEL(onfault_table)