KNF a little.
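Everything below is mechanical NetBSD KNF (Kernel Normal Form) cleanup; none of it changes the generated code. Three rules are applied: '#' line comments in the two vector.S files become C-style comments ('#' is assembler-specific and can collide with cpp directives, while C comments are stripped uniformly), the space after the comma between instruction operands is dropped, and return values in uvm_mmap.c lose their redundant parentheses. A minimal C sketch of the return rule, with hypothetical function names:

/* return is a statement, not a function call, so KNF writes the
 * value bare; both forms compile to identical code. */
static int
check_old(int error)
{
	return (error);		/* pre-KNF style, removed below */
}

static int
check_new(int error)
{
	return error;		/* KNF style, added below */
}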
This commit is contained in:
parent d6f286364d
commit 06ab45be44
@@ -1,4 +1,4 @@
-/*	$NetBSD: vector.S,v 1.45 2015/11/22 13:41:24 maxv Exp $	*/
+/*	$NetBSD: vector.S,v 1.46 2016/08/07 10:17:32 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@ -107,7 +107,6 @@
|
|||||||
* If the interrupt frame is made more flexible, INTR can push %eax first and
|
* If the interrupt frame is made more flexible, INTR can push %eax first and
|
||||||
* decide the ipending case with less overhead, e.g., by avoiding loading the
|
* decide the ipending case with less overhead, e.g., by avoiding loading the
|
||||||
* segment registers.
|
* segment registers.
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* XXX See comment in locore.s */
|
/* XXX See comment in locore.s */
|
||||||
@@ -211,7 +210,7 @@ IDTVEC(intr_lapic_tlb)
 	pushq	$0
 	pushq	$T_ASTFLT
 	INTRENTRY
-	movl	$0, _C_LABEL(local_apic)+LAPIC_EOI
+	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
 	callq	_C_LABEL(pmap_tlb_intr)
 	INTRFASTEXIT
 IDTVEC_END(intr_lapic_tlb)
@@ -236,14 +235,14 @@ IDTVEC(recurse_ ## name ## num) ;\
 IDTVEC(resume_ ## name ## num)					\
 	movq	$IREENT_MAGIC,TF_ERR(%rsp)			;\
 	movl	%ebx,%r13d					;\
-	movq	CPUVAR(ISOURCES) + (num) * 8, %r14		;\
+	movq	CPUVAR(ISOURCES) + (num) * 8,%r14		;\
 	movl	IS_MAXLEVEL(%r14),%ebx				;\
 	jmp	1f						;\
 IDTVEC(intr_ ## name ## num)					;\
 	pushq	$0		/* dummy error code */		;\
 	pushq	$T_ASTFLT	/* trap # for doing ASTs */	;\
 	INTRENTRY						;\
-	movq	CPUVAR(ISOURCES) + (num) * 8, %r14		;\
+	movq	CPUVAR(ISOURCES) + (num) * 8,%r14		;\
 	mask(num)		/* mask it in hardware */	;\
 	early_ack(num)		/* and allow other intrs */	;\
 	testq	%r14,%r14					;\
@@ -588,7 +587,7 @@ IDTVEC(recurse_ ## name ## num) ;\
 IDTVEC(resume_ ## name ## num)					\
 	movq	$IREENT_MAGIC,TF_ERR(%rsp)			;\
 	movl	%ebx,%r13d					;\
-	movq	CPUVAR(ISOURCES) + (num) * 8, %r14		;\
+	movq	CPUVAR(ISOURCES) + (num) * 8,%r14		;\
 1:								\
 	pushq	%r13						;\
 	movl	$num,CPUVAR(ILEVEL)				;\
@@ -609,9 +608,9 @@ IDTVEC(resume_ ## name ## num) \
 	STI(si)							;\
 	jmp	_C_LABEL(Xdoreti) /* lower spl and do ASTs */	;\
 
-# The unmask func for Xen events
+/* The unmask func for Xen events */
 #define hypervisor_asm_unmask(num)			\
-	movq	$num, %rdi				;\
+	movq	$num,%rdi				;\
 	call	_C_LABEL(hypervisor_enable_ipl)
 
 XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
@@ -682,19 +681,21 @@ LABEL(xenev_stubs)
 	.quad	_C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31)
 END(xenev_stubs)
 
-# Xen callbacks
+/*
+ * Xen callbacks
+ */
 
-# Hypervisor callback
+/* Hypervisor callback */
 NENTRY(hypervisor_callback)
 	movq	(%rsp),%rcx
 	movq	8(%rsp),%r11
 	addq	$16,%rsp
-	pushq	$0		# Dummy error code
+	pushq	$0		/* Dummy error code */
 	pushq	$T_ASTFLT
 	INTRENTRY
-	# sti??
-	movq	%rsp, %rdi
-	subq	$8, %rdi;	/* don't forget if_ppl */
+	/* sti?? */
+	movq	%rsp,%rdi
+	subq	$8,%rdi;	/* don't forget if_ppl */
 	call	do_hypervisor_callback
 	testb	$SEL_RPL,TF_CS(%rsp)
 	jnz	doreti_checkast
@@ -702,7 +703,7 @@ NENTRY(hypervisor_callback)
 	INTRFASTEXIT
 END(hypervisor_callback)
 
-# Panic?
+/* Panic? */
 NENTRY(failsafe_callback)
 	movq	(%rsp),%rcx
 	movq	8(%rsp),%r11
@@ -710,11 +711,11 @@ NENTRY(failsafe_callback)
 	pushq	$0
 	pushq	$T_ASTFLT
 	INTRENTRY
-	movq	%rsp, %rdi
-	subq	$8, %rdi;	/* don't forget if_ppl */
+	movq	%rsp,%rdi
+	subq	$8,%rdi;	/* don't forget if_ppl */
 	call	xen_failsafe_handler
 	INTRFASTEXIT
-	# jmp HYPERVISOR_iret
+	/* jmp HYPERVISOR_iret */
 END(failsafe_callback)
 
 #endif /* !XEN */
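One wrinkle worth noting before the second file: in both callbacks above, the C handler is called with %rdi = %rsp - 8. INTRENTRY leaves %rsp pointing at the saved trap frame, while the handlers expect an interrupt frame whose first member (the if_ppl the comment warns about) sits in the 8 bytes directly below it. A hedged C sketch of that adjustment; the layouts here are illustrative stand-ins, not NetBSD's real frame definitions:

#include <stdint.h>

struct trapframe { uint64_t tf_regs[24]; };	/* stand-in layout */
struct intrframe {
	uint64_t if_ppl;	/* priority level, just below the trapframe */
	struct trapframe if_tf;
};

/* movq %rsp,%rdi ; subq $8,%rdi -- step back over if_ppl */
static inline struct intrframe *
intrframe_from(struct trapframe *tf)
{
	return (struct intrframe *)((uint8_t *)tf - 8);
}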
@@ -1,4 +1,4 @@
-/*	$NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $	*/
+/*	$NetBSD: vector.S,v 1.65 2016/08/07 10:17:32 maxv Exp $	*/
 
 /*
  * Copyright 2002 (c) Wasabi Systems, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.65 2016/08/07 10:17:32 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -106,7 +106,6 @@ __KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $");
  * If the interrupt frame is made more flexible, INTR can push %eax first and
  * decide the ipending case with less overhead, e.g., by avoiding loading the
  * segment registers.
- *
  */
 
 /*
@@ -189,7 +188,7 @@ IDTVEC(intr_lapic_tlb)
 	pushl	$0
 	pushl	$T_ASTFLT
 	INTRENTRY
-	movl	$0, _C_LABEL(local_apic)+LAPIC_EOI
+	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
 	call	_C_LABEL(pmap_tlb_intr)
 	INTRFASTEXIT
 IDTVEC_END(intr_lapic_tlb)
@@ -272,7 +271,7 @@ IDTVEC_END(recurse_ ## name ## num) ;\
 IDTVEC(resume_ ## name ## num)					\
 	movl	$IREENT_MAGIC,TF_ERR(%esp)			;\
 	movl	%ebx,%esi					;\
-	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp		;\
+	movl	CPUVAR(ISOURCES) + (num) * 4,%ebp		;\
 	movl	IS_MAXLEVEL(%ebp),%ebx				;\
 	jmp	1f						;\
 IDTVEC_END(resume_ ## name ## num)				;\
@@ -280,7 +279,7 @@ IDTVEC(intr_ ## name ## num) ;\
 	pushl	$0		/* dummy error code */		;\
 	pushl	$T_ASTFLT	/* trap # for doing ASTs */	;\
 	INTRENTRY						;\
-	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp		;\
+	movl	CPUVAR(ISOURCES) + (num) * 4,%ebp		;\
 	mask(num)		/* mask it in hardware */	;\
 	early_ack(num)		/* and allow other intrs */	;\
 	testl	%ebp,%ebp					;\
@@ -631,7 +630,7 @@ IDTVEC(recurse_ ## name ## num) ;\
 IDTVEC(resume_ ## name ## num)					\
 	movl	$IREENT_MAGIC,TF_ERR(%esp)			;\
 	pushl	%ebx						;\
-	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp		;\
+	movl	CPUVAR(ISOURCES) + (num) * 4,%ebp		;\
 	movl	$num,CPUVAR(ILEVEL)				;\
 	IDEPTH_INCR		/* leaves old %esp on stack */	;\
 	STI(%eax)						;\
@@ -753,7 +752,7 @@ END(xenev_stubs)
  * activation and restart the handler using the previous one.
  */
 NENTRY(hypervisor_callback)
-	pushl	$0		# dummy error code
+	pushl	$0		/* dummy error code */
 	pushl	$T_ASTFLT
 	INTRENTRY
 	movl	TF_EIP(%esp),%eax
@@ -767,16 +766,16 @@ NENTRY(hypervisor_callback)
 	add	$8,%esp
 	xorl	%eax,%eax
 	movb	TF_CS(%esp),%cl
-	test	$CHK_UPL,%cl		# slow return to ring 2 or 3
+	test	$CHK_UPL,%cl		/* slow return to ring 2 or 3 */
 	je	safesti
 	movl	CPUVAR(ILEVEL),%ebx
 	jmp	doreti_checkast
 safesti:
 	movl	CPUVAR(VCPU),%esi
-	XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
+	XEN_UNBLOCK_EVENTS(%esi)	/* reenable event callbacks */
 scrit:	/**** START OF CRITICAL REGION ****/
 	XEN_TEST_PENDING(%esi)
-	jnz	14f			# process more events if necessary...
+	jnz	14f			/* process more events if necessary... */
 	INTRFASTEXIT
 critiret:
 14:	XEN_BLOCK_EVENTS(%esi)
@@ -792,7 +791,7 @@ ecrit: /**** END OF CRITICAL REGION ****/
  * interrupted stack frame.
  */
 critical_region_fixup:
-	cmpl	$(critiret-1),%eax	# eip points to iret?
+	cmpl	$(critiret-1),%eax	/* eip points to iret? */
 	jne	1f
 	movl	$(TF_PUSHSIZE+0x8),%eax
 	jmp	2f
@@ -800,18 +799,18 @@ critical_region_fixup:
 2:
 	/* %eax contains num bytes popped */
 	mov	%esp,%esi
-	add	%eax,%esi		# %esi points at end of src region
+	add	%eax,%esi		/* %esi points at end of src region */
 	mov	%esp,%edi
-	add	$(TF_PUSHSIZE+0x8+0xC),%edi # %edi points at end of dst region
+	add	$(TF_PUSHSIZE+0x8+0xC),%edi /* %edi points at end of dst region */
 	mov	%eax,%ecx
-	shr	$2,%ecx			# convert words to bytes
-	je	16f			# skip loop if nothing to copy
-15:	subl	$4,%esi			# pre-decrementing copy loop
+	shr	$2,%ecx			/* convert words to bytes */
+	je	16f			/* skip loop if nothing to copy */
+15:	subl	$4,%esi			/* pre-decrementing copy loop */
 	subl	$4,%edi
 	movl	(%esi),%eax
 	movl	%eax,(%edi)
 	loop	15b
-16:	movl	%edi,%esp		# final %edi is top of merged stack
+16:	movl	%edi,%esp		/* final %edi is top of merged stack */
 	jmp	11b
 END(hypervisor_callback)
 
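The comment conversions in the last two hunks land in the most delicate part of this file, the critical-region fixup. If an event callback interrupts the handler while it is already popping its own frame, the fixup copies the partially-popped frame down the stack and merges the two, walking backward word by word. (The inherited "convert words to bytes" comment has the direction backwards: shr $2,%ecx turns the byte count in %ecx into a 32-bit-word count.) A C rendering of the loop, assuming 4-byte words and a word-multiple byte count; merge_stack is a hypothetical name:

#include <stddef.h>
#include <stdint.h>

/* %eax = nbytes popped; %esi/%edi start at the ends of the source
 * and destination regions; the final %edi is the merged stack top. */
static uint32_t *
merge_stack(uint32_t *sp, size_t nbytes, size_t dst_offset)
{
	uint32_t *src = (uint32_t *)((uint8_t *)sp + nbytes);
	uint32_t *dst = (uint32_t *)((uint8_t *)sp + dst_offset);
	size_t nwords = nbytes / 4;	/* shr $2,%ecx */

	while (nwords-- > 0) {		/* 15: pre-decrementing copy loop */
		*--dst = *--src;	/* subl/subl/movl/movl */
	}
	return dst;			/* 16: new %esp */
}

In the assembly, dst_offset is the constant TF_PUSHSIZE+0x8+0xC.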
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_mmap.c,v 1.160 2016/08/07 09:55:18 maxv Exp $	*/
+/*	$NetBSD: uvm_mmap.c,v 1.161 2016/08/07 10:07:58 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -46,7 +46,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.160 2016/08/07 09:55:18 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.161 2016/08/07 10:07:58 maxv Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_pax.h"
@@ -104,7 +104,7 @@ sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
 		syscallarg(intptr_t) incr;
 	} */
 
-	return (ENOSYS);
+	return ENOSYS;
 }
 
 /*
@@ -119,7 +119,7 @@ sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
 		syscallarg(int) incr;
 	} */
 
-	return (ENOSYS);
+	return ENOSYS;
 }
 
 /*
@@ -155,11 +155,11 @@ sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
 	vec = SCARG(uap, vec);
 
 	if (start & PAGE_MASK)
-		return (EINVAL);
+		return EINVAL;
 	len = round_page(len);
 	end = start + len;
 	if (end <= start)
-		return (EINVAL);
+		return EINVAL;
 
 	/*
 	 * Lock down vec, so our returned status isn't outdated by
@@ -259,7 +259,7 @@ sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
 out:
 	vm_map_unlock_read(map);
 	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
-	return (error);
+	return error;
 }
 
 /*
@@ -328,7 +328,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
 #endif
 	}
 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
-		return (EINVAL);
+		return EINVAL;
 
 	/*
 	 * align file position and save offset. adjust size.
@@ -340,26 +340,23 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
 	newsize = (vsize_t)round_page(newsize);	/* round up */
 
 	if (newsize < size)
-		return (ENOMEM);
+		return ENOMEM;
 	size = newsize;
 
 	/*
 	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
 	 */
 	if (flags & MAP_FIXED) {
-
 		/* ensure address and file offset are aligned properly */
 		addr -= pageoff;
 		if (addr & PAGE_MASK)
-			return (EINVAL);
+			return EINVAL;
 
 		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
 		if (error) {
 			return error;
 		}
-
 	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
-
 		/*
 		 * not fixed: make sure we skip over the largest
 		 * possible heap for non-topdown mapping arrangements.
@@ -371,8 +368,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
 		    (vaddr_t)p->p_vmspace->vm_daddr, size,
 		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
 
-		if (addr == 0 ||
-		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
+		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
 			addr = MAX(addr, defaddr);
 		else
 			addr = MIN(addr, defaddr);
@@ -385,7 +381,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
 	advice = UVM_ADV_NORMAL;
 	if ((flags & MAP_ANON) == 0) {
 		if ((fp = fd_getfile(fd)) == NULL)
-			return (EBADF);
+			return EBADF;
 
 		if (fp->f_ops->fo_mmap == NULL) {
 			error = ENODEV;
@@ -407,7 +403,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
 	 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
 	 */
 	if (fd != -1)
-		return (EINVAL);
+		return EINVAL;
 
 is_anon:	/* label for SunOS style /dev/zero */
 	uobj = NULL;
@@ -433,7 +429,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
 	if (fp != NULL)
 		fd_putfile(fd);
 
-	return (error);
+	return error;
 }
 
 /*
@@ -468,7 +464,7 @@ sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
 	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
 	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
 	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
-		return (EINVAL);
+		return EINVAL;
 	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
 		flags |= MS_SYNC;
 
@@ -513,7 +509,7 @@ sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
 		}
 		vm_map_unlock_read(map);
 		if (rv == false)
-			return (EINVAL);
+			return EINVAL;
 	}
 
 	/*
@@ -565,7 +561,7 @@ sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
 	size = (vsize_t)round_page(size);
 
 	if (size == 0)
-		return (0);
+		return 0;
 
 	map = &p->p_vmspace->vm_map;
 
@@ -573,23 +569,22 @@ sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
 	if (error)
 		return EINVAL;
 
+	vm_map_lock(map);
+#if 0
 	/*
 	 * interesting system call semantic: make sure entire range is
 	 * allocated before allowing an unmap.
 	 */
-
-	vm_map_lock(map);
-#if 0
 	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
 		vm_map_unlock(map);
-		return (EINVAL);
+		return EINVAL;
 	}
 #endif
 	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
 	vm_map_unlock(map);
 	if (dead_entries != NULL)
 		uvm_unmap_detach(dead_entries, 0);
-	return (0);
+	return 0;
 }
 
 /*
@@ -769,10 +764,10 @@ sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
 		 * There's also what to do for device/file/anonymous memory.
 		 */
 
-		return (EINVAL);
+		return EINVAL;
 
 	default:
-		return (EINVAL);
+		return EINVAL;
 	}
 
 	return error;
@@ -815,11 +810,11 @@ sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
 		return ENOMEM;
 
 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
-		return (EAGAIN);
+		return EAGAIN;
 
 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
 	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
-		return (EAGAIN);
+		return EAGAIN;
 
 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
 	    0);
@@ -888,13 +883,12 @@ sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
 
 	flags = SCARG(uap, flags);
 
-	if (flags == 0 ||
-	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
-		return (EINVAL);
+	if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
+		return EINVAL;
 
 	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
 	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
-	return (error);
+	return error;
 }
 
 /*
@@ -907,7 +901,7 @@ sys_munlockall(struct lwp *l, const void *v, register_t *retval)
 	struct proc *p = l->l_proc;
 
 	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
-	return (0);
+	return 0;
 }
 
 /*
@@ -932,11 +926,11 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
 	 */
 
 	if (size == 0)
-		return(0);
+		return 0;
 	if (foff & PAGE_MASK)
-		return(EINVAL);
+		return EINVAL;
 	if ((prot & maxprot) != prot)
-		return(EINVAL);
+		return EINVAL;
 
 	/*
 	 * for non-fixed mappings, round off the suggested address.
@@ -947,7 +941,7 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
 		*addr = round_page(*addr);
 	} else {
 		if (*addr & PAGE_MASK)
-			return(EINVAL);
+			return EINVAL;
 		uvmflag |= UVM_FLAG_FIXED;
 		(void) uvm_unmap(map, *addr, *addr + size);
 	}
@@ -963,15 +957,15 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
 	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
 	if (align) {
 		if (align >= sizeof(vaddr_t) * NBBY)
-			return(EINVAL);
+			return EINVAL;
 		align = 1L << align;
 		if (align < PAGE_SIZE)
-			return(EINVAL);
+			return EINVAL;
 		if (align >= vm_map_max(map))
-			return(ENOMEM);
+			return ENOMEM;
 		if (flags & MAP_FIXED) {
 			if ((*addr & (align-1)) != 0)
-				return(EINVAL);
+				return EINVAL;
 			align = 0;
 		}
 	}
@@ -1008,8 +1002,8 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
 	}
 
 	uvmflag = UVM_MAPFLAG(prot, maxprot,
-	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
-	    advice, uvmflag);
+	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice,
+	    uvmflag);
 	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
 	if (error) {
 		if (uobj)
@@ -1030,7 +1024,7 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
 		 * No more work to do in this case.
 		 */
 
-		return (0);
+		return 0;
 	}
 	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
 		vm_map_lock(map);
@@ -1053,7 +1047,7 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
 			uvm_unmap(map, *addr, *addr + size);
 			return error;
 		}
-		return (0);
+		return 0;
 	}
 	return 0;
 }
@@ -1089,8 +1083,8 @@ uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
 		return EINVAL;
 
 	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
-	    (vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM,
-	    uobj, off, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
+	    (vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM, uobj, off,
+	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 	return error;
 }
 
@@ -1109,7 +1103,7 @@ uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
 	    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
 
 	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
-	    (vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL,
-	    NULL, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
+	    (vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL, NULL, 0,
+	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 	return error;
 }
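A final note on uvm_mmap() itself, since one hunk above runs through its alignment path: the requested alignment travels inside mmap's flags word as a log2 exponent, and the function rejects exponents that would overflow a vaddr_t or name a sub-page alignment. A compact sketch of that decoding under illustrative constants (the real MAP_ALIGNED() macro and companions live in NetBSD's sys/mman.h; the vm_map_max() upper-bound check is omitted here):

#include <errno.h>
#include <limits.h>

#define MAP_ALIGNMENT_SHIFT	24			/* illustrative */
#define MAP_ALIGNMENT_MASK	(0x1f << MAP_ALIGNMENT_SHIFT)
#define MAP_ALIGNED(n)		((n) << MAP_ALIGNMENT_SHIFT)

/* Decode a log2 alignment request the way uvm_mmap() does above. */
static int
decode_align(int flags, unsigned long page_size, unsigned long *out)
{
	unsigned long align =
	    ((unsigned)flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;

	if (align == 0) {
		*out = 0;			/* no alignment requested */
		return 0;
	}
	if (align >= sizeof(unsigned long) * CHAR_BIT)
		return EINVAL;			/* 1UL << align would overflow */
	align = 1UL << align;
	if (align < page_size)
		return EINVAL;			/* sub-page alignment is meaningless */
	*out = align;
	return 0;
}

For example, MAP_ALIGNED(16) under these constants asks for a 2^16 = 64 KiB alignment.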