KNF a little.

maxv 2016-08-07 10:07:58 +00:00
parent d6f286364d
commit 06ab45be44
3 changed files with 168 additions and 174 deletions

vector.S (amd64)

@ -1,4 +1,4 @@
/* $NetBSD: vector.S,v 1.45 2015/11/22 13:41:24 maxv Exp $ */
/* $NetBSD: vector.S,v 1.46 2016/08/07 10:17:32 maxv Exp $ */
/*-
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@ -107,7 +107,6 @@
* If the interrupt frame is made more flexible, INTR can push %eax first and
* decide the ipending case with less overhead, e.g., by avoiding loading the
* segment registers.
*
*/
/* XXX See comment in locore.s */
@ -121,16 +120,16 @@
#ifdef MULTIPROCESSOR
IDTVEC(recurse_lapic_ipi)
INTR_RECURSE_HWFRAME
pushq $0
pushq $0
pushq $T_ASTFLT
INTRENTRY
INTRENTRY
jmp 1f
IDTVEC_END(recurse_lapic_ipi)
IDTVEC(intr_lapic_ipi)
pushq $0
pushq $0
pushq $T_ASTFLT
INTRENTRY
INTRENTRY
movl $0,_C_LABEL(local_apic)+LAPIC_EOI
movl CPUVAR(ILEVEL),%ebx
cmpl $IPL_HIGH,%ebx
@ -166,22 +165,22 @@ IDTVEC(intrddb)
IDTVEC_END(intrddb)
#endif /* DDB */
#endif /* MULTIPROCESSOR */
/*
* Interrupt from the local APIC timer.
*/
IDTVEC(recurse_lapic_ltimer)
INTR_RECURSE_HWFRAME
pushq $0
pushq $0
pushq $T_ASTFLT
INTRENTRY
INTRENTRY
jmp 1f
IDTVEC_END(recurse_lapic_ltimer)
IDTVEC(intr_lapic_ltimer)
pushq $0
pushq $0
pushq $T_ASTFLT
INTRENTRY
INTRENTRY
movl $0,_C_LABEL(local_apic)+LAPIC_EOI
movl CPUVAR(ILEVEL),%ebx
cmpl $IPL_CLOCK,%ebx
@ -211,7 +210,7 @@ IDTVEC(intr_lapic_tlb)
pushq $0
pushq $T_ASTFLT
INTRENTRY
movl $0, _C_LABEL(local_apic)+LAPIC_EOI
movl $0,_C_LABEL(local_apic)+LAPIC_EOI
callq _C_LABEL(pmap_tlb_intr)
INTRFASTEXIT
IDTVEC_END(intr_lapic_tlb)
@ -236,14 +235,14 @@ IDTVEC(recurse_ ## name ## num) ;\
IDTVEC(resume_ ## name ## num) \
movq $IREENT_MAGIC,TF_ERR(%rsp) ;\
movl %ebx,%r13d ;\
movq CPUVAR(ISOURCES) + (num) * 8, %r14 ;\
movq CPUVAR(ISOURCES) + (num) * 8,%r14 ;\
movl IS_MAXLEVEL(%r14),%ebx ;\
jmp 1f ;\
IDTVEC(intr_ ## name ## num) ;\
pushq $0 /* dummy error code */ ;\
pushq $T_ASTFLT /* trap # for doing ASTs */ ;\
INTRENTRY ;\
movq CPUVAR(ISOURCES) + (num) * 8, %r14 ;\
movq CPUVAR(ISOURCES) + (num) * 8,%r14 ;\
mask(num) /* mask it in hardware */ ;\
early_ack(num) /* and allow other intrs */ ;\
testq %r14,%r14 ;\
@ -279,14 +278,14 @@ IDTVEC(intr_ ## name ## num) ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
7: \
cli ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
level_mask(num) ;\
late_ack(num) ;\
sti ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
10: \
cli ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
level_mask(num) ;\
late_ack(num) ;\
INTRFASTEXIT ;\
@ -581,39 +580,39 @@ END(ioapic_level_stubs)
/* Resume/recurse procedures for spl() */
#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
IDTVEC(recurse_ ## name ## num) ;\
INTR_RECURSE_HWFRAME ;\
subq $8,%rsp ;\
pushq $T_ASTFLT /* trap # for doing ASTs */ ;\
INTRENTRY ;\
INTR_RECURSE_HWFRAME ;\
subq $8,%rsp ;\
pushq $T_ASTFLT /* trap # for doing ASTs */ ;\
INTRENTRY ;\
IDTVEC(resume_ ## name ## num) \
movq $IREENT_MAGIC,TF_ERR(%rsp) ;\
movl %ebx,%r13d ;\
movq CPUVAR(ISOURCES) + (num) * 8, %r14 ;\
movq $IREENT_MAGIC,TF_ERR(%rsp) ;\
movl %ebx,%r13d ;\
movq CPUVAR(ISOURCES) + (num) * 8,%r14 ;\
1: \
pushq %r13 ;\
movl $num,CPUVAR(ILEVEL) ;\
STI(si) ;\
incl CPUVAR(IDEPTH) ;\
movq IS_HANDLERS(%r14),%rbx ;\
pushq %r13 ;\
movl $num,CPUVAR(ILEVEL) ;\
STI(si) ;\
incl CPUVAR(IDEPTH) ;\
movq IS_HANDLERS(%r14),%rbx ;\
6: \
movq IH_ARG(%rbx),%rdi ;\
movq %rsp,%rsi ;\
call *IH_FUN(%rbx) /* call it */ ;\
movq IH_IPL_NEXT(%rbx),%rbx /* next handler in chain */ ;\
testq %rbx,%rbx ;\
jnz 6b ;\
movq IH_ARG(%rbx),%rdi ;\
movq %rsp,%rsi ;\
call *IH_FUN(%rbx) /* call it */ ;\
movq IH_IPL_NEXT(%rbx),%rbx /* next handler in chain */ ;\
testq %rbx,%rbx ;\
jnz 6b ;\
5: \
CLI(si) ;\
unmask(num) /* unmask it in hardware */ ;\
late_ack(num) ;\
STI(si) ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
# The unmask func for Xen events
CLI(si) ;\
unmask(num) /* unmask it in hardware */ ;\
late_ack(num) ;\
STI(si) ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
/* The unmask func for Xen events */
#define hypervisor_asm_unmask(num) \
movq $num, %rdi ;\
call _C_LABEL(hypervisor_enable_ipl)
movq $num,%rdi ;\
call _C_LABEL(hypervisor_enable_ipl)
XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,1,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,2,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
@ -682,19 +681,21 @@ LABEL(xenev_stubs)
.quad _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31)
END(xenev_stubs)
# Xen callbacks
/*
* Xen callbacks
*/
# Hypervisor callback
/* Hypervisor callback */
NENTRY(hypervisor_callback)
movq (%rsp),%rcx
movq 8(%rsp),%r11
addq $16,%rsp
pushq $0 # Dummy error code
pushq $0 /* Dummy error code */
pushq $T_ASTFLT
INTRENTRY
# sti??
movq %rsp, %rdi
subq $8, %rdi; /* don't forget if_ppl */
/* sti?? */
movq %rsp,%rdi
subq $8,%rdi; /* don't forget if_ppl */
call do_hypervisor_callback
testb $SEL_RPL,TF_CS(%rsp)
jnz doreti_checkast
@ -702,7 +703,7 @@ NENTRY(hypervisor_callback)
INTRFASTEXIT
END(hypervisor_callback)
# Panic?
/* Panic? */
NENTRY(failsafe_callback)
movq (%rsp),%rcx
movq 8(%rsp),%r11
@ -710,11 +711,11 @@ NENTRY(failsafe_callback)
pushq $0
pushq $T_ASTFLT
INTRENTRY
movq %rsp, %rdi
subq $8, %rdi; /* don't forget if_ppl */
movq %rsp,%rdi
subq $8,%rdi; /* don't forget if_ppl */
call xen_failsafe_handler
INTRFASTEXIT
# jmp HYPERVISOR_iret
/* jmp HYPERVISOR_iret */
END(failsafe_callback)
#endif /* !XEN */
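For orientation only, the handler-chain walk that the interrupt stubs above perform at the local label 6: (load IH_ARG into the first argument register, pass the trap frame, call through IH_FUN, follow IH_IPL_NEXT until NULL) can be pictured in C roughly as follows. The struct layout and names below are hypothetical stand-ins for those assembler offsets, not NetBSD's actual declarations, and this sketch is not part of the commit.

#include <stddef.h>

/*
 * Hypothetical stand-in for the layout the IS_HANDLERS/IH_FUN/IH_ARG/
 * IH_IPL_NEXT offsets describe; not NetBSD's real struct intrhand.
 */
struct ih_sketch {
	int (*ih_fun)(void *, void *);	/* handler function (IH_FUN) */
	void *ih_arg;			/* handler's private argument (IH_ARG) */
	struct ih_sketch *ih_ipl_next;	/* next handler at this IPL (IH_IPL_NEXT) */
};

static void
run_handler_chain(struct ih_sketch *ih, void *frame)
{
	/*
	 * Walk the chain until it ends, calling each handler with its
	 * argument and the trap frame (%rdi/%rsi in the amd64 stub).
	 */
	for (; ih != NULL; ih = ih->ih_ipl_next)
		(*ih->ih_fun)(ih->ih_arg, frame);
}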

vector.S (i386)

@ -1,4 +1,4 @@
/* $NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $ */
/* $NetBSD: vector.S,v 1.65 2016/08/07 10:17:32 maxv Exp $ */
/*
* Copyright 2002 (c) Wasabi Systems, Inc.
@ -65,7 +65,7 @@
*/
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $");
__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.65 2016/08/07 10:17:32 maxv Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@ -106,7 +106,6 @@ __KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $");
* If the interrupt frame is made more flexible, INTR can push %eax first and
* decide the ipending case with less overhead, e.g., by avoiding loading the
* segment registers.
*
*/
/*
@ -132,13 +131,13 @@ __KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.64 2014/01/26 19:16:17 dsl Exp $");
.globl dtrace_invop_jump_addr
.align 4
.type dtrace_invop_jump_addr, @object
.size dtrace_invop_jump_addr, 4
.size dtrace_invop_jump_addr, 4
dtrace_invop_jump_addr:
.zero 4
.globl dtrace_invop_calltrap_addr
.align 4
.type dtrace_invop_calltrap_addr, @object
.size dtrace_invop_calltrap_addr, 4
.size dtrace_invop_calltrap_addr, 4
dtrace_invop_calltrap_addr:
.zero 8
.text
@ -154,15 +153,15 @@ IDTVEC(recurse_lapic_ipi)
pushfl
pushl %cs
pushl %esi
pushl $0
pushl $0
pushl $T_ASTFLT
INTRENTRY
INTRENTRY
jmp 1f
IDTVEC_END(recurse_lapic_ipi)
IDTVEC(intr_lapic_ipi)
pushl $0
pushl $0
pushl $T_ASTFLT
INTRENTRY
INTRENTRY
movl $0,_C_LABEL(local_apic)+LAPIC_EOI
movl CPUVAR(ILEVEL),%ebx
cmpl $IPL_HIGH,%ebx
@ -189,7 +188,7 @@ IDTVEC(intr_lapic_tlb)
pushl $0
pushl $T_ASTFLT
INTRENTRY
movl $0, _C_LABEL(local_apic)+LAPIC_EOI
movl $0,_C_LABEL(local_apic)+LAPIC_EOI
call _C_LABEL(pmap_tlb_intr)
INTRFASTEXIT
IDTVEC_END(intr_lapic_tlb)
@ -213,7 +212,7 @@ IDTVEC(intrddbipi)
IDTVEC_END(intrddbipi)
#endif /* DDB */
#endif /* MULTIPROCESSOR */
/*
* Interrupt from the local APIC timer.
*/
@ -221,15 +220,15 @@ IDTVEC(recurse_lapic_ltimer)
pushfl
pushl %cs
pushl %esi
pushl $0
pushl $0
pushl $T_ASTFLT
INTRENTRY
INTRENTRY
jmp 1f
IDTVEC_END(recurse_lapic_ltimer)
IDTVEC(intr_lapic_ltimer)
pushl $0
pushl $0
pushl $T_ASTFLT
INTRENTRY
INTRENTRY
movl $0,_C_LABEL(local_apic)+LAPIC_EOI
movl CPUVAR(ILEVEL),%ebx
cmpl $IPL_CLOCK,%ebx
@ -242,7 +241,7 @@ IDTVEC(resume_lapic_ltimer)
sti
pushl $0
call _C_LABEL(lapic_clockintr)
addl $4,%esp
addl $4,%esp
cli
jmp _C_LABEL(Xdoreti)
2:
@ -272,7 +271,7 @@ IDTVEC_END(recurse_ ## name ## num) ;\
IDTVEC(resume_ ## name ## num) \
movl $IREENT_MAGIC,TF_ERR(%esp) ;\
movl %ebx,%esi ;\
movl CPUVAR(ISOURCES) + (num) * 4, %ebp ;\
movl CPUVAR(ISOURCES) + (num) * 4,%ebp ;\
movl IS_MAXLEVEL(%ebp),%ebx ;\
jmp 1f ;\
IDTVEC_END(resume_ ## name ## num) ;\
@ -280,7 +279,7 @@ IDTVEC(intr_ ## name ## num) ;\
pushl $0 /* dummy error code */ ;\
pushl $T_ASTFLT /* trap # for doing ASTs */ ;\
INTRENTRY ;\
movl CPUVAR(ISOURCES) + (num) * 4, %ebp ;\
movl CPUVAR(ISOURCES) + (num) * 4,%ebp ;\
mask(num) /* mask it in hardware */ ;\
early_ack(num) /* and allow other intrs */ ;\
testl %ebp,%ebp ;\
@ -318,12 +317,12 @@ IDTVEC(intr_ ## name ## num) ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
7: \
cli ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
level_mask(num) ;\
late_ack(num) ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
10: \
orl $(1 << num),CPUVAR(IPENDING) ;\
orl $(1 << num),CPUVAR(IPENDING) ;\
level_mask(num) ;\
late_ack(num) ;\
INTRFASTEXIT ;\
@ -631,7 +630,7 @@ IDTVEC(recurse_ ## name ## num) ;\
IDTVEC(resume_ ## name ## num) \
movl $IREENT_MAGIC,TF_ERR(%esp) ;\
pushl %ebx ;\
movl CPUVAR(ISOURCES) + (num) * 4, %ebp ;\
movl CPUVAR(ISOURCES) + (num) * 4,%ebp ;\
movl $num,CPUVAR(ILEVEL) ;\
IDEPTH_INCR /* leaves old %esp on stack */ ;\
STI(%eax) ;\
@ -657,8 +656,8 @@ IDTVEC(resume_ ## name ## num) \
* the C function doing it, maybe rewrite in inline assembly ?
*/
#define hypervisor_asm_unmask(num) \
pushl $num ;\
call _C_LABEL(hypervisor_enable_ipl) ;\
pushl $num ;\
call _C_LABEL(hypervisor_enable_ipl) ;\
addl $4,%esp
XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
@ -753,35 +752,35 @@ END(xenev_stubs)
* activation and restart the handler using the previous one.
*/
NENTRY(hypervisor_callback)
pushl $0 # dummy error code
pushl $0 /* dummy error code */
pushl $T_ASTFLT
INTRENTRY
movl TF_EIP(%esp),%eax
cmpl $scrit,%eax
jb 11f
cmpl $ecrit,%eax
jb critical_region_fixup
11: pushl CPUVAR(ILEVEL)
push %esp
call do_hypervisor_callback
add $8,%esp
xorl %eax,%eax
movb TF_CS(%esp),%cl
test $CHK_UPL,%cl # slow return to ring 2 or 3
je safesti
movl CPUVAR(ILEVEL),%ebx
jmp doreti_checkast
movl TF_EIP(%esp),%eax
cmpl $scrit,%eax
jb 11f
cmpl $ecrit,%eax
jb critical_region_fixup
11: pushl CPUVAR(ILEVEL)
push %esp
call do_hypervisor_callback
add $8,%esp
xorl %eax,%eax
movb TF_CS(%esp),%cl
test $CHK_UPL,%cl /* slow return to ring 2 or 3 */
je safesti
movl CPUVAR(ILEVEL),%ebx
jmp doreti_checkast
safesti:
movl CPUVAR(VCPU),%esi
XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
scrit: /**** START OF CRITICAL REGION ****/
XEN_TEST_PENDING(%esi)
jnz 14f # process more events if necessary...
INTRFASTEXIT
movl CPUVAR(VCPU),%esi
XEN_UNBLOCK_EVENTS(%esi) /* reenable event callbacks */
scrit: /**** START OF CRITICAL REGION ****/
XEN_TEST_PENDING(%esi)
jnz 14f /* process more events if necessary... */
INTRFASTEXIT
critiret:
14: XEN_BLOCK_EVENTS(%esi)
jmp 11b
ecrit: /**** END OF CRITICAL REGION ****/
14: XEN_BLOCK_EVENTS(%esi)
jmp 11b
ecrit: /**** END OF CRITICAL REGION ****/
/*
* [How we do the fixup]. We want to merge the current stack frame with the
* just-interrupted frame. How we do this depends on where in the critical
@ -792,27 +791,27 @@ ecrit: /**** END OF CRITICAL REGION ****/
* interrupted stack frame.
*/
critical_region_fixup:
cmpl $(critiret-1),%eax # eip points to iret?
cmpl $(critiret-1),%eax /* eip points to iret? */
jne 1f
movl $(TF_PUSHSIZE+0x8),%eax
jmp 2f
1: xorl %eax,%eax
2:
/* %eax contains num bytes popped */
mov %esp,%esi
add %eax,%esi # %esi points at end of src region
mov %esp,%edi
add $(TF_PUSHSIZE+0x8+0xC),%edi # %edi points at end of dst region
mov %eax,%ecx
shr $2,%ecx # convert words to bytes
je 16f # skip loop if nothing to copy
15: subl $4,%esi # pre-decrementing copy loop
subl $4,%edi
movl (%esi),%eax
movl %eax,(%edi)
loop 15b
16: movl %edi,%esp # final %edi is top of merged stack
jmp 11b
mov %esp,%esi
add %eax,%esi /* %esi points at end of src region */
mov %esp,%edi
add $(TF_PUSHSIZE+0x8+0xC),%edi /* %edi points at end of dst region */
mov %eax,%ecx
shr $2,%ecx /* convert words to bytes */
je 16f /* skip loop if nothing to copy */
15: subl $4,%esi /* pre-decrementing copy loop */
subl $4,%edi
movl (%esi),%eax
movl %eax,(%edi)
loop 15b
16: movl %edi,%esp /* final %edi is top of merged stack */
jmp 11b
END(hypervisor_callback)
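The "[How we do the fixup]" comment above describes merging the current stack frame with the just-interrupted one. As a rough illustration only, the copy loop at labels 15:/16: corresponds to something like the C below; the helper name, parameters, and types are invented for this sketch and are not part of the commit. "popped" is the byte count already consumed from the current frame (TF_PUSHSIZE+0x8 when %eip sat on the final iret, otherwise 0), and "dst_off" plays the role of the constant TF_PUSHSIZE+0x8+0xC used to locate the interrupted frame.

#include <stddef.h>
#include <stdint.h>

static uint32_t *
merge_frames(uint32_t *sp, size_t popped, size_t dst_off)
{
	/* End of the source region (%esi) and destination region (%edi). */
	uint32_t *src = (uint32_t *)((uint8_t *)sp + popped);
	uint32_t *dst = (uint32_t *)((uint8_t *)sp + dst_off);
	size_t nwords = popped / 4;		/* shr $2,%ecx */

	/* Pre-decrementing copy loop, as at label 15:. */
	while (nwords-- > 0)
		*--dst = *--src;

	/* Final destination pointer becomes the new %esp, as at label 16:. */
	return dst;
}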

uvm_mmap.c

@ -1,4 +1,4 @@
/* $NetBSD: uvm_mmap.c,v 1.160 2016/08/07 09:55:18 maxv Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.161 2016/08/07 10:07:58 maxv Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.160 2016/08/07 09:55:18 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.161 2016/08/07 10:07:58 maxv Exp $");
#include "opt_compat_netbsd.h"
#include "opt_pax.h"
@ -64,7 +64,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.160 2016/08/07 09:55:18 maxv Exp $");
#include <uvm/uvm_device.h>
static int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t,
int, int, struct uvm_object *, voff_t, vsize_t);
int, int, struct uvm_object *, voff_t, vsize_t);
static int
range_test(struct vm_map *map, vaddr_t addr, vsize_t size, bool ismmap)
@ -104,7 +104,7 @@ sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
syscallarg(intptr_t) incr;
} */
return (ENOSYS);
return ENOSYS;
}
/*
@ -119,7 +119,7 @@ sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
syscallarg(int) incr;
} */
return (ENOSYS);
return ENOSYS;
}
/*
@ -155,11 +155,11 @@ sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
vec = SCARG(uap, vec);
if (start & PAGE_MASK)
return (EINVAL);
return EINVAL;
len = round_page(len);
end = start + len;
if (end <= start)
return (EINVAL);
return EINVAL;
/*
* Lock down vec, so our returned status isn't outdated by
@ -259,7 +259,7 @@ sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
out:
vm_map_unlock_read(map);
uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
return (error);
return error;
}
/*
@ -328,7 +328,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
#endif
}
if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
return (EINVAL);
return EINVAL;
/*
* align file position and save offset. adjust size.
@ -340,26 +340,23 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
newsize = (vsize_t)round_page(newsize); /* round up */
if (newsize < size)
return (ENOMEM);
return ENOMEM;
size = newsize;
/*
* now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
*/
if (flags & MAP_FIXED) {
/* ensure address and file offset are aligned properly */
addr -= pageoff;
if (addr & PAGE_MASK)
return (EINVAL);
return EINVAL;
error = range_test(&p->p_vmspace->vm_map, addr, size, true);
if (error) {
return error;
}
} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
/*
* not fixed: make sure we skip over the largest
* possible heap for non-topdown mapping arrangements.
@ -371,8 +368,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
(vaddr_t)p->p_vmspace->vm_daddr, size,
p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
if (addr == 0 ||
!(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
addr = MAX(addr, defaddr);
else
addr = MIN(addr, defaddr);
@ -385,14 +381,14 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
advice = UVM_ADV_NORMAL;
if ((flags & MAP_ANON) == 0) {
if ((fp = fd_getfile(fd)) == NULL)
return (EBADF);
return EBADF;
if (fp->f_ops->fo_mmap == NULL) {
error = ENODEV;
goto out;
}
error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
&advice, &uobj, &maxprot);
&advice, &uobj, &maxprot);
if (error) {
goto out;
}
@ -407,7 +403,7 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
* XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
*/
if (fd != -1)
return (EINVAL);
return EINVAL;
is_anon: /* label for SunOS style /dev/zero */
uobj = NULL;
@ -430,10 +426,10 @@ sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
*retval = (register_t)(addr + pageoff);
out:
if (fp != NULL)
if (fp != NULL)
fd_putfile(fd);
return (error);
return error;
}
/*
@ -468,7 +464,7 @@ sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
(flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
(flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
return (EINVAL);
return EINVAL;
if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
flags |= MS_SYNC;
@ -513,7 +509,7 @@ sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
}
vm_map_unlock_read(map);
if (rv == false)
return (EINVAL);
return EINVAL;
}
/*
@ -565,7 +561,7 @@ sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
size = (vsize_t)round_page(size);
if (size == 0)
return (0);
return 0;
map = &p->p_vmspace->vm_map;
@ -573,23 +569,22 @@ sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
if (error)
return EINVAL;
vm_map_lock(map);
#if 0
/*
* interesting system call semantic: make sure entire range is
* allocated before allowing an unmap.
*/
vm_map_lock(map);
#if 0
if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
vm_map_unlock(map);
return (EINVAL);
return EINVAL;
}
#endif
uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
vm_map_unlock(map);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
return (0);
return 0;
}
/*
@ -674,7 +669,7 @@ sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
return EINVAL;
error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
inherit);
inherit);
return error;
}
@ -769,10 +764,10 @@ sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
* There's also what to do for device/file/anonymous memory.
*/
return (EINVAL);
return EINVAL;
default:
return (EINVAL);
return EINVAL;
}
return error;
@ -815,11 +810,11 @@ sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
return ENOMEM;
if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
return (EAGAIN);
return EAGAIN;
if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
return (EAGAIN);
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
return EAGAIN;
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
0);
@ -888,13 +883,12 @@ sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
flags = SCARG(uap, flags);
if (flags == 0 ||
(flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
return (EINVAL);
if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
return EINVAL;
error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
return (error);
return error;
}
/*
@ -907,7 +901,7 @@ sys_munlockall(struct lwp *l, const void *v, register_t *retval)
struct proc *p = l->l_proc;
(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
return (0);
return 0;
}
/*
@ -932,11 +926,11 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
*/
if (size == 0)
return(0);
return 0;
if (foff & PAGE_MASK)
return(EINVAL);
return EINVAL;
if ((prot & maxprot) != prot)
return(EINVAL);
return EINVAL;
/*
* for non-fixed mappings, round off the suggested address.
@ -947,7 +941,7 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
*addr = round_page(*addr);
} else {
if (*addr & PAGE_MASK)
return(EINVAL);
return EINVAL;
uvmflag |= UVM_FLAG_FIXED;
(void) uvm_unmap(map, *addr, *addr + size);
}
@ -963,15 +957,15 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
if (align) {
if (align >= sizeof(vaddr_t) * NBBY)
return(EINVAL);
return EINVAL;
align = 1L << align;
if (align < PAGE_SIZE)
return(EINVAL);
return EINVAL;
if (align >= vm_map_max(map))
return(ENOMEM);
return ENOMEM;
if (flags & MAP_FIXED) {
if ((*addr & (align-1)) != 0)
return(EINVAL);
return EINVAL;
align = 0;
}
}
@ -1008,8 +1002,8 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
}
uvmflag = UVM_MAPFLAG(prot, maxprot,
(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
advice, uvmflag);
(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice,
uvmflag);
error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
if (error) {
if (uobj)
@ -1030,7 +1024,7 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
* No more work to do in this case.
*/
return (0);
return 0;
}
if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
vm_map_lock(map);
@ -1048,12 +1042,12 @@ uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
*/
error = uvm_map_pageable(map, *addr, *addr + size,
false, UVM_LK_ENTER);
false, UVM_LK_ENTER);
if (error) {
uvm_unmap(map, *addr, *addr + size);
return error;
}
return (0);
return 0;
}
return 0;
}
@ -1089,8 +1083,8 @@ uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
return EINVAL;
error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
(vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM,
uobj, off, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
(vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM, uobj, off,
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
return error;
}
@ -1109,7 +1103,7 @@ uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
(vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL,
NULL, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
(vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL, NULL, 0,
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
return error;
}