I should have made uvm_page_physload() take paddr_t's instead of vaddr_t's.

Also, add uvm_coredump32().
This commit is contained in:
eeh 1999-12-30 16:09:47 +00:00
parent 3bd42b03a7
commit c0ac678704
4 changed files with 96 additions and 11 deletions

View File

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.34 1999/07/22 22:58:38 thorpej Exp $ */
/* $NetBSD: uvm_extern.h,v 1.35 1999/12/30 16:09:47 eeh Exp $ */
/*
*
@@ -370,8 +370,8 @@ struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
void uvm_pagerealloc __P((struct vm_page *,
struct uvm_object *, vaddr_t));
/* Actually, uvm_page_physload takes PF#s which need their own type */
void uvm_page_physload __P((vaddr_t, vaddr_t,
vaddr_t, vaddr_t, int));
void uvm_page_physload __P((paddr_t, paddr_t,
paddr_t, paddr_t, int));
void uvm_setpagesize __P((void));
/* uvm_pdaemon.c */
@@ -390,6 +390,10 @@ void uvm_swap_init __P((void));
int uvm_coredump __P((struct proc *, struct vnode *,
struct ucred *, struct core *));
int uvm_grow __P((struct proc *, vaddr_t));
/* should only be needed if COMPAT_NETBSD32 is defined */
struct core32;
int uvm_coredump32 __P((struct proc *, struct vnode *,
struct ucred *, struct core32 *));
/* uvm_user.c */
int uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t));

View File

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.28 1999/12/01 16:08:32 drochner Exp $ */
/* $NetBSD: uvm_page.c,v 1.29 1999/12/30 16:09:47 eeh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -577,7 +577,7 @@ uvm_page_physget(paddrp)
void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
vaddr_t start, end, avail_start, avail_end;
paddr_t start, end, avail_start, avail_end;
int free_list;
{
int preload, lcv;

View File

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_unix.c,v 1.9 1999/12/04 23:14:40 fvdl Exp $ */
/* $NetBSD: uvm_unix.c,v 1.10 1999/12/30 16:09:47 eeh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -48,6 +48,7 @@
/*
* uvm_unix.c: traditional sbrk/grow interface to vm.
*/
#include "opt_compat_netbsd32.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -252,3 +253,83 @@ uvm_coredump(p, vp, cred, chdr)
return (error);
}
#ifdef COMPAT_NETBSD32
/*
 * uvm_coredump32: dump a 32-bit core image of process p to vnode vp,
 * using credentials cred.
 *
 * => chdr is the 32-bit core header, already initialized by the caller
 *    (c_hdrsize, c_seghdrsize, c_cpusize valid); we append one segment
 *    header plus segment data per writable map entry and bump c_nseg
 *    for each segment written.
 * => returns 0 on success, or an errno from vn_rdwr() on failure
 *    (a partially written core file may be left behind).
 */
int
uvm_coredump32(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core32 *chdr;
{
	register struct vmspace *vm = p->p_vmspace;
	register vm_map_t map = &vm->vm_map;
	register vm_map_entry_t entry;
	vaddr_t start, end;
	struct coreseg32 cseg;
	off_t offset;
	int flag, error = 0;

	/* segments go just past the headers the caller already wrote */
	offset = chdr->c_hdrsize + chdr->c_seghdrsize + chdr->c_cpusize;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {

		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("uvm_coredump32: user process with submap?");
		}

		/* only writable regions are dumped */
		if (!(entry->protection & VM_PROT_WRITE))
			continue;

		start = entry->start;
		end = entry->end;

		/* clip the entry to the user address space */
		if (start >= VM_MAXUSER_ADDRESS)
			continue;

		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

		if (start >= (vaddr_t)vm->vm_maxsaddr) {
			/* stack segment: dump only the in-use portion */
			flag = CORE_STACK;
			start = trunc_page(USRSTACK - ctob(vm->vm_ssize));
			if (start >= end)
				continue;
		} else
			flag = CORE_DATA;

		/*
		 * Set up a new core file segment.
		 */
		CORE_SETMAGIC(cseg, CORESEGMAGIC, CORE_GETMID(*chdr), flag);
		/* NOTE(review): c_addr/c_size are 32-bit fields; addresses
		 * of a 32-bit process are assumed to fit — confirm. */
		cseg.c_addr = start;
		cseg.c_size = end - start;

		/* write the segment header ... */
		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)&cseg, chdr->c_seghdrsize,
		    offset, UIO_SYSSPACE,
		    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
		if (error)
			break;

		offset += chdr->c_seghdrsize;

		/* ... then the segment data, read from user space */
		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)cseg.c_addr, (int)cseg.c_size,
		    offset, UIO_USERSPACE,
		    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
		if (error)
			break;

		offset += cseg.c_size;
		chdr->c_nseg++;
	}

	return (error);
}
#endif

View File

@@ -1,4 +1,4 @@
/* $NetBSD: vm_page.h,v 1.33 1999/07/22 22:58:40 thorpej Exp $ */
/* $NetBSD: vm_page.h,v 1.34 1999/12/30 16:09:47 eeh Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -192,10 +192,10 @@ struct vm_page {
* vm_physmemseg: describes one segment of physical memory
*/
struct vm_physseg {
vaddr_t start; /* PF# of first page in segment */
vaddr_t end; /* (PF# of last page in segment) + 1 */
vaddr_t avail_start; /* PF# of first free page in segment */
vaddr_t avail_end; /* (PF# of last free page in segment) +1 */
paddr_t start; /* PF# of first page in segment */
paddr_t end; /* (PF# of last page in segment) + 1 */
paddr_t avail_start; /* PF# of first free page in segment */
paddr_t avail_end; /* (PF# of last free page in segment) +1 */
int free_list; /* which free list they belong on */
struct vm_page *pgs; /* vm_page structures (from start) */
struct vm_page *lastpg; /* vm_page structure for end */