use pmap_kenter_pa() instead of pmap_enter(); this is required for
pages loaned to the kernel.  this implies that we also need to
call pmap_kremove() before uvm_km_free().

other general cleanup:  remove argument names from prototypes,
rename some variables, etc.
chs 2001-11-06 07:30:14 +00:00
parent cf63a6b052
commit 6fbca7d0fc
2 changed files with 65 additions and 63 deletions
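
In short, the direct-write path now maps the loaned pages into the kernel with pmap_kenter_pa() and tears those mappings down again with pmap_kremove() before the kva is handed back to uvm_km_free().  A minimal sketch of that pairing, using only the interfaces that appear in the diff below (va, kva, pgs and npages are illustrative locals standing in for the pipe_map fields, not the real code):

	vaddr_t va, kva;	/* kva: base of the uvm_km_valloc_wait() allocation */
	struct vm_page **pgs;	/* pages returned by uvm_loan() */
	int i, npages;

	/* enter unmanaged kernel mappings for the loaned pages */
	for (i = 0, va = kva; i < npages; i++, va += PAGE_SIZE)
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pgs[i]), VM_PROT_READ);
	pmap_update(pmap_kernel());

	/* ... reader drains the loaned data ... */

	/* kenter'd mappings are unmanaged, so remove them explicitly
	   before the kva is freed */
	pmap_kremove(kva, (vsize_t)npages << PAGE_SHIFT);
	uvm_km_free(kernel_map, kva, (vsize_t)npages << PAGE_SHIFT);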

View File

@@ -1,4 +1,4 @@
/* $NetBSD: sys_pipe.c,v 1.17 2001/10/28 20:47:15 jdolecek Exp $ */
/* $NetBSD: sys_pipe.c,v 1.18 2001/11/06 07:30:15 chs Exp $ */
/*
* Copyright (c) 1996 John S. Dyson
@@ -185,14 +185,13 @@ static int nbigpipe = 0;
*/
static int amountpipekva = 0;
static void pipeclose __P((struct pipe *cpipe));
static void pipe_free_kmem __P((struct pipe *cpipe));
static int pipe_create __P((struct pipe **cpipep, int allockva));
static __inline int pipelock __P((struct pipe *cpipe, int catch));
static __inline void pipeunlock __P((struct pipe *cpipe));
static __inline void pipeselwakeup __P((struct pipe *selp,
struct pipe *sigp));
static int pipespace __P((struct pipe *cpipe, int size));
static void pipeclose __P((struct pipe *));
static void pipe_free_kmem __P((struct pipe *));
static int pipe_create __P((struct pipe **, int));
static __inline int pipelock __P((struct pipe *, int));
static __inline void pipeunlock __P((struct pipe *));
static __inline void pipeselwakeup __P((struct pipe *, struct pipe *));
static int pipespace __P((struct pipe *, int));
#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
@@ -207,10 +206,9 @@ static vm_zone_t pipe_zone;
#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
static __inline int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
static __inline int pipe_loan_alloc __P((struct pipe *wpipe, int npages,
vsize_t blen));
static void pipe_loan_free __P((struct pipe *wpipe));
static int pipe_direct_write __P((struct pipe *, struct uio *));
static int pipe_loan_alloc __P((struct pipe *, int));
static void pipe_loan_free __P((struct pipe *));
#endif /* PIPE_NODIRECT */
static struct pool pipe_pool;
@@ -870,7 +868,7 @@ retry:
wpipe->pipe_state &= ~PIPE_WANTR;
wakeup(wpipe);
}
wpipe->pipe_state |= PIPE_WANTW;
error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
if (error)
@@ -933,21 +931,22 @@ error1:
/*
* Allocate structure for loan transfer.
*/
static __inline int
pipe_loan_alloc(wpipe, npages, blen)
static int
pipe_loan_alloc(wpipe, npages)
struct pipe *wpipe;
int npages;
vsize_t blen;
{
wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, blen);
vsize_t len;
len = (vsize_t)npages << PAGE_SHIFT;
wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, len);
if (wpipe->pipe_map.kva == NULL)
return (ENOMEM);
amountpipekva += blen;
amountpipekva += len;
wpipe->pipe_map.npages = npages;
wpipe->pipe_map.ms = (struct vm_page **) malloc(
npages * sizeof(struct vm_page *), M_PIPE, M_WAITOK);
wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
M_WAITOK);
return (0);
}
@@ -958,12 +957,15 @@ static void
pipe_loan_free(wpipe)
struct pipe *wpipe;
{
uvm_km_free(kernel_map, wpipe->pipe_map.kva,
wpipe->pipe_map.npages * PAGE_SIZE);
vsize_t len;
len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
pmap_kremove(wpipe->pipe_map.kva, len);
uvm_km_free(kernel_map, wpipe->pipe_map.kva, len);
wpipe->pipe_map.kva = NULL;
amountpipekva -= wpipe->pipe_map.npages * PAGE_SIZE;
free(wpipe->pipe_map.ms, M_PIPE);
wpipe->pipe_map.ms = NULL;
amountpipekva -= len;
free(wpipe->pipe_map.pgs, M_PIPE);
wpipe->pipe_map.pgs = NULL;
}
/*
@@ -974,13 +976,13 @@ pipe_loan_free(wpipe)
* be deferred until the receiving process grabs all of the bytes from
* the pipe buffer. Then the direct mapping write is set-up.
*/
static __inline int
static int
pipe_direct_write(wpipe, uio)
struct pipe *wpipe;
struct uio *uio;
{
int error, npages, j;
struct vm_page **res = NULL;
struct vm_page **pgs;
vaddr_t bbase, kva, base, bend;
vsize_t blen, bcnt;
voff_t bpos;
@@ -1006,7 +1008,7 @@ retry:
wpipe->pipe_state &= ~PIPE_WANTR;
wakeup(wpipe);
}
wpipe->pipe_state |= PIPE_WANTW;
error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
if (error)
@@ -1032,10 +1034,10 @@ retry:
blen = PIPE_DIRECT_CHUNK;
bend = base + blen;
bcnt = PIPE_DIRECT_CHUNK - bpos;
} else
} else {
bcnt = uio->uio_iov->iov_len;
npages = blen / PAGE_SIZE;
}
npages = blen >> PAGE_SHIFT;
wpipe->pipe_map.pos = bpos;
wpipe->pipe_map.cnt = bcnt;
@@ -1048,26 +1050,30 @@ retry:
pipe_loan_free(wpipe);
/* Allocate new kva. */
if (!wpipe->pipe_map.kva
&& (error = pipe_loan_alloc(wpipe, npages, blen)))
goto error;
if (wpipe->pipe_map.kva == NULL) {
error = pipe_loan_alloc(wpipe, npages);
if (error) {
goto error;
}
}
/* Loan the write buffer memory from writer process */
pgs = wpipe->pipe_map.pgs;
error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
(void **) wpipe->pipe_map.ms, UVM_LOAN_TOPAGE);
if (error)
pgs, UVM_LOAN_TOPAGE);
if (error) {
pgs = NULL;
goto cleanup;
res = wpipe->pipe_map.ms;
}
/* Enter the loaned pages to kva */
kva = wpipe->pipe_map.kva;
for(j=0; j < npages; j++, kva += PAGE_SIZE)
pmap_enter(pmap_kernel(), kva, res[j]->phys_addr,
VM_PROT_READ, 0);
for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
}
pmap_update(pmap_kernel());
wpipe->pipe_state |= PIPE_DIRECTW;
error = 0;
while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
if (wpipe->pipe_state & PIPE_EOF) {
error = EPIPE;
@@ -1084,10 +1090,10 @@ retry:
if (error)
wpipe->pipe_state &= ~PIPE_DIRECTW;
cleanup:
cleanup:
pipelock(wpipe, 0);
if (res)
uvm_unloan((void **) res, npages, UVM_LOAN_TOPAGE);
if (pgs != NULL)
uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
if (error || amountpipekva > maxpipekva)
pipe_loan_free(wpipe);
pipeunlock(wpipe);
@@ -1097,12 +1103,12 @@ retry:
/*
* If nothing was read from what we offered, return error
* streight on. Otherwise update uio resid first. Caller
* straight on. Otherwise update uio resid first. Caller
* will deal with the error condition, returning short
* write, error, or restarting the write(2) as appropriate.
*/
if (wpipe->pipe_map.cnt == bcnt) {
error:
error:
wakeup(wpipe);
return (error);
}
@@ -1110,9 +1116,9 @@ retry:
bcnt -= wpipe->pipe_map.cnt;
}
uio->uio_resid -= bcnt;
uio->uio_resid -= bcnt;
/* uio_offset not updated, not set/used for write(2) */
(char *) uio->uio_iov->iov_base += bcnt;
uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
uio->uio_iov->iov_len -= bcnt;
if (uio->uio_iov->iov_len == 0) {
uio->uio_iov++;
@@ -1189,7 +1195,7 @@ pipe_write(fp, offset, uio, cred, flags)
return (error);
}
}
#ifdef __FreeBSD__
KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
#endif
@@ -1304,12 +1310,12 @@ pipe_write(fp, offset, uio, cred, flags)
wpipe->pipe_buffer.in;
if (segsize > size)
segsize = size;
/* Transfer first segment */
error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
segsize, uio);
if (error == 0 && segsize < size) {
/*
* Transfer remaining part now, to
@@ -1321,7 +1327,7 @@ pipe_write(fp, offset, uio, cred, flags)
wpipe->pipe_buffer.size)
panic("Expected pipe buffer wraparound disappeared");
#endif
error = uiomove(&wpipe->pipe_buffer.buffer[0],
size - segsize, uio);
}
@@ -1335,18 +1341,16 @@ pipe_write(fp, offset, uio, cred, flags)
#endif
wpipe->pipe_buffer.in = size - segsize;
}
wpipe->pipe_buffer.cnt += size;
#ifdef DEBUG
if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
panic("Pipe buffer overflow");
#endif
}
pipeunlock(wpipe);
if (error)
break;
} else {
/*
* If the "read-side" has been blocked, wake it up now.
@@ -1381,7 +1385,7 @@ pipe_write(fp, offset, uio, cred, flags)
if (wpipe->pipe_state & PIPE_EOF) {
error = EPIPE;
break;
}
}
}
}
@@ -1609,7 +1613,6 @@ pipe_free_kmem(cpipe)
(vaddr_t)cpipe->pipe_buffer.buffer,
cpipe->pipe_buffer.size);
#endif /* NetBSD */
cpipe->pipe_buffer.buffer = NULL;
}
#ifndef PIPE_NODIRECT
@@ -1704,7 +1707,6 @@ pipe_kqfilter(struct file *fp, struct knote *kn)
return (1);
}
kn->kn_hook = (caddr_t)cpipe;
SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
return (0);
}

View File

@@ -99,7 +99,7 @@ struct pipemapping {
vsize_t cnt; /* number of chars in buffer */
voff_t pos; /* current position within page */
int npages; /* how many pages allocated */
struct vm_page **ms;
struct vm_page **pgs; /* pointers to the pages */
};
#endif