2005-09-11 21:55:26 +04:00
|
|
|
/* $NetBSD: sys_pipe.c,v 1.66 2005/09/11 17:55:26 christos Exp $ */
|
2003-02-13 00:54:15 +03:00
|
|
|
|
|
|
|
/*-
|
|
|
|
* Copyright (c) 2003 The NetBSD Foundation, Inc.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
|
|
|
* by Paul Kranenburg.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the NetBSD
|
|
|
|
* Foundation, Inc. and its contributors.
|
|
|
|
* 4. Neither the name of The NetBSD Foundation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
2001-06-16 16:00:02 +04:00
|
|
|
|
2001-06-16 13:21:34 +04:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1996 John S. Dyson
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice immediately at the beginning of the file, without modification,
|
|
|
|
* this list of conditions, and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Absolutely no warranty of function or purpose is made by the author
|
|
|
|
* John S. Dyson.
|
|
|
|
* 4. Modifications may be freely made to this file if the above conditions
|
|
|
|
* are met.
|
|
|
|
*
|
2002-03-14 00:50:24 +03:00
|
|
|
* $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
|
2001-06-16 13:21:34 +04:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This file contains a high-performance replacement for the socket-based
|
|
|
|
* pipes scheme originally used in FreeBSD/4.4Lite. It does not support
|
|
|
|
* all features of sockets, but does do everything that pipes normally
|
|
|
|
* do.
|
2001-06-16 16:00:02 +04:00
|
|
|
*
|
|
|
|
* Adaption for NetBSD UVM, including uvm_loan() based direct write, was
|
|
|
|
* written by Jaromir Dolecek.
|
2001-06-16 13:21:34 +04:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This code has two modes of operation, a small write mode and a large
|
|
|
|
* write mode. The small write mode acts like conventional pipes with
|
|
|
|
* a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
|
|
|
|
* "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
|
2003-02-13 00:54:15 +03:00
|
|
|
* and PIPE_SIZE in size it is mapped read-only into the kernel address space
|
|
|
|
* using the UVM page loan facility from where the receiving process can copy
|
|
|
|
* the data directly from the pages in the sending process.
|
2001-06-16 13:21:34 +04:00
|
|
|
*
|
|
|
|
* The constant PIPE_MINDIRECT is chosen to make sure that buffering will
|
|
|
|
* happen for small transfers so that the system will not spend all of
|
|
|
|
* its time context switching. PIPE_SIZE is constrained by the
|
|
|
|
* amount of kernel virtual memory.
|
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2005-09-11 21:55:26 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.66 2005/09/11 17:55:26 christos Exp $");
|
2001-11-12 18:25:01 +03:00
|
|
|
|
2001-06-16 13:21:34 +04:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2001-06-16 16:00:02 +04:00
|
|
|
#include <sys/proc.h>
|
2001-06-16 13:21:34 +04:00
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/filio.h>
|
2002-03-14 00:50:24 +03:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/lock.h>
|
2001-06-16 13:21:34 +04:00
|
|
|
#include <sys/ttycom.h>
|
|
|
|
#include <sys/stat.h>
|
2002-03-14 00:50:24 +03:00
|
|
|
#include <sys/malloc.h>
|
2001-06-16 13:21:34 +04:00
|
|
|
#include <sys/poll.h>
|
|
|
|
#include <sys/signalvar.h>
|
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/uio.h>
|
2001-06-16 16:00:02 +04:00
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/select.h>
|
|
|
|
#include <sys/mount.h>
|
2003-01-18 13:06:22 +03:00
|
|
|
#include <sys/sa.h>
|
2001-06-16 16:00:02 +04:00
|
|
|
#include <sys/syscallargs.h>
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
#include <sys/sysctl.h>
|
2001-10-28 23:47:15 +03:00
|
|
|
#include <sys/kernel.h>
|
2001-06-16 16:00:02 +04:00
|
|
|
|
|
|
|
#include <sys/pipe.h>
|
|
|
|
|
2001-10-28 23:47:15 +03:00
|
|
|
/*
|
|
|
|
* Avoid microtime(9), it's slow. We don't guard the read from time(9)
|
|
|
|
* with splclock(9) since we don't actually need to be THAT sure the access
|
|
|
|
* is atomic.
|
|
|
|
*/
|
2003-02-13 00:54:15 +03:00
|
|
|
#define PIPE_TIMESTAMP(tvp) (*(tvp) = time)
|
2002-10-23 13:10:23 +04:00
|
|
|
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use this define if you want to disable *fancy* VM things. Expect an
|
2003-02-13 00:54:15 +03:00
|
|
|
* approx 30% decrease in transfer rate.
|
2001-06-16 13:21:34 +04:00
|
|
|
*/
|
|
|
|
/* #define PIPE_NODIRECT */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* interfaces to the outside world
|
|
|
|
*/
|
2005-02-27 00:34:55 +03:00
|
|
|
static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
|
2002-03-14 00:50:24 +03:00
|
|
|
struct ucred *cred, int flags);
|
2005-02-27 00:34:55 +03:00
|
|
|
static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
|
2002-03-14 00:50:24 +03:00
|
|
|
struct ucred *cred, int flags);
|
2003-06-30 02:28:00 +04:00
|
|
|
static int pipe_close(struct file *fp, struct proc *p);
|
|
|
|
static int pipe_poll(struct file *fp, int events, struct proc *p);
|
2002-10-23 13:10:23 +04:00
|
|
|
static int pipe_kqfilter(struct file *fp, struct knote *kn);
|
2003-06-30 02:28:00 +04:00
|
|
|
static int pipe_stat(struct file *fp, struct stat *sb, struct proc *p);
|
2003-03-22 00:13:50 +03:00
|
|
|
static int pipe_ioctl(struct file *fp, u_long cmd, void *data,
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p);
|
2002-03-14 00:50:24 +03:00
|
|
|
|
2004-11-30 07:25:43 +03:00
|
|
|
static const struct fileops pipeops = {
|
|
|
|
pipe_read, pipe_write, pipe_ioctl, fnullop_fcntl, pipe_poll,
|
2003-02-13 00:54:15 +03:00
|
|
|
pipe_stat, pipe_close, pipe_kqfilter
|
|
|
|
};
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Default pipe buffer size(s), this can be kind-of large now because pipe
|
|
|
|
* space is pageable. The pipe code will try to maintain locality of
|
|
|
|
* reference for performance reasons, so small amounts of outstanding I/O
|
|
|
|
* will not wipe the cache.
|
|
|
|
*/
|
|
|
|
#define MINPIPESIZE (PIPE_SIZE/3)
|
|
|
|
#define MAXPIPESIZE (2*PIPE_SIZE/3)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Maximum amount of kva for pipes -- this is kind-of a soft limit, but
|
|
|
|
* is there so that on large systems, we don't exhaust it.
|
|
|
|
*/
|
|
|
|
#define MAXPIPEKVA (8*1024*1024)
|
2001-06-16 16:00:02 +04:00
|
|
|
static int maxpipekva = MAXPIPEKVA;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Limit for direct transfers, we cannot, of course limit
|
|
|
|
* the amount of kva for pipes in general though.
|
|
|
|
*/
|
|
|
|
#define LIMITPIPEKVA (16*1024*1024)
|
2001-06-16 16:00:02 +04:00
|
|
|
static int limitpipekva = LIMITPIPEKVA;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Limit the number of "big" pipes
|
|
|
|
*/
|
2001-06-16 16:00:02 +04:00
|
|
|
#define LIMITBIGPIPES 32
|
|
|
|
static int maxbigpipes = LIMITBIGPIPES;
|
|
|
|
static int nbigpipe = 0;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
2001-06-16 16:00:02 +04:00
|
|
|
/*
|
|
|
|
* Amount of KVA consumed by pipe buffers.
|
|
|
|
*/
|
|
|
|
static int amountpipekva = 0;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
MALLOC_DEFINE(M_PIPE, "pipe", "Pipe structures");
|
|
|
|
|
2003-09-15 03:47:09 +04:00
|
|
|
static void pipeclose(struct file *fp, struct pipe *pipe);
|
2003-02-13 00:54:15 +03:00
|
|
|
static void pipe_free_kmem(struct pipe *pipe);
|
|
|
|
static int pipe_create(struct pipe **pipep, int allockva);
|
|
|
|
static int pipelock(struct pipe *pipe, int catch);
|
|
|
|
static __inline void pipeunlock(struct pipe *pipe);
|
2005-09-11 21:55:26 +04:00
|
|
|
static void pipeselwakeup(struct pipe *pipe, struct pipe *sigp, int code);
|
2001-06-16 13:21:34 +04:00
|
|
|
#ifndef PIPE_NODIRECT
|
2003-09-15 03:47:09 +04:00
|
|
|
static int pipe_direct_write(struct file *fp, struct pipe *wpipe,
|
|
|
|
struct uio *uio);
|
2001-06-16 13:21:34 +04:00
|
|
|
#endif
|
2003-02-13 00:54:15 +03:00
|
|
|
static int pipespace(struct pipe *pipe, int size);
|
2001-06-16 16:00:02 +04:00
|
|
|
|
|
|
|
#ifndef PIPE_NODIRECT
|
2002-03-14 00:50:24 +03:00
|
|
|
static int pipe_loan_alloc(struct pipe *, int);
|
|
|
|
static void pipe_loan_free(struct pipe *);
|
2001-06-16 16:00:02 +04:00
|
|
|
#endif /* PIPE_NODIRECT */
|
|
|
|
|
2004-04-25 20:42:40 +04:00
|
|
|
static POOL_INIT(pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
|
|
|
|
&pool_allocator_nointr);
|
2002-03-14 00:50:24 +03:00
|
|
|
|
2001-06-16 13:21:34 +04:00
|
|
|
/*
|
|
|
|
* The pipe system call for the DTYPE_PIPE type of pipes
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* ARGSUSED */
int
sys_pipe(l, v, retval)
	struct lwp *l;		/* calling lwp; descriptors go to l->l_proc */
	void *v;		/* syscall argument block (unused here) */
	register_t *retval;	/* out: retval[0] = read fd, retval[1] = write fd */
{
	struct file *rf, *wf;		/* read-side and write-side file structs */
	struct pipe *rpipe, *wpipe;	/* read-side and write-side pipe ends */
	int fd, error;
	struct proc *p;

	p = l->l_proc;
	rpipe = wpipe = NULL;
	/*
	 * Only the read side gets a kva buffer up front (allockva=1);
	 * the write side allocates lazily.  pipeclose(NULL, ...) below
	 * tolerates a NULL pipe from a failed create.
	 */
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
		pipeclose(NULL, rpipe);
		pipeclose(NULL, wpipe);
		return (ENFILE);
	}

	/*
	 * Note: the file structure returned from falloc() is marked
	 * as 'larval' initially. Unless we mark it as 'mature' by
	 * FILE_SET_MATURE(), any attempt to do anything with it would
	 * return EBADF, including e.g. dup(2) or close(2). This avoids
	 * file descriptor races if we block in the second falloc().
	 */

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	/* Cross-link the two ends so each side can find its peer. */
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf);
	FILE_SET_MATURE(wf);
	FILE_UNUSE(rf, p);
	FILE_UNUSE(wf, p);
	return (0);
free3:
	/* Second falloc() failed: undo the read-side descriptor. */
	FILE_UNUSE(rf, p);
	ffree(rf);
	fdremove(p->p_fd, retval[0]);
free2:
	/* Tear down both pipe ends; neither has a file attached yet. */
	pipeclose(NULL, wpipe);
	pipeclose(NULL, rpipe);

	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate kva for pipe circular buffer, the space is pageable
|
|
|
|
* This routine will 'realloc' the size of a pipe safely, if it fails
|
|
|
|
* it will retain the old buffer.
|
|
|
|
* If it fails it will return ENOMEM.
|
|
|
|
*/
|
|
|
|
static int
|
2003-02-13 00:54:15 +03:00
|
|
|
pipespace(pipe, size)
|
|
|
|
struct pipe *pipe;
|
2001-06-16 13:21:34 +04:00
|
|
|
int size;
|
|
|
|
{
|
|
|
|
caddr_t buffer;
|
|
|
|
/*
|
2003-02-13 00:54:15 +03:00
|
|
|
* Allocate pageable virtual address space. Physical memory is
|
|
|
|
* allocated on demand.
|
2001-06-16 16:00:02 +04:00
|
|
|
*/
|
2005-04-01 15:59:21 +04:00
|
|
|
buffer = (caddr_t) uvm_km_alloc(kernel_map, round_page(size), 0,
|
|
|
|
UVM_KMF_PAGEABLE);
|
2001-06-16 16:00:02 +04:00
|
|
|
if (buffer == NULL)
|
|
|
|
return (ENOMEM);
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
/* free old resources if we're resizing */
|
2003-02-13 00:54:15 +03:00
|
|
|
pipe_free_kmem(pipe);
|
|
|
|
pipe->pipe_buffer.buffer = buffer;
|
|
|
|
pipe->pipe_buffer.size = size;
|
|
|
|
pipe->pipe_buffer.in = 0;
|
|
|
|
pipe->pipe_buffer.out = 0;
|
|
|
|
pipe->pipe_buffer.cnt = 0;
|
|
|
|
amountpipekva += pipe->pipe_buffer.size;
|
2001-06-16 13:21:34 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-02-13 00:54:15 +03:00
|
|
|
* Initialize and allocate VM and memory for pipe.
|
2001-06-16 13:21:34 +04:00
|
|
|
*/
|
|
|
|
static int
|
2003-02-13 00:54:15 +03:00
|
|
|
pipe_create(pipep, allockva)
|
|
|
|
struct pipe **pipep;
|
2001-07-17 10:05:28 +04:00
|
|
|
int allockva;
|
2001-06-16 13:21:34 +04:00
|
|
|
{
|
2003-02-13 00:54:15 +03:00
|
|
|
struct pipe *pipe;
|
2001-06-16 13:21:34 +04:00
|
|
|
int error;
|
|
|
|
|
2004-03-24 23:25:28 +03:00
|
|
|
pipe = *pipep = pool_get(&pipe_pool, PR_WAITOK);
|
2001-06-16 13:21:34 +04:00
|
|
|
|
2005-02-27 00:34:55 +03:00
|
|
|
/* Initialize */
|
2003-02-13 00:54:15 +03:00
|
|
|
memset(pipe, 0, sizeof(struct pipe));
|
|
|
|
pipe->pipe_state = PIPE_SIGNALR;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
2003-02-13 00:54:15 +03:00
|
|
|
PIPE_TIMESTAMP(&pipe->pipe_ctime);
|
|
|
|
pipe->pipe_atime = pipe->pipe_ctime;
|
|
|
|
pipe->pipe_mtime = pipe->pipe_ctime;
|
|
|
|
simple_lock_init(&pipe->pipe_slock);
|
2004-07-18 00:50:08 +04:00
|
|
|
lockinit(&pipe->pipe_lock, PSOCK | PCATCH, "pipelk", 0, 0);
|
2001-06-16 13:21:34 +04:00
|
|
|
|
2004-03-04 01:00:34 +03:00
|
|
|
if (allockva && (error = pipespace(pipe, PIPE_SIZE)))
|
|
|
|
return (error);
|
|
|
|
|
2001-06-16 13:21:34 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2003-02-13 00:54:15 +03:00
|
|
|
* Lock a pipe for I/O, blocking other access
|
|
|
|
* Called with pipe spin lock held.
|
|
|
|
* Return with pipe spin lock released on success.
|
2001-06-16 13:21:34 +04:00
|
|
|
*/
|
2003-02-13 00:54:15 +03:00
|
|
|
/*
 * Acquire the long-term (I/O) lock on a pipe, blocking other access.
 * Must be called with the pipe spin lock (pipe_slock) held; on success
 * the spin lock has been released (LK_INTERLOCK).  On failure the spin
 * lock is re-acquired before returning, so the caller always sees a
 * consistent lock state: spin lock held iff an error is returned.
 *
 * 'catch' != 0 means signal-interruptible: EINTR/ERESTART are returned
 * to the caller instead of being retried here.
 */
static int
pipelock(pipe, catch)
	struct pipe *pipe;
	int catch;
{
	int error;

	LOCK_ASSERT(simple_lock_held(&pipe->pipe_slock));

	while (1) {
		/* LK_INTERLOCK: lockmgr drops pipe_slock for us. */
		error = lockmgr(&pipe->pipe_lock, LK_EXCLUSIVE | LK_INTERLOCK,
		    &pipe->pipe_slock);
		if (error == 0)
			break;

		/* Re-take the spin lock before deciding what to do. */
		simple_lock(&pipe->pipe_slock);
		if (catch || (error != EINTR && error != ERESTART))
			break;
		/*
		 * XXX XXX XXX
		 * The pipe lock is initialised with PCATCH on and we cannot
		 * override this in a lockmgr() call. Thus a pending signal
		 * will cause lockmgr() to return with EINTR or ERESTART.
		 * We cannot simply re-enter lockmgr() at this point since
		 * the pending signals have not yet been posted and would
		 * cause an immediate EINTR/ERESTART return again.
		 * As a workaround we pause for a while here, giving the lock
		 * a chance to drain, before trying again.
		 * XXX XXX XXX
		 *
		 * NOTE: Consider dropping PCATCH from this lock; in practice
		 * it is never held for long enough periods for having it
		 * interruptable at the start of pipe_read/pipe_write to be
		 * beneficial.
		 */
		(void) ltsleep(&lbolt, PSOCK, "rstrtpipelock", hz,
		    &pipe->pipe_slock);
	}
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unlock a pipe I/O lock
|
|
|
|
*/
|
|
|
|
static __inline void
|
2003-02-13 00:54:15 +03:00
|
|
|
pipeunlock(pipe)
|
|
|
|
struct pipe *pipe;
|
2001-06-16 13:21:34 +04:00
|
|
|
{
|
2002-03-14 00:50:24 +03:00
|
|
|
|
2003-02-13 00:54:15 +03:00
|
|
|
lockmgr(&pipe->pipe_lock, LK_RELEASE, NULL);
|
2001-06-16 13:21:34 +04:00
|
|
|
}
|
|
|
|
|
2001-06-16 16:00:02 +04:00
|
|
|
/*
|
|
|
|
* Select/poll wakup. This also sends SIGIO to peer connected to
|
|
|
|
* 'sigpipe' side of pipe.
|
|
|
|
*/
|
2003-02-13 00:54:15 +03:00
|
|
|
/*
 * Select/poll wakeup for 'selp', plus SIGIO delivery for the process
 * group registered on 'sigp' when that side has PIPE_ASYNC set.
 * 'code' is the POLL_* siginfo code describing the event; it is mapped
 * to the corresponding POLL* band bits for fownsignal().
 */
static void
pipeselwakeup(selp, sigp, code)
	struct pipe *selp, *sigp;
	int code;
{
	int band;

	/* Always notify selectors/pollers waiting on 'selp'. */
	selnotify(&selp->pipe_sel, NOTE_SUBMIT);

	/* SIGIO only when async I/O was requested on the signalled side. */
	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
		return;

	switch (code) {
	case POLL_IN:
		band = POLLIN|POLLRDNORM;
		break;
	case POLL_OUT:
		band = POLLOUT|POLLWRNORM;
		break;
	case POLL_HUP:
		band = POLLHUP;
		break;
#if POLL_HUP != POLL_ERR
	case POLL_ERR:
		band = POLLERR;
		break;
#endif
	default:
		/* Unknown code: still signal, but with no band bits. */
		band = 0;
#ifdef DIAGNOSTIC
		printf("bad siginfo code %d in pipe notification.\n", code);
#endif
		break;
	}

	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
}
|
|
|
|
|
|
|
|
/*
 * Read from the pipe.  Transfers data from the kernel buffer (or, when
 * a direct write is pending, straight from the loaned writer pages)
 * into 'uio'.  Returns 0 on success or EOF (EOF is a zero-length read),
 * EAGAIN for non-blocking descriptors with no data, or an error from
 * uiomove()/sleep.  'offset', 'cred' and 'flags' are unused.
 */
/* ARGSUSED */
static int
pipe_read(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	struct pipebuf *bp = &rpipe->pipe_buffer;
	int error;
	size_t nread = 0;	/* total bytes consumed so far */
	size_t size;		/* bytes moved per iteration */
	size_t ocnt;		/* buffer fill level on entry */

	PIPE_LOCK(rpipe);
	/* pipe_busy defers PIPE_WANTCLOSE processing while we are here. */
	++rpipe->pipe_busy;
	ocnt = bp->cnt;

again:
	/* Take the long-term lock; releases the spin lock on success. */
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (bp->cnt > 0) {
			/* Clamp to contiguous run, then to data, then to request. */
			size = bp->size - bp->out;
			if (size > bp->cnt)
				size = bp->cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			error = uiomove(&bp->buffer[bp->out], size, uio);
			if (error)
				break;

			bp->out += size;
			if (bp->out >= bp->size)
				bp->out = 0;

			bp->cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (bp->cnt == 0) {
				bp->in = 0;
				bp->out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		} else if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
			/*
			 * Direct copy, bypassing a kernel buffer.
			 */
			caddr_t va;

			KASSERT(rpipe->pipe_state & PIPE_DIRECTW);

			size = rpipe->pipe_map.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			/* Copy from the writer's loaned pages mapped at kva. */
			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				/* Direct transfer drained: unblock the writer. */
				PIPE_LOCK(rpipe);
				rpipe->pipe_state &= ~PIPE_DIRECTR;
				wakeup(rpipe);
				PIPE_UNLOCK(rpipe);
			}
#endif
		} else {
			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			PIPE_LOCK(rpipe);

			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF) {
				PIPE_UNLOCK(rpipe);
				break;
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				PIPE_UNLOCK(rpipe);
				error = EAGAIN;
				break;
			}

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * The PIPE_DIRECTR flag is not under the control
			 * of the long-term lock (see pipe_direct_write()),
			 * so re-check now while holding the spin lock.
			 */
			if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
				goto again;

			/*
			 * We want to read more, wake up select/poll.
			 */
			pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_IN);

			/*
			 * If the "write-side" is blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/* Now wait until the pipe is filled */
			rpipe->pipe_state |= PIPE_WANTR;
			error = ltsleep(rpipe, PSOCK | PCATCH,
			    "piperd", 0, &rpipe->pipe_slock);
			if (error != 0)
				goto unlocked_error;
			goto again;
		}
	}

	if (error == 0)
		PIPE_TIMESTAMP(&rpipe->pipe_atime);

	/* Re-take the spin lock, then drop the long-term lock. */
	PIPE_LOCK(rpipe);
	pipeunlock(rpipe);

unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
		rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
		wakeup(rpipe);
	} else if (bp->cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/*
	 * If anything was read off the buffer, signal to the writer it's
	 * possible to write more data. Also send signal if we are here for the
	 * first time after last write.
	 */
	if ((bp->size - bp->cnt) >= PIPE_BUF
	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	PIPE_UNLOCK(rpipe);
	return (error);
}
|
|
|
|
|
2001-06-16 16:00:02 +04:00
|
|
|
#ifndef PIPE_NODIRECT
|
|
|
|
/*
|
|
|
|
* Allocate structure for loan transfer.
|
|
|
|
*/
|
2001-11-06 10:30:14 +03:00
|
|
|
static int
|
|
|
|
pipe_loan_alloc(wpipe, npages)
|
2001-06-16 16:00:02 +04:00
|
|
|
struct pipe *wpipe;
|
|
|
|
int npages;
|
|
|
|
{
|
2001-11-06 10:30:14 +03:00
|
|
|
vsize_t len;
|
|
|
|
|
|
|
|
len = (vsize_t)npages << PAGE_SHIFT;
|
2005-04-01 15:59:21 +04:00
|
|
|
wpipe->pipe_map.kva = uvm_km_alloc(kernel_map, len, 0,
|
|
|
|
UVM_KMF_VAONLY | UVM_KMF_WAITVA);
|
2002-02-28 07:43:16 +03:00
|
|
|
if (wpipe->pipe_map.kva == 0)
|
2001-06-16 16:00:02 +04:00
|
|
|
return (ENOMEM);
|
|
|
|
|
2001-11-06 10:30:14 +03:00
|
|
|
amountpipekva += len;
|
2001-06-16 16:00:02 +04:00
|
|
|
wpipe->pipe_map.npages = npages;
|
2001-11-06 10:30:14 +03:00
|
|
|
wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
|
|
|
|
M_WAITOK);
|
2001-06-16 16:00:02 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free resources allocated for loan transfer.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pipe_loan_free(wpipe)
|
|
|
|
struct pipe *wpipe;
|
|
|
|
{
|
2001-11-06 10:30:14 +03:00
|
|
|
vsize_t len;
|
|
|
|
|
|
|
|
len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
|
2005-04-01 15:59:21 +04:00
|
|
|
uvm_km_free(kernel_map, wpipe->pipe_map.kva, len, UVM_KMF_VAONLY);
|
2002-02-28 07:43:16 +03:00
|
|
|
wpipe->pipe_map.kva = 0;
|
2001-11-06 10:30:14 +03:00
|
|
|
amountpipekva -= len;
|
|
|
|
free(wpipe->pipe_map.pgs, M_PIPE);
|
|
|
|
wpipe->pipe_map.pgs = NULL;
|
2001-06-16 16:00:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 *
 * Called with the long-term pipe lock held.
 *
 * Returns 0 on success, ENOMEM if loan setup failed (caller falls back
 * to the ordinary buffered write), or a sleep/EOF error.
 */
static int
pipe_direct_write(fp, wpipe, uio)
	struct file *fp;
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, npages, j;
	struct vm_page **pgs;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;

	/* No direct transfer may already be pending on this pipe. */
	KASSERT(wpipe->pipe_map.cnt == 0);

	/*
	 * Handle first PIPE_CHUNK_SIZE bytes of buffer. Deal with buffers
	 * not aligned to PAGE_SIZE: [base, bend) is the page-aligned
	 * region covering the user iovec, bpos the offset within it.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		/* Clamp to one chunk; the rest goes in a later pass. */
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva != 0 && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error)
			return (error);
	}

	/* Loan the write buffer memory from writer process */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
	    pgs, UVM_LOAN_TOPAGE);
	if (error) {
		pipe_loan_free(wpipe);
		return (ENOMEM); /* so that caller fallback to ordinary write */
	}

	/* Enter the loaned pages to kva, read-only for the reader side. */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
	}
	pmap_update(pmap_kernel());

	/* Now we can put the pipe in direct write mode */
	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;
	wpipe->pipe_state |= PIPE_DIRECTW;

	/*
	 * But before we can let someone do a direct read,
	 * we have to wait until the pipe is drained.
	 */

	/* Release the pipe lock while we wait */
	PIPE_LOCK(wpipe);
	pipeunlock(wpipe);

	/* Drain any buffered data first so reads stay ordered. */
	while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = ltsleep(wpipe, PSOCK | PCATCH, "pipdwc", 0,
		    &wpipe->pipe_slock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Pipe is drained; next read will come off the direct buffer */
	wpipe->pipe_state |= PIPE_DIRECTR;

	/* Wait until the reader is done */
	while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe, POLL_IN);
		error = ltsleep(wpipe, PSOCK | PCATCH, "pipdwt", 0,
		    &wpipe->pipe_slock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Take pipe out of direct write mode */
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);

	/* Acquire the pipe lock and cleanup: unmap and return the loan. */
	(void)pipelock(wpipe, 0);
	if (pgs != NULL) {
		pmap_kremove(wpipe->pipe_map.kva, blen);
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	/* Drop cached KVA on error or when over the global budget. */
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);

	if (error) {
		pipeselwakeup(wpipe, wpipe, POLL_ERR);

		/*
		 * If nothing was read from what we offered, return error
		 * straight on. Otherwise update uio resid first. Caller
		 * will deal with the error condition, returning short
		 * write, error, or restarting the write(2) as appropriate.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
			wpipe->pipe_map.cnt = 0;
			wakeup(wpipe);
			return (error);
		}

		/* Account only for the bytes the reader consumed. */
		bcnt -= wpipe->pipe_map.cnt;
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	wpipe->pipe_map.cnt = 0;
	return (error);
}
|
|
|
|
#endif /* !PIPE_NODIRECT */
|
|
|
|
|
|
|
|
/*
 * Pipe write path: copy data from uio into the peer's pipe buffer,
 * optionally using the direct (page-loan) mechanism for large writes.
 * Blocks (unless FNONBLOCK) when the buffer is full; honours the
 * PIPE_BUF atomicity guarantee for small writes.
 */
static int
pipe_write(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
{
	struct pipe *wpipe, *rpipe;
	struct pipebuf *bp;
	int error;

	/* We want to write to our peer */
	rpipe = (struct pipe *) fp->f_data;

retry:
	error = 0;
	PIPE_LOCK(rpipe);
	wpipe = rpipe->pipe_peer;

	/*
	 * Detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe == NULL)
		error = EPIPE;
	else if (simple_lock_try(&wpipe->pipe_slock) == 0) {
		/* Deal with race for peer */
		PIPE_UNLOCK(rpipe);
		goto retry;
	} else if ((wpipe->pipe_state & PIPE_EOF) != 0) {
		PIPE_UNLOCK(wpipe);
		error = EPIPE;
	}

	PIPE_UNLOCK(rpipe);
	if (error != 0)
		return (error);

	/* Keep the peer alive across our sleeps; see PIPE_WANTCLOSE. */
	++wpipe->pipe_busy;

	/* Acquire the long-term pipe lock */
	if ((error = pipelock(wpipe,1)) != 0) {
		--wpipe->pipe_busy;
		if (wpipe->pipe_busy == 0
		    && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
			wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(wpipe);
		return (error);
	}

	bp = &wpipe->pipe_buffer;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {

		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
			nbigpipe++;
	}

	while (uio->uio_resid) {
		size_t space;

#ifndef PIPE_NODIRECT
		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes. Also, only one direct write can be
		 * in progress at any one time. We wait until the currently
		 * executing direct write is completed before continuing.
		 *
		 * We break out if a signal occurs or the reader goes away.
		 */
		while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
			PIPE_LOCK(wpipe);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			pipeunlock(wpipe);
			error = ltsleep(wpipe, PSOCK | PCATCH,
			    "pipbww", 0, &wpipe->pipe_slock);

			(void)pipelock(wpipe, 0);
			if (wpipe->pipe_state & PIPE_EOF)
				error = EPIPE;
		}
		if (error)
			break;

		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(fp, wpipe, uio);

			/*
			 * Break out if error occurred, unless it's ENOMEM.
			 * ENOMEM means we failed to allocate some resources
			 * for direct write, so we just fallback to ordinary
			 * write. If the direct write was successful,
			 * process rest of data via ordinary write.
			 */
			if (error == 0)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* PIPE_NODIRECT */

		space = bp->size - bp->cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer. If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = bp->size - bp->in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */
			error = uiomove(&bp->buffer[bp->in], segsize, uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
#ifdef DEBUG
				if (bp->in + segsize != bp->size)
					panic("Expected pipe buffer wraparound disappeared");
#endif

				error = uiomove(&bp->buffer[0],
				    size - segsize, uio);
			}
			if (error)
				break;

			bp->in += size;
			if (bp->in >= bp->size) {
#ifdef DEBUG
				if (bp->in != size - segsize + bp->size)
					panic("Expected wraparound bad");
#endif
				bp->in = size - segsize;
			}

			bp->cnt += size;
#ifdef DEBUG
			if (bp->cnt > bp->size)
				panic("Pipe buffer overflow");
#endif
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			PIPE_LOCK(wpipe);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			PIPE_UNLOCK(wpipe);

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			if (bp->cnt)
				pipeselwakeup(wpipe, wpipe, POLL_OUT);

			/* Sleep until the reader frees some space. */
			PIPE_LOCK(wpipe);
			pipeunlock(wpipe);
			wpipe->pipe_state |= PIPE_WANTW;
			error = ltsleep(wpipe, PSOCK | PCATCH, "pipewr", 0,
			    &wpipe->pipe_slock);
			(void)pipelock(wpipe, 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	PIPE_LOCK(wpipe);
	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
		/* Let a waiting pipeclose() proceed. */
		wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
		wakeup(wpipe);
	} else if (bp->cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
		error = 0;

	if (error == 0)
		PIPE_TIMESTAMP(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 in this point (direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt
	 */
	if (bp->cnt)
		pipeselwakeup(wpipe, wpipe, POLL_OUT);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	pipeunlock(wpipe);
	PIPE_UNLOCK(wpipe);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we implement a very minimal set of ioctls for compatibility with sockets.
|
|
|
|
*/
|
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
pipe_ioctl(fp, cmd, data, p)
|
2001-06-16 13:21:34 +04:00
|
|
|
struct file *fp;
|
|
|
|
u_long cmd;
|
2003-03-22 00:13:50 +03:00
|
|
|
void *data;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
2001-06-16 13:21:34 +04:00
|
|
|
{
|
2003-02-13 00:54:15 +03:00
|
|
|
struct pipe *pipe = (struct pipe *)fp->f_data;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
|
|
|
|
case FIONBIO:
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
case FIOASYNC:
|
2003-02-13 00:54:15 +03:00
|
|
|
PIPE_LOCK(pipe);
|
2001-06-16 13:21:34 +04:00
|
|
|
if (*(int *)data) {
|
2003-02-13 00:54:15 +03:00
|
|
|
pipe->pipe_state |= PIPE_ASYNC;
|
2001-06-16 13:21:34 +04:00
|
|
|
} else {
|
2003-02-13 00:54:15 +03:00
|
|
|
pipe->pipe_state &= ~PIPE_ASYNC;
|
2001-06-16 13:21:34 +04:00
|
|
|
}
|
2003-02-13 00:54:15 +03:00
|
|
|
PIPE_UNLOCK(pipe);
|
2001-06-16 13:21:34 +04:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
case FIONREAD:
|
2003-02-13 00:54:15 +03:00
|
|
|
PIPE_LOCK(pipe);
|
2001-06-16 16:00:02 +04:00
|
|
|
#ifndef PIPE_NODIRECT
|
2003-02-13 00:54:15 +03:00
|
|
|
if (pipe->pipe_state & PIPE_DIRECTW)
|
|
|
|
*(int *)data = pipe->pipe_map.cnt;
|
2001-06-16 13:21:34 +04:00
|
|
|
else
|
2001-06-16 16:00:02 +04:00
|
|
|
#endif
|
2003-02-13 00:54:15 +03:00
|
|
|
*(int *)data = pipe->pipe_buffer.cnt;
|
|
|
|
PIPE_UNLOCK(pipe);
|
2001-06-16 13:21:34 +04:00
|
|
|
return (0);
|
|
|
|
|
2004-11-06 05:03:20 +03:00
|
|
|
case FIONWRITE:
|
|
|
|
/* Look at other side */
|
|
|
|
pipe = pipe->pipe_peer;
|
|
|
|
PIPE_LOCK(pipe);
|
|
|
|
#ifndef PIPE_NODIRECT
|
|
|
|
if (pipe->pipe_state & PIPE_DIRECTW)
|
|
|
|
*(int *)data = pipe->pipe_map.cnt;
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
*(int *)data = pipe->pipe_buffer.cnt;
|
|
|
|
PIPE_UNLOCK(pipe);
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
case FIONSPACE:
|
|
|
|
/* Look at other side */
|
|
|
|
pipe = pipe->pipe_peer;
|
|
|
|
PIPE_LOCK(pipe);
|
|
|
|
#ifndef PIPE_NODIRECT
|
|
|
|
/*
|
|
|
|
* If we're in direct-mode, we don't really have a
|
|
|
|
* send queue, and any other write will block. Thus
|
|
|
|
* zero seems like the best answer.
|
|
|
|
*/
|
|
|
|
if (pipe->pipe_state & PIPE_DIRECTW)
|
|
|
|
*(int *)data = 0;
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
*(int *)data = pipe->pipe_buffer.size -
|
|
|
|
pipe->pipe_buffer.cnt;
|
|
|
|
PIPE_UNLOCK(pipe);
|
|
|
|
return (0);
|
|
|
|
|
2001-06-16 16:00:02 +04:00
|
|
|
case TIOCSPGRP:
|
2003-09-21 23:16:48 +04:00
|
|
|
case FIOSETOWN:
|
|
|
|
return fsetown(p, &pipe->pipe_pgid, cmd, data);
|
2001-06-16 16:00:02 +04:00
|
|
|
|
|
|
|
case TIOCGPGRP:
|
2003-09-21 23:16:48 +04:00
|
|
|
case FIOGETOWN:
|
|
|
|
return fgetown(p, pipe->pipe_pgid, cmd, data);
|
2001-06-16 13:21:34 +04:00
|
|
|
|
|
|
|
}
|
2002-03-17 22:40:26 +03:00
|
|
|
return (EPASSTHROUGH);
|
2001-06-16 13:21:34 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Poll/select support for pipes: report readability of our end and
 * writability of the peer, plus POLLHUP when either end has seen EOF
 * or the peer is gone.
 */
int
pipe_poll(fp, events, td)
	struct file *fp;
	int events;
	struct proc *td;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int eof = 0;
	int revents = 0;

retry:
	PIPE_LOCK(rpipe);
	wpipe = rpipe->pipe_peer;
	if (wpipe != NULL && simple_lock_try(&wpipe->pipe_slock) == 0) {
		/* Deal with race for peer */
		PIPE_UNLOCK(rpipe);
		goto retry;
	}

	/* Readable: buffered data, a pending direct read, or EOF. */
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTR) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	eof |= (rpipe->pipe_state & PIPE_EOF);
	PIPE_UNLOCK(rpipe);

	if (wpipe == NULL)
		/* No peer: writes would fail immediately, report writable. */
		revents |= events & (POLLOUT | POLLWRNORM);
	else {
		/* Writable: EOF, or >= PIPE_BUF free with no direct write. */
		if (events & (POLLOUT | POLLWRNORM))
			if ((wpipe->pipe_state & PIPE_EOF) || (
#ifndef PIPE_NODIRECT
			    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
			    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
				revents |= events & (POLLOUT | POLLWRNORM);

		eof |= (wpipe->pipe_state & PIPE_EOF);
		PIPE_UNLOCK(wpipe);
	}

	if (wpipe == NULL || eof)
		revents |= POLLHUP;

	/*
	 * Nothing ready: record for later wakeup.  Note that wpipe is
	 * non-NULL here, since wpipe == NULL set POLLHUP above.
	 */
	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(td, &rpipe->pipe_sel);

		if (events & (POLLOUT | POLLWRNORM))
			selrecord(td, &wpipe->pipe_sel);
	}

	return (revents);
}
|
|
|
|
|
|
|
|
static int
|
2003-06-30 02:28:00 +04:00
|
|
|
pipe_stat(fp, ub, td)
|
2002-03-14 00:50:24 +03:00
|
|
|
struct file *fp;
|
|
|
|
struct stat *ub;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *td;
|
2001-06-16 13:21:34 +04:00
|
|
|
{
|
|
|
|
struct pipe *pipe = (struct pipe *)fp->f_data;
|
|
|
|
|
2001-07-18 10:48:27 +04:00
|
|
|
memset((caddr_t)ub, 0, sizeof(*ub));
|
2002-12-05 19:30:55 +03:00
|
|
|
ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
|
2001-06-16 13:21:34 +04:00
|
|
|
ub->st_blksize = pipe->pipe_buffer.size;
|
2005-03-12 21:43:42 +03:00
|
|
|
if (ub->st_blksize == 0 && pipe->pipe_peer)
|
|
|
|
ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
|
2001-06-16 13:21:34 +04:00
|
|
|
ub->st_size = pipe->pipe_buffer.cnt;
|
2001-06-16 16:00:02 +04:00
|
|
|
ub->st_blocks = (ub->st_size) ? 1 : 0;
|
2004-11-14 06:30:08 +03:00
|
|
|
TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
|
2001-06-16 16:00:02 +04:00
|
|
|
TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
|
2001-06-16 13:21:34 +04:00
|
|
|
ub->st_uid = fp->f_cred->cr_uid;
|
|
|
|
ub->st_gid = fp->f_cred->cr_gid;
|
|
|
|
/*
|
|
|
|
* Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
|
|
|
|
* XXX (st_dev, st_ino) should be unique.
|
|
|
|
*/
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
2003-06-30 02:28:00 +04:00
|
|
|
pipe_close(fp, td)
|
2002-03-14 00:50:24 +03:00
|
|
|
struct file *fp;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *td;
|
2001-06-16 13:21:34 +04:00
|
|
|
{
|
2003-02-13 00:54:15 +03:00
|
|
|
struct pipe *pipe = (struct pipe *)fp->f_data;
|
2001-06-16 13:21:34 +04:00
|
|
|
|
2001-06-16 16:00:02 +04:00
|
|
|
fp->f_data = NULL;
|
2003-09-15 03:47:09 +04:00
|
|
|
pipeclose(fp, pipe);
|
2001-06-16 13:21:34 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release all kernel memory held by a pipe: the ordinary pipe buffer
 * and (when direct writes are enabled) any loan-transfer state.
 * Adjusts the global big-pipe and KVA accounting accordingly.
 */
static void
pipe_free_kmem(pipe)
	struct pipe *pipe;
{

	if (pipe->pipe_buffer.buffer != NULL) {
		/* An enlarged buffer counts against the big-pipe limit. */
		if (pipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= pipe->pipe_buffer.size;
		uvm_km_free(kernel_map,
		    (vaddr_t)pipe->pipe_buffer.buffer,
		    pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
		pipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (pipe->pipe_map.kva != 0) {
		/* Tear down the direct-write loan area and reset bookkeeping. */
		pipe_loan_free(pipe);
		pipe->pipe_map.cnt = 0;
		pipe->pipe_map.kva = 0;
		pipe->pipe_map.pos = 0;
		pipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}
|
|
|
|
|
|
|
|
/*
 * shutdown the pipe: mark EOF, wait for in-progress readers/writers
 * (pipe_busy) to drain, disconnect from the peer, then free the pipe.
 */
static void
pipeclose(fp, pipe)
	struct file *fp;
	struct pipe *pipe;
{
	struct pipe *ppipe;

	if (pipe == NULL)
		return;

retry:
	PIPE_LOCK(pipe);

	pipeselwakeup(pipe, pipe, POLL_HUP);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	pipe->pipe_state |= PIPE_EOF;
	while (pipe->pipe_busy) {
		wakeup(pipe);
		pipe->pipe_state |= PIPE_WANTCLOSE;
		ltsleep(pipe, PSOCK, "pipecl", 0, &pipe->pipe_slock);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = pipe->pipe_peer) != NULL) {
		/* Deal with race for peer: back off and start over. */
		if (simple_lock_try(&ppipe->pipe_slock) == 0) {
			PIPE_UNLOCK(pipe);
			goto retry;
		}
		pipeselwakeup(ppipe, ppipe, POLL_HUP);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		ppipe->pipe_peer = NULL;
		PIPE_UNLOCK(ppipe);
	}

	/* Drain the long-term lock; also drops the interlock. */
	(void)lockmgr(&pipe->pipe_lock, LK_DRAIN | LK_INTERLOCK,
	    &pipe->pipe_slock);

	/*
	 * free resources
	 */
	pipe_free_kmem(pipe);
	pool_put(&pipe_pool, pipe);
}
|
|
|
|
|
2002-10-23 13:10:23 +04:00
|
|
|
/*
 * Detach a knote from a pipe's klist.  Write filters hang off the
 * peer pipe's selinfo, so resolve the peer first for EVFILT_WRITE.
 */
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *pipe = (struct pipe *)kn->kn_fp->f_data;

	switch(kn->kn_filter) {
	case EVFILT_WRITE:
		/* need the peer structure, not our own */
		pipe = pipe->pipe_peer;
		/* XXXSMP: race for peer */

		/* if reader end already closed, just return */
		if (pipe == NULL)
			return;

		break;
	default:
		/* nothing to do */
		break;
	}

#ifdef DIAGNOSTIC
	if (kn->kn_hook != pipe)
		panic("filt_pipedetach: inconsistent knote");
#endif

	PIPE_LOCK(pipe);
	SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
	PIPE_UNLOCK(pipe);
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
/*
 * kqueue read filter: report bytes available to read (buffered, or the
 * pending direct-write payload).  Fires on data or EOF.  NOTE_SUBMIT
 * means the caller already holds the pipe lock.
 */
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((hint & NOTE_SUBMIT) == 0)
		PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	/* XXXSMP: race for peer */
	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0)
			PIPE_UNLOCK(rpipe);
		return (1);
	}
	if ((hint & NOTE_SUBMIT) == 0)
		PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
/*
 * kqueue write filter: report free space in the peer's buffer (zero
 * while a direct write is pending).  Fires when at least PIPE_BUF
 * bytes can be written atomically, or on EOF/lost peer.  NOTE_SUBMIT
 * means the caller already holds the pipe lock.
 */
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((hint & NOTE_SUBMIT) == 0)
		PIPE_LOCK(rpipe);
	/* XXXSMP: race for peer */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0)
			PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	if ((hint & NOTE_SUBMIT) == 0)
		PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}
|
2002-10-23 13:10:23 +04:00
|
|
|
|
|
|
|
/* kqueue filter tables for pipes (isfd=1: knotes attach via fd). */
static const struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static const struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
|
|
|
|
|
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
pipe_kqfilter(struct file *fp, struct knote *kn)
|
|
|
|
{
|
2003-02-13 00:54:15 +03:00
|
|
|
struct pipe *pipe;
|
2002-10-23 13:10:23 +04:00
|
|
|
|
2003-02-13 00:54:15 +03:00
|
|
|
pipe = (struct pipe *)kn->kn_fp->f_data;
|
2002-10-23 13:10:23 +04:00
|
|
|
switch (kn->kn_filter) {
|
|
|
|
case EVFILT_READ:
|
|
|
|
kn->kn_fop = &pipe_rfiltops;
|
|
|
|
break;
|
|
|
|
case EVFILT_WRITE:
|
|
|
|
kn->kn_fop = &pipe_wfiltops;
|
2003-02-13 00:54:15 +03:00
|
|
|
/* XXXSMP: race for peer */
|
|
|
|
pipe = pipe->pipe_peer;
|
|
|
|
if (pipe == NULL) {
|
2002-10-23 13:10:23 +04:00
|
|
|
/* other end of pipe has been closed */
|
|
|
|
return (EBADF);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (1);
|
|
|
|
}
|
2003-02-13 00:54:15 +03:00
|
|
|
kn->kn_hook = pipe;
|
2002-10-23 13:10:23 +04:00
|
|
|
|
2003-02-13 00:54:15 +03:00
|
|
|
PIPE_LOCK(pipe);
|
|
|
|
SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
|
|
|
|
PIPE_UNLOCK(pipe);
|
2002-10-23 13:10:23 +04:00
|
|
|
return (0);
|
|
|
|
}
|
2001-06-16 16:00:02 +04:00
|
|
|
|
|
|
|
/*
 * Handle pipe sysctls.
 *
 * Registers the kern.pipe subtree and its tunables/counters with the
 * dynamic sysctl tree.  Node creation order matters: parents must be
 * created before their children, so "kern" and "pipe" come first.
 */
SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
{

	/* Ensure the top-level "kern" node exists. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	/* kern.pipe: parent node for all pipe settings below. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "pipe",
		       SYSCTL_DESCR("Pipe settings"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, KERN_PIPE, CTL_EOL);

	/* kern.pipe.maxkvasz (rw): cap on pipe kernel memory use. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxkvasz",
		       SYSCTL_DESCR("Maximum amount of kernel memory to be "
				    "used for pipes"),
		       NULL, 0, &maxpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
	/* kern.pipe.maxloankvasz (rw): page-loan transfer limit. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxloankvasz",
		       SYSCTL_DESCR("Limit for direct transfers via page loan"),
		       NULL, 0, &limitpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
	/* kern.pipe.maxbigpipes (rw): cap on enlarged-buffer pipes. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxbigpipes",
		       SYSCTL_DESCR("Maximum number of \"big\" pipes"),
		       NULL, 0, &maxbigpipes, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
	/* kern.pipe.nbigpipes (ro): current count of "big" pipes. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "nbigpipes",
		       SYSCTL_DESCR("Number of \"big\" pipes"),
		       NULL, 0, &nbigpipe, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
	/* kern.pipe.kvasize (ro): kernel memory currently consumed. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "kvasize",
		       SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
				    "buffers"),
		       NULL, 0, &amountpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
}
|