2006-09-04 01:12:14 +04:00
|
|
|
/* $NetBSD: uipc_usrreq.c,v 1.93 2006/09/03 21:15:29 christos Exp $ */
|
1998-01-08 01:57:09 +03:00
|
|
|
|
|
|
|
/*-
|
2004-04-19 02:20:32 +04:00
|
|
|
* Copyright (c) 1998, 2000, 2004 The NetBSD Foundation, Inc.
|
1998-01-08 01:57:09 +03:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
|
|
|
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
|
|
|
|
* NASA Ames Research Center.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the NetBSD
|
|
|
|
* Foundation, Inc. and its contributors.
|
|
|
|
* 4. Neither the name of The NetBSD Foundation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-05-04 13:50:11 +04:00
|
|
|
* Copyright (c) 1982, 1986, 1989, 1991, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)uipc_usrreq.c 8.9 (Berkeley) 5/14/95
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 1997 Christopher G. Demetriou. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
1993-03-21 12:45:37 +03:00
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1998-03-01 05:20:01 +03:00
|
|
|
* @(#)uipc_usrreq.c 8.9 (Berkeley) 5/14/95
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2006-09-04 01:12:14 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.93 2006/09/03 21:15:29 christos Exp $");
|
2001-11-12 18:25:01 +03:00
|
|
|
|
1993-12-18 07:21:37 +03:00
|
|
|
#include <sys/param.h>
|
1994-05-04 13:50:11 +04:00
|
|
|
#include <sys/systm.h>
|
1993-12-18 07:21:37 +03:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/domain.h>
|
|
|
|
#include <sys/protosw.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/socketvar.h>
|
|
|
|
#include <sys/unpcb.h>
|
|
|
|
#include <sys/un.h>
|
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/mbuf.h>
|
2006-05-15 01:15:11 +04:00
|
|
|
#include <sys/kauth.h>
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Unix communications domain.
|
|
|
|
*
|
|
|
|
* TODO:
|
|
|
|
* SEQPACKET, RDM
|
|
|
|
* rethink name space problems
|
|
|
|
* need a proper out-of-band
|
|
|
|
*/
|
2006-09-04 01:12:14 +04:00
|
|
|
/* Name returned for sockets that are not bound to a pathname. */
const struct sockaddr_un sun_noname = {
	.sun_len = sizeof(sun_noname),
	.sun_family = AF_LOCAL,
};

ino_t	unp_ino;		/* prototype for fake inode numbers */

struct mbuf *unp_addsockcred(struct lwp *, struct mbuf *);
|
1998-01-08 01:57:09 +03:00
|
|
|
|
1996-05-23 20:03:45 +04:00
|
|
|
int
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_output(struct mbuf *m, struct mbuf *control, struct unpcb *unp,
|
2006-07-24 02:06:03 +04:00
|
|
|
struct lwp *l)
|
1996-05-23 20:03:45 +04:00
|
|
|
{
|
|
|
|
struct socket *so2;
|
2004-04-19 02:20:32 +04:00
|
|
|
const struct sockaddr_un *sun;
|
1996-05-23 20:03:45 +04:00
|
|
|
|
|
|
|
so2 = unp->unp_conn->unp_socket;
|
|
|
|
if (unp->unp_addr)
|
|
|
|
sun = unp->unp_addr;
|
|
|
|
else
|
|
|
|
sun = &sun_noname;
|
1998-01-08 01:57:09 +03:00
|
|
|
if (unp->unp_conn->unp_flags & UNP_WANTCRED)
|
2006-07-24 02:06:03 +04:00
|
|
|
control = unp_addsockcred(l, control);
|
2005-05-30 02:24:14 +04:00
|
|
|
if (sbappendaddr(&so2->so_rcv, (const struct sockaddr *)sun, m,
|
1996-05-23 20:03:45 +04:00
|
|
|
control) == 0) {
|
|
|
|
m_freem(control);
|
|
|
|
m_freem(m);
|
2004-09-03 22:14:09 +04:00
|
|
|
so2->so_rcv.sb_overflowed++;
|
2003-04-10 22:55:11 +04:00
|
|
|
return (ENOBUFS);
|
1996-05-23 20:03:45 +04:00
|
|
|
} else {
|
|
|
|
sorwakeup(so2);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_setsockaddr(struct unpcb *unp, struct mbuf *nam)
|
1996-05-23 20:03:45 +04:00
|
|
|
{
|
2004-04-19 02:20:32 +04:00
|
|
|
const struct sockaddr_un *sun;
|
1996-05-23 20:03:45 +04:00
|
|
|
|
|
|
|
if (unp->unp_addr)
|
|
|
|
sun = unp->unp_addr;
|
|
|
|
else
|
|
|
|
sun = &sun_noname;
|
|
|
|
nam->m_len = sun->sun_len;
|
2002-11-25 11:31:58 +03:00
|
|
|
if (nam->m_len > MLEN)
|
1997-06-26 10:06:40 +04:00
|
|
|
MEXTMALLOC(nam, nam->m_len, M_WAITOK);
|
Abolition of bcopy, ovbcopy, bcmp, and bzero, phase one.
bcopy(x, y, z) -> memcpy(y, x, z)
ovbcopy(x, y, z) -> memmove(y, x, z)
bcmp(x, y, z) -> memcmp(x, y, z)
bzero(x, y) -> memset(x, 0, y)
1998-08-04 08:03:10 +04:00
|
|
|
memcpy(mtod(nam, caddr_t), sun, (size_t)nam->m_len);
|
1996-05-23 20:03:45 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_setpeeraddr(struct unpcb *unp, struct mbuf *nam)
|
1996-05-23 20:03:45 +04:00
|
|
|
{
|
2004-04-19 02:20:32 +04:00
|
|
|
const struct sockaddr_un *sun;
|
1996-05-23 20:03:45 +04:00
|
|
|
|
|
|
|
if (unp->unp_conn && unp->unp_conn->unp_addr)
|
|
|
|
sun = unp->unp_conn->unp_addr;
|
|
|
|
else
|
|
|
|
sun = &sun_noname;
|
|
|
|
nam->m_len = sun->sun_len;
|
2002-11-25 11:31:58 +03:00
|
|
|
if (nam->m_len > MLEN)
|
1997-06-26 10:06:40 +04:00
|
|
|
MEXTMALLOC(nam, nam->m_len, M_WAITOK);
|
Abolition of bcopy, ovbcopy, bcmp, and bzero, phase one.
bcopy(x, y, z) -> memcpy(y, x, z)
ovbcopy(x, y, z) -> memmove(y, x, z)
bcmp(x, y, z) -> memcmp(x, y, z)
bzero(x, y) -> memset(x, 0, y)
1998-08-04 08:03:10 +04:00
|
|
|
memcpy(mtod(nam, caddr_t), sun, (size_t)nam->m_len);
|
1996-05-23 20:03:45 +04:00
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*ARGSUSED*/
/*
 * Protocol user-request handler for PF_LOCAL sockets: dispatches the
 * PRU_* request `req' on socket `so'.  `m' carries data (or a struct
 * stat pointer for PRU_SENSE), `nam' an address mbuf (or a peer socket
 * pointer for PRU_CONNECT2), `control' optional control mbufs, and `l'
 * the calling lwp (may be NULL for some requests).  Returns 0 or errno.
 */
int
uipc_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
	struct mbuf *control, struct lwp *l)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	struct proc *p;
	u_int newhiwat;
	int error = 0;

	if (req == PRU_CONTROL)
		return (EOPNOTSUPP);

#ifdef DIAGNOSTIC
	/* Only the send requests may legitimately carry control mbufs. */
	if (req != PRU_SEND && req != PRU_SENDOOB && control)
		panic("uipc_usrreq: unexpected control mbuf");
#endif
	p = l ? l->l_proc : NULL;
	/* Except for attach, every request needs an existing PCB. */
	if (unp == 0 && req != PRU_ATTACH) {
		error = EINVAL;
		goto release;
	}

	switch (req) {

	case PRU_ATTACH:
		if (unp != 0) {
			error = EISCONN;
			break;
		}
		error = unp_attach(so);
		break;

	case PRU_DETACH:
		unp_detach(unp);
		break;

	case PRU_BIND:
		KASSERT(l != NULL);
		error = unp_bind(unp, nam, l);
		break;

	case PRU_LISTEN:
		/* Listening only makes sense on a bound (named) socket. */
		if (unp->unp_vnode == 0)
			error = EINVAL;
		break;

	case PRU_CONNECT:
		KASSERT(l != NULL);
		error = unp_connect(so, nam, l);
		break;

	case PRU_CONNECT2:
		/* socketpair(2): `nam' is really the second socket. */
		error = unp_connect2(so, (struct socket *)nam, PRU_CONNECT2);
		break;

	case PRU_DISCONNECT:
		unp_disconnect(unp);
		break;

	case PRU_ACCEPT:
		unp_setpeeraddr(unp, nam);
		/*
		 * Mark the initiating STREAM socket as connected *ONLY*
		 * after it's been accepted. This prevents a client from
		 * overrunning a server and receiving ECONNREFUSED.
		 */
		if (unp->unp_conn != NULL &&
		    (unp->unp_conn->unp_socket->so_state & SS_ISCONNECTING))
			soisconnected(unp->unp_conn->unp_socket);
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		unp_shutdown(unp);
		break;

	case PRU_RCVD:
		switch (so->so_type) {

		case SOCK_DGRAM:
			panic("uipc 1");
			/*NOTREACHED*/

		case SOCK_STREAM:
#define	rcv (&so->so_rcv)
#define snd (&so2->so_snd)
			if (unp->unp_conn == 0)
				break;
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Adjust backpressure on sender
			 * and wakeup any waiting to write.
			 */
			snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
			unp->unp_mbcnt = rcv->sb_mbcnt;
			newhiwat = snd->sb_hiwat + unp->unp_cc - rcv->sb_cc;
			(void)chgsbsize(so2->so_uidinfo,
			    &snd->sb_hiwat, newhiwat, RLIM_INFINITY);
			unp->unp_cc = rcv->sb_cc;
			sowwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 2");
		}
		break;

	case PRU_SEND:
		/*
		 * Note: unp_internalize() rejects any control message
		 * other than SCM_RIGHTS, and only allows one.  This
		 * has the side-effect of preventing a caller from
		 * forging SCM_CREDS.
		 */
		if (control) {
			KASSERT(l != NULL);
			if ((error = unp_internalize(control, l)) != 0)
				goto die;
		}
		switch (so->so_type) {

		case SOCK_DGRAM: {
			if (nam) {
				/* sendto(2): temporarily connect to `nam'. */
				if ((so->so_state & SS_ISCONNECTED) != 0) {
					error = EISCONN;
					goto die;
				}
				KASSERT(l != NULL);
				error = unp_connect(so, nam, l);
				if (error) {
				die:
					/* Shared failure exit: consume mbufs. */
					m_freem(control);
					m_freem(m);
					break;
				}
			} else {
				if ((so->so_state & SS_ISCONNECTED) == 0) {
					error = ENOTCONN;
					goto die;
				}
			}
			KASSERT(p != NULL);
			error = unp_output(m, control, unp, l);
			if (nam)
				unp_disconnect(unp);
			break;
		}

		case SOCK_STREAM:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			so2 = unp->unp_conn->unp_socket;
			if (unp->unp_conn->unp_flags & UNP_WANTCRED) {
				/*
				 * Credentials are passed only once on
				 * SOCK_STREAM.
				 */
				unp->unp_conn->unp_flags &= ~UNP_WANTCRED;
				control = unp_addsockcred(l, control);
			}
			/*
			 * Send to paired receive port, and then reduce
			 * send buffer hiwater marks to maintain backpressure.
			 * Wake up readers.
			 */
			if (control) {
				if (sbappendcontrol(rcv, m, control) == 0)
					m_freem(control);
			} else
				sbappend(rcv, m);
			snd->sb_mbmax -=
			    rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
			unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
			newhiwat = snd->sb_hiwat -
			    (rcv->sb_cc - unp->unp_conn->unp_cc);
			(void)chgsbsize(so->so_uidinfo,
			    &snd->sb_hiwat, newhiwat, RLIM_INFINITY);
			unp->unp_conn->unp_cc = rcv->sb_cc;
			sorwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 4");
		}
		break;

	case PRU_ABORT:
		unp_drop(unp, ECONNABORTED);

		KASSERT(so->so_head == NULL);
#ifdef DIAGNOSTIC
		if (so->so_pcb == 0)
			panic("uipc 5: drop killed pcb");
#endif
		unp_detach(unp);
		break;

	case PRU_SENSE:
		/* fstat(2) on a socket: `m' is really a struct stat *. */
		((struct stat *) m)->st_blksize = so->so_snd.sb_hiwat;
		if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) {
			so2 = unp->unp_conn->unp_socket;
			((struct stat *) m)->st_blksize += so2->so_rcv.sb_cc;
		}
		((struct stat *) m)->st_dev = NODEV;
		/* Hand out a fake inode number on first stat. */
		if (unp->unp_ino == 0)
			unp->unp_ino = unp_ino++;
		((struct stat *) m)->st_atimespec =
		    ((struct stat *) m)->st_mtimespec =
		    ((struct stat *) m)->st_ctimespec = unp->unp_ctime;
		((struct stat *) m)->st_ino = unp->unp_ino;
		return (0);

	case PRU_RCVOOB:
		error = EOPNOTSUPP;
		break;

	case PRU_SENDOOB:
		/* No out-of-band data on local sockets; consume the mbufs. */
		m_freem(control);
		m_freem(m);
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		unp_setsockaddr(unp, nam);
		break;

	case PRU_PEERADDR:
		unp_setpeeraddr(unp, nam);
		break;

	default:
		panic("piusrreq");
	}

release:
	return (error);
}
|
|
|
|
|
1998-01-08 01:57:09 +03:00
|
|
|
/*
|
|
|
|
* Unix domain socket option processing.
|
|
|
|
*/
|
|
|
|
/*
 * Unix domain socket option processing (level 0 only): handles the
 * LOCAL_CREDS and LOCAL_CONNWAIT boolean options via set/clear of
 * the corresponding unp_flags bits.  On PRCO_SETOPT the input mbuf
 * `*mp' is consumed; on PRCO_GETOPT a fresh mbuf holding an int is
 * returned through `mp'.  Returns 0 or errno.
 */
int
uipc_ctloutput(int op, struct socket *so, int level, int optname,
	struct mbuf **mp)
{
	struct unpcb *unp = sotounpcb(so);
	struct mbuf *m = *mp;
	int optval = 0, error = 0;

	if (level != 0) {
		/* Only the socket-local level is supported. */
		error = EINVAL;
		if (op == PRCO_SETOPT && m)
			(void) m_free(m);
	} else switch (op) {

	case PRCO_SETOPT:
		switch (optname) {
		case LOCAL_CREDS:
		case LOCAL_CONNWAIT:
			if (m == NULL || m->m_len != sizeof(int))
				error = EINVAL;
			else {
				optval = *mtod(m, int *);
				switch (optname) {
/* Set or clear a unp_flags bit according to the user-supplied int. */
#define	OPTSET(bit) \
	if (optval) \
		unp->unp_flags |= (bit); \
	else \
		unp->unp_flags &= ~(bit);

				case LOCAL_CREDS:
					OPTSET(UNP_WANTCRED);
					break;
				case LOCAL_CONNWAIT:
					OPTSET(UNP_CONNWAIT);
					break;
				}
			}
			break;
#undef OPTSET

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (m)
			(void) m_free(m);
		break;

	case PRCO_GETOPT:
		switch (optname) {
		case LOCAL_CREDS:
			*mp = m = m_get(M_WAIT, MT_SOOPTS);
			m->m_len = sizeof(int);
			switch (optname) {

#define	OPTBIT(bit)	(unp->unp_flags & (bit) ? 1 : 0)

			case LOCAL_CREDS:
				optval = OPTBIT(UNP_WANTCRED);
				break;
			}
			*mtod(m, int *) = optval;
			break;
#undef OPTBIT

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return (error);
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;	/* stream send buffer reservation */
u_long	unpst_recvspace = PIPSIZ;	/* stream receive buffer reservation */
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;	/* datagram receive buffer reservation */

int	unp_rights;			/* file descriptors in flight */
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
int
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_attach(struct socket *so)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2000-03-30 13:27:11 +04:00
|
|
|
struct unpcb *unp;
|
1993-03-21 12:45:37 +03:00
|
|
|
int error;
|
2005-02-27 00:34:55 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
|
|
|
|
switch (so->so_type) {
|
|
|
|
|
|
|
|
case SOCK_STREAM:
|
|
|
|
error = soreserve(so, unpst_sendspace, unpst_recvspace);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SOCK_DGRAM:
|
|
|
|
error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
|
|
|
|
break;
|
1994-05-04 13:50:11 +04:00
|
|
|
|
|
|
|
default:
|
|
|
|
panic("unp_attach");
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
1995-08-16 04:29:50 +04:00
|
|
|
unp = malloc(sizeof(*unp), M_PCB, M_NOWAIT);
|
|
|
|
if (unp == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (ENOBUFS);
|
Abolition of bcopy, ovbcopy, bcmp, and bzero, phase one.
bcopy(x, y, z) -> memcpy(y, x, z)
ovbcopy(x, y, z) -> memmove(y, x, z)
bcmp(x, y, z) -> memcmp(x, y, z)
bzero(x, y) -> memset(x, 0, y)
1998-08-04 08:03:10 +04:00
|
|
|
memset((caddr_t)unp, 0, sizeof(*unp));
|
1993-03-21 12:45:37 +03:00
|
|
|
unp->unp_socket = so;
|
1995-08-17 06:57:20 +04:00
|
|
|
so->so_pcb = unp;
|
2005-11-11 10:07:42 +03:00
|
|
|
nanotime(&unp->unp_ctime);
|
1993-03-21 12:45:37 +03:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1996-02-04 23:32:15 +03:00
|
|
|
/*
 * Tear down a PCB: release the bound vnode (if any), break any
 * connection, reset every datagram peer still referring to us, detach
 * from the socket, and free the PCB and its stored name.  When file
 * descriptors are in flight, the receive buffer is flushed here and
 * the garbage collector run before returning.
 */
void
unp_detach(struct unpcb *unp)
{

	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = 0;
		vrele(unp->unp_vnode);
		unp->unp_vnode = 0;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	/* Drop every datagram sender still connected to this socket. */
	while (unp->unp_refs)
		unp_drop(unp->unp_refs, ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = 0;
	if (unp->unp_addr)
		free(unp->unp_addr, M_SONAME);
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(unp->unp_socket);
		free(unp, M_PCB);
		unp_gc();
	} else
		free(unp, M_PCB);
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * Bind the socket backing `unp' to the filesystem name carried in
 * `nam': create a VSOCK vnode at that path (mode from the process
 * cmask), point it at the socket, and record the nul-terminated
 * sockaddr in the PCB.  Fails with EINVAL if already bound, or
 * EADDRINUSE if the path already exists.  Returns 0 or errno.
 */
int
unp_bind(struct unpcb *unp, struct mbuf *nam, struct lwp *l)
{
	struct sockaddr_un *sun;
	struct vnode *vp;
	struct mount *mp;
	struct vattr vattr;
	size_t addrlen;
	struct proc *p;
	int error;
	struct nameidata nd;

	if (unp->unp_vnode != 0)
		return (EINVAL);

	p = l->l_proc;
	/*
	 * Allocate the new sockaddr.  We have to allocate one
	 * extra byte so that we can ensure that the pathname
	 * is nul-terminated.
	 */
	addrlen = nam->m_len + 1;
	sun = malloc(addrlen, M_SONAME, M_WAITOK);
	m_copydata(nam, 0, nam->m_len, (caddr_t)sun);
	*(((char *)sun) + nam->m_len) = '\0';

 restart:
	NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT, UIO_SYSSPACE,
	    sun->sun_path, l);

	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if ((error = namei(&nd)) != 0)
		goto bad;
	vp = nd.ni_vp;
	/*
	 * Either the path already exists (vp != NULL) or the filesystem
	 * is suspending writes; undo the lookup, then fail or wait for
	 * write access and retry the whole lookup.
	 * NOTE(review): on the write-suspend path vp is NULL here and
	 * vrele(vp) is still called — confirm vrele tolerates NULL in
	 * this kernel revision.
	 */
	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		if (vp != NULL) {
			error = EADDRINUSE;
			goto bad;
		}
		error = vn_start_write(NULL, &mp,
		    V_WAIT | V_SLEEPONLY | V_PCATCH);
		if (error)
			goto bad;
		goto restart;
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS & ~(p->p_cwdi->cwdi_cmask);
	VOP_LEASE(nd.ni_dvp, l, l->l_cred, LEASE_WRITE);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	vn_finished_write(mp, 0);
	if (error)
		goto bad;
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addrlen = addrlen;
	unp->unp_addr = sun;
	VOP_UNLOCK(vp, 0);
	return (0);

 bad:
	free(sun, M_SONAME);
	return (error);
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * Connect socket `so' to the socket bound at the pathname in `nam':
 * look up the path, require a VSOCK vnode with write access, and for
 * connection-oriented sockets spawn a new server socket via
 * sonewconn(), copying the listener's address and flags into it.
 * Returns 0 or errno (ENOTSOCK, ECONNREFUSED, EPROTOTYPE, ...).
 */
int
unp_connect(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	struct sockaddr_un *sun;
	struct vnode *vp;
	struct socket *so2, *so3;
	struct unpcb *unp2, *unp3;
	size_t addrlen;
	int error;
	struct nameidata nd;

	/*
	 * Allocate a temporary sockaddr.  We have to allocate one extra
	 * byte so that we can ensure that the pathname is nul-terminated.
	 * When we establish the connection, we copy the other PCB's
	 * sockaddr to our own.
	 */
	addrlen = nam->m_len + 1;
	sun = malloc(addrlen, M_SONAME, M_WAITOK);
	m_copydata(nam, 0, nam->m_len, (caddr_t)sun);
	*(((char *)sun) + nam->m_len) = '\0';

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, sun->sun_path, l);

	if ((error = namei(&nd)) != 0)
		goto bad2;
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	/* Connecting requires write permission on the socket node. */
	if ((error = VOP_ACCESS(vp, VWRITE, l->l_cred, l)) != 0)
		goto bad;
	so2 = vp->v_socket;
	if (so2 == 0) {
		/* Node exists but no socket is bound to it anymore. */
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		/* Stream: the listener must accept; clone a server socket. */
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr) {
			unp3->unp_addr = malloc(unp2->unp_addrlen,
			    M_SONAME, M_WAITOK);
			memcpy(unp3->unp_addr, unp2->unp_addr,
			    unp2->unp_addrlen);
			unp3->unp_addrlen = unp2->unp_addrlen;
		}
		unp3->unp_flags = unp2->unp_flags;
		/* From here on, connect to the freshly spawned socket. */
		so2 = so3;
	}
	error = unp_connect2(so, so2, PRU_CONNECT);
 bad:
	vput(vp);
 bad2:
	free(sun, M_SONAME);
	return (error);
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * unp_connect2: tie two unix-domain sockets together at the pcb level.
 *
 * On success each pcb's unp_conn points at its peer (for SOCK_STREAM;
 * for SOCK_DGRAM only the connecting side records the peer, and is
 * additionally threaded onto the peer's unp_refs list so the peer can
 * find everyone pointing at it).
 *
 * => so:  the connecting socket
 * => so2: the socket being connected to
 * => req: the PRU_* request that triggered this; PRU_CONNECT combined
 *         with UNP_CONNWAIT on either pcb leaves the caller in the
 *         "connecting" state instead of completing immediately.
 * => returns 0, or EPROTOTYPE on a socket-type mismatch.
 */
int
unp_connect2(struct socket *so, struct socket *so2, int req)
{
	struct unpcb *unp = sotounpcb(so);
	struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		/* Insert at the head of the peer's reference list. */
		unp->unp_nextref = unp2->unp_refs;
		unp2->unp_refs = unp;
		soisconnected(so);
		break;

	case SOCK_STREAM:
		unp2->unp_conn = unp;
		/*
		 * UNP_CONNWAIT (on either side) defers the connecting
		 * side's transition to "connected" until the connection
		 * is accepted; only a PRU_CONNECT request honors it.
		 */
		if (req == PRU_CONNECT &&
		    ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
			soisconnecting(so);
		else
			soisconnected(so);
		/* The passive side is always marked connected here. */
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * unp_disconnect: sever the connection between a unix-domain pcb and
 * its peer, undoing what unp_connect2() set up.
 *
 * For SOCK_DGRAM the pcb is unlinked from the peer's singly-linked
 * unp_refs list (panics if the pcb is not found on it, since that
 * indicates corrupted state).  For SOCK_STREAM both sides are marked
 * disconnected and the back-pointer is cleared.
 */
void
unp_disconnect(struct unpcb *unp)
{
	struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == 0)
		return;		/* not connected; nothing to do */
	unp->unp_conn = 0;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		/* Unlink ourselves from the peer's reference list. */
		if (unp2->unp_refs == unp)
			unp2->unp_refs = unp->unp_nextref;
		else {
			/* Walk the list looking for our predecessor. */
			unp2 = unp2->unp_refs;
			for (;;) {
				if (unp2 == 0)
					panic("unp_disconnect");
				if (unp2->unp_nextref == unp)
					break;
				unp2 = unp2->unp_nextref;
			}
			unp2->unp_nextref = unp->unp_nextref;
		}
		unp->unp_nextref = 0;
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}
|
|
|
|
|
|
|
|
#ifdef notdef
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_abort(struct unpcb *unp)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
|
|
|
unp_detach(unp);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
void
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_shutdown(struct unpcb *unp)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
|
|
|
struct socket *so;
|
|
|
|
|
|
|
|
if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn &&
|
|
|
|
(so = unp->unp_conn->unp_socket))
|
|
|
|
socantrcvmore(so);
|
|
|
|
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * unp_drop: abort a connection on the given pcb with the supplied
 * error code, disconnecting it from any peer.
 *
 * If the socket has a listen-queue head (so_head non-NULL — i.e. it
 * appears to be an embryonic connection still queued on a listening
 * socket; NOTE(review): confirm against the accept path), the pcb and
 * its bound address are freed here and the socket itself is released
 * with sofree().  so_pcb is cleared first so the freed pcb cannot be
 * reached through the socket afterwards.
 */
void
unp_drop(struct unpcb *unp, int errno)
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		so->so_pcb = 0;
		sofree(so);
		/* unp is still valid here; only so_pcb was cleared. */
		if (unp->unp_addr)
			free(unp->unp_addr, M_SONAME);
		free(unp, M_PCB);
	}
}
|
|
|
|
|
|
|
|
#ifdef notdef
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_drain(void)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * unp_externalize: convert an SCM_RIGHTS control message from its
 * in-kernel form (an array of struct file pointers) into integer file
 * descriptors in the receiving process's descriptor table.
 *
 * A temporary integer array (fdp) holds the new descriptor indexes so
 * the allocation loop can be undone and retried in full if the table
 * must be expanded.  On any error, all passed files are released via
 * unp_discard() instead.
 *
 * => rights: mbuf holding the cmsghdr; rewritten in place on success
 *            (pointers replaced by ints, lengths trimmed).
 * => l:      receiving lwp.
 * => returns 0, EPERM (chroot escape attempt), or EMSGSIZE.
 */
int
unp_externalize(struct mbuf *rights, struct lwp *l)
{
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	struct proc *p = l->l_proc;
	int i, *fdp;
	struct file **rp;
	struct file *fp;
	int nfds, error = 0;

	/* Number of file pointers packed after the (aligned) header. */
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(struct file *);
	rp = (struct file **)CMSG_DATA(cm);

	fdp = malloc(nfds * sizeof(int), M_TEMP, M_WAITOK);

	/* Make sure the recipient should be able to see the descriptors.. */
	if (p->p_cwdi->cwdi_rdir != NULL) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < nfds; i++) {
			fp = *rp++;
			/*
			 * If we are in a chroot'ed directory, and
			 * someone wants to pass us a directory, make
			 * sure it's inside the subtree we're allowed
			 * to access.
			 */
			if (fp->f_type == DTYPE_VNODE) {
				struct vnode *vp = (struct vnode *)fp->f_data;
				if ((vp->v_type == VDIR) &&
				    !vn_isunder(vp, p->p_cwdi->cwdi_rdir, l)) {
					error = EPERM;
					break;
				}
			}
		}
	}

 restart:
	rp = (struct file **)CMSG_DATA(cm);
	if (error != 0) {
		/* Failure: discard every passed file and bail out. */
		for (i = 0; i < nfds; i++) {
			fp = *rp;
			/*
			 * zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = 0;
			unp_discard(fp);
		}
		goto out;
	}

	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new descriptors.
	 */
	for (i = 0; i < nfds; i++) {
		fp = *rp++;
		if ((error = fdalloc(p, 0, &fdp[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			for (--i; i >= 0; i--)
				fdremove(p->p_fd, fdp[i]);

			if (error == ENOSPC) {
				/* Grow the table and retry from scratch. */
				fdexpand(p);
				error = 0;
			} else {
				/*
				 * This is the error that has historically
				 * been returned, and some callers may
				 * expect it.
				 */
				error = EMSGSIZE;
			}
			goto restart;
		}

		/*
		 * Make the slot reference the descriptor so that
		 * fdalloc() works properly.. We finalize it all
		 * in the loop below.
		 */
		p->p_fd->fd_ofiles[fdp[i]] = fp;
	}

	/*
	 * Now that adding them has succeeded, update all of the
	 * descriptor passing state.
	 */
	rp = (struct file **)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fp = *rp++;
		fp->f_msgcount--;
		unp_rights--;
	}

	/*
	 * Copy temporary array to message and adjust length, in case of
	 * transition from large struct file pointers to ints.
	 */
	memcpy(CMSG_DATA(cm), fdp, nfds * sizeof(int));
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
	rights->m_len = CMSG_SPACE(nfds * sizeof(int));
 out:
	free(fdp, M_TEMP);
	return (error);
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * unp_internalize: convert an SCM_RIGHTS control message from the
 * sender's integer file descriptors into in-kernel struct file
 * pointers, taking a reference (f_count, f_msgcount) on each file.
 * The inverse of unp_externalize().
 *
 * => control: mbuf holding the cmsghdr; rewritten in place when the
 *             pointers fit, otherwise moved to freshly-allocated
 *             external storage attached with MEXTADD.
 * => l:       sending lwp.
 * => returns 0, EINVAL (malformed header), EBADF (bad descriptor),
 *            or E2BIG.
 */
int
unp_internalize(struct mbuf *control, struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct filedesc *fdescp = p->p_fd;
	struct cmsghdr *newcm, *cm = mtod(control, struct cmsghdr *);
	struct file **rp, **files;
	struct file *fp;
	int i, fd, *fdp;
	int nfds;
	u_int neededspace;

	/* Sanity check the control message header */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    cm->cmsg_len != control->m_len)
		return (EINVAL);

	/* Verify that the file descriptors are valid */
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof(int);
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fd = *fdp++;
		if ((fp = fd_getfile(fdescp, fd)) == NULL)
			return (EBADF);
		/* fd_getfile returns the file locked; we only probed. */
		simple_unlock(&fp->f_slock);
	}

	/* Make sure we have room for the struct file pointers */
	neededspace = CMSG_SPACE(nfds * sizeof(struct file *)) -
	    control->m_len;
	if (neededspace > M_TRAILINGSPACE(control)) {

		/* allocate new space and copy header into it */
		newcm = malloc(
		    CMSG_SPACE(nfds * sizeof(struct file *)),
		    M_MBUF, M_WAITOK);
		if (newcm == NULL)
			return (E2BIG);
		memcpy(newcm, cm, sizeof(struct cmsghdr));
		files = (struct file **)CMSG_DATA(newcm);
	} else {
		/* we can convert in-place */
		newcm = NULL;
		files = (struct file **)CMSG_DATA(cm);
	}

	/*
	 * Transform the file descriptors into struct file pointers, in
	 * reverse order so that if pointers are bigger than ints, the
	 * int won't get overwritten until we're done reading it.
	 */
	fdp = (int *)CMSG_DATA(cm) + nfds - 1;
	rp = files + nfds - 1;
	for (i = 0; i < nfds; i++) {
		fp = fdescp->fd_ofiles[*fdp--];
		simple_lock(&fp->f_slock);
#ifdef DIAGNOSTIC
		if (fp->f_iflags & FIF_WANTCLOSE)
			panic("unp_internalize: file already closed");
#endif
		*rp-- = fp;
		/* Hold the file open while it is in flight. */
		fp->f_count++;
		fp->f_msgcount++;
		simple_unlock(&fp->f_slock);
		unp_rights++;
	}

	if (newcm) {
		/* Swap the mbuf's storage for the new external buffer. */
		if (control->m_flags & M_EXT)
			MEXTREMOVE(control);
		MEXTADD(control, newcm,
		    CMSG_SPACE(nfds * sizeof(struct file *)),
		    M_MBUF, NULL, NULL);
		cm = newcm;
	}

	/* adjust message & mbuf to note amount of space actually used. */
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(struct file *));
	control->m_len = CMSG_SPACE(nfds * sizeof(struct file *));

	return (0);
}
|
|
|
|
|
1998-01-08 01:57:09 +03:00
|
|
|
/*
 * unp_addsockcred: build an SCM_CREDS control message carrying the
 * sending lwp's credentials (uid/euid/gid/egid and the group list,
 * taken from l->l_cred via kauth) and append it to the given control
 * chain.
 *
 * => returns the (possibly new) head of the control chain; on
 *    allocation failure the original chain is returned unchanged.
 */
struct mbuf *
unp_addsockcred(struct lwp *l, struct mbuf *control)
{
	struct cmsghdr *cmp;
	struct sockcred *sc;
	struct mbuf *m, *n;
	int len, space, i;

	/* Sizes depend on the number of supplementary groups. */
	len = CMSG_LEN(SOCKCREDSIZE(kauth_cred_ngroups(l->l_cred)));
	space = CMSG_SPACE(SOCKCREDSIZE(kauth_cred_ngroups(l->l_cred)));

	m = m_get(M_WAIT, MT_CONTROL);
	if (space > MLEN) {
		/* Doesn't fit in an ordinary mbuf: attach storage. */
		if (space > MCLBYTES)
			MEXTMALLOC(m, space, M_WAITOK);
		else
			m_clget(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* Best effort: no room, send without creds. */
			m_free(m);
			return (control);
		}
	}

	m->m_len = space;
	m->m_next = NULL;
	cmp = mtod(m, struct cmsghdr *);
	sc = (struct sockcred *)CMSG_DATA(cmp);
	cmp->cmsg_len = len;
	cmp->cmsg_level = SOL_SOCKET;
	cmp->cmsg_type = SCM_CREDS;
	sc->sc_uid = kauth_cred_getuid(l->l_cred);
	sc->sc_euid = kauth_cred_geteuid(l->l_cred);
	sc->sc_gid = kauth_cred_getgid(l->l_cred);
	sc->sc_egid = kauth_cred_getegid(l->l_cred);
	sc->sc_ngroups = kauth_cred_ngroups(l->l_cred);
	for (i = 0; i < sc->sc_ngroups; i++)
		sc->sc_groups[i] = kauth_cred_group(l->l_cred, i);

	/*
	 * If a control message already exists, append us to the end.
	 */
	if (control != NULL) {
		for (n = control; n->m_next != NULL; n = n->m_next)
			;
		n->m_next = m;
	} else
		control = m;

	return (control);
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
 * GC state: unp_defer counts descriptors flagged FDEFER for rescan;
 * unp_gcing is a re-entrancy guard for unp_gc().
 */
int unp_defer, unp_gcing;

extern struct domain unixdomain;
|
|
|
|
|
1999-03-22 20:54:38 +03:00
|
|
|
/*
|
|
|
|
* Comment added long after the fact explaining what's going on here.
|
|
|
|
* Do a mark-sweep GC of file descriptors on the system, to free up
|
|
|
|
* any which are caught in flight to an about-to-be-closed socket.
|
|
|
|
*
|
|
|
|
* Traditional mark-sweep gc's start at the "root", and mark
|
|
|
|
* everything reachable from the root (which, in our case would be the
|
|
|
|
* process table). The mark bits are cleared during the sweep.
|
|
|
|
*
|
|
|
|
* XXX For some inexplicable reason (perhaps because the file
|
|
|
|
* descriptor tables used to live in the u area which could be swapped
|
|
|
|
* out and thus hard to reach), we do multiple scans over the set of
|
|
|
|
 * descriptors, using *two* mark bits per object (DEFER and MARK).
|
|
|
|
* Whenever we find a descriptor which references other descriptors,
|
|
|
|
* the ones it references are marked with both bits, and we iterate
|
|
|
|
* over the whole file table until there are no more DEFER bits set.
|
|
|
|
* We also make an extra pass *before* the GC to clear the mark bits,
|
|
|
|
* which could have been cleared at almost no cost during the previous
|
|
|
|
* sweep.
|
|
|
|
*
|
|
|
|
* XXX MP: this needs to run with locks such that no other thread of
|
|
|
|
* control can create or destroy references to file descriptors. it
|
|
|
|
* may be necessary to defer the GC until later (when the locking
|
|
|
|
* situation is more hospitable); it may be necessary to push this
|
|
|
|
* into a separate thread.
|
|
|
|
*/
|
1993-06-27 10:01:27 +04:00
|
|
|
void
unp_gc(void)
{
	struct file *fp, *nextfp;
	struct socket *so, *so1;
	struct file **extra_ref, **fpp;
	int nunref, i;

	/* A nested invocation (e.g. via closef below) must not recurse. */
	if (unp_gcing)
		return;
	unp_gcing = 1;
	unp_defer = 0;

	/* Clear mark bits */
	LIST_FOREACH(fp, &filehead, f_list)
		fp->f_flag &= ~(FMARK|FDEFER);

	/*
	 * Iterate over the set of descriptors, marking ones believed
	 * (based on refcount) to be referenced from a process, and
	 * marking for rescan descriptors which are queued on a socket.
	 */
	do {
		LIST_FOREACH(fp, &filehead, f_list) {
			if (fp->f_flag & FDEFER) {
				/* Queued by unp_mark; scan it this pass. */
				fp->f_flag &= ~FDEFER;
				unp_defer--;
#ifdef DIAGNOSTIC
				if (fp->f_count == 0)
					panic("unp_gc: deferred unreferenced socket");
#endif
			} else {
				if (fp->f_count == 0)
					continue;
				if (fp->f_flag & FMARK)
					continue;
				/*
				 * All references are in-flight messages:
				 * not provably reachable, so don't mark yet.
				 */
				if (fp->f_count == fp->f_msgcount)
					continue;
			}
			fp->f_flag |= FMARK;

			/* Only unix-domain sockets can carry descriptors. */
			if (fp->f_type != DTYPE_SOCKET ||
			    (so = (struct socket *)fp->f_data) == 0)
				continue;
			if (so->so_proto->pr_domain != &unixdomain ||
			    (so->so_proto->pr_flags&PR_RIGHTS) == 0)
				continue;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			unp_scan(so->so_rcv.sb_mb, unp_mark, 0);
			/*
			 * Mark descriptors referenced from sockets queued
			 * on the accept queue as well.
			 */
			if (so->so_options & SO_ACCEPTCONN) {
				TAILQ_FOREACH(so1, &so->so_q0, so_qe) {
					unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
				}
				TAILQ_FOREACH(so1, &so->so_q, so_qe) {
					unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
				}
			}

		}
	} while (unp_defer);
	/*
	 * Sweep pass.  Find unmarked descriptors, and free them.
	 *
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code unp_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.   Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, if the inaccessible descriptor is a
	 * socket, we call sorflush in case it is a Unix domain
	 * socket.  After we destroy all the rights carried in
	 * messages, we do a last closef to get rid of our extra
	 * reference.  This is the last close, and the unp_detach etc
	 * will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	extra_ref = malloc(nfiles * sizeof(struct file *), M_FILE, M_WAITOK);
	for (nunref = 0, fp = LIST_FIRST(&filehead), fpp = extra_ref; fp != 0;
	    fp = nextfp) {
		nextfp = LIST_NEXT(fp, f_list);
		simple_lock(&fp->f_slock);
		/* Unmarked and referenced only by in-flight messages. */
		if (fp->f_count != 0 &&
		    fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
			*fpp++ = fp;
			nunref++;
			fp->f_count++;	/* the extra reference */
		}
		simple_unlock(&fp->f_slock);
	}
	/* First pass: flush rights queued on each unreachable socket. */
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
		fp = *fpp;
		simple_lock(&fp->f_slock);
		FILE_USE(fp);
		if (fp->f_type == DTYPE_SOCKET)
			sorflush((struct socket *)fp->f_data);
		FILE_UNUSE(fp, NULL);
	}
	/* Second pass: drop the extra references, destroying the files. */
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
		fp = *fpp;
		simple_lock(&fp->f_slock);
		FILE_USE(fp);
		(void) closef(fp, (struct lwp *)0);
	}
	free((caddr_t)extra_ref, M_FILE);
	unp_gcing = 0;
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
void
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_dispose(struct mbuf *m)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
1994-05-04 13:50:11 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (m)
|
1999-03-22 20:54:38 +03:00
|
|
|
unp_scan(m, unp_discard, 1);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * Walk a chain of records (linked by m_nextpkt) looking for SCM_RIGHTS
 * control messages and apply "op" (unp_mark or unp_discard) to each
 * file pointer found.  If "discard" is set, each pointer is zeroed in
 * the message as it is handed to "op".
 */
void
unp_scan(struct mbuf *m0, void (*op)(struct file *), int discard)
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				/* Number of file pointers in the payload. */
				qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm)))
				    / sizeof(struct file *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++) {
					struct file *fp = *rp;
					if (discard)
						*rp = 0;
					(*op)(fp);
					rp++;
				}
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
void
|
2004-04-19 01:48:15 +04:00
|
|
|
unp_mark(struct file *fp)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
1999-03-22 20:54:38 +03:00
|
|
|
if (fp == NULL)
|
|
|
|
return;
|
2005-02-27 00:34:55 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (fp->f_flag & FMARK)
|
|
|
|
return;
|
1999-03-22 20:54:38 +03:00
|
|
|
|
|
|
|
/* If we're already deferred, don't screw up the defer count */
|
|
|
|
if (fp->f_flag & FDEFER)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Minimize the number of deferrals... Sockets are the only
|
|
|
|
* type of descriptor which can hold references to another
|
|
|
|
* descriptor, so just mark other descriptors, and defer
|
|
|
|
* unmarked sockets for the next pass.
|
|
|
|
*/
|
|
|
|
if (fp->f_type == DTYPE_SOCKET) {
|
|
|
|
unp_defer++;
|
|
|
|
if (fp->f_count == 0)
|
|
|
|
panic("unp_mark: queued unref");
|
|
|
|
fp->f_flag |= FDEFER;
|
|
|
|
} else {
|
|
|
|
fp->f_flag |= FMARK;
|
|
|
|
}
|
|
|
|
return;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
1993-06-27 10:01:27 +04:00
|
|
|
/*
 * unp_scan() callback used by unp_dispose(): drop the in-flight
 * reference a control message held on "fp", closing the file if it
 * was the last reference.
 */
void
unp_discard(struct file *fp)
{
	if (fp == NULL)
		return;
	simple_lock(&fp->f_slock);
	fp->f_usecount++;	/* i.e. FILE_USE(fp) sans locking */
	fp->f_msgcount--;	/* one fewer in-flight reference */
	simple_unlock(&fp->f_slock);
	unp_rights--;
	/* closef() consumes the use count taken above. */
	(void) closef(fp, (struct lwp *)0);
}
|