Continued updating slirp code from libslirp (no functional changes).
parent 47f9e4bc12
commit b0f238af76
@@ -27,7 +27,9 @@ extern int slirp_debug;

#else

#define DEBUG_CALL(...)
#define DEBUG_CALL(name)
#define DEBUG_VERBOSE_CALL(name)
#define DEBUG_RAW_CALL(...)
#define DEBUG_ARG(...)
#define DEBUG_ARGS(...)
#define DEBUG_MISC(...)
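For context, the branch above is the non-debug case, where every macro expands to nothing. When SLIRP_DEBUG is defined, these macros are normally gated on the slirp_debug flag bits (DBG_CALL, DBG_MISC, DBG_ERROR, set further down in this commit) and print to stderr. A minimal sketch of what the debug-enabled counterparts can look like; the flag values and exact definitions in this tree may differ:

/* Hypothetical debug-enabled counterparts of the stubs above, shown only to
 * illustrate how the macros are used; the real definitions may differ. */
#include <stdio.h>

extern int slirp_debug;

#define DBG_CALL  0x1   /* assumed flag values */
#define DBG_MISC  0x2
#define DBG_ERROR 0x4

#define DEBUG_CALL(fmt, ...)                                \
    do {                                                    \
        if (slirp_debug & DBG_CALL) {                       \
            fprintf(stderr, fmt "...\n", ##__VA_ARGS__);    \
        }                                                   \
    } while (0)

#define DEBUG_ARG(fmt, ...)                                 \
    do {                                                    \
        if (slirp_debug & DBG_CALL) {                       \
            fprintf(stderr, " " fmt "\n", ##__VA_ARGS__);   \
        }                                                   \
    } while (0)

#define DEBUG_MISC(fmt, ...)                                \
    do {                                                    \
        if (slirp_debug & DBG_MISC) {                       \
            fprintf(stderr, fmt "\n", ##__VA_ARGS__);       \
        }                                                   \
    } while (0)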
@@ -48,89 +48,90 @@ void if_output(struct socket *so, struct mbuf *ifm)
int on_fastq = 1;

DEBUG_CALL("if_output");
DEBUG_ARG("so = %lx", (long)so);
DEBUG_ARG("ifm = %lx", (long)ifm);
DEBUG_ARG("so = %p", so);
DEBUG_ARG("ifm = %p", ifm);

/*
* First remove the mbuf from m_usedlist,
* since we're gonna use m_next and m_prev ourselves
* XXX Shouldn't need this, gotta change dtom() etc.
*/
if (ifm->m_flags & M_USEDLIST) {
slirp_remque(ifm);
ifm->m_flags &= ~M_USEDLIST;
}
/*
* First remove the mbuf from m_usedlist,
* since we're gonna use m_next and m_prev ourselves
* XXX Shouldn't need this, gotta change dtom() etc.
*/
if (ifm->m_flags & M_USEDLIST) {
slirp_remque(ifm);
ifm->m_flags &= ~M_USEDLIST;
}

/*
* See if there's already a batchq list for this session.
* This can include an interactive session, which should go on fastq,
* but gets too greedy... hence it'll be downgraded from fastq to batchq.
* We mustn't put this packet back on the fastq (or we'll send it out of order)
* XXX add cache here?
*/
for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq;
ifq = ifq->ifq_prev) {
if (so == ifq->ifq_so) {
/* A match! */
ifm->ifq_so = so;
ifs_insque(ifm, ifq->ifs_prev);
goto diddit;
}
}

/* No match, check which queue to put it on */
if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
ifq = slirp->if_fastq.ifq_prev;
on_fastq = 1;
/*
* Check if this packet is a part of the last
* packet's session
*/
if (ifq->ifq_so == so) {
ifm->ifq_so = so;
ifs_insque(ifm, ifq->ifs_prev);
goto diddit;
}
} else {
ifq = slirp->if_batchq.ifq_prev;
/* Set next_m if the queue was empty so far */
if (slirp->next_m == &slirp->if_batchq) {
slirp->next_m = ifm;
}
/*
* See if there's already a batchq list for this session.
* This can include an interactive session, which should go on fastq,
* but gets too greedy... hence it'll be downgraded from fastq to batchq.
* We mustn't put this packet back on the fastq (or we'll send it out of
* order)
* XXX add cache here?
*/
for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq;
ifq = ifq->ifq_prev) {
if (so == ifq->ifq_so) {
/* A match! */
ifm->ifq_so = so;
ifs_insque(ifm, ifq->ifs_prev);
goto diddit;
}
}

/* Create a new doubly linked list for this session */
ifm->ifq_so = so;
ifs_init(ifm);
slirp_insque(ifm, ifq);
/* No match, check which queue to put it on */
if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
ifq = slirp->if_fastq.ifq_prev;
on_fastq = 1;
/*
* Check if this packet is a part of the last
* packet's session
*/
if (ifq->ifq_so == so) {
ifm->ifq_so = so;
ifs_insque(ifm, ifq->ifs_prev);
goto diddit;
}
} else {
ifq = slirp->if_batchq.ifq_prev;
/* Set next_m if the queue was empty so far */
if (slirp->next_m == &slirp->if_batchq) {
slirp->next_m = ifm;
}
}

/* Create a new doubly linked list for this session */
ifm->ifq_so = so;
ifs_init(ifm);
slirp_insque(ifm, ifq);

diddit:
if (so) {
/* Update *_queued */
so->so_queued++;
so->so_nqueued++;
/*
* Check if the interactive session should be downgraded to
* the batchq. A session is downgraded if it has queued 6
* packets without pausing, and at least 3 of those packets
* have been sent over the link
* (XXX These are arbitrary numbers, probably not optimal..)
*/
if (on_fastq && ((so->so_nqueued >= 6) &&
(so->so_nqueued - so->so_queued) >= 3)) {
if (so) {
/* Update *_queued */
so->so_queued++;
so->so_nqueued++;
/*
* Check if the interactive session should be downgraded to
* the batchq. A session is downgraded if it has queued 6
* packets without pausing, and at least 3 of those packets
* have been sent over the link
* (XXX These are arbitrary numbers, probably not optimal..)
*/
if (on_fastq &&
((so->so_nqueued >= 6) && (so->so_nqueued - so->so_queued) >= 3)) {

/* Remove from current queue... */
slirp_remque(ifm->ifs_next);
/* Remove from current queue... */
slirp_remque(ifm->ifs_next);

/* ...And insert in the new. That'll teach ya! */
slirp_insque(ifm->ifs_next, &slirp->if_batchq);
}
}
/* ...And insert in the new. That'll teach ya! */
slirp_insque(ifm->ifs_next, &slirp->if_batchq);
}
}

/*
* This prevents us from malloc()ing too many mbufs
*/
if_start(ifm->slirp);
/*
* This prevents us from malloc()ing too many mbufs
*/
if_start(ifm->slirp);
}

void if_start(Slirp *slirp)
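As the comments in if_output() above explain, a session on the fast queue is demoted to the batch queue once it has queued 6 packets without pausing and at least 3 of them have already been sent (so_nqueued counts packets queued in a row, so_queued those still waiting, so the difference is how many have already gone out on the link). A standalone restatement of that test, purely illustrative and not a helper that exists in the source:

/* Illustrative restatement of the fastq -> batchq downgrade check used in
 * if_output(); this helper does not exist in the source. */
static inline int should_downgrade(int on_fastq, int so_nqueued, int so_queued)
{
    return on_fastq && so_nqueued >= 6 && (so_nqueued - so_queued) >= 3;
}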
@@ -139,7 +140,7 @@ void if_start(Slirp *slirp)
bool from_batchq, next_from_batchq;
struct mbuf *ifm, *ifm_next, *ifqt;

// DEBUG_CALL("if_start"); // Disabled to avoid flooding output
DEBUG_VERBOSE_CALL("if_start");

if (slirp->if_start_busy) {
return;
@@ -197,9 +198,7 @@ void if_start(Slirp *slirp)

slirp_insque(next, ifqt);
ifs_remque(ifm);

if (!from_batchq) {
/* Next packet in fastq is from the same session */
ifm_next = next;
next_from_batchq = false;
} else if (slirp->next_m == &slirp->if_batchq) {
@@ -6,14 +6,18 @@
#ifndef IF_H
#define IF_H

#define IF_COMPRESS 0x01 /* We want compression */
#define IF_NOCOMPRESS 0x02 /* Do not do compression */
#define IF_AUTOCOMP 0x04 /* Autodetect (default) */
#define IF_NOCIDCOMP 0x08 /* CID compression */
#define IF_COMPRESS 0x01 /* We want compression */
#define IF_NOCOMPRESS 0x02 /* Do not do compression */
#define IF_AUTOCOMP 0x04 /* Autodetect (default) */
#define IF_NOCIDCOMP 0x08 /* CID compression */

#define IF_MTU 1500
#define IF_MRU 1500
#define IF_COMP IF_AUTOCOMP /* Flags for compression */
#define IF_MTU_DEFAULT 1500
#define IF_MTU_MIN 68
#define IF_MTU_MAX 65521
#define IF_MRU_DEFAULT 1500
#define IF_MRU_MIN 68
#define IF_MRU_MAX 65521
#define IF_COMP IF_AUTOCOMP /* Flags for compression */

/* 2 for alignment, 14 for ethernet */
#define IF_MAXLINKHDR (2 + ETH_HLEN)
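The single IF_MTU/IF_MRU constants are replaced here by default/min/max triples, matching libslirp, where the MTU and MRU become configurable and are validated against these bounds. A hypothetical validation helper built only from the constants defined above (the function itself is not part of this tree):

/* Hypothetical range check for a requested MTU, using the constants above;
 * 68 is the minimum MTU IPv4 requires a link to support. */
static int if_mtu_valid(int mtu)
{
    return mtu >= IF_MTU_MIN && mtu <= IF_MTU_MAX;
}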
@@ -79,7 +79,7 @@ ip_output(struct socket *so, struct mbuf *m0)
/*
* If small enough for interface, can just send directly.
*/
if ((uint16_t)ip->ip_len <= IF_MTU) {
if ((uint16_t)ip->ip_len <= IF_MTU_DEFAULT) {
ip->ip_len = htons((uint16_t)ip->ip_len);
ip->ip_off = htons((uint16_t)ip->ip_off);
ip->ip_sum = 0;
@@ -98,7 +98,7 @@ ip_output(struct socket *so, struct mbuf *m0)
goto bad;
}

len = (IF_MTU - hlen) &~ 7; /* ip databytes per packet */
len = (IF_MTU_DEFAULT - hlen) &~ 7; /* ip databytes per packet */
if (len < 8) {
error = -1;
goto bad;
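The fragmentation path sizes each fragment's payload as the MTU minus the IP header length, rounded down to a multiple of 8, because fragment offsets are carried in 8-byte units. A quick worked example assuming a plain 20-byte IPv4 header with no options:

#include <stdio.h>

int main(void)
{
    /* Worked example of the computation above: IF_MTU_DEFAULT = 1500 and an
     * option-less IPv4 header (hlen = 20) give 1480 data bytes per fragment,
     * which is already a multiple of 8. */
    int hlen = 20;
    int len = (1500 - hlen) & ~7;
    printf("ip databytes per packet: %d\n", len);
    return 0;
}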
@@ -23,7 +23,7 @@
* Find a nice value for msize
* XXX if_maxlinkhdr already in mtu
*/
#define SLIRP_MSIZE (IF_MTU + IF_MAXLINKHDR + offsetof(struct mbuf, m_dat) + 6)
#define SLIRP_MSIZE (IF_MTU_DEFAULT + IF_MAXLINKHDR + offsetof(struct mbuf, m_dat) + 6)

void
m_init(Slirp *slirp)
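SLIRP_MSIZE only changes its spelling here: the mbuf buffer is still sized as the default MTU plus the link header, the offset of the data area within struct mbuf, and 6 bytes of slack. A rough back-of-the-envelope, treating the mbuf header offset as a build-dependent unknown:

#include <stdio.h>

int main(void)
{
    /* Sketch of the SLIRP_MSIZE arithmetic: 1500 (IF_MTU_DEFAULT)
     * + 16 (IF_MAXLINKHDR = 2 + ETH_HLEN) + 6 bytes of slack = 1522,
     * plus offsetof(struct mbuf, m_dat), which depends on the mbuf layout. */
    int base = 1500 + (2 + 14) + 6;
    printf("SLIRP_MSIZE = %d + offsetof(struct mbuf, m_dat)\n", base);
    return 0;
}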
@@ -16,28 +16,23 @@
int slirp_debug = DBG_CALL|DBG_MISC|DBG_ERROR;
#endif

struct quehead {
struct quehead *qh_link;
struct quehead *qh_rlink;
};

void slirp_insque(void *a, void *b)
{
struct quehead *element = (struct quehead *) a;
struct quehead *head = (struct quehead *) b;
element->qh_link = head->qh_link;
head->qh_link = (struct quehead *)element;
element->qh_rlink = (struct quehead *)head;
((struct quehead *)(element->qh_link))->qh_rlink
= (struct quehead *)element;
struct slirp_quehead *element = (struct slirp_quehead *) a;
struct slirp_quehead *head = (struct slirp_quehead *) b;
element->qh_link = head->qh_link;
head->qh_link = (struct slirp_quehead *)element;
element->qh_rlink = (struct slirp_quehead *)head;
((struct slirp_quehead *)(element->qh_link))->qh_rlink =
(struct slirp_quehead *)element;
}

void slirp_remque(void *a)
{
struct quehead *element = (struct quehead *) a;
((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
element->qh_rlink = NULL;
struct slirp_quehead *element = (struct slirp_quehead *) a;
((struct slirp_quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
((struct slirp_quehead *)(element->qh_rlink))->qh_link = element->qh_link;
element->qh_rlink = NULL;
}

int add_exec(struct ex_list **ex_ptr, int do_pty, char *exec,
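slirp_insque() and slirp_remque(), renamed here to use the slirp_quehead type, implement a circular doubly linked list keyed off a quehead placed at the start of each element. A minimal self-contained usage sketch; the type and functions are copied from the patch above, while the node type and main() are made up for illustration:

#include <stdio.h>

/* Copied from the patch above so the sketch compiles on its own. */
struct slirp_quehead {
    struct slirp_quehead *qh_link;
    struct slirp_quehead *qh_rlink;
};

void slirp_insque(void *a, void *b)
{
    struct slirp_quehead *element = (struct slirp_quehead *)a;
    struct slirp_quehead *head = (struct slirp_quehead *)b;
    element->qh_link = head->qh_link;
    head->qh_link = (struct slirp_quehead *)element;
    element->qh_rlink = (struct slirp_quehead *)head;
    ((struct slirp_quehead *)(element->qh_link))->qh_rlink =
        (struct slirp_quehead *)element;
}

void slirp_remque(void *a)
{
    struct slirp_quehead *element = (struct slirp_quehead *)a;
    ((struct slirp_quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
    ((struct slirp_quehead *)(element->qh_rlink))->qh_link = element->qh_link;
    element->qh_rlink = NULL;
}

/* Hypothetical element type: the quehead must come first so the void* casts
 * in slirp_insque()/slirp_remque() land on it. */
struct node {
    struct slirp_quehead qh;
    int value;
};

int main(void)
{
    struct slirp_quehead head = { &head, &head };  /* empty circular queue */
    struct node n = { { NULL, NULL }, 42 };

    slirp_insque(&n, &head);                       /* n is now head.qh_link */
    printf("%d\n", ((struct node *)head.qh_link)->value);  /* prints 42 */
    slirp_remque(&n);                              /* queue is empty again */
    return 0;
}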
@@ -47,8 +47,17 @@ struct emu_t {
struct emu_t *next;
};

void slirp_insque(void *, void *);
void slirp_remque(void *);
struct slirp_quehead {
struct slirp_quehead *qh_link;
struct slirp_quehead *qh_rlink;
};

/* Insert element a into queue b */
void slirp_insque(void *a, void *b);

/* Remove element a from its queue */
void slirp_remque(void *a);

int add_exec(struct ex_list **, int, char *, struct in_addr, int);
int fork_exec(struct socket *so, const char *ex, int do_pty);
@@ -1471,7 +1471,7 @@ tcp_mss(struct tcpcb *tp, u_int offer)
DEBUG_ARG("tp = %lx", (long)tp);
DEBUG_ARG("offer = %d", offer);

mss = min(IF_MTU, IF_MRU) - sizeof(struct tcpiphdr);
mss = min(IF_MTU_DEFAULT, IF_MRU_DEFAULT) - sizeof(struct tcpiphdr);
if (offer)
mss = min(mss, (int)offer);
mss = max(mss, 32);
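With the defaults above, min(IF_MTU_DEFAULT, IF_MRU_DEFAULT) is 1500, so the starting MSS is 1500 minus sizeof(struct tcpiphdr), capped by any MSS the peer offered and floored at 32. A worked example under the assumption of a classic 40-byte TCP/IP header overlay; slirp's actual struct tcpiphdr may be larger, which would shrink the result accordingly:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Assumption: sizeof(struct tcpiphdr) == 40 (20-byte IP overlay plus
     * 20-byte TCP header); the real size depends on this tree's definition. */
    int mtu = 1500, mru = 1500, offer = 0;
    int mss = MIN(mtu, mru) - 40;
    if (offer)
        mss = MIN(mss, offer);
    mss = MAX(mss, 32);
    printf("mss = %d\n", mss);   /* 1460 under these assumptions */
    return 0;
}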