/* $NetBSD: fetch.c,v 1.112 2000/05/25 15:35:51 itojun Exp $ */
/*-
* Copyright (c) 1997-2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Luke Mewburn.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: fetch.c,v 1.112 2000/05/25 15:35:51 itojun Exp $");
#endif /* not lint */
/*
* FTP User Program -- Command line file retrieval
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/ftp.h>
#include <arpa/inet.h>
#include <ctype.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <util.h>
#include "ftp_var.h"
#include "version.h"
typedef enum {
UNKNOWN_URL_T=-1,
HTTP_URL_T,
FTP_URL_T,
FILE_URL_T,
CLASSIC_URL_T
} url_t;
void aborthttp(int);
static int auth_url(const char *, char **, const char *, const char *);
static void base64_encode(const char *, size_t, char *);
static int go_fetch(const char *);
static int fetch_ftp(const char *);
static int fetch_url(const char *, const char *, char *, char *);
static int parse_url(const char *, const char *, url_t *, char **,
char **, char **, char **, in_port_t *, char **);
static void url_decode(char *);
static int redirect_loop;
#define ABOUT_URL "about:" /* propaganda */
#define FILE_URL "file://" /* file URL prefix */
#define FTP_URL "ftp://" /* ftp URL prefix */
#define HTTP_URL "http://" /* http URL prefix */
/*
* Generate authorization response based on given authentication challenge.
* Returns -1 if an error occurred, otherwise 0.
* Sets response to a malloc(3)ed string; caller should free.
*/
static int
auth_url(const char *challenge, char **response, const char *guser,
const char *gpass)
{
char *cp, *ep, *clear, *line, *realm, *scheme;
char user[BUFSIZ], *pass;
int rval;
size_t len, clen, rlen;
*response = NULL;
clear = realm = scheme = NULL;
rval = -1;
line = xstrdup(challenge);
cp = line;
if (debug)
fprintf(ttyout, "auth_url: challenge `%s'\n", challenge);
scheme = strsep(&cp, " ");
#define SCHEME_BASIC "Basic"
if (strncasecmp(scheme, SCHEME_BASIC, sizeof(SCHEME_BASIC) - 1) != 0) {
warnx("Unsupported WWW Authentication challenge - `%s'",
challenge);
goto cleanup_auth_url;
}
cp += strspn(cp, " ");
#define REALM "realm=\""
if (strncasecmp(cp, REALM, sizeof(REALM) - 1) == 0)
cp += sizeof(REALM) - 1;
else {
warnx("Unsupported WWW Authentication challenge - `%s'",
challenge);
goto cleanup_auth_url;
}
if ((ep = strchr(cp, '\"')) != NULL) {
size_t len = ep - cp;
realm = (char *)xmalloc(len + 1);
(void)strlcpy(realm, cp, len + 1);
} else {
warnx("Unsupported WWW Authentication challenge - `%s'",
challenge);
goto cleanup_auth_url;
}
if (guser != NULL)
(void)strlcpy(user, guser, sizeof(user));
else {
fprintf(ttyout, "Username for `%s': ", realm);
(void)fflush(ttyout);
if (fgets(user, sizeof(user) - 1, stdin) == NULL) {
clearerr(stdin);
goto cleanup_auth_url;
}
user[strlen(user) - 1] = '\0';
}
if (gpass != NULL)
pass = (char *)gpass;
else
pass = getpass("Password: ");
clen = strlen(user) + strlen(pass) + 2; /* user + ":" + pass + "\0" */
clear = (char *)xmalloc(clen);
(void)strlcpy(clear, user, clen);
(void)strlcat(clear, ":", clen);
(void)strlcat(clear, pass, clen);
if (gpass == NULL)
memset(pass, 0, strlen(pass));
/* scheme + " " + enc + "\0" */
rlen = strlen(scheme) + 1 + (clen + 2) * 4 / 3 + 1;
*response = (char *)xmalloc(rlen);
(void)strlcpy(*response, scheme, rlen);
len = strlcat(*response, " ", rlen);
base64_encode(clear, clen, *response + len);
memset(clear, 0, clen);
rval = 0;
cleanup_auth_url:
FREEPTR(clear);
FREEPTR(line);
FREEPTR(realm);
return (rval);
}
/*
* Encode len bytes starting at clear using base64 encoding into encoded,
* which should be at least ((len + 2) * 4 / 3 + 1) in size.
*/
static void
base64_encode(const char *clear, size_t len, char *encoded)
{
static const char enc[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
char *cp;
int i;
cp = encoded;
for (i = 0; i < len; i += 3) {
*(cp++) = enc[((clear[i + 0] >> 2))];
*(cp++) = enc[((clear[i + 0] << 4) & 0x30)
| ((clear[i + 1] >> 4) & 0x0f)];
*(cp++) = enc[((clear[i + 1] << 2) & 0x3c)
| ((clear[i + 2] >> 6) & 0x03)];
*(cp++) = enc[((clear[i + 2] ) & 0x3f)];
}
*cp = '\0';
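	/*
	 * The loop above always encodes whole 3-byte groups, so for a
	 * short final group it emits characters derived from bytes beyond
	 * the input; overwrite those trailing characters with `=' padding.
	 */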
while (i-- > len)
*(--cp) = '=';
}
/*
* Decode %xx escapes in given string, `in-place'.
*/
static void
url_decode(char *url)
{
unsigned char *p, *q;
if (EMPTYSTRING(url))
return;
p = q = (unsigned char *)url;
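	/* HEXTOINT converts one hex digit (0-9, a-f, A-F) to its value. */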
#define HEXTOINT(x) (x - (isdigit(x) ? '0' : (islower(x) ? 'a' : 'A') - 10))
while (*p) {
if (p[0] == '%'
&& p[1] && isxdigit((unsigned char)p[1])
&& p[2] && isxdigit((unsigned char)p[2])) {
*q++ = HEXTOINT(p[1]) * 16 + HEXTOINT(p[2]);
p+=3;
} else
*q++ = *p++;
}
*q = '\0';
}
/*
* Parse URL of form:
* <type>://[<user>[:<password>@]]<host>[:<port>][/<path>]
* Returns -1 if a parse error occurred, otherwise 0.
* It's the caller's responsibility to url_decode() the returned
* user, pass and path.
*
* Sets type to url_t, each of the given char ** pointers to a
* malloc(3)ed strings of the relevant section, and port to
* the number given, or ftpport if ftp://, or httpport if http://.
*
* If <host> is surrounded by `[' and ']', it's parsed as an
* IPv6 address (as per RFC 2732).
*
* XXX: this is not totally RFC 1738 compliant; <path> will have the
* leading `/' unless it's an ftp:// URL, as this makes things easier
* for file:// and http:// URLs. ftp:// URLs have the `/' between the
* host and the url-path removed, but any additional leading slashes
* in the url-path are retained (because they imply that we should
* later do "CWD" with a null argument).
*
* Examples:
* input url output path
* --------- -----------
* "ftp://host" NULL
* "http://host/" NULL
* "file://host/dir/file" "dir/file"
* "ftp://host/" ""
* "ftp://host//" NULL
* "ftp://host//dir/file" "/dir/file"
*/
static int
parse_url(const char *url, const char *desc, url_t *type,
char **user, char **pass, char **host, char **port,
in_port_t *portnum, char **path)
{
const char *origurl;
char *cp, *ep, *thost, *tport;
size_t len;
if (url == NULL || desc == NULL || type == NULL || user == NULL
|| pass == NULL || host == NULL || port == NULL || portnum == NULL
|| path == NULL)
errx(1, "parse_url: invoked with NULL argument!");
origurl = url;
*type = UNKNOWN_URL_T;
*user = *pass = *host = *port = *path = NULL;
*portnum = 0;
tport = NULL;
if (strncasecmp(url, HTTP_URL, sizeof(HTTP_URL) - 1) == 0) {
url += sizeof(HTTP_URL) - 1;
*type = HTTP_URL_T;
*portnum = HTTP_PORT;
tport = httpport;
} else if (strncasecmp(url, FTP_URL, sizeof(FTP_URL) - 1) == 0) {
url += sizeof(FTP_URL) - 1;
*type = FTP_URL_T;
*portnum = FTP_PORT;
tport = ftpport;
} else if (strncasecmp(url, FILE_URL, sizeof(FILE_URL) - 1) == 0) {
url += sizeof(FILE_URL) - 1;
*type = FILE_URL_T;
} else {
warnx("Invalid %s `%s'", desc, url);
cleanup_parse_url:
FREEPTR(*user);
FREEPTR(*pass);
FREEPTR(*host);
FREEPTR(*port);
FREEPTR(*path);
return (-1);
}
if (*url == '\0')
return (0);
/* find [user[:pass]@]host[:port] */
ep = strchr(url, '/');
if (ep == NULL)
thost = xstrdup(url);
else {
len = ep - url;
thost = (char *)xmalloc(len + 1);
(void)strlcpy(thost, url, len + 1);
if (*type == FTP_URL_T) /* skip first / for ftp URLs */
ep++;
*path = xstrdup(ep);
}
cp = strchr(thost, '@'); /* look for user[:pass]@ in URLs */
if (cp != NULL) {
if (*type == FTP_URL_T)
anonftp = 0; /* disable anonftp */
*user = thost;
*cp = '\0';
thost = xstrdup(cp + 1);
cp = strchr(*user, ':');
if (cp != NULL) {
*cp = '\0';
*pass = xstrdup(cp + 1);
}
}
#ifdef INET6
/*
* Check if thost is an encoded IPv6 address, as per
* RFC 2732:
* `[' ipv6-address ']'
*/
if (*thost == '[') {
cp = thost + 1;
if ((ep = strchr(cp, ']')) == NULL ||
(ep[1] != '\0' && ep[1] != ':')) {
warnx("Invalid address `%s' in %s `%s'",
thost, desc, origurl);
goto cleanup_parse_url;
}
len = ep - cp; /* change `[xyz]' -> `xyz' */
memmove(thost, thost + 1, len);
thost[len] = '\0';
if (! isipv6addr(thost)) {
warnx("Invalid IPv6 address `%s' in %s `%s'",
thost, desc, origurl);
goto cleanup_parse_url;
}
cp = ep + 1;
if (*cp == ':')
cp++;
else
cp = NULL;
} else
#endif /* INET6 */
if ((cp = strchr(thost, ':')) != NULL)
*cp++ = '\0';
*host = thost;
/* look for [:port] */
if (cp != NULL) {
long nport;
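		/* The port may be numeric or a service name for getservbyname(3). */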
nport = strtol(cp, &ep, 10);
if (*ep != '\0' && ep == cp) {
struct servent *svp;
svp = getservbyname(cp, "tcp");
if (svp == NULL) {
warnx("Unknown port `%s' in %s `%s'",
cp, desc, origurl);
goto cleanup_parse_url;
} else
nport = ntohs(svp->s_port);
} else if (nport < 1 || nport > MAX_IN_PORT_T || *ep != '\0') {
warnx("Invalid port `%s' in %s `%s'", cp, desc,
origurl);
goto cleanup_parse_url;
}
*portnum = nport;
tport = cp;
}
if (tport != NULL)
*port = xstrdup(tport);
if (*path == NULL)
*path = xstrdup("");
if (debug)
fprintf(ttyout,
"parse_url: user `%s' pass `%s' host %s port %s(%d) "
"path `%s'\n",
*user ? *user : "<null>", *pass ? *pass : "<null>",
*host ? *host : "<null>", *port ? *port : "<null>",
*portnum ? *portnum : -1, *path ? *path : "<null>");
return (0);
}
sigjmp_buf httpabort;
/*
* Retrieve URL, via a proxy if necessary, using HTTP.
* If proxyenv is set, use that for the proxy, otherwise try ftp_proxy or
* http_proxy as appropriate.
* Supports HTTP redirects.
* Returns 1 on failure, 0 on completed xfer, -1 if ftp connection
* is still open (e.g, ftp xfer with trailing /)
*/
static int
fetch_url(const char *url, const char *proxyenv, char *proxyauth, char *wwwauth)
{
#if defined(NI_NUMERICHOST) && defined(INET6)
struct addrinfo hints, *res, *res0 = NULL;
int error;
char hbuf[NI_MAXHOST];
#else
struct sockaddr_in sin;
struct hostent *hp = NULL;
#endif
volatile sigfunc oldintr, oldintp;
volatile int s;
struct stat sb;
int ischunked, isproxy, rval, hcode;
size_t len;
static size_t bufsize;
static char *xferbuf;
char *cp, *ep, *buf, *savefile;
char *auth, *location, *message;
char *user, *pass, *host, *port, *path, *decodedpath;
char *puser, *ppass;
off_t hashbytes, rangestart, rangeend, entitylen;
int (*closefunc)(FILE *);
FILE *fin, *fout;
time_t mtime;
url_t urltype;
in_port_t portnum;
oldintr = oldintp = NULL;
closefunc = NULL;
fin = fout = NULL;
s = -1;
buf = savefile = NULL;
auth = location = message = NULL;
ischunked = isproxy = hcode = 0;
rval = 1;
user = pass = host = path = decodedpath = puser = ppass = NULL;
#ifdef __GNUC__ /* shut up gcc warnings */
(void)&closefunc;
(void)&fin;
(void)&fout;
(void)&buf;
(void)&savefile;
(void)&rval;
(void)&isproxy;
(void)&hcode;
(void)&ischunked;
(void)&message;
(void)&location;
(void)&auth;
(void)&decodedpath;
#endif
if (parse_url(url, "URL", &urltype, &user, &pass, &host, &port,
&portnum, &path) == -1)
goto cleanup_fetch_url;
if (urltype == FILE_URL_T && ! EMPTYSTRING(host)
&& strcasecmp(host, "localhost") != 0) {
warnx("No support for non local file URL `%s'", url);
goto cleanup_fetch_url;
}
if (EMPTYSTRING(path)) {
if (urltype == FTP_URL_T) {
rval = fetch_ftp(url);
goto cleanup_fetch_url;
}
if (urltype != HTTP_URL_T || outfile == NULL) {
warnx("Invalid URL (no file after host) `%s'", url);
goto cleanup_fetch_url;
}
}
decodedpath = xstrdup(path);
url_decode(decodedpath);
if (outfile)
savefile = xstrdup(outfile);
else {
cp = strrchr(decodedpath, '/'); /* find savefile */
if (cp != NULL)
savefile = xstrdup(cp + 1);
else
savefile = xstrdup(decodedpath);
}
if (EMPTYSTRING(savefile)) {
if (urltype == FTP_URL_T) {
rval = fetch_ftp(url);
goto cleanup_fetch_url;
}
warnx("Invalid URL (no file after directory) `%s'", url);
goto cleanup_fetch_url;
} else {
if (debug)
fprintf(ttyout, "got savefile as `%s'\n", savefile);
}
restart_point = 0;
filesize = -1;
rangestart = rangeend = entitylen = -1;
mtime = -1;
if (restartautofetch) {
if (strcmp(savefile, "-") != 0 && *savefile != '|' &&
stat(savefile, &sb) == 0)
restart_point = sb.st_size;
}
if (urltype == FILE_URL_T) { /* file:// URLs */
direction = "copied";
fin = fopen(decodedpath, "r");
if (fin == NULL) {
warn("Cannot open file `%s'", decodedpath);
goto cleanup_fetch_url;
}
if (fstat(fileno(fin), &sb) == 0) {
mtime = sb.st_mtime;
filesize = sb.st_size;
}
if (restart_point) {
if (lseek(fileno(fin), restart_point, SEEK_SET) < 0) {
warn("Can't lseek to restart `%s'",
decodedpath);
goto cleanup_fetch_url;
}
}
if (verbose) {
fprintf(ttyout, "Copying %s", decodedpath);
if (restart_point)
#ifndef NO_QUAD
fprintf(ttyout, " (restarting at %lld)",
(long long)restart_point);
#else
fprintf(ttyout, " (restarting at %ld)",
(long)restart_point);
#endif
fputs("\n", ttyout);
}
} else { /* ftp:// or http:// URLs */
char *leading;
int hasleading;
if (proxyenv == NULL) {
if (urltype == HTTP_URL_T)
proxyenv = getoptionvalue("http_proxy");
else if (urltype == FTP_URL_T)
proxyenv = getoptionvalue("ftp_proxy");
}
direction = "retrieved";
if (! EMPTYSTRING(proxyenv)) { /* use proxy */
url_t purltype;
char *phost, *ppath;
char *pport, *no_proxy;
isproxy = 1;
/* check URL against list of no_proxied sites */
no_proxy = getoptionvalue("no_proxy");
if (! EMPTYSTRING(no_proxy)) {
char *np, *np_copy;
long np_port;
size_t hlen, plen;
np_copy = xstrdup(no_proxy);
hlen = strlen(host);
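				/*
				 * Each no_proxy entry is `host' or `host:port'.
				 * A case-insensitive suffix match on the host
				 * name (and an exact port match, if a port was
				 * given) disables the proxy for this URL.
				 */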
while ((cp = strsep(&np_copy, " ,")) != NULL) {
if (*cp == '\0')
continue;
if ((np = strrchr(cp, ':')) != NULL) {
*np = '\0';
np_port =
strtol(np + 1, &ep, 10);
if (*ep != '\0')
continue;
if (np_port != portnum)
continue;
}
plen = strlen(cp);
if (hlen < plen)
continue;
if (strncasecmp(host + hlen - plen,
cp, plen) == 0) {
isproxy = 0;
break;
}
}
FREEPTR(np_copy);
}
if (isproxy) {
if (parse_url(proxyenv, "proxy URL", &purltype,
&puser, &ppass, &phost, &pport, &portnum,
&ppath) == -1)
goto cleanup_fetch_url;
if ((purltype != HTTP_URL_T
&& purltype != FTP_URL_T) ||
EMPTYSTRING(phost) ||
(! EMPTYSTRING(ppath)
&& strcmp(ppath, "/") != 0)) {
warnx("Malformed proxy URL `%s'",
proxyenv);
FREEPTR(phost);
FREEPTR(pport);
FREEPTR(ppath);
goto cleanup_fetch_url;
}
if (isipv6addr(host) &&
strchr(host, '%') != NULL) {
warnx(
"Scoped address notation `%s' disallowed via web proxy",
host);
FREEPTR(phost);
FREEPTR(pport);
FREEPTR(ppath);
goto cleanup_fetch_url;
}
FREEPTR(host);
host = phost;
FREEPTR(port);
port = pport;
FREEPTR(path);
path = xstrdup(url);
FREEPTR(ppath);
}
} /* ! EMPTYSTRING(proxyenv) */
#if !defined(NI_NUMERICHOST) || !defined(INET6)
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
if (isdigit((unsigned char)host[0])) {
if (inet_aton(host, &sin.sin_addr) == 0) {
warnx("Invalid IP address `%s'", host);
goto cleanup_fetch_url;
}
} else {
hp = gethostbyname(host);
if (hp == NULL) {
warnx("%s: %s", host, hstrerror(h_errno));
goto cleanup_fetch_url;
}
if (hp->h_addrtype != AF_INET) {
warnx("`%s': not an Internet address?", host);
goto cleanup_fetch_url;
}
if (hp->h_length > sizeof(sin.sin_addr))
hp->h_length = sizeof(sin.sin_addr);
memcpy(&sin.sin_addr, hp->h_addr, hp->h_length);
}
sin.sin_port = htons(portnum);
s = socket(AF_INET, SOCK_STREAM, 0);
if (s == -1) {
warn("Can't create socket");
goto cleanup_fetch_url;
}
while (xconnect(s, (struct sockaddr *)&sin,
sizeof(sin)) == -1) {
if (errno == EINTR)
continue;
if (hp && hp->h_addr_list[1]) {
int oerrno = errno;
char *ia;
ia = inet_ntoa(sin.sin_addr);
errno = oerrno;
warn("Connect to address `%s'", ia);
hp->h_addr_list++;
memcpy(&sin.sin_addr, hp->h_addr_list[0],
(size_t)hp->h_length);
if (verbose)
fprintf(ttyout, "Trying %s...\n",
inet_ntoa(sin.sin_addr));
(void)close(s);
s = socket(AF_INET, SOCK_STREAM, 0);
if (s < 0) {
warn("Can't create socket");
goto cleanup_fetch_url;
}
continue;
}
warn("Can't connect to `%s'", host);
goto cleanup_fetch_url;
}
#else
memset(&hints, 0, sizeof(hints));
hints.ai_flags = 0;
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = 0;
error = getaddrinfo(host, port, &hints, &res0);
if (error) {
warnx(gai_strerror(error));
goto cleanup_fetch_url;
}
if (res0->ai_canonname)
host = res0->ai_canonname;
s = -1;
for (res = res0; res; res = res->ai_next) {
if (getnameinfo(res->ai_addr, res->ai_addrlen,
hbuf, sizeof(hbuf), NULL, 0,
NI_NUMERICHOST) != 0)
strncpy(hbuf, "invalid", sizeof(hbuf));
if (verbose && res != res0)
fprintf(ttyout, "Trying %s...\n", hbuf);
s = socket(res->ai_family, res->ai_socktype,
res->ai_protocol);
if (s < 0) {
warn("Can't create socket");
continue;
}
if (xconnect(s, res->ai_addr, res->ai_addrlen) < 0) {
warn("Connect to address `%s'", hbuf);
close(s);
s = -1;
continue;
}
/* success */
break;
}
freeaddrinfo(res0);
if (s < 0) {
warn("Can't connect to %s", host);
goto cleanup_fetch_url;
}
#endif
fin = fdopen(s, "r+");
/*
* Construct and send the request.
*/
if (verbose)
fprintf(ttyout, "Requesting %s\n", url);
leading = " (";
hasleading = 0;
if (isproxy) {
if (verbose) {
fprintf(ttyout, "%svia %s:%s", leading,
host, port);
leading = ", ";
hasleading++;
}
fprintf(fin, "GET %s HTTP/1.0\r\n", path);
if (flushcache)
fprintf(fin, "Pragma: no-cache\r\n");
} else {
fprintf(fin, "GET %s HTTP/1.1\r\n", path);
if (strchr(host, ':')) {
char *h, *p;
/*
* strip off IPv6 scope identifier, since it is
* local to the node
*/
h = xstrdup(host);
if (isipv6addr(h) &&
(p = strchr(h, '%')) != NULL) {
*p = '\0';
}
fprintf(fin, "Host: [%s]:%d\r\n", h, portnum);
free(h);
} else
fprintf(fin, "Host: %s:%d\r\n", host, portnum);
fprintf(fin, "Accept: */*\r\n");
fprintf(fin, "Connection: close\r\n");
if (restart_point) {
fputs(leading, ttyout);
#ifndef NO_QUAD
fprintf(fin, "Range: bytes=%lld-\r\n",
(long long)restart_point);
fprintf(ttyout, "restarting at %lld",
(long long)restart_point);
#else
fprintf(fin, "Range: bytes=%ld-\r\n",
(long)restart_point);
fprintf(ttyout, "restarting at %ld",
(long)restart_point);
#endif
leading = ", ";
hasleading++;
}
if (flushcache)
fprintf(fin, "Cache-Control: no-cache\r\n");
}
fprintf(fin, "User-Agent: %s/%s\r\n", FTP_PRODUCT, FTP_VERSION);
if (wwwauth) {
if (verbose) {
fprintf(ttyout, "%swith authorization",
leading);
leading = ", ";
hasleading++;
}
fprintf(fin, "Authorization: %s\r\n", wwwauth);
}
if (proxyauth) {
if (verbose) {
fprintf(ttyout,
"%swith proxy authorization", leading);
leading = ", ";
hasleading++;
}
fprintf(fin, "Proxy-Authorization: %s\r\n", proxyauth);
}
if (verbose && hasleading)
fputs(")\n", ttyout);
fprintf(fin, "\r\n");
if (fflush(fin) == EOF) {
warn("Writing HTTP request");
goto cleanup_fetch_url;
}
/* Read the response */
if ((buf = fparseln(fin, &len, NULL, "\0\0\0", 0)) == NULL) {
warn("Receiving HTTP reply");
goto cleanup_fetch_url;
}
while (len > 0 && (buf[len-1] == '\r' || buf[len-1] == '\n'))
buf[--len] = '\0';
if (debug)
fprintf(ttyout, "received `%s'\n", buf);
/* Determine HTTP response code */
cp = strchr(buf, ' ');
if (cp == NULL)
goto improper;
else
cp++;
hcode = strtol(cp, &ep, 10);
if (*ep != '\0' && !isspace((unsigned char)*ep))
goto improper;
message = xstrdup(cp);
/* Read the rest of the header. */
FREEPTR(buf);
while (1) {
if ((buf = fparseln(fin, &len, NULL, "\0\0\0", 0))
== NULL) {
warn("Receiving HTTP reply");
goto cleanup_fetch_url;
}
while (len > 0 &&
(buf[len-1] == '\r' || buf[len-1] == '\n'))
buf[--len] = '\0';
if (len == 0)
break;
if (debug)
fprintf(ttyout, "received `%s'\n", buf);
/* Look for some headers */
cp = buf;
#define CONTENTLEN "Content-Length: "
if (strncasecmp(cp, CONTENTLEN,
sizeof(CONTENTLEN) - 1) == 0) {
cp += sizeof(CONTENTLEN) - 1;
#ifndef NO_QUAD
filesize = strtoll(cp, &ep, 10);
#else
filesize = strtol(cp, &ep, 10);
#endif
if (filesize < 0 || *ep != '\0')
goto improper;
if (debug)
#ifndef NO_QUAD
fprintf(ttyout, "parsed len as: %lld\n",
(long long)filesize);
#else
fprintf(ttyout, "parsed len as: %ld\n",
(long)filesize);
#endif
#define CONTENTRANGE "Content-Range: bytes "
} else if (strncasecmp(cp, CONTENTRANGE,
sizeof(CONTENTRANGE) - 1) == 0) {
cp += sizeof(CONTENTRANGE) - 1;
#ifndef NO_QUAD
rangestart = strtoll(cp, &ep, 10);
#else
rangestart = strtol(cp, &ep, 10);
#endif
if (rangestart < 0 || *ep != '-')
goto improper;
cp = ep + 1;
#ifndef NO_QUAD
rangeend = strtoll(cp, &ep, 10);
#else
rangeend = strtol(cp, &ep, 10);
#endif
if (rangeend < 0 || *ep != '/' ||
rangeend < rangestart)
goto improper;
cp = ep + 1;
#ifndef NO_QUAD
entitylen = strtoll(cp, &ep, 10);
#else
entitylen = strtol(cp, &ep, 10);
#endif
if (entitylen < 0 || *ep != '\0')
goto improper;
if (debug)
#ifndef NO_QUAD
fprintf(ttyout,
"parsed range as: %lld-%lld/%lld\n",
(long long)rangestart,
(long long)rangeend,
(long long)entitylen);
#else
fprintf(ttyout,
"parsed range as: %ld-%ld/%ld\n",
(long)rangestart,
(long)rangeend,
(long)entitylen);
#endif
if (! restart_point) {
warnx(
"Received unexpected Content-Range header");
goto cleanup_fetch_url;
}
#define LASTMOD "Last-Modified: "
} else if (strncasecmp(cp, LASTMOD,
sizeof(LASTMOD) - 1) == 0) {
struct tm parsed;
char *t;
cp += sizeof(LASTMOD) - 1;
/* RFC 1123 */
if ((t = strptime(cp,
"%a, %d %b %Y %H:%M:%S GMT",
&parsed))
/* RFC 850 */
|| (t = strptime(cp,
"%a, %d-%b-%y %H:%M:%S GMT",
&parsed))
/* asctime */
|| (t = strptime(cp,
"%a, %b %d %H:%M:%S %Y",
&parsed))) {
parsed.tm_isdst = -1;
if (*t == '\0')
mtime = timegm(&parsed);
if (debug && mtime != -1) {
fprintf(ttyout,
"parsed date as: %s",
ctime(&mtime));
}
}
#define LOCATION "Location: "
} else if (strncasecmp(cp, LOCATION,
sizeof(LOCATION) - 1) == 0) {
cp += sizeof(LOCATION) - 1;
location = xstrdup(cp);
if (debug)
fprintf(ttyout,
"parsed location as: %s\n", cp);
#define TRANSENC "Transfer-Encoding: "
} else if (strncasecmp(cp, TRANSENC,
sizeof(TRANSENC) - 1) == 0) {
cp += sizeof(TRANSENC) - 1;
if (strcasecmp(cp, "binary") == 0) {
warnx(
"Bogus transfer encoding - `%s' (fetching anyway)",
cp);
continue;
}
if (strcasecmp(cp, "chunked") != 0) {
warnx(
"Unsupported transfer encoding - `%s'",
cp);
goto cleanup_fetch_url;
}
ischunked++;
if (debug)
fprintf(ttyout,
"using chunked encoding\n");
#define PROXYAUTH "Proxy-Authenticate: "
} else if (strncasecmp(cp, PROXYAUTH,
sizeof(PROXYAUTH) - 1) == 0) {
cp += sizeof(PROXYAUTH) - 1;
FREEPTR(auth);
auth = xstrdup(cp);
if (debug)
fprintf(ttyout,
"parsed proxy-auth as: %s\n", cp);
#define WWWAUTH "WWW-Authenticate: "
} else if (strncasecmp(cp, WWWAUTH,
sizeof(WWWAUTH) - 1) == 0) {
cp += sizeof(WWWAUTH) - 1;
FREEPTR(auth);
auth = xstrdup(cp);
if (debug)
fprintf(ttyout,
"parsed www-auth as: %s\n", cp);
}
}
/* finished parsing header */
FREEPTR(buf);
switch (hcode) {
case 200:
break;
case 206:
if (! restart_point) {
warnx("Not expecting partial content header");
goto cleanup_fetch_url;
}
break;
case 300:
case 301:
case 302:
case 303:
case 305:
if (EMPTYSTRING(location)) {
warnx(
"No redirection Location provided by server");
goto cleanup_fetch_url;
}
if (redirect_loop++ > 5) {
warnx("Too many redirections requested");
goto cleanup_fetch_url;
}
if (hcode == 305) {
if (verbose)
fprintf(ttyout, "Redirected via %s\n",
location);
rval = fetch_url(url, location,
proxyauth, wwwauth);
} else {
if (verbose)
fprintf(ttyout, "Redirected to %s\n",
location);
rval = go_fetch(location);
}
goto cleanup_fetch_url;
case 401:
case 407:
{
char **authp;
char *auser, *apass;
fprintf(ttyout, "%s\n", message);
if (EMPTYSTRING(auth)) {
warnx(
"No authentication challenge provided by server");
goto cleanup_fetch_url;
}
if (hcode == 401) {
authp = &wwwauth;
auser = user;
apass = pass;
} else {
authp = &proxyauth;
auser = puser;
apass = ppass;
}
if (*authp != NULL) {
char reply[10];
fprintf(ttyout,
"Authorization failed. Retry (y/n)? ");
if (fgets(reply, sizeof(reply), stdin)
== NULL) {
clearerr(stdin);
goto cleanup_fetch_url;
} else {
if (tolower(reply[0]) != 'y')
goto cleanup_fetch_url;
}
auser = NULL;
apass = NULL;
}
if (auth_url(auth, authp, auser, apass) == 0) {
rval = fetch_url(url, proxyenv,
proxyauth, wwwauth);
memset(*authp, 0, strlen(*authp));
FREEPTR(*authp);
}
goto cleanup_fetch_url;
}
default:
if (message)
warnx("Error retrieving file - `%s'", message);
else
warnx("Unknown error retrieving file");
goto cleanup_fetch_url;
}
} /* end of ftp:// or http:// specific setup */
/* Open the output file. */
if (strcmp(savefile, "-") == 0) {
fout = stdout;
} else if (*savefile == '|') {
oldintp = xsignal(SIGPIPE, SIG_IGN);
fout = popen(savefile + 1, "w");
if (fout == NULL) {
warn("Can't run `%s'", savefile + 1);
goto cleanup_fetch_url;
}
closefunc = pclose;
} else {
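		/*
		 * Writing to a regular file.  For a restarted fetch, the
		 * server's Content-Range start offset must equal the size of
		 * the existing local file, and the entity length (if given)
		 * becomes the full file size; the file is then opened for
		 * append instead of being truncated.
		 */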
if (restart_point){
if (entitylen != -1)
filesize = entitylen;
if (rangestart != -1 && rangestart != restart_point) {
warnx(
"Size of `%s' differs from save file `%s'",
url, savefile);
goto cleanup_fetch_url;
}
fout = fopen(savefile, "a");
} else
fout = fopen(savefile, "w");
if (fout == NULL) {
warn("Can't open `%s'", savefile);
goto cleanup_fetch_url;
}
closefunc = fclose;
}
/* Trap signals */
if (sigsetjmp(httpabort, 1))
goto cleanup_fetch_url;
(void)xsignal(SIGQUIT, psummary);
oldintr = xsignal(SIGINT, aborthttp);
if (rcvbuf_size > bufsize) {
if (xferbuf)
(void)free(xferbuf);
bufsize = rcvbuf_size;
xferbuf = xmalloc(bufsize);
}
bytes = 0;
hashbytes = mark;
progressmeter(-1);
/* Finally, suck down the file. */
do {
long chunksize;
chunksize = 0;
/* read chunksize */
if (ischunked) {
if (fgets(xferbuf, bufsize, fin) == NULL) {
warnx("Unexpected EOF reading chunksize");
goto cleanup_fetch_url;
}
chunksize = strtol(xferbuf, &ep, 16);
/*
* XXX: Work around bug in Apache 1.3.9, which
* incorrectly puts a trailing space after
* the chunksize.
*/
if (*ep == ' ')
ep++;
if (strcmp(ep, "\r\n") != 0) {
warnx("Unexpected data following chunksize");
goto cleanup_fetch_url;
}
if (debug)
fprintf(ttyout,
#ifndef NO_QUAD
"got chunksize of %lld\n",
(long long)chunksize);
#else
"got chunksize of %ld\n",
(long)chunksize);
#endif
if (chunksize == 0)
break;
}
/* transfer file or chunk */
while (1) {
struct timeval then, now, td;
off_t bufrem;
if (rate_get)
(void)gettimeofday(&then, NULL);
bufrem = rate_get ? rate_get : bufsize;
if (ischunked)
bufrem = MIN(chunksize, bufrem);
while (bufrem > 0) {
len = fread(xferbuf, sizeof(char),
MIN(bufsize, bufrem), fin);
if (len <= 0)
goto chunkdone;
bytes += len;
bufrem -= len;
if (fwrite(xferbuf, sizeof(char), len, fout)
!= len) {
warn("Writing `%s'", savefile);
goto cleanup_fetch_url;
}
if (hash && !progress) {
while (bytes >= hashbytes) {
(void)putc('#', ttyout);
hashbytes += mark;
}
(void)fflush(ttyout);
}
if (ischunked) {
chunksize -= len;
if (chunksize <= 0)
break;
}
}
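			/*
			 * Rate limiting: at most rate_get bytes were read
			 * above, so sleep until a full second has elapsed
			 * since `then' before reading more, capping the
			 * transfer at roughly rate_get bytes per second.
			 */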
if (rate_get) {
while (1) {
(void)gettimeofday(&now, NULL);
timersub(&now, &then, &td);
if (td.tv_sec > 0)
break;
usleep(1000000 - td.tv_usec);
}
}
if (ischunked && chunksize <= 0)
break;
}
/* read CRLF after chunk*/
chunkdone:
if (ischunked) {
if (fgets(xferbuf, bufsize, fin) == NULL)
break;
if (strcmp(xferbuf, "\r\n") != 0) {
warnx("Unexpected data following chunk");
goto cleanup_fetch_url;
}
}
} while (ischunked);
if (hash && !progress && bytes > 0) {
if (bytes < mark)
(void)putc('#', ttyout);
(void)putc('\n', ttyout);
}
if (ferror(fin)) {
warn("Reading file");
goto cleanup_fetch_url;
}
progressmeter(1);
bytes = 0;
(void)fflush(fout);
if (closefunc == fclose && mtime != -1) {
struct timeval tval[2];
(void)gettimeofday(&tval[0], NULL);
tval[1].tv_sec = mtime;
tval[1].tv_usec = 0;
(*closefunc)(fout);
fout = NULL;
if (utimes(savefile, tval) == -1) {
fprintf(ttyout,
"Can't change modification time to %s",
asctime(localtime(&mtime)));
}
}
if (bytes > 0)
ptransfer(0);
rval = 0;
goto cleanup_fetch_url;
improper:
warnx("Improper response from `%s'", host);
cleanup_fetch_url:
if (oldintr)
(void)xsignal(SIGINT, oldintr);
if (oldintp)
(void)xsignal(SIGPIPE, oldintp);
if (fin != NULL)
fclose(fin);
else if (s != -1)
close(s);
if (closefunc != NULL && fout != NULL)
(*closefunc)(fout);
FREEPTR(savefile);
FREEPTR(user);
FREEPTR(pass);
FREEPTR(host);
FREEPTR(port);
FREEPTR(path);
FREEPTR(decodedpath);
FREEPTR(puser);
FREEPTR(ppass);
FREEPTR(buf);
FREEPTR(auth);
FREEPTR(location);
FREEPTR(message);
return (rval);
}
/*
* Abort a HTTP retrieval
*/
void
aborthttp(int notused)
{
char msgbuf[100];
int len;
alarmtimer(0);
len = strlcpy(msgbuf, "\nHTTP fetch aborted.\n", sizeof(msgbuf));
write(fileno(ttyout), msgbuf, len);
siglongjmp(httpabort, 1);
}
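/*
 * Illustrative sketch (not compiled in): aborthttp() only has an effect if
 * the caller has armed the `httpabort' jump buffer and installed the handler
 * around the blocking transfer.  The calling side looks roughly like the
 * fragment below; the `cleanup' label is a hypothetical name, while
 * `xsignal' and `httpabort' are identifiers used elsewhere in this file.
 */
#if 0
	(void)xsignal(SIGINT, aborthttp);	/* ^C now invokes aborthttp() */
	if (sigsetjmp(httpabort, 1)) {
		/* control returns here via siglongjmp() in aborthttp() */
		goto cleanup;			/* hypothetical cleanup label */
	}
	/* ... blocking read/write loop of the HTTP transfer ... */
#endif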
/*
* Retrieve ftp URL or classic ftp argument using FTP.
* Returns 1 on failure, 0 on completed xfer, -1 if ftp connection
 * is still open (e.g., an ftp xfer with a trailing /)
*/
static int
fetch_ftp(const char *url)
{
char *cp, *xargv[5], rempath[MAXPATHLEN];
char *host, *path, *dir, *file, *user, *pass;
char *port;
int dirhasglob, filehasglob, oautologin, rval, type, xargc;
in_port_t portnum;
url_t urltype;
host = path = dir = file = user = pass = NULL;
port = NULL;
rval = 1;
type = TYPE_I;
if (strncasecmp(url, FTP_URL, sizeof(FTP_URL) - 1) == 0) {
if ((parse_url(url, "URL", &urltype, &user, &pass,
&host, &port, &portnum, &path) == -1) ||
(user != NULL && *user == '\0') ||
(pass != NULL && *pass == '\0') ||
EMPTYSTRING(host)) {
warnx("Invalid URL `%s'", url);
goto cleanup_fetch_ftp;
}
url_decode(user);
url_decode(pass);
/*
* Note: Don't url_decode(path) here. We need to keep the
* distinction between "/" and "%2F" until later.
*/
/* check for trailing ';type=[aid]' */
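		/*
		 * For example, `ftp://host/pub/a.txt;type=a' requests an
		 * ASCII transfer of pub/a.txt and `;type=i' an image
		 * (binary) transfer; `;type=d' (directory listing) is
		 * rejected below.
		 */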
if (! EMPTYSTRING(path) && (cp = strrchr(path, ';')) != NULL) {
if (strcasecmp(cp, ";type=a") == 0)
type = TYPE_A;
else if (strcasecmp(cp, ";type=i") == 0)
type = TYPE_I;
else if (strcasecmp(cp, ";type=d") == 0) {
warnx(
"Directory listing via a URL is not supported");
goto cleanup_fetch_ftp;
} else {
warnx("Invalid suffix `%s' in URL `%s'", cp,
url);
goto cleanup_fetch_ftp;
}
*cp = 0;
}
} else { /* classic style `[user@]host:[file]' */
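		/*
		 * Worked example (hypothetical names): `anon@ftp.example.org:pub/x'
		 * is split below into user "anon", host "ftp.example.org" and
		 * path "pub/x"; giving an explicit user also disables anonftp.
		 */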
urltype = CLASSIC_URL_T;
host = xstrdup(url);
cp = strchr(host, '@');
if (cp != NULL) {
*cp = '\0';
user = host;
anonftp = 0; /* disable anonftp */
host = xstrdup(cp + 1);
}
cp = strchr(host, ':');
if (cp != NULL) {
*cp = '\0';
path = xstrdup(cp + 1);
}
}
if (EMPTYSTRING(host))
goto cleanup_fetch_ftp;
/* Extract the file and (if present) directory name. */
dir = path;
if (! EMPTYSTRING(dir)) {
/*
* If we are dealing with classic `[user@]host:[path]' syntax,
* then a path of the form `/file' (resulting from input of the
* form `host:/file') means that we should do "CWD /" before
* retrieving the file. So we set dir="/" and file="file".
*
* But if we are dealing with URLs like `ftp://host/path' then
* a path of the form `/file' (resulting from a URL of the form
* `ftp://host//file') means that we should do `CWD ' (with an
* empty argument) before retrieving the file. So we set
* dir="" and file="file".
*
* If the path does not contain / at all, we set dir=NULL.
* (We get a path without any slashes if we are dealing with
* classic `[user@]host:[file]' or URL `ftp://host/file'.)
*
* In all other cases, we set dir to a string that does not
* include the final '/' that separates the dir part from the
* file part of the path. (This will be the empty string if
* and only if we are dealing with a path of the form `/file'
* resulting from an URL of the form `ftp://host//file'.)
*/
cp = strrchr(dir, '/');
if (cp == dir && urltype == CLASSIC_URL_T) {
file = cp + 1;
dir = "/";
} else if (cp != NULL) {
*cp++ = '\0';
file = cp;
} else {
file = dir;
dir = NULL;
}
} else
dir = NULL;
if (urltype == FTP_URL_T && file != NULL) {
url_decode(file);
/* but still don't url_decode(dir) */
}
if (debug)
fprintf(ttyout,
"fetch_ftp: user `%s' pass `%s' host %s port %s "
"path `%s' dir `%s' file `%s'\n",
user ? user : "<null>", pass ? pass : "<null>",
host ? host : "<null>", port ? port : "<null>",
path ? path : "<null>",
dir ? dir : "<null>", file ? file : "<null>");
dirhasglob = filehasglob = 0;
if (doglob && urltype == CLASSIC_URL_T) {
if (! EMPTYSTRING(dir) && strpbrk(dir, "*?[]{}") != NULL)
dirhasglob = 1;
if (! EMPTYSTRING(file) && strpbrk(file, "*?[]{}") != NULL)
filehasglob = 1;
}
/* Set up the connection */
if (connected)
disconnect(0, NULL);
xargv[0] = __progname;
xargv[1] = host;
xargv[2] = NULL;
xargc = 2;
if (port) {
xargv[2] = port;
xargv[3] = NULL;
xargc = 3;
}
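	/*
	 * If the URL carried a user name, temporarily turn off auto-login so
	 * that setpeer() does not log in on its own; ftp_login() below is then
	 * called explicitly with the credentials taken from the URL.
	 */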
oautologin = autologin;
if (user != NULL)
autologin = 0;
setpeer(xargc, xargv);
autologin = oautologin;
if ((connected == 0) || ((connected == 1)
&& !ftp_login(host, user, pass))) {
warnx("Can't connect or login to host `%s'", host);
goto cleanup_fetch_ftp;
}
switch (type) {
case TYPE_A:
setascii(0, NULL);
break;
case TYPE_I:
setbinary(0, NULL);
break;
default:
errx(1, "fetch_ftp: unknown transfer type %d", type);
}
/*
* Change directories, if necessary.
*
* Note: don't use EMPTYSTRING(dir) below, because
* dir=="" means something different from dir==NULL.
*/
if (dir != NULL && !dirhasglob) {
char *nextpart;
/*
* If we are dealing with a classic `[user@]host:[path]'
* (urltype is CLASSIC_URL_T) then we have a raw directory
* name (not encoded in any way) and we can change
* directories in one step.
*
* If we are dealing with an `ftp://host/path' URL
* (urltype is FTP_URL_T), then RFC 1738 says we need to
* send a separate CWD command for each unescaped "/"
* in the path, and we have to interpret %hex escaping
* *after* we find the slashes. It's possible to get
* empty components here, (from multiple adjacent
* slashes in the path) and RFC 1738 says that we should
* still do `CWD ' (with a null argument) in such cases.
*
* Many ftp servers don't support `CWD ', so if there's an
* error performing that command, bail out with a descriptive
* message.
*
* Examples:
*
* host: dir="", urltype=CLASSIC_URL_T
* logged in (to default directory)
* host:file dir=NULL, urltype=CLASSIC_URL_T
* "RETR file"
* host:dir/ dir="dir", urltype=CLASSIC_URL_T
* "CWD dir", logged in
* ftp://host/ dir="", urltype=FTP_URL_T
* logged in (to default directory)
* ftp://host/dir/ dir="dir", urltype=FTP_URL_T
* "CWD dir", logged in
* ftp://host/file dir=NULL, urltype=FTP_URL_T
* "RETR file"
* ftp://host//file dir="", urltype=FTP_URL_T
* "CWD ", "RETR file"
* host:/file dir="/", urltype=CLASSIC_URL_T
* "CWD /", "RETR file"
* ftp://host///file dir="/", urltype=FTP_URL_T
* "CWD ", "CWD ", "RETR file"
* ftp://host/%2F/file dir="%2F", urltype=FTP_URL_T
* "CWD /", "RETR file"
* ftp://host/foo/file dir="foo", urltype=FTP_URL_T
* "CWD foo", "RETR file"
* ftp://host/foo/bar/file dir="foo/bar"
* "CWD foo", "CWD bar", "RETR file"
* ftp://host//foo/bar/file dir="/foo/bar"
* "CWD ", "CWD foo", "CWD bar", "RETR file"
* ftp://host/foo//bar/file dir="foo//bar"
* "CWD foo", "CWD ", "CWD bar", "RETR file"
* ftp://host/%2F/foo/bar/file dir="%2F/foo/bar"
* "CWD /", "CWD foo", "CWD bar", "RETR file"
* ftp://host/%2Ffoo/bar/file dir="%2Ffoo/bar"
* "CWD /foo", "CWD bar", "RETR file"
* ftp://host/%2Ffoo%2Fbar/file dir="%2Ffoo%2Fbar"
* "CWD /foo/bar", "RETR file"
* ftp://host/%2Ffoo%2Fbar%2Ffile dir=NULL
* "RETR /foo/bar/file"
*
* Note that we don't need `dir' after this point.
*/
do {
if (urltype == FTP_URL_T) {
nextpart = strchr(dir, '/');
if (nextpart) {
*nextpart = '\0';
nextpart++;
}
url_decode(dir);
} else
nextpart = NULL;
if (debug)
fprintf(ttyout, "dir `%s', nextpart `%s'\n",
dir ? dir : "<null>",
nextpart ? nextpart : "<null>");
if (urltype == FTP_URL_T || *dir != '\0') {
xargv[0] = "cd";
xargv[1] = dir;
xargv[2] = NULL;
dirchange = 0;
cd(2, xargv);
if (! dirchange) {
if (*dir == '\0' && code == 500)
fprintf(stderr,
"\n"
"ftp: The `CWD ' command (without a directory), which is required by\n"
" RFC 1738 to support the empty directory in the URL pathname (`//'),\n"
" conflicts with the server's conformance to RFC 959.\n"
" Try the same URL without the `//' in the URL pathname.\n"
"\n");
goto cleanup_fetch_ftp;
}
}
dir = nextpart;
} while (dir != NULL);
}
if (EMPTYSTRING(file)) {
rval = -1;
goto cleanup_fetch_ftp;
}
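	/*
	 * If the directory part contained glob characters, the "cd" step was
	 * skipped above; re-join dir and file (e.g. dir "NetBSD-1.?" and file
	 * "MD5" give "NetBSD-1.?/MD5", names purely illustrative) so that
	 * mget below expands the pattern on the remote side.
	 */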
if (dirhasglob) {
(void)strlcpy(rempath, dir, sizeof(rempath));
(void)strlcat(rempath, "/", sizeof(rempath));
(void)strlcat(rempath, file, sizeof(rempath));
file = rempath;
}
/* Fetch the file(s). */
xargc = 2;
xargv[0] = "get";
xargv[1] = file;
xargv[2] = NULL;
if (dirhasglob || filehasglob) {
int ointeractive;
ointeractive = interactive;
interactive = 0;
xargv[0] = "mget";
mget(xargc, xargv);
interactive = ointeractive;
} else {
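		/*
		 * Single file: unless an output name was already requested
		 * (outfile), save under the last component of the remote path,
		 * e.g. a remote "pub/README" is written locally as "README".
		 */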
if (outfile == NULL) {
cp = strrchr(file, '/'); /* find savefile */
if (cp != NULL)
outfile = cp + 1;
else
outfile = file;
}
xargv[2] = (char *)outfile;
xargv[3] = NULL;
xargc++;
if (restartautofetch)
reget(xargc, xargv);
else
get(xargc, xargv);
}
if ((code / 100) == COMPLETE)
rval = 0;
cleanup_fetch_ftp:
FREEPTR(host);
FREEPTR(path);
FREEPTR(user);
FREEPTR(pass);
return (rval);
}
/*
* Retrieve the given file to outfile.
 * Supports arguments of the form:
 *	"host:path", "ftp://host/path"	if $ftp_proxy is set, call
 *					fetch_url(), else call fetch_ftp()
 *	"http://host/path"		call fetch_url() to use HTTP
 *	"file:///path"			call fetch_url() to copy
 *	"about:..."			print a message
 *
* Returns 1 on failure, 0 on completed xfer, -1 if ftp connection
 * is still open (e.g., an ftp xfer with a trailing /)
*/
static int
go_fetch(const char *url)
{
char *proxy;
/*
* Check for about:*
*/
if (strncasecmp(url, ABOUT_URL, sizeof(ABOUT_URL) - 1) == 0) {
		url += sizeof(ABOUT_URL) - 1;
if (strcasecmp(url, "ftp") == 0) {
fputs(
"This version of ftp has been enhanced by Luke Mewburn <lukem@netbsd.org>\n"
"for the NetBSD project. Execute `man ftp' for more details.\n", ttyout);
} else if (strcasecmp(url, "lukem") == 0) {
fputs(
"Luke Mewburn is the author of most of the enhancements in this ftp client.\n"
"Please email feedback to <lukem@netbsd.org>.\n", ttyout);
} else if (strcasecmp(url, "netbsd") == 0) {
fputs(
"NetBSD is a freely available and redistributable UNIX-like operating system.\n"
"For more information, see http://www.netbsd.org/index.html\n", ttyout);
} else if (strcasecmp(url, "version") == 0) {
fprintf(ttyout, "Version: %s %s\n",
FTP_PRODUCT, FTP_VERSION);
} else {
fprintf(ttyout, "`%s' is an interesting topic.\n", url);
}
fputs("\n", ttyout);
return (0);
}
/*
* Check for file:// and http:// URLs.
*/
if (strncasecmp(url, HTTP_URL, sizeof(HTTP_URL) - 1) == 0 ||
strncasecmp(url, FILE_URL, sizeof(FILE_URL) - 1) == 0)
return (fetch_url(url, NULL, NULL, NULL));
/*
* Try FTP URL-style and host:file arguments next.
 * If $ftp_proxy is set and the argument is an FTP URL, use fetch_url().
 * Otherwise, use fetch_ftp().
*/
proxy = getoptionvalue("ftp_proxy");
if (!EMPTYSTRING(proxy) &&
strncasecmp(url, FTP_URL, sizeof(FTP_URL) - 1) == 0)
return (fetch_url(url, NULL, NULL, NULL));
return (fetch_ftp(url));
}
/*
* Retrieve multiple files from the command line,
* calling go_fetch() for each file.
*
 * If an ftp path has a trailing "/", the path is cd-ed into, the
 * connection remains open, and the function returns -1 (to indicate
 * that the connection is alive).
 * If an error occurs, the return value is the offset+1 in argv[] of
 * the file that caused the problem (i.e., a failure on argv[x]
 * returns x+1).
 * Otherwise, 0 is returned if all files were retrieved successfully.
*/
int
auto_fetch(int argc, char *argv[])
{
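	/*
	 * argpos is volatile because it is modified between sigsetjmp() and a
	 * possible siglongjmp() back to `toplevel'; without volatile its value
	 * would be indeterminate after the longjmp (per the C setjmp rules).
	 */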
volatile int argpos;
int rval;
argpos = 0;
if (sigsetjmp(toplevel, 1)) {
if (connected)
disconnect(0, NULL);
return (argpos + 1);
}
(void)xsignal(SIGINT, intr);
(void)xsignal(SIGPIPE, lostpeer);
/*
 * Loop through as long as there are files to fetch.
*/
for (rval = 0; (rval == 0) && (argpos < argc); argpos++) {
if (strchr(argv[argpos], ':') == NULL)
break;
redirect_loop = 0;
if (!anonftp)
anonftp = 2; /* Handle "automatic" transfers. */
rval = go_fetch(argv[argpos]);
if (outfile != NULL && strcmp(outfile, "-") != 0
&& outfile[0] != '|')
outfile = NULL;
if (rval > 0)
rval = argpos + 1;
}
if (connected && rval != -1)
disconnect(0, NULL);
return (rval);
}
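/*
 * Illustrative sketch (not compiled in): a caller typically maps the return
 * value of auto_fetch() onto its exit status, roughly as below.  The
 * argc/argv arithmetic shown here is hypothetical context, not taken from
 * this file.
 */
#if 0
	int rval;

	rval = auto_fetch(argc - optind, argv + optind);
	if (rval >= 0)		/* -1 means an ftp connection is still open */
		exit(rval);	/* 0: all files fetched; >0: index+1 of failure */
#endif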