/*
 * Copyright 2006-2019 Daniel Silverstone <dsilvers@digital-scurf.org>
 * Copyright 2010-2018 Vincent Sanders <vince@netsurf-browser.org>
 * Copyright 2007 James Bursa <bursa@users.sourceforge.net>
 *
 * This file is part of NetSurf.
 *
 * NetSurf is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * NetSurf is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * \file
 * Implementation of fetching of data from http and https schemes.
 *
 * This implementation uses libcurl's 'multi' interface.
 *
 * The CURL handles are cached in the curl_handle_ring.
 */

/* must come first to avoid winsock2.h vs windows.h ordering issues */
#include "utils/inet.h"

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <sys/stat.h>
#include <openssl/ssl.h>

#include <libwapcaplet/libwapcaplet.h>
#include <nsutils/time.h>

#include "utils/corestrings.h"
#include "utils/nsoption.h"
#include "utils/log.h"
#include "utils/messages.h"
#include "utils/utils.h"
#include "utils/ring.h"
#include "utils/useragent.h"
#include "utils/file.h"
#include "utils/string.h"
#include "netsurf/fetch.h"
#include "netsurf/misc.h"
#include "desktop/gui_internal.h"

#include "content/fetch.h"
#include "content/fetchers.h"
#include "content/fetchers/curl.h"
#include "content/urldb.h"

/** maximum number of progress notifications per second */
#define UPDATES_PER_SECOND 2

/* the ciphersuites we are willing to use */
#define CIPHER_LIST \
	/* disable everything */ \
	"-ALL:" \
	/* enable TLSv1.2 PFS suites */ \
	"EECDH+AES+TLSv1.2:EDH+AES+TLSv1.2:" \
	/* enable PFS AES GCM suites */ \
	"EECDH+AESGCM:EDH+AESGCM:" \
	/* Enable PFS AES CBC suites */ \
	"EECDH+AES:EDH+AES:" \
	/* Enable non-PFS fallback suite */ \
	"AES128-SHA:" \
	/* Remove any PFS suites using weak DSA key exchange */ \
	"-DSS"

/** SSL certificate info */
struct cert_info {
	X509 *cert;	/**< Pointer to certificate */
	long err;	/**< OpenSSL error code */
};

/** Information for a single fetch. */
struct curl_fetch_info {
	struct fetch *fetch_handle; /**< The fetch handle we're parented by. */
	CURL * curl_handle;	/**< cURL handle if being fetched, or 0. */
	bool had_headers;	/**< Headers have been processed. */
	bool abort;		/**< Abort requested. */
	bool stopped;		/**< Download stopped on purpose. */
	bool only_2xx;		/**< Only HTTP 2xx responses acceptable. */
	bool downgrade_tls;	/**< Downgrade to TLS <= 1.0 */
	nsurl *url;		/**< URL of this fetch. */
	lwc_string *host;	/**< The hostname of this fetch. */
	struct curl_slist *headers;	/**< List of request headers. */
	char *location;		/**< Response Location header, or 0. */
	unsigned long content_length;	/**< Response Content-Length, or 0. */
	char *cookie_string;	/**< Cookie string for this fetch */
	char *realm;		/**< HTTP Auth Realm */
	char *post_urlenc;	/**< URL encoded POST string, or 0. */
	long http_code;		/**< HTTP result code from cURL. */
	struct curl_httppost *post_multipart;	/**< Multipart post data, or 0. */
	uint64_t last_progress_update;	/**< Time of last progress update */
	int cert_depth;		/**< deepest certificate in use */
	struct cert_info cert_data[MAX_SSL_CERTS];	/**< HTTPS certificate data */
};

/** curl handle cache entry */
struct cache_handle {
	CURL *handle;		/**< The cached cURL handle */
	lwc_string *host;	/**< The host for which this handle is cached */

	struct cache_handle *r_prev;	/**< Previous cached handle in ring. */
	struct cache_handle *r_next;	/**< Next cached handle in ring. */
};

/** Global cURL multi handle. */
CURLM *fetch_curl_multi;

/** Curl handle with default options set; not used for transfers. */
static CURL *fetch_blank_curl;

/** Ring of cached handles */
static struct cache_handle *curl_handle_ring = 0;

/** Count of how many schemes the curl fetcher is handling */
static int curl_fetchers_registered = 0;

/** Flag for runtime detection of openssl usage */
static bool curl_with_openssl;

/** Error buffer for cURL. */
static char fetch_error_buffer[CURL_ERROR_SIZE];

/** Proxy authentication details. */
static char fetch_proxy_userpwd[100];

/** Interlock to prevent initiation during callbacks */
static bool inside_curl = false;

/* OpenSSL 1.0.x to 1.1.0 certificate reference counting changed
 * LibreSSL declares its OpenSSL version as 2.1 but only supports the old way
 */
#if (defined(LIBRESSL_VERSION_NUMBER) || (OPENSSL_VERSION_NUMBER < 0x1010000fL))
static int ns_X509_up_ref(X509 *cert)
{
	cert->references++;
	return 1;
}

static void ns_X509_free(X509 *cert)
{
	cert->references--;
	if (cert->references == 0) {
		X509_free(cert);
	}
}
#else
#define ns_X509_up_ref X509_up_ref
#define ns_X509_free X509_free
#endif


/**
 * Initialise a cURL fetcher.
 */
static bool fetch_curl_initialise(lwc_string *scheme)
{
	NSLOG(netsurf, INFO, "Initialise cURL fetcher for %s",
	      lwc_string_data(scheme));
	curl_fetchers_registered++;
	return true; /* Always succeeds */
}


/**
 * Finalise a cURL fetcher.
 *
 * \param scheme The scheme to finalise.
 */
static void fetch_curl_finalise(lwc_string *scheme)
{
	struct cache_handle *h;

	curl_fetchers_registered--;
	NSLOG(netsurf, INFO, "Finalise cURL fetcher %s",
	      lwc_string_data(scheme));
	if (curl_fetchers_registered == 0) {
		CURLMcode codem;
		/* All the fetchers have been finalised. */
		NSLOG(netsurf, INFO,
		      "All cURL fetchers finalised, closing down cURL");

		curl_easy_cleanup(fetch_blank_curl);

		codem = curl_multi_cleanup(fetch_curl_multi);
		if (codem != CURLM_OK)
			NSLOG(netsurf, INFO,
			      "curl_multi_cleanup failed: ignoring");

		curl_global_cleanup();
	}

	/* Free anything remaining in the cached curl handle ring */
	while (curl_handle_ring != NULL) {
		h = curl_handle_ring;
		RING_REMOVE(curl_handle_ring, h);
		lwc_string_unref(h->host);
		curl_easy_cleanup(h->handle);
		free(h);
	}
}


/**
 * Check if this fetcher can fetch a url.
 *
 * \param url The url to check.
 * \return true if the fetcher supports the url else false.
 */
static bool fetch_curl_can_fetch(const nsurl *url)
{
	return nsurl_has_component(url, NSURL_HOST);
}


/**
 * Convert a list of struct ::fetch_multipart_data to a list of
 * struct curl_httppost for libcurl.
 */
static struct curl_httppost *
fetch_curl_post_convert(const struct fetch_multipart_data *control)
{
	struct curl_httppost *post = 0, *last = 0;
	CURLFORMcode code;
	nserror ret;

	for (; control; control = control->next) {
		if (control->file) {
			char *leafname = NULL;
			ret = guit->file->basename(control->value, &leafname, NULL);
			if (ret != NSERROR_OK) {
				continue;
			}

			/* We have to special case filenames of "", so curl
			 * a) actually attempts the fetch and
			 * b) doesn't attempt to open the file ""
			 */
			if (control->value[0] == '\0') {
				/* dummy buffer - needs to be static so
				 * pointer's still valid when we go out
				 * of scope (not that libcurl should be
				 * attempting to access it, of course).
				 */
				static char buf;

				code = curl_formadd(&post, &last,
					CURLFORM_COPYNAME, control->name,
					CURLFORM_BUFFER, control->value,
					/* needed, as basename("") == "." */
					CURLFORM_FILENAME, "",
					CURLFORM_BUFFERPTR, &buf,
					CURLFORM_BUFFERLENGTH, 0,
					CURLFORM_CONTENTTYPE,
						"application/octet-stream",
					CURLFORM_END);
				if (code != CURL_FORMADD_OK)
					NSLOG(netsurf, INFO,
					      "curl_formadd: %d (%s)", code,
					      control->name);
			} else {
				char *mimetype = guit->fetch->mimetype(control->value);
				code = curl_formadd(&post, &last,
					CURLFORM_COPYNAME, control->name,
					CURLFORM_FILE, control->rawfile,
					CURLFORM_FILENAME, leafname,
					CURLFORM_CONTENTTYPE,
					(mimetype != 0 ? mimetype : "text/plain"),
					CURLFORM_END);
				if (code != CURL_FORMADD_OK)
					NSLOG(netsurf, INFO,
					      "curl_formadd: %d (%s=%s)",
					      code,
					      control->name,
					      control->value);
				free(mimetype);
			}
			free(leafname);
		}
		else {
			code = curl_formadd(&post, &last,
					CURLFORM_COPYNAME, control->name,
					CURLFORM_COPYCONTENTS, control->value,
					CURLFORM_END);
			if (code != CURL_FORMADD_OK)
				NSLOG(netsurf, INFO,
				      "curl_formadd: %d (%s=%s)", code,
				      control->name, control->value);
		}
	}

	return post;
}


/**
 * Start fetching data for the given URL.
 *
 * The function returns immediately. The fetch may be queued for later
 * processing.
 *
 * A pointer to an opaque struct curl_fetch_info is returned, which can be
 * passed to fetch_abort() to abort the fetch at any time. Returns 0 if memory
 * is exhausted (or some other fatal error occurred).
 *
 * The caller must supply a callback function which is called when anything
 * interesting happens. The callback function is first called with msg
 * FETCH_HEADER, with the header in data, then one or more times
 * with FETCH_DATA with some data for the url, and finally with
 * FETCH_FINISHED. Alternatively, FETCH_ERROR indicates an error occurred:
 * data contains an error message. FETCH_REDIRECT may replace the FETCH_HEADER,
 * FETCH_DATA, FETCH_FINISHED sequence if the server sends a replacement URL.
 *
 * Some private data can be passed as the last parameter to fetch_start, and
 * callbacks will contain this.
 */
static void *
fetch_curl_setup(struct fetch *parent_fetch,
		 nsurl *url,
		 bool only_2xx,
		 bool downgrade_tls,
		 const char *post_urlenc,
		 const struct fetch_multipart_data *post_multipart,
		 const char **headers)
{
	struct curl_fetch_info *fetch;
	struct curl_slist *slist;
	int i;

	fetch = malloc(sizeof (*fetch));
	if (fetch == NULL)
		return 0;

	fetch->fetch_handle = parent_fetch;

	NSLOG(netsurf, INFO, "fetch %p, url '%s'", fetch, nsurl_access(url));

	/* construct a new fetch structure */
	fetch->curl_handle = NULL;
	fetch->had_headers = false;
	fetch->abort = false;
	fetch->stopped = false;
	fetch->only_2xx = only_2xx;
	fetch->downgrade_tls = downgrade_tls;
	fetch->headers = NULL;
	fetch->url = nsurl_ref(url);
	fetch->host = nsurl_get_component(url, NSURL_HOST);
	fetch->location = NULL;
	fetch->content_length = 0;
	fetch->http_code = 0;
	fetch->cookie_string = NULL;
	fetch->realm = NULL;
	fetch->post_urlenc = NULL;
	fetch->post_multipart = NULL;
	if (post_urlenc) {
		fetch->post_urlenc = strdup(post_urlenc);
	} else if (post_multipart) {
		fetch->post_multipart = fetch_curl_post_convert(post_multipart);
	}
	fetch->last_progress_update = 0;

	/* TLS defaults */
	memset(fetch->cert_data, 0, sizeof(fetch->cert_data));
	fetch->cert_depth = -1;

	if ((fetch->host == NULL) ||
	    (post_multipart != NULL && fetch->post_multipart == NULL) ||
	    (post_urlenc != NULL && fetch->post_urlenc == NULL)) {
		goto failed;
	}
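
/* Convenience helper: append a value to the fetch's request header
 * list, bailing out through the shared failed: cleanup path if the
 * allocation fails.
 */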
#define APPEND(list, value) \
	slist = curl_slist_append(list, value);	\
	if (slist == NULL)			\
		goto failed;			\
	list = slist;

	/* remove curl default headers */
	APPEND(fetch->headers, "Pragma:");

	/* when doing a POST libcurl sends "Expect: 100-continue" by default
	 * which fails with lighttpd, so disable it (see bug 1429054) */
	APPEND(fetch->headers, "Expect:");

	if ((nsoption_charp(accept_language) != NULL) &&
	    (nsoption_charp(accept_language)[0] != '\0')) {
		char s[80];
		snprintf(s, sizeof s, "Accept-Language: %s, *;q=0.1",
			 nsoption_charp(accept_language));
		s[sizeof s - 1] = 0;
		APPEND(fetch->headers, s);
	}

	if (nsoption_charp(accept_charset) != NULL &&
	    nsoption_charp(accept_charset)[0] != '\0') {
		char s[80];
		snprintf(s, sizeof s, "Accept-Charset: %s, *;q=0.1",
			 nsoption_charp(accept_charset));
		s[sizeof s - 1] = 0;
		APPEND(fetch->headers, s);
	}

	if (nsoption_bool(do_not_track) == true) {
		APPEND(fetch->headers, "DNT: 1");
	}

	/* And add any headers specified by the caller */
	for (i = 0; headers[i] != NULL; i++) {
		APPEND(fetch->headers, headers[i]);
	}

	return fetch;

#undef APPEND

failed:
	if (fetch->host != NULL)
		lwc_string_unref(fetch->host);

	nsurl_unref(fetch->url);
	free(fetch->post_urlenc);
	if (fetch->post_multipart)
		curl_formfree(fetch->post_multipart);
	curl_slist_free_all(fetch->headers);
	free(fetch);
	return NULL;
}


/**
 * Report the certificate information in the fetch to the users
 */
static void
fetch_curl_report_certs_upstream(struct curl_fetch_info *f)
{
	int depth;
	BIO *mem;
	BUF_MEM *buf;
	const ASN1_INTEGER *asn1_num;
	BIGNUM *bignum;
	struct ssl_cert_info ssl_certs[MAX_SSL_CERTS];
	fetch_msg msg;
	struct cert_info *certs = f->cert_data;
	memset(ssl_certs, 0, sizeof(ssl_certs));
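
	/* Walk the certificate chain saved during verification and
	 * extract the details of each certificate into the
	 * ssl_cert_info array. */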
	for (depth = 0; depth <= f->cert_depth; depth++) {
		assert(certs[depth].cert != NULL);

		/* get certificate version */
		ssl_certs[depth].version = X509_get_version(certs[depth].cert);

		/* not before date */
		mem = BIO_new(BIO_s_mem());
		ASN1_TIME_print(mem, X509_get_notBefore(certs[depth].cert));
		BIO_get_mem_ptr(mem, &buf);
		(void) BIO_set_close(mem, BIO_NOCLOSE);
		BIO_free(mem);
		memcpy(ssl_certs[depth].not_before,
		       buf->data,
		       min(sizeof(ssl_certs[depth].not_before) - 1,
			   (unsigned)buf->length));
		ssl_certs[depth].not_before[min(sizeof(ssl_certs[depth].not_before) - 1,
						(unsigned)buf->length)] = 0;
		BUF_MEM_free(buf);

		/* not after date */
		mem = BIO_new(BIO_s_mem());
		ASN1_TIME_print(mem,
				X509_get_notAfter(certs[depth].cert));
		BIO_get_mem_ptr(mem, &buf);
		(void) BIO_set_close(mem, BIO_NOCLOSE);
		BIO_free(mem);
		memcpy(ssl_certs[depth].not_after,
		       buf->data,
		       min(sizeof(ssl_certs[depth].not_after) - 1,
			   (unsigned)buf->length));
		ssl_certs[depth].not_after[min(sizeof(ssl_certs[depth].not_after) - 1,
					       (unsigned)buf->length)] = 0;
		BUF_MEM_free(buf);

		/* signature type */
		ssl_certs[depth].sig_type =
			X509_get_signature_type(certs[depth].cert);

		/* serial number */
		asn1_num = X509_get_serialNumber(certs[depth].cert);
		if (asn1_num != NULL) {
			bignum = ASN1_INTEGER_to_BN(asn1_num, NULL);
			if (bignum != NULL) {
				char *tmp = BN_bn2hex(bignum);
				if (tmp != NULL) {
					strncpy(ssl_certs[depth].serialnum,
						tmp,
						sizeof(ssl_certs[depth].serialnum));
					ssl_certs[depth].serialnum[sizeof(ssl_certs[depth].serialnum)-1] = '\0';
					OPENSSL_free(tmp);
				}
				BN_free(bignum);
				bignum = NULL;
			}
		}

		/* issuer name */
		mem = BIO_new(BIO_s_mem());
		X509_NAME_print_ex(mem,
				   X509_get_issuer_name(certs[depth].cert),
				   0, XN_FLAG_SEP_CPLUS_SPC |
				   XN_FLAG_DN_REV | XN_FLAG_FN_NONE);
		BIO_get_mem_ptr(mem, &buf);
		(void) BIO_set_close(mem, BIO_NOCLOSE);
		BIO_free(mem);
		memcpy(ssl_certs[depth].issuer,
		       buf->data,
		       min(sizeof(ssl_certs[depth].issuer) - 1,
			   (unsigned) buf->length));
		ssl_certs[depth].issuer[min(sizeof(ssl_certs[depth].issuer) - 1,
					    (unsigned) buf->length)] = 0;
		BUF_MEM_free(buf);

		/* subject */
		mem = BIO_new(BIO_s_mem());
		X509_NAME_print_ex(mem,
				   X509_get_subject_name(certs[depth].cert),
				   0,
				   XN_FLAG_SEP_CPLUS_SPC |
				   XN_FLAG_DN_REV |
				   XN_FLAG_FN_NONE);
		BIO_get_mem_ptr(mem, &buf);
		(void) BIO_set_close(mem, BIO_NOCLOSE);
		BIO_free(mem);
		memcpy(ssl_certs[depth].subject,
		       buf->data,
		       min(sizeof(ssl_certs[depth].subject) - 1,
			   (unsigned)buf->length));
		ssl_certs[depth].subject[min(sizeof(ssl_certs[depth].subject) - 1,
					     (unsigned) buf->length)] = 0;
		BUF_MEM_free(buf);

		/* type of certificate */
		ssl_certs[depth].cert_type =
			X509_certificate_type(certs[depth].cert,
					      X509_get_pubkey(certs[depth].cert));
	}

	msg.type = FETCH_CERTS;
	msg.data.certs.certs = ssl_certs;
	msg.data.certs.num_certs = depth;

	fetch_send_callback(&msg, f->fetch_handle);
}


/**
 * OpenSSL Certificate verification callback
 *
 * Called for each certificate in a chain being verified. OpenSSL
 * calls this in deepest first order from the certificate authority to
 * the peer certificate at position 0.
 *
 * Each certificate is stored in the fetch context the first time it
 * is presented. If an error is encountered it is only returned for
 * the peer certificate at position 0 allowing the enumeration of the
 * entire chain not stopping early at the depth of the erroring
 * certificate.
 *
 * \param verify_ok 0 if the caller has already determined the chain
 *                   has errors else 1
 * \param x509_ctx certificate context being verified
 * \return 1 to indicate verification should continue and 0 to indicate
 *          verification should stop.
 */
static int
fetch_curl_verify_callback(int verify_ok, X509_STORE_CTX *x509_ctx)
{
	int depth;
	struct curl_fetch_info *fetch;

	depth = X509_STORE_CTX_get_error_depth(x509_ctx);
	fetch = X509_STORE_CTX_get_app_data(x509_ctx);

	/* record the max depth */
	if (depth > fetch->cert_depth) {
		fetch->cert_depth = depth;
	}

	/* certificate chain is excessively deep so fail verification */
	if (depth >= MAX_SSL_CERTS) {
		X509_STORE_CTX_set_error(x509_ctx,
					 X509_V_ERR_CERT_CHAIN_TOO_LONG);
		return 0;
	}

	/* save the certificate by incrementing the reference count and
	 * keeping a pointer.
	 */
	if (!fetch->cert_data[depth].cert) {
		fetch->cert_data[depth].cert = X509_STORE_CTX_get_current_cert(x509_ctx);
		ns_X509_up_ref(fetch->cert_data[depth].cert);
		fetch->cert_data[depth].err = X509_STORE_CTX_get_error(x509_ctx);
	}

	/* allow certificate chain to be completed */
	if (depth > 0) {
		verify_ok = 1;
	} else {
		/* search for deeper certificates in the chain with errors */
		for (depth = fetch->cert_depth; depth > 0; depth--) {
			if (fetch->cert_data[depth].err != 0) {
				/* error in previous certificate so fail verification */
				verify_ok = 0;
				X509_STORE_CTX_set_error(x509_ctx, fetch->cert_data[depth].err);
			}
		}
	}

	return verify_ok;
}


/**
 * OpenSSL certificate chain verification callback
 *
 * Verifies certificate chain by calling standard implementation after
 * setting up context for the certificate callback.
 *
 * \param x509_ctx The certificate store to validate
 * \param parm The fetch context.
 * \return 1 to indicate verification success and 0 to indicate verification failure.
 */
static int fetch_curl_cert_verify_callback(X509_STORE_CTX *x509_ctx, void *parm)
{
	struct curl_fetch_info *f = (struct curl_fetch_info *) parm;
	int ok;

	/* Store fetch struct in context for verify callback */
	ok = X509_STORE_CTX_set_app_data(x509_ctx, parm);

	/* verify the certificate chain using standard call */
	if (ok) {
		ok = X509_verify_cert(x509_ctx);
	}

	fetch_curl_report_certs_upstream(f);

	return ok;
}


/**
 * cURL SSL setup callback
 *
 * \param curl_handle The curl handle to perform the ssl operation on.
 * \param _sslctx The ssl context.
 * \param parm The callback context.
 * \return A curl result code.
 */
static CURLcode
fetch_curl_sslctxfun(CURL *curl_handle, void *_sslctx, void *parm)
{
	struct curl_fetch_info *f = (struct curl_fetch_info *) parm;
	SSL_CTX *sslctx = _sslctx;
	long options = SSL_OP_ALL | SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3;

	/* set verify callback for each certificate in chain */
	SSL_CTX_set_verify(sslctx, SSL_VERIFY_PEER, fetch_curl_verify_callback);

	/* set callback used to verify certificate chain */
	SSL_CTX_set_cert_verify_callback(sslctx,
					 fetch_curl_cert_verify_callback,
					 parm);

	if (f->downgrade_tls) {
		/* Disable TLS 1.1/1.2 if the server can't cope with them */
#ifdef SSL_OP_NO_TLSv1_1
		options |= SSL_OP_NO_TLSv1_1;
#endif
#ifdef SSL_OP_NO_TLSv1_2
		options |= SSL_OP_NO_TLSv1_2;
#endif
#ifdef SSL_MODE_SEND_FALLBACK_SCSV
		/* Ensure server rejects the connection if downgraded too far */
		SSL_CTX_set_mode(sslctx, SSL_MODE_SEND_FALLBACK_SCSV);
#endif
		/* Disable TLS1.2 ciphersuites */
		SSL_CTX_set_cipher_list(sslctx, CIPHER_LIST ":-TLSv1.2");
	}
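
	/* Apply the accumulated option flags; SSLv2 and SSLv3 are always
	 * disabled, TLS 1.1/1.2 only when a downgrade was requested. */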
	SSL_CTX_set_options(sslctx, options);

#ifdef SSL_OP_NO_TICKET
	SSL_CTX_clear_options(sslctx, SSL_OP_NO_TICKET);
#endif

	return CURLE_OK;
}


/**
 * Set options specific for a fetch.
 *
 * \param f The fetch to set options on.
 * \return A curl result code.
 */
static CURLcode fetch_curl_set_options(struct curl_fetch_info *f)
{
	CURLcode code;
	const char *auth;
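
/* Set a single option on the fetch's curl easy handle, returning the
 * curl error code to our caller immediately if it fails. */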
#undef SETOPT
#define SETOPT(option, value) { \
	code = curl_easy_setopt(f->curl_handle, option, value);	\
	if (code != CURLE_OK)						\
		return code;						\
	}

	SETOPT(CURLOPT_URL, nsurl_access(f->url));
	SETOPT(CURLOPT_PRIVATE, f);
	SETOPT(CURLOPT_WRITEDATA, f);
	SETOPT(CURLOPT_WRITEHEADER, f);
	SETOPT(CURLOPT_PROGRESSDATA, f);
	SETOPT(CURLOPT_REFERER, fetch_get_referer_to_send(f->fetch_handle));
	SETOPT(CURLOPT_HTTPHEADER, f->headers);
	if (f->post_urlenc) {
		SETOPT(CURLOPT_HTTPPOST, NULL);
		SETOPT(CURLOPT_HTTPGET, 0L);
		SETOPT(CURLOPT_POSTFIELDS, f->post_urlenc);
	} else if (f->post_multipart) {
		SETOPT(CURLOPT_POSTFIELDS, NULL);
		SETOPT(CURLOPT_HTTPGET, 0L);
		SETOPT(CURLOPT_HTTPPOST, f->post_multipart);
	} else {
		SETOPT(CURLOPT_POSTFIELDS, NULL);
		SETOPT(CURLOPT_HTTPPOST, NULL);
		SETOPT(CURLOPT_HTTPGET, 1L);
	}
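
	/* Attach any cookies the URL database holds for this URL. */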
	f->cookie_string = urldb_get_cookie(f->url, true);
	if (f->cookie_string) {
		SETOPT(CURLOPT_COOKIE, f->cookie_string);
	} else {
		SETOPT(CURLOPT_COOKIE, NULL);
	}

	if ((auth = urldb_get_auth_details(f->url, NULL)) != NULL) {
		SETOPT(CURLOPT_HTTPAUTH, CURLAUTH_ANY);
		SETOPT(CURLOPT_USERPWD, auth);
	} else {
		SETOPT(CURLOPT_USERPWD, NULL);
	}

	/* set up proxy options */
	if (nsoption_bool(http_proxy) &&
	    (nsoption_charp(http_proxy_host) != NULL) &&
	    (strncmp(nsurl_access(f->url), "file:", 5) != 0)) {
		SETOPT(CURLOPT_PROXY, nsoption_charp(http_proxy_host));
		SETOPT(CURLOPT_PROXYPORT, (long) nsoption_int(http_proxy_port));

#if LIBCURL_VERSION_NUM >= 0x071304
		/* Added in 7.19.4 */
		/* setup the omission list */
		SETOPT(CURLOPT_NOPROXY, nsoption_charp(http_proxy_noproxy));
#endif

		if (nsoption_int(http_proxy_auth) != OPTION_HTTP_PROXY_AUTH_NONE) {
			SETOPT(CURLOPT_PROXYAUTH,
			       nsoption_int(http_proxy_auth) ==
					OPTION_HTTP_PROXY_AUTH_BASIC ?
					(long) CURLAUTH_BASIC :
					(long) CURLAUTH_NTLM);
			snprintf(fetch_proxy_userpwd,
				 sizeof fetch_proxy_userpwd,
				 "%s:%s",
				 nsoption_charp(http_proxy_auth_user),
				 nsoption_charp(http_proxy_auth_pass));
			SETOPT(CURLOPT_PROXYUSERPWD, fetch_proxy_userpwd);
		}
	} else {
		SETOPT(CURLOPT_PROXY, NULL);
	}

	/* Force-enable SSL session ID caching, as some distros are odd. */
	SETOPT(CURLOPT_SSL_SESSIONID_CACHE, 1);

	if (urldb_get_cert_permissions(f->url)) {
		/* Disable certificate verification */
		SETOPT(CURLOPT_SSL_VERIFYPEER, 0L);
		SETOPT(CURLOPT_SSL_VERIFYHOST, 0L);
		if (curl_with_openssl) {
			SETOPT(CURLOPT_SSL_CTX_FUNCTION, NULL);
			SETOPT(CURLOPT_SSL_CTX_DATA, NULL);
		}
	} else {
		/* do verification */
		SETOPT(CURLOPT_SSL_VERIFYPEER, 1L);
		SETOPT(CURLOPT_SSL_VERIFYHOST, 2L);
		if (curl_with_openssl) {
			SETOPT(CURLOPT_SSL_CTX_FUNCTION, fetch_curl_sslctxfun);
			SETOPT(CURLOPT_SSL_CTX_DATA, f);
		}
	}

	return CURLE_OK;
}


/**
 * Initiate a fetch from the queue.
 *
 * \param fetch fetch to use to fetch content.
 * \param handle CURL handle to be used to fetch the content.
 * \return true if the fetch was successfully initiated else false.
 */
static bool
fetch_curl_initiate_fetch(struct curl_fetch_info *fetch, CURL *handle)
{
	CURLcode code;
	CURLMcode codem;

	fetch->curl_handle = handle;

	/* Initialise the handle */
	code = fetch_curl_set_options(fetch);
	if (code != CURLE_OK) {
		fetch->curl_handle = 0;
		/* The handle maybe went bad, eat it */
		NSLOG(netsurf, WARNING, "cURL handle maybe went bad, retry later");
		curl_easy_cleanup(handle);
		return false;
	}

	/* add to the global curl multi handle */
	codem = curl_multi_add_handle(fetch_curl_multi, fetch->curl_handle);
	assert(codem == CURLM_OK || codem == CURLM_CALL_MULTI_PERFORM);

	return true;
}


/**
 * Find a CURL handle to use to dispatch a job
 */
static CURL *fetch_curl_get_handle(lwc_string *host)
{
	struct cache_handle *h;
	CURL *ret;
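
	/* Reuse a handle previously cached for this host if one is
	 * available; otherwise clone the pre-configured template handle. */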
	RING_FINDBYLWCHOST(curl_handle_ring, h, host);
	if (h) {
		ret = h->handle;
		lwc_string_unref(h->host);
		RING_REMOVE(curl_handle_ring, h);
		free(h);
	} else {
		ret = curl_easy_duphandle(fetch_blank_curl);
	}
	return ret;
}


/**
 * Dispatch a single job
 */
static bool fetch_curl_start(void *vfetch)
{
	struct curl_fetch_info *fetch = (struct curl_fetch_info*)vfetch;
	if (inside_curl) {
		NSLOG(netsurf, DEBUG, "Deferring fetch because we're inside cURL");
		return false;
	}
	return fetch_curl_initiate_fetch(fetch,
					 fetch_curl_get_handle(fetch->host));
}


/**
 * Cache a CURL handle for the provided host (if wanted)
 */
static void fetch_curl_cache_handle(CURL *handle, lwc_string *host)
{
#if LIBCURL_VERSION_NUM >= 0x071e00
	/* 7.30.0 or later has its own connection caching; suppress ours */
	curl_easy_cleanup(handle);
	return;
#else
	struct cache_handle *h = 0;
	int c;
	RING_FINDBYLWCHOST(curl_handle_ring, h, host);
	if (h) {
		/* Already have a handle cached for this hostname */
		curl_easy_cleanup(handle);
		return;
	}
	/* We do not have a handle cached, first up determine if the cache is full */
	RING_GETSIZE(struct cache_handle, curl_handle_ring, c);
	if (c >= nsoption_int(max_cached_fetch_handles)) {
		/* Cache is full, so, we rotate the ring by one and
		 * replace the oldest handle with this one. We do this
		 * without freeing/allocating memory (except the
		 * hostname) and without removing the entry from the
		 * ring and then re-inserting it, in order to be as
		 * efficient as we can.
		 */
		if (curl_handle_ring != NULL) {
			h = curl_handle_ring;
			curl_handle_ring = h->r_next;
			curl_easy_cleanup(h->handle);
			h->handle = handle;
			lwc_string_unref(h->host);
			h->host = lwc_string_ref(host);
		} else {
			/* Actually, we don't want to cache any handles */
			curl_easy_cleanup(handle);
		}

		return;
	}
	/* The table isn't full yet, so make a shiny new handle to add to the ring */
	h = (struct cache_handle*)malloc(sizeof(struct cache_handle));
	h->handle = handle;
	h->host = lwc_string_ref(host);
	RING_INSERT(curl_handle_ring, h);
#endif
}


/**
 * Clean up the provided fetch object and free it.
 *
 * Will prod the queue afterwards to allow pending requests to be initiated.
 */
static void fetch_curl_stop(struct curl_fetch_info *f)
{
	CURLMcode codem;

	assert(f);
	NSLOG(netsurf, INFO, "fetch %p, url '%s'", f, nsurl_access(f->url));

	if (f->curl_handle) {
		/* remove from curl multi handle */
		codem = curl_multi_remove_handle(fetch_curl_multi,
						 f->curl_handle);
		assert(codem == CURLM_OK);
		/* Put this curl handle into the cache if wanted. */
		fetch_curl_cache_handle(f->curl_handle, f->host);
		f->curl_handle = 0;
	}

	fetch_remove_from_queues(f->fetch_handle);
}


/**
 * Abort a fetch.
 */
static void fetch_curl_abort(void *vf)
{
	struct curl_fetch_info *f = (struct curl_fetch_info *)vf;
	assert(f);
	NSLOG(netsurf, INFO, "fetch %p, url '%s'", f, nsurl_access(f->url));
	if (f->curl_handle) {
		if (inside_curl) {
			NSLOG(netsurf, DEBUG, "Deferring cleanup");
			f->abort = true;
		} else {
			NSLOG(netsurf, DEBUG, "Immediate abort");
			fetch_curl_stop(f);
			fetch_free(f->fetch_handle);
		}
	} else {
		fetch_remove_from_queues(f->fetch_handle);
		fetch_free(f->fetch_handle);
	}
}


/**
 * Free a fetch structure and associated resources.
 */
static void fetch_curl_free(void *vf)
{
	struct curl_fetch_info *f = (struct curl_fetch_info *)vf;
	int i;

	if (f->curl_handle) {
		curl_easy_cleanup(f->curl_handle);
	}
	nsurl_unref(f->url);
	lwc_string_unref(f->host);
	free(f->location);
	free(f->cookie_string);
	free(f->realm);
	if (f->headers) {
		curl_slist_free_all(f->headers);
	}
	free(f->post_urlenc);
	if (f->post_multipart) {
		curl_formfree(f->post_multipart);
	}

	for (i = 0; i < MAX_SSL_CERTS && f->cert_data[i].cert; i++) {
		ns_X509_free(f->cert_data[i].cert);
	}

	free(f);
}


/**
 * Find the status code and content type and inform the caller.
 *
 * Return true if the fetch is being aborted.
 */
static bool fetch_curl_process_headers(struct curl_fetch_info *f)
{
	long http_code;
	CURLcode code;
	fetch_msg msg;

	f->had_headers = true;

	if (!f->http_code) {
		code = curl_easy_getinfo(f->curl_handle, CURLINFO_HTTP_CODE,
					 &f->http_code);
		fetch_set_http_code(f->fetch_handle, f->http_code);
		assert(code == CURLE_OK);
	}
	http_code = f->http_code;
	NSLOG(netsurf, INFO, "HTTP status code %li", http_code);
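
	/* A 304 response to a GET is not an error; report that the
	 * cached copy is still valid. */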
	if (http_code == 304 && !f->post_urlenc && !f->post_multipart) {
		/* Not Modified && GET request */
		msg.type = FETCH_NOTMODIFIED;
		fetch_send_callback(&msg, f->fetch_handle);
		return true;
	}

	/* handle HTTP redirects (3xx response codes) */
	if (300 <= http_code && http_code < 400 && f->location != 0) {
		NSLOG(netsurf, INFO, "FETCH_REDIRECT, '%s'", f->location);
		msg.type = FETCH_REDIRECT;
		msg.data.redirect = f->location;
		fetch_send_callback(&msg, f->fetch_handle);
		return true;
	}

	/* handle HTTP 401 (Authentication errors) */
	if (http_code == 401) {
		msg.type = FETCH_AUTH;
		msg.data.auth.realm = f->realm;
		fetch_send_callback(&msg, f->fetch_handle);
		return true;
	}

	/* handle HTTP errors (non 2xx response codes) */
	if (f->only_2xx && strncmp(nsurl_access(f->url), "http", 4) == 0 &&
	    (http_code < 200 || 299 < http_code)) {
		msg.type = FETCH_ERROR;
		msg.data.error = messages_get("Not2xx");
		fetch_send_callback(&msg, f->fetch_handle);
		return true;
	}

	if (f->abort)
		return true;

	return false;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handle a completed fetch (CURLMSG_DONE from curl_multi_info_read()).
|
|
|
|
*
|
2014-11-09 15:50:30 +03:00
|
|
|
* \param curl_handle curl easy handle of fetch
|
|
|
|
* \param result The result code of the completed fetch.
|
2007-06-10 21:46:44 +04:00
|
|
|
*/
|
2014-10-25 20:10:08 +04:00
|
|
|
static void fetch_curl_done(CURL *curl_handle, CURLcode result)
|
2007-06-10 21:46:44 +04:00
|
|
|
{
|
|
|
|
bool finished = false;
|
|
|
|
bool error = false;
|
|
|
|
bool cert = false;
|
2009-05-14 14:13:20 +04:00
|
|
|
bool abort_fetch;
|
2007-06-10 21:46:44 +04:00
|
|
|
struct curl_fetch_info *f;
|
2009-01-15 19:00:16 +03:00
|
|
|
char **_hideous_hack = (char **) (void *) &f;
|
2007-06-10 21:46:44 +04:00
|
|
|
CURLcode code;
|
|
|
|
|
|
|
|
/* find the structure associated with this fetch */
|
2009-01-08 13:36:11 +03:00
|
|
|
/* For some reason, cURL thinks CURLINFO_PRIVATE should be a string?! */
|
|
|
|
code = curl_easy_getinfo(curl_handle, CURLINFO_PRIVATE, _hideous_hack);
|
2007-06-10 21:46:44 +04:00
|
|
|
assert(code == CURLE_OK);
|
|
|
|
|
2009-05-14 14:13:20 +04:00
|
|
|
abort_fetch = f->abort;
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "done %s", nsurl_access(f->url));
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2016-06-27 14:37:27 +03:00
|
|
|
if ((abort_fetch == false) &&
|
|
|
|
(result == CURLE_OK ||
|
|
|
|
((result == CURLE_WRITE_ERROR) && (f->stopped == false)))) {
|
2014-10-25 20:10:08 +04:00
|
|
|
/* fetch completed normally or the server fed us a junk gzip
|
|
|
|
* stream (usually in the form of garbage at the end of the
|
|
|
|
* stream). Curl will have fed us all but the last chunk of
|
|
|
|
* decoded data, which is sad as, if we'd received the last
|
2011-11-06 02:10:46 +04:00
|
|
|
* chunk, too, we'd be able to render the whole object.
|
|
|
|
* As is, we'll just have to accept that the end of the
|
|
|
|
* object will be truncated in this case and leave it to
|
2014-10-25 20:10:08 +04:00
|
|
|
* the content handlers to cope.
|
|
|
|
*/
|
2007-06-10 21:46:44 +04:00
|
|
|
if (f->stopped ||
|
2016-06-27 14:37:27 +03:00
|
|
|
(!f->had_headers && fetch_curl_process_headers(f))) {
|
2007-06-10 21:46:44 +04:00
|
|
|
; /* redirect with no body or similar */
|
2016-06-27 14:37:27 +03:00
|
|
|
} else {
|
2007-06-10 21:46:44 +04:00
|
|
|
finished = true;
|
2016-06-27 14:37:27 +03:00
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
} else if (result == CURLE_PARTIAL_FILE) {
|
|
|
|
/* CURLE_PARTIAL_FILE occurs if the received body of a
|
|
|
|
* response is smaller than that specified in the
|
2016-06-27 14:37:27 +03:00
|
|
|
* Content-Length header.
|
|
|
|
*/
|
2007-06-10 21:46:44 +04:00
|
|
|
if (!f->had_headers && fetch_curl_process_headers(f))
|
|
|
|
; /* redirect with partial body, or similar */
|
2009-12-18 02:55:02 +03:00
|
|
|
else {
|
2012-10-11 02:48:33 +04:00
|
|
|
finished = true;
|
2009-12-18 02:55:02 +03:00
|
|
|
}
|
2013-01-05 03:13:23 +04:00
|
|
|
} else if (result == CURLE_WRITE_ERROR && f->stopped) {
|
2007-06-10 21:46:44 +04:00
|
|
|
/* CURLE_WRITE_ERROR occurs when fetch_curl_data
|
2016-06-27 14:37:27 +03:00
|
|
|
* returns 0, which we use to abort intentionally
|
|
|
|
*/
|
2007-06-10 21:46:44 +04:00
|
|
|
;
|
2013-01-05 03:13:23 +04:00
|
|
|
} else if (result == CURLE_SSL_PEER_CERTIFICATE ||
|
2007-06-10 21:46:44 +04:00
|
|
|
result == CURLE_SSL_CACERT) {
|
2016-06-27 14:37:27 +03:00
|
|
|
/* CURLE_SSL_PEER_CERTIFICATE renamed to
|
|
|
|
* CURLE_PEER_FAILED_VERIFICATION
|
|
|
|
*/
|
2007-06-10 21:46:44 +04:00
|
|
|
cert = true;
|
2013-01-05 03:13:23 +04:00
|
|
|
} else {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "Unknown cURL response code %d", result);
|
2009-01-08 13:36:11 +03:00
|
|
|
error = true;
|
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2007-07-04 22:44:13 +04:00
|
|
|
fetch_curl_stop(f);
|
|
|
|
|
2016-06-27 14:37:27 +03:00
|
|
|
if (abort_fetch) {
|
2007-06-10 21:46:44 +04:00
|
|
|
; /* fetch was aborted: no callback */
|
2016-06-27 14:37:27 +03:00
|
|
|
} else if (finished) {
|
|
|
|
fetch_msg msg;
|
2011-11-09 01:51:42 +04:00
|
|
|
msg.type = FETCH_FINISHED;
|
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
|
|
|
} else if (cert) {
|
2016-06-27 14:37:27 +03:00
|
|
|
/* user needs to validate the certificate that has an issue */
|
2019-08-05 20:11:13 +03:00
|
|
|
fetch_msg msg;
|
|
|
|
msg.type = FETCH_CERT_ERR;
|
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
2011-11-09 01:51:42 +04:00
|
|
|
} else if (error) {
|
2016-06-27 14:37:27 +03:00
|
|
|
fetch_msg msg;
|
2015-11-11 00:51:54 +03:00
|
|
|
switch (result) {
|
|
|
|
case CURLE_SSL_CONNECT_ERROR:
|
|
|
|
msg.type = FETCH_SSL_ERR;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_OPERATION_TIMEDOUT:
|
|
|
|
msg.type = FETCH_TIMEDOUT;
|
|
|
|
msg.data.error = curl_easy_strerror(result);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2013-01-05 03:13:23 +04:00
|
|
|
msg.type = FETCH_ERROR;
|
2014-11-15 21:23:44 +03:00
|
|
|
msg.data.error = curl_easy_strerror(result);
|
2013-01-05 03:13:23 +04:00
|
|
|
}
|
2014-10-25 20:10:08 +04:00
|
|
|
|
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
fetch_free(f->fetch_handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Do some work on current fetches.
|
|
|
|
*
|
|
|
|
* Must be called regularly to make progress on fetches.
*
* \param scheme_ignored The scheme to poll (ignored; all curl fetches
*        are driven from the one shared multi handle).
|
|
|
|
*/
|
|
|
|
static void fetch_curl_poll(lwc_string *scheme_ignored)
|
|
|
|
{
|
|
|
|
int running, queue;
|
|
|
|
CURLMcode codem;
|
|
|
|
CURLMsg *curl_msg;
|
|
|
|
|
2015-11-11 13:38:17 +03:00
|
|
|
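/* when curl debug output is not suppressed, dump the file
 * descriptor sets curl is waiting on to the log at DEEPDEBUG level
 */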
if (nsoption_bool(suppress_curl_debug) == false) {
|
|
|
|
fd_set read_fd_set, write_fd_set, exc_fd_set;
|
|
|
|
int max_fd = -1;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
FD_ZERO(&read_fd_set);
|
|
|
|
FD_ZERO(&write_fd_set);
|
|
|
|
FD_ZERO(&exc_fd_set);
|
|
|
|
|
|
|
|
codem = curl_multi_fdset(fetch_curl_multi,
|
|
|
|
&read_fd_set, &write_fd_set,
|
|
|
|
&exc_fd_set, &max_fd);
|
|
|
|
assert(codem == CURLM_OK);
|
|
|
|
|
2018-08-09 18:43:37 +03:00
|
|
|
NSLOG(netsurf, DEEPDEBUG,
|
2017-09-06 20:28:12 +03:00
|
|
|
"Curl file descriptor states (maxfd=%i):", max_fd);
|
2015-11-11 13:38:17 +03:00
|
|
|
for (i = 0; i <= max_fd; i++) {
|
|
|
|
bool read = false;
|
|
|
|
bool write = false;
|
|
|
|
bool error = false;
|
|
|
|
|
|
|
|
if (FD_ISSET(i, &read_fd_set)) {
|
|
|
|
read = true;
|
|
|
|
}
|
|
|
|
if (FD_ISSET(i, &write_fd_set)) {
|
|
|
|
write = true;
|
|
|
|
}
|
|
|
|
if (FD_ISSET(i, &exc_fd_set)) {
|
|
|
|
error = true;
|
|
|
|
}
|
|
|
|
if (read || write || error) {
|
2018-08-09 16:58:49 +03:00
|
|
|
NSLOG(netsurf, DEEPDEBUG, " fd %i: %s %s %s", i,
|
2017-09-06 20:28:12 +03:00
|
|
|
read ? "read" : " ",
|
|
|
|
write ? "write" : " ",
|
|
|
|
error ? "error" : " ");
|
2015-11-11 13:38:17 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
/* do any possible work on the current fetches */
|
2019-08-02 17:02:45 +03:00
|
|
|
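/* note that we are inside the curl dispatch; cleared again once
 * curl_multi_perform() and the completion messages below have been
 * processed
 */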
inside_curl = true;
|
2014-10-25 20:10:08 +04:00
|
|
|
do {
|
|
|
|
codem = curl_multi_perform(fetch_curl_multi, &running);
|
|
|
|
if (codem != CURLM_OK && codem != CURLM_CALL_MULTI_PERFORM) {
|
2018-08-09 18:43:37 +03:00
|
|
|
NSLOG(netsurf, WARNING, "curl_multi_perform: %i %s",
|
2017-09-06 20:28:12 +03:00
|
|
|
codem, curl_multi_strerror(codem));
|
2016-04-26 14:50:16 +03:00
|
|
|
guit->misc->warning("MiscError", curl_multi_strerror(codem));
|
2014-10-25 20:10:08 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
} while (codem == CURLM_CALL_MULTI_PERFORM);
|
|
|
|
|
|
|
|
/* process curl results */
|
|
|
|
curl_msg = curl_multi_info_read(fetch_curl_multi, &queue);
|
|
|
|
while (curl_msg) {
|
|
|
|
switch (curl_msg->msg) {
|
|
|
|
case CURLMSG_DONE:
|
|
|
|
fetch_curl_done(curl_msg->easy_handle,
|
|
|
|
curl_msg->data.result);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
curl_msg = curl_multi_info_read(fetch_curl_multi, &queue);
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
2019-08-02 17:02:45 +03:00
|
|
|
inside_curl = false;
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
|
|
|
|
|
2007-06-10 21:46:44 +04:00
|
|
|
/**
|
|
|
|
* Callback function for fetch progress.
|
|
|
|
*/
|
2016-06-27 14:37:27 +03:00
|
|
|
static int
|
|
|
|
fetch_curl_progress(void *clientp,
|
|
|
|
double dltotal,
|
|
|
|
double dlnow,
|
|
|
|
double ultotal,
|
|
|
|
double ulnow)
|
2007-06-10 21:46:44 +04:00
|
|
|
{
|
2010-02-10 03:32:12 +03:00
|
|
|
static char fetch_progress_buffer[256]; /**< Progress buffer for cURL */
|
2007-06-10 21:46:44 +04:00
|
|
|
struct curl_fetch_info *f = (struct curl_fetch_info *) clientp;
|
2016-04-19 16:18:09 +03:00
|
|
|
uint64_t time_now_ms;
|
2011-11-09 01:51:42 +04:00
|
|
|
fetch_msg msg;
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2016-04-19 16:18:09 +03:00
|
|
|
if (f->abort) {
|
2007-06-10 21:46:44 +04:00
|
|
|
return 0;
|
2016-04-19 16:18:09 +03:00
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2011-11-09 01:51:42 +04:00
|
|
|
msg.type = FETCH_PROGRESS;
|
|
|
|
msg.data.progress = fetch_progress_buffer;
|
|
|
|
|
2016-04-19 16:18:09 +03:00
|
|
|
/* Rate limit each fetch's progress notifications */
|
|
|
|
nsu_getmonotonic_ms(&time_now_ms);
|
|
|
|
#define UPDATE_DELAY_MS (1000 / UPDATES_PER_SECOND)
|
|
|
|
if (time_now_ms - f->last_progress_update < UPDATE_DELAY_MS) {
|
2010-02-10 03:32:12 +03:00
|
|
|
return 0;
|
2016-04-19 16:18:09 +03:00
|
|
|
}
|
|
|
|
#undef UPDATE_DELAY_MS
|
|
|
|
f->last_progress_update = time_now_ms;
|
2010-02-10 03:32:12 +03:00
|
|
|
|
2007-06-10 21:46:44 +04:00
|
|
|
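/* report progress as received/total when the total size is known,
 * otherwise report only the amount received so far
 */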
if (dltotal > 0) {
|
|
|
|
snprintf(fetch_progress_buffer, 255,
|
|
|
|
messages_get("Progress"),
|
|
|
|
human_friendly_bytesize(dlnow),
|
|
|
|
human_friendly_bytesize(dltotal));
|
2011-11-09 01:51:42 +04:00
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
2007-06-10 21:46:44 +04:00
|
|
|
} else {
|
|
|
|
snprintf(fetch_progress_buffer, 255,
|
|
|
|
messages_get("ProgressU"),
|
|
|
|
human_friendly_bytesize(dlnow));
|
2011-11-09 01:51:42 +04:00
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-07-04 22:05:16 +04:00
|
|
|
/**
|
2019-05-28 02:38:40 +03:00
|
|
|
* Format curl debug for nslog
|
2007-07-04 22:05:16 +04:00
|
|
|
*/
|
2019-05-28 02:38:40 +03:00
|
|
|
static int
|
|
|
|
fetch_curl_debug(CURL *handle,
|
|
|
|
curl_infotype type,
|
|
|
|
char *data,
|
|
|
|
size_t size,
|
|
|
|
void *userptr)
|
2007-07-04 22:05:16 +04:00
|
|
|
{
|
2019-05-28 02:38:40 +03:00
|
|
|
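/* log line prefixes indexed by curl_infotype */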
static const char s_infotype[CURLINFO_END][3] = {
|
|
|
|
"* ", "< ", "> ", "{ ", "} ", "{ ", "} "
|
|
|
|
};
|
|
|
|
switch(type) {
|
|
|
|
case CURLINFO_TEXT:
|
|
|
|
case CURLINFO_HEADER_OUT:
|
|
|
|
case CURLINFO_HEADER_IN:
|
|
|
|
NSLOG(fetch, DEBUG, "%s%.*s", s_infotype[type], (int)size - 1, data);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2007-07-04 22:05:16 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-10 21:46:44 +04:00
|
|
|
/**
|
|
|
|
* Callback function for body data arriving from cURL (registered as
* the CURLOPT_WRITEFUNCTION handler).
|
|
|
|
*/
|
2014-10-25 20:10:08 +04:00
|
|
|
static size_t fetch_curl_data(char *data, size_t size, size_t nmemb, void *_f)
|
2007-06-10 21:46:44 +04:00
|
|
|
{
|
2009-01-08 13:36:11 +03:00
|
|
|
struct curl_fetch_info *f = _f;
|
2007-06-10 21:46:44 +04:00
|
|
|
CURLcode code;
|
2011-11-09 01:51:42 +04:00
|
|
|
fetch_msg msg;
|
2007-06-10 21:46:44 +04:00
|
|
|
|
|
|
|
/* ensure we only have to get this information once */
|
2016-06-27 14:37:27 +03:00
|
|
|
if (!f->http_code) {
|
2007-06-10 21:46:44 +04:00
|
|
|
code = curl_easy_getinfo(f->curl_handle, CURLINFO_HTTP_CODE,
|
2007-07-04 22:05:16 +04:00
|
|
|
&f->http_code);
|
|
|
|
fetch_set_http_code(f->fetch_handle, f->http_code);
|
2007-06-10 21:46:44 +04:00
|
|
|
assert(code == CURLE_OK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ignore the body if this is a 401 reply, by skipping it and resetting
|
2014-10-25 20:10:08 +04:00
|
|
|
* the HTTP response code to enable follow up fetches.
|
|
|
|
*/
|
2016-06-27 14:37:27 +03:00
|
|
|
if (f->http_code == 401) {
|
2007-06-10 21:46:44 +04:00
|
|
|
f->http_code = 0;
|
|
|
|
return size * nmemb;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (f->abort || (!f->had_headers && fetch_curl_process_headers(f))) {
|
|
|
|
f->stopped = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* send data to the caller */
|
2011-11-09 01:51:42 +04:00
|
|
|
msg.type = FETCH_DATA;
|
|
|
|
msg.data.header_or_data.buf = (const uint8_t *) data;
|
|
|
|
msg.data.header_or_data.len = size * nmemb;
|
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
|
|
|
if (f->abort) {
|
|
|
|
f->stopped = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return size * nmemb;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Callback function for headers.
|
2008-05-30 08:11:16 +04:00
|
|
|
*
|
|
|
|
* See RFC 2616 4.2.
|
2007-06-10 21:46:44 +04:00
|
|
|
*/
|
2016-06-27 14:37:27 +03:00
|
|
|
static size_t
|
|
|
|
fetch_curl_header(char *data, size_t size, size_t nmemb, void *_f)
|
2007-06-10 21:46:44 +04:00
|
|
|
{
|
2009-01-08 13:36:11 +03:00
|
|
|
struct curl_fetch_info *f = _f;
|
2007-06-10 21:46:44 +04:00
|
|
|
int i;
|
2011-11-09 01:51:42 +04:00
|
|
|
fetch_msg msg;
|
2007-06-10 21:46:44 +04:00
|
|
|
size *= nmemb;
|
|
|
|
|
2008-05-31 20:52:37 +04:00
|
|
|
if (f->abort) {
|
|
|
|
f->stopped = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-11-09 01:51:42 +04:00
|
|
|
msg.type = FETCH_HEADER;
|
|
|
|
msg.data.header_or_data.buf = (const uint8_t *) data;
|
|
|
|
msg.data.header_or_data.len = size;
|
|
|
|
fetch_send_callback(&msg, f->fetch_handle);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2008-05-30 08:11:16 +04:00
|
|
|
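/* SKIP_ST(o): advance i past any spaces or tabs, starting at offset o */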
#define SKIP_ST(o) for (i = (o); i < (int) size && (data[i] == ' ' || data[i] == '\t'); i++)
|
2007-06-10 21:46:44 +04:00
|
|
|
|
|
|
|
if (12 < size && strncasecmp(data, "Location:", 9) == 0) {
|
|
|
|
/* extract Location header */
|
|
|
|
free(f->location);
|
|
|
|
f->location = malloc(size);
|
|
|
|
if (!f->location) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "malloc failed");
|
2007-06-10 21:46:44 +04:00
|
|
|
return size;
|
|
|
|
}
|
|
|
|
SKIP_ST(9);
|
|
|
|
strncpy(f->location, data + i, size - i);
|
|
|
|
f->location[size - i] = '\0';
|
|
|
|
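/* strip trailing whitespace (including CR/LF) from the value */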
for (i = size - i - 1; i >= 0 &&
|
|
|
|
(f->location[i] == ' ' ||
|
|
|
|
f->location[i] == '\t' ||
|
|
|
|
f->location[i] == '\r' ||
|
|
|
|
f->location[i] == '\n'); i--)
|
|
|
|
f->location[i] = '\0';
|
|
|
|
} else if (15 < size && strncasecmp(data, "Content-Length:", 15) == 0) {
|
|
|
|
/* extract Content-Length header */
|
|
|
|
SKIP_ST(15);
|
|
|
|
if (i < (int)size && '0' <= data[i] && data[i] <= '9')
|
|
|
|
f->content_length = atol(data + i);
|
|
|
|
} else if (17 < size && strncasecmp(data, "WWW-Authenticate:", 17) == 0) {
|
|
|
|
/* extract the first Realm from WWW-Authenticate header */
|
|
|
|
SKIP_ST(17);
|
|
|
|
|
|
|
|
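/* locate the realm attribute and step to the start of its
 * quoted value
 */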
while (i < (int) size - 5 &&
|
|
|
|
strncasecmp(data + i, "realm", 5))
|
|
|
|
i++;
|
|
|
|
while (i < (int) size - 1 && data[++i] != '"')
|
|
|
|
/* */;
|
|
|
|
i++;
|
|
|
|
|
|
|
|
if (i < (int) size) {
|
2011-06-30 03:32:53 +04:00
|
|
|
size_t end = i;
|
|
|
|
|
|
|
|
while (end < size && data[end] != '"')
|
|
|
|
++end;
|
|
|
|
|
|
|
|
if (end < size) {
|
|
|
|
free(f->realm);
|
|
|
|
f->realm = malloc(end - i + 1);
|
|
|
|
if (f->realm != NULL) {
|
|
|
|
strncpy(f->realm, data + i, end - i);
|
|
|
|
f->realm[end - i] = '\0';
|
|
|
|
}
|
2011-02-25 03:41:16 +03:00
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
|
|
|
} else if (11 < size && strncasecmp(data, "Set-Cookie:", 11) == 0) {
|
|
|
|
/* extract Set-Cookie header */
|
|
|
|
SKIP_ST(11);
|
|
|
|
|
2008-01-30 22:56:41 +03:00
|
|
|
fetch_set_cookie(f->fetch_handle, &data[i]);
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return size;
|
|
|
|
#undef SKIP_ST
|
|
|
|
}
|
|
|
|
|
2016-06-27 23:00:58 +03:00
|
|
|
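/**
 * Set the fetchers' file descriptors in the caller's sets.
 *
 * \param scheme The scheme being polled (unused).
 * \param read_set fd set updated with descriptors to check for reading.
 * \param write_set fd set updated with descriptors to check for writing.
 * \param error_set fd set updated with descriptors to check for errors.
 * \return The highest numbered descriptor added, or -1 if none were added.
 */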
static int fetch_curl_fdset(lwc_string *scheme, fd_set *read_set,
|
|
|
|
fd_set *write_set, fd_set *error_set)
|
|
|
|
{
|
|
|
|
CURLMcode code;
|
|
|
|
int maxfd = -1;
|
|
|
|
|
|
|
|
code = curl_multi_fdset(fetch_curl_multi,
|
|
|
|
read_set,
|
|
|
|
write_set,
|
|
|
|
error_set,
|
|
|
|
&maxfd);
|
|
|
|
assert(code == CURLM_OK);
|
|
|
|
|
|
|
|
return maxfd;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
/* exported function documented in content/fetchers/curl.h */
|
|
|
|
nserror fetch_curl_register(void)
|
2007-06-10 21:46:44 +04:00
|
|
|
{
|
|
|
|
CURLcode code;
|
2014-10-25 20:10:08 +04:00
|
|
|
curl_version_info_data *data;
|
|
|
|
int i;
|
|
|
|
lwc_string *scheme;
|
|
|
|
const struct fetcher_operation_table fetcher_ops = {
|
|
|
|
.initialise = fetch_curl_initialise,
|
|
|
|
.acceptable = fetch_curl_can_fetch,
|
|
|
|
.setup = fetch_curl_setup,
|
|
|
|
.start = fetch_curl_start,
|
|
|
|
.abort = fetch_curl_abort,
|
|
|
|
.free = fetch_curl_free,
|
|
|
|
.poll = fetch_curl_poll,
|
2016-06-27 23:00:58 +03:00
|
|
|
.fdset = fetch_curl_fdset,
|
2014-10-25 20:10:08 +04:00
|
|
|
.finalise = fetch_curl_finalise
|
|
|
|
};
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "curl_version %s", curl_version());
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
code = curl_global_init(CURL_GLOBAL_ALL);
|
|
|
|
if (code != CURLE_OK) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "curl_global_init failed.");
|
2014-10-25 20:10:08 +04:00
|
|
|
return NSERROR_INIT_FAILED;
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
fetch_curl_multi = curl_multi_init();
|
|
|
|
if (!fetch_curl_multi) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "curl_multi_init failed.");
|
2014-10-25 20:10:08 +04:00
|
|
|
return NSERROR_INIT_FAILED;
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
#if LIBCURL_VERSION_NUM >= 0x071e00
|
|
|
|
/* built against 7.30.0 or later: configure caching */
|
|
|
|
{
|
|
|
|
CURLMcode mcode;
|
|
|
|
int maxconnects = nsoption_int(max_fetchers) +
|
|
|
|
nsoption_int(max_cached_fetch_handles);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
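/* SETOPT(): set an option on the shared multi handle, jumping to
 * the error path if libcurl reports a failure
 */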
#undef SETOPT
|
|
|
|
#define SETOPT(option, value) \
|
|
|
|
mcode = curl_multi_setopt(fetch_curl_multi, option, value); \
|
|
|
|
if (mcode != CURLM_OK) \
|
|
|
|
goto curl_multi_setopt_failed;
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
SETOPT(CURLMOPT_MAXCONNECTS, maxconnects);
|
|
|
|
SETOPT(CURLMOPT_MAX_TOTAL_CONNECTIONS, maxconnects);
|
|
|
|
SETOPT(CURLMOPT_MAX_HOST_CONNECTIONS, nsoption_int(max_fetchers_per_host));
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|
2014-10-25 20:10:08 +04:00
|
|
|
#endif
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
/* Create a curl easy handle with the options that are common to all
|
2016-06-27 14:37:27 +03:00
|
|
|
* fetches.
|
|
|
|
*/
|
2014-10-25 20:10:08 +04:00
|
|
|
fetch_blank_curl = curl_easy_init();
|
|
|
|
if (!fetch_blank_curl) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "curl_easy_init failed");
|
2014-10-25 20:10:08 +04:00
|
|
|
return NSERROR_INIT_FAILED;
|
|
|
|
}
|
2010-04-07 15:10:55 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
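/* SETOPT(): redefined to set options on the template easy handle,
 * bailing out via curl_easy_setopt_failed on error
 */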
#undef SETOPT
|
|
|
|
#define SETOPT(option, value) \
|
|
|
|
code = curl_easy_setopt(fetch_blank_curl, option, value); \
|
|
|
|
if (code != CURLE_OK) \
|
|
|
|
goto curl_easy_setopt_failed;
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
SETOPT(CURLOPT_ERRORBUFFER, fetch_error_buffer);
|
2019-05-28 02:38:40 +03:00
|
|
|
SETOPT(CURLOPT_DEBUGFUNCTION, fetch_curl_debug);
|
2014-10-25 20:10:08 +04:00
|
|
|
if (nsoption_bool(suppress_curl_debug)) {
|
2019-05-28 02:38:40 +03:00
|
|
|
SETOPT(CURLOPT_VERBOSE, 0);
|
|
|
|
} else {
|
|
|
|
SETOPT(CURLOPT_VERBOSE, 1);
|
2014-10-25 20:10:08 +04:00
|
|
|
}
|
2019-05-06 19:12:14 +03:00
|
|
|
|
|
|
|
/* Currently we explode if curl uses HTTP2, so force 1.1. */
|
|
|
|
SETOPT(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
|
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
SETOPT(CURLOPT_WRITEFUNCTION, fetch_curl_data);
|
|
|
|
SETOPT(CURLOPT_HEADERFUNCTION, fetch_curl_header);
|
|
|
|
SETOPT(CURLOPT_PROGRESSFUNCTION, fetch_curl_progress);
|
|
|
|
SETOPT(CURLOPT_NOPROGRESS, 0);
|
|
|
|
SETOPT(CURLOPT_USERAGENT, user_agent_string());
|
|
|
|
SETOPT(CURLOPT_ENCODING, "gzip");
|
|
|
|
SETOPT(CURLOPT_LOW_SPEED_LIMIT, 1L);
|
|
|
|
SETOPT(CURLOPT_LOW_SPEED_TIME, 180L);
|
|
|
|
SETOPT(CURLOPT_NOSIGNAL, 1L);
|
2015-11-11 00:51:54 +03:00
|
|
|
SETOPT(CURLOPT_CONNECTTIMEOUT, nsoption_uint(curl_fetch_timeout));
|
2018-04-22 15:54:23 +03:00
|
|
|
SETOPT(CURLOPT_SSL_CIPHER_LIST, CIPHER_LIST);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
if (nsoption_charp(ca_bundle) &&
|
|
|
|
strcmp(nsoption_charp(ca_bundle), "")) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "ca_bundle: '%s'",
|
|
|
|
nsoption_charp(ca_bundle));
|
2014-10-25 20:10:08 +04:00
|
|
|
SETOPT(CURLOPT_CAINFO, nsoption_charp(ca_bundle));
|
|
|
|
}
|
|
|
|
if (nsoption_charp(ca_path) && strcmp(nsoption_charp(ca_path), "")) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "ca_path: '%s'", nsoption_charp(ca_path));
|
2014-10-25 20:10:08 +04:00
|
|
|
SETOPT(CURLOPT_CAPATH, nsoption_charp(ca_path));
|
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
/* Detect whether the SSL CTX function API works */
|
|
|
|
curl_with_openssl = true;
|
|
|
|
code = curl_easy_setopt(fetch_blank_curl,
|
|
|
|
CURLOPT_SSL_CTX_FUNCTION, NULL);
|
|
|
|
if (code != CURLE_OK) {
|
|
|
|
curl_with_openssl = false;
|
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "cURL %slinked against openssl",
|
|
|
|
curl_with_openssl ? "" : "not ");
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
/* cURL initialised okay, register the fetchers */
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
data = curl_version_info(CURLVERSION_NOW);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
for (i = 0; data->protocols[i]; i++) {
|
|
|
|
if (strcmp(data->protocols[i], "http") == 0) {
|
|
|
|
scheme = lwc_string_ref(corestring_lwc_http);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
} else if (strcmp(data->protocols[i], "https") == 0) {
|
|
|
|
scheme = lwc_string_ref(corestring_lwc_https);
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
} else {
|
|
|
|
/* Ignore non-http(s) protocols */
|
|
|
|
continue;
|
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
if (fetcher_add(scheme, &fetcher_ops) != NSERROR_OK) {
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO,
|
|
|
|
"Unable to register cURL fetcher for %s",
|
|
|
|
data->protocols[i]);
|
2014-10-25 20:10:08 +04:00
|
|
|
}
|
|
|
|
}
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
return NSERROR_OK;
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
curl_easy_setopt_failed:
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "curl_easy_setopt failed.");
|
2014-10-25 20:10:08 +04:00
|
|
|
return NSERROR_INIT_FAILED;
|
2007-06-10 21:46:44 +04:00
|
|
|
|
2014-10-25 20:10:08 +04:00
|
|
|
#if LIBCURL_VERSION_NUM >= 0x071e00
|
|
|
|
curl_multi_setopt_failed:
|
2017-09-06 20:28:12 +03:00
|
|
|
NSLOG(netsurf, INFO, "curl_multi_setopt failed.");
|
2014-10-25 20:10:08 +04:00
|
|
|
return NSERROR_INIT_FAILED;
|
|
|
|
#endif
|
2007-06-10 21:46:44 +04:00
|
|
|
}
|