From 986d7f4d37124e1ab7dcc99587f0d6d1deeedd9c Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Tue, 6 Dec 2016 13:24:29 -0500
Subject: http: simplify update_url_from_redirect

This function looks for a common tail between what we asked for and where we
were redirected to, but it open-codes the comparison. We can avoid some
confusing subtractions by using strip_suffix_mem().

Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 http.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

(limited to 'http.c')

diff --git a/http.c b/http.c
index d8e427b69..d4034a14b 100644
--- a/http.c
+++ b/http.c
@@ -1500,7 +1500,7 @@ static int update_url_from_redirect(struct strbuf *base,
                                     const struct strbuf *got)
 {
         const char *tail;
-        size_t tail_len;
+        size_t new_len;
 
         if (!strcmp(asked, got->buf))
                 return 0;
@@ -1509,14 +1509,12 @@ static int update_url_from_redirect(struct strbuf *base,
                 die("BUG: update_url_from_redirect: %s is not a superset of %s",
                     asked, base->buf);
 
-        tail_len = strlen(tail);
-
-        if (got->len < tail_len ||
-            strcmp(tail, got->buf + got->len - tail_len))
+        new_len = got->len;
+        if (!strip_suffix_mem(got->buf, &new_len, tail))
                 return 0; /* insane redirect scheme */
 
         strbuf_reset(base);
-        strbuf_add(base, got->buf, got->len - tail_len);
+        strbuf_add(base, got->buf, new_len);
 
         return 1;
 }
--
cgit v1.2.1
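To see what the helper buys us, here is a minimal standalone sketch of the
suffix check; the strip_suffix_mem() below is a local stand-in with the
semantics the patch relies on (shrink *len past a matching suffix, report
whether it matched), and the URLs are purely illustrative:

    #include <stdio.h>
    #include <string.h>

    /*
     * Local stand-in for the strip_suffix_mem() helper used by the patch:
     * if the first *len bytes of buf end with suffix, shrink *len so it no
     * longer covers the suffix and return 1; otherwise leave *len alone
     * and return 0.
     */
    static int strip_suffix_mem(const char *buf, size_t *len, const char *suffix)
    {
        size_t suflen = strlen(suffix);

        if (*len < suflen || memcmp(buf + (*len - suflen), suffix, suflen))
            return 0;
        *len -= suflen;
        return 1;
    }

    int main(void)
    {
        const char *got = "http://bob/other.git/info/refs"; /* redirect target */
        const char *tail = "/info/refs";                    /* tail of what we asked for */
        size_t new_len = strlen(got);

        if (strip_suffix_mem(got, &new_len, tail))
            printf("new base: %.*s\n", (int)new_len, got);  /* http://bob/other.git */
        else
            puts("insane redirect scheme");
        return 0;
    }

The whole "length minus tail length" arithmetic lives in one place, and the
caller only deals with the resulting new_len.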
From 6628eb41db5189c0cdfdced6d8697e7c813c5f0f Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Tue, 6 Dec 2016 13:24:35 -0500
Subject: http: always update the base URL for redirects

If a malicious server redirects the initial ref advertisement, it may be able
to leak sha1s from other, unrelated servers that the client has access to.
For example, imagine that Alice is a git user, she has access to a private
repository on a server hosted by Bob, and Mallory runs a malicious server and
wants to find out about Bob's private repository.

Mallory asks Alice to clone an unrelated repository from her over HTTP. When
Alice's client contacts Mallory's server for the initial ref advertisement,
the server issues an HTTP redirect for Bob's server. Alice contacts Bob's
server and gets the ref advertisement for the private repository. If there is
anything to fetch, she then follows up by asking the server for one or more
sha1 objects. But who is the server? If it is still Mallory's server, then
Alice will leak the existence of those sha1s to her.

Since commit c93c92f30 (http: update base URLs when we see redirects,
2013-09-28), the client usually rewrites the base URL such that all further
requests will go to Bob's server. But this is done by textually matching the
URL. If we were originally looking for "http://mallory/repo.git/info/refs",
and we got pointed at "http://bob/other.git/info/refs", then we know that the
right root is "http://bob/other.git".

If the redirect appears to change more than just the root, we punt and
continue to use the original server. E.g., imagine the redirect adds a URL
component that Bob's server will ignore, like
"http://bob/other.git/info/refs?dummy=1".

We can solve this by aborting in this case rather than silently continuing to
use Mallory's server. In addition to protecting from sha1 leakage, it's
arguably safer and more sane to refuse a confusing redirect like that in
general. For example, part of the motivation in c93c92f30 is avoiding
accidentally sending credentials over clear http, just to get a response that
says "try again over https". So even in a non-malicious case, we'd prefer to
err on the side of caution.

The downside is that it's possible this will break a legitimate but
complicated server-side redirection scheme. The setup given in the newly
added test does work, but it's convoluted enough that we don't need to care
about it. A more plausible case would be a server which redirects a request
for "info/refs?service=git-upload-pack" to just "info/refs" (because it does
not do smart HTTP, and for some reason really dislikes query parameters).
Right now we would transparently downgrade to dumb-http, but with this patch,
we'd complain (and the user would have to set GIT_SMART_HTTP=0 to fetch).

Reported-by: Jann Horn
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 http.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'http.c')

diff --git a/http.c b/http.c
index d4034a14b..718d2109b 100644
--- a/http.c
+++ b/http.c
@@ -1491,9 +1491,9 @@ static int http_request(const char *url,
  *
  * Note that this assumes a sane redirect scheme. It's entirely possible
  * in the example above to end up at a URL that does not even end in
- * "info/refs". In such a case we simply punt, as there is not much we can
- * do (and such a scheme is unlikely to represent a real git repository,
- * which means we are likely about to abort anyway).
+ * "info/refs". In such a case we die. There's not much we can do, such a
+ * scheme is unlikely to represent a real git repository, and failing to
+ * rewrite the base opens options for malicious redirects to do funny things.
  */
 static int update_url_from_redirect(struct strbuf *base,
                                     const char *asked,
@@ -1511,10 +1511,14 @@ static int update_url_from_redirect(struct strbuf *base,
 
         new_len = got->len;
         if (!strip_suffix_mem(got->buf, &new_len, tail))
-                return 0; /* insane redirect scheme */
+                die(_("unable to update url base from redirection:\n"
+                      "  asked for: %s\n"
+                      "   redirect: %s"),
+                    asked, got->buf);
 
         strbuf_reset(base);
         strbuf_add(base, got->buf, new_len);
+
         return 1;
 }
 
--
cgit v1.2.1
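A self-contained model of the patched behavior, using only the standard
library (update_base(), the exit code, and the URLs are illustrative stand-ins
rather than git's actual API): a redirect that only swaps the repository root
rewrites the base for later requests, while anything else now dies instead of
silently keeping the original server:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical, simplified model of the patched update_url_from_redirect(). */
    static void update_base(char *base, size_t base_size,
                            const char *asked, const char *got)
    {
        const char *tail;
        size_t new_len = strlen(got);
        size_t tail_len;

        if (!strcmp(asked, got))
            return;                      /* no redirect happened */
        if (strncmp(asked, base, strlen(base)))
            abort();                     /* BUG: asked must start with base */
        tail = asked + strlen(base);     /* e.g. "/info/refs" */

        tail_len = strlen(tail);
        if (new_len < tail_len || strcmp(got + new_len - tail_len, tail)) {
            /* after this patch: a confusing redirect is fatal */
            fprintf(stderr, "unable to update url base from redirection:\n"
                            "  asked for: %s\n"
                            "   redirect: %s\n", asked, got);
            exit(128);
        }
        new_len -= tail_len;
        snprintf(base, base_size, "%.*s", (int)new_len, got);
    }

    int main(void)
    {
        char base[256] = "http://mallory/repo.git";

        update_base(base, sizeof(base),
                    "http://mallory/repo.git/info/refs",
                    "http://bob/other.git/info/refs");
        printf("further requests go to: %s\n", base); /* http://bob/other.git */
        return 0;
    }

Feeding it the "?dummy=1" redirect from the message above would hit the fatal
branch instead of quietly leaving the base pointed at Mallory's server.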
From 50d3413740d1da599cdc0106e6e916741394cc98 Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Tue, 6 Dec 2016 13:24:41 -0500
Subject: http: make redirects more obvious

We instruct curl to always follow HTTP redirects. This is convenient, but it
creates opportunities for malicious servers to create confusing situations.
For instance, imagine Alice is a git user with access to a private repository
on Bob's server. Mallory runs her own server and wants to access objects from
Bob's repository.

Mallory may try a few tricks that involve asking Alice to clone from her,
build on top, and then push the result:

  1. Mallory may simply redirect all fetch requests to Bob's server. Git will
     transparently follow those redirects and fetch Bob's history, which
     Alice may believe she got from Mallory. The subsequent push seems like
     it is just feeding Mallory back her own objects, but is actually leaking
     Bob's objects. There is nothing in git's output to indicate that Bob's
     repository was involved at all.

     The downside (for Mallory) of this attack is that Alice will have
     received Bob's entire repository, and is likely to notice that when
     building on top of it.

  2. If Mallory happens to know the sha1 of some object X in Bob's
     repository, she can instead build her own history that references that
     object. She then runs a dumb http server, and Alice's client will fetch
     each object individually. When it asks for X, Mallory redirects her to
     Bob's server. The end result is that Alice obtains objects from Bob, but
     they may be buried deep in history. Alice is less likely to notice.

Both of these attacks are fairly hard to pull off. There's a social component
in getting Mallory to convince Alice to work with her. Alice may be prompted
for credentials in accessing Bob's repository (but not always, if she is
using a credential helper that caches). Attack (1) requires a certain amount
of obliviousness on Alice's part while making a new commit. Attack (2)
requires that Mallory knows a sha1 in Bob's repository, that Bob's server
supports dumb http, and that the object in question is loose on Bob's server.

But we can probably make things a bit more obvious without any loss of
functionality. This patch does two things to that end.

First, when we encounter a whole-repo redirect during the initial ref
discovery, we now inform the user on stderr, making attack (1) much more
obvious.

Second, the decision to follow redirects is now configurable. The truly
paranoid can set the new http.followRedirects to false to avoid any
redirection entirely. But for a more practical default, we will disallow
redirects only after the initial ref discovery. This is enough to thwart
attacks similar to (2), while still allowing the common use of redirects at
the repository level. Since c93c92f30 (http: update base URLs when we see
redirects, 2013-09-28) we re-root all further requests from the redirect
destination, which should generally mean that no further redirection is
necessary.

As an escape hatch, in case there really is a server that needs to redirect
individual requests, the user can set http.followRedirects to "true" (and
this can be done on a per-server basis via http.*.followRedirects config).

Reported-by: Jann Horn
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 http.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

(limited to 'http.c')

diff --git a/http.c b/http.c
index 718d2109b..b99ade5fa 100644
--- a/http.c
+++ b/http.c
@@ -98,6 +98,8 @@ static int http_proactive_auth;
 static const char *user_agent;
 static int curl_empty_auth;
 
+enum http_follow_config http_follow_config = HTTP_FOLLOW_INITIAL;
+
 #if LIBCURL_VERSION_NUM >= 0x071700
 /* Use CURLOPT_KEYPASSWD as is */
 #elif LIBCURL_VERSION_NUM >= 0x070903
@@ -337,6 +339,16 @@ static int http_options(const char *var, const char *value, void *cb)
                 return 0;
         }
 
+        if (!strcmp("http.followredirects", var)) {
+                if (value && !strcmp(value, "initial"))
+                        http_follow_config = HTTP_FOLLOW_INITIAL;
+                else if (git_config_bool(var, value))
+                        http_follow_config = HTTP_FOLLOW_ALWAYS;
+                else
+                        http_follow_config = HTTP_FOLLOW_NONE;
+                return 0;
+        }
+
         /* Fall back on the default ones */
         return git_default_config(var, value, cb);
 }
@@ -553,7 +565,6 @@ static CURL *get_curl_handle(void)
                                  curl_low_speed_time);
         }
 
-        curl_easy_setopt(result, CURLOPT_FOLLOWLOCATION, 1);
         curl_easy_setopt(result, CURLOPT_MAXREDIRS, 20);
 #if LIBCURL_VERSION_NUM >= 0x071301
         curl_easy_setopt(result, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
@@ -882,6 +893,16 @@ struct active_request_slot *get_active_slot(void)
         curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 1);
         curl_easy_setopt(slot->curl, CURLOPT_RANGE, NULL);
 
+        /*
+         * Default following to off unless "ALWAYS" is configured; this gives
+         * callers a sane starting point, and they can tweak for individual
+         * HTTP_FOLLOW_* cases themselves.
+         */
+        if (http_follow_config == HTTP_FOLLOW_ALWAYS)
+                curl_easy_setopt(slot->curl, CURLOPT_FOLLOWLOCATION, 1);
+        else
+                curl_easy_setopt(slot->curl, CURLOPT_FOLLOWLOCATION, 0);
+
 #if LIBCURL_VERSION_NUM >= 0x070a08
         curl_easy_setopt(slot->curl, CURLOPT_IPRESOLVE, git_curl_ipresolve);
 #endif
@@ -1122,9 +1143,12 @@ static int handle_curl_result(struct slot_results *results)
          * If we see a failing http code with CURLE_OK, we have turned off
          * FAILONERROR (to keep the server's custom error response), and should
          * translate the code into failure here.
+         *
+         * Likewise, if we see a redirect (30x code), that means we turned off
+         * redirect-following, and we should treat the result as an error.
          */
         if (results->curl_result == CURLE_OK &&
-            results->http_code >= 400) {
+            results->http_code >= 300) {
                 results->curl_result = CURLE_HTTP_RETURNED_ERROR;
                 /*
                  * Normally curl will already have put the "reason phrase"
@@ -1443,6 +1467,9 @@ static int http_request(const char *url,
                         strbuf_addstr(&buf, " no-cache");
         if (options && options->keep_error)
                 curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 0);
+        if (options && options->initial_request &&
+            http_follow_config == HTTP_FOLLOW_INITIAL)
+                curl_easy_setopt(slot->curl, CURLOPT_FOLLOWLOCATION, 1);
 
         headers = curl_slist_append(headers, buf.buf);
 
--
cgit v1.2.1
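Roughly, the new policy maps onto curl options as in the standalone sketch
below. The HTTP_FOLLOW_* values mirror the enum this patch introduces, but
configure_slot(), initial_request as a plain flag, and the URL are made up for
the example and are not the actual git code path:

    #include <curl/curl.h>

    /* Mirror of the three-state follow policy added by this patch. */
    enum http_follow_config {
        HTTP_FOLLOW_NONE,
        HTTP_FOLLOW_INITIAL,  /* follow redirects only for the first request */
        HTTP_FOLLOW_ALWAYS
    };

    static void configure_slot(CURL *curl, enum http_follow_config follow,
                               int initial_request)
    {
        /* default to not following; re-enable only where the policy allows it */
        long on = 0;

        if (follow == HTTP_FOLLOW_ALWAYS)
            on = 1;
        else if (follow == HTTP_FOLLOW_INITIAL && initial_request)
            on = 1;

        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, on);
        curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 20L);
    }

    int main(void)
    {
        CURL *curl;

        curl_global_init(CURL_GLOBAL_DEFAULT);
        curl = curl_easy_init();
        if (!curl)
            return 1;

        /* the ref-advertisement request may still follow a whole-repo redirect */
        configure_slot(curl, HTTP_FOLLOW_INITIAL, 1);
        curl_easy_setopt(curl, CURLOPT_URL,
                         "https://example.com/repo.git/info/refs");
        curl_easy_perform(curl);

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return 0;
    }

Passing HTTP_FOLLOW_NONE instead leaves CURLOPT_FOLLOWLOCATION off even for
the first request, which is the "truly paranoid" setting described above.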
From cb4d2d35c4622ec2513c1c352d30ff8f9f9cdb9e Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Tue, 6 Dec 2016 13:24:45 -0500
Subject: http: treat http-alternates like redirects

The previous commit made HTTP redirects more obvious and tightened up the
default behavior. However, there's another way for a server to ask a git
client to fetch arbitrary content: by having an http-alternates file (or a
regular alternates file, which is used as a backup).

Similar to the HTTP redirect case, a malicious server can claim to have refs
pointing at object X, return a 404 when the client asks for X, but point to
some other URL via http-alternates, which the client will transparently
fetch. The end result is that it looks from the user's perspective like the
objects came from the malicious server, as the other URL is not mentioned at
all. Worse, because we feed the new URL to curl ourselves, the usual protocol
restrictions do not kick in (neither curl's default of disallowing file://,
nor the protocol whitelisting in f4113cac0 (http: limit redirection to
protocol-whitelist, 2015-09-22)).

Let's apply the same rules here as we do for HTTP redirects. Namely:

  - unless http.followRedirects is set to "always", we will not follow remote
    redirects from http-alternates (or alternates) at all

  - set CURLOPT_PROTOCOLS alongside CURLOPT_REDIR_PROTOCOLS to restrict
    ourselves to a known-safe set and respect any user-provided whitelist

  - mention alternate object stores on stderr so that the user is aware
    another source of objects may be involved

The first item may prove to be too restrictive. The most common use of
alternates is to point to another path on the same server. While it's
possible for a single-server redirect to be an attack, it takes a fairly
obscure setup (victim and evil repository on the same host, host speaks dumb
http, and evil repository has access to edit its own http-alternates file).

So we could make the checks more specific, and only cover cross-server
redirects. But that means parsing the URLs ourselves, rather than letting
curl handle them. This patch goes for the simpler approach. Given that they
are only used with dumb http, http-alternates are probably pretty rare. And
there's an escape hatch: the user can allow redirects on a specific server by
setting http.<url>.followRedirects to "always".

Reported-by: Jann Horn
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 http.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'http.c')

diff --git a/http.c b/http.c
index b99ade5fa..a9778bfa4 100644
--- a/http.c
+++ b/http.c
@@ -581,6 +581,7 @@ static CURL *get_curl_handle(void)
         if (is_transport_allowed("ftps"))
                 allowed_protocols |= CURLPROTO_FTPS;
         curl_easy_setopt(result, CURLOPT_REDIR_PROTOCOLS, allowed_protocols);
+        curl_easy_setopt(result, CURLOPT_PROTOCOLS, allowed_protocols);
 #else
         if (transport_restrict_protocols())
                 warning("protocol restrictions not applied to curl redirects because\n"
--
cgit v1.2.1
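The curl side of the whitelist can be shown with a minimal standalone program;
the URL is a placeholder and the whitelist here is just the plain HTTP/HTTPS
set rather than git's computed allowed_protocols:

    #include <curl/curl.h>

    /*
     * Restrict both the protocols curl may use directly (CURLOPT_PROTOCOLS)
     * and the ones it may switch to on a redirect (CURLOPT_REDIR_PROTOCOLS),
     * so a server-supplied URL cannot drag the client to file:// or the like.
     */
    int main(void)
    {
        CURL *curl;
        long allowed = CURLPROTO_HTTP | CURLPROTO_HTTPS;

        curl_global_init(CURL_GLOBAL_DEFAULT);
        curl = curl_easy_init();
        if (!curl)
            return 1;

        curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, allowed);
        curl_easy_setopt(curl, CURLOPT_PROTOCOLS, allowed);

        /* illustrative URL only */
        curl_easy_setopt(curl, CURLOPT_URL,
                         "https://example.com/repo.git/objects/info/http-alternates");
        curl_easy_perform(curl);

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return 0;
    }

Setting CURLOPT_PROTOCOLS matters here precisely because the alternates URL is
handed to curl as a fresh request, not as a redirect, so the redirect-only
restriction would never apply to it.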
From 3680f16f9d6832dc78c6b80f1e8a546385d946f9 Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Tue, 6 Dec 2016 13:25:39 -0500
Subject: http-walker: complain about non-404 loose object errors

Since commit 17966c0a6 (http: avoid disconnecting on 404s for loose objects,
2016-07-11), we turn off curl's FAILONERROR option and instead manually deal
with failing HTTP codes.

However, the logic to do so only recognizes HTTP 404 as a failure. This is
probably the most common result, but if we were to get another code, the curl
result remains CURLE_OK, and we treat it as success. We still end up
detecting the failure when we try to zlib-inflate the object (which will
fail), but instead of reporting the HTTP error, we just claim that the object
is corrupt.

Instead, let's catch anything in the 300's or above as an error (300's are
redirects which are not an error at the HTTP level, but are an indication
that we've explicitly disabled redirects, so we should treat them as such; we
certainly don't have the resulting object content).

Note that we also fill in req->errorstr, which we didn't do before. Without
FAILONERROR, curl will not have filled this in, and it will remain a blank
string. This never mattered for the 404 case, because in the logic below we
hit the "missing_target()" branch and print nothing. But for other errors,
we'd want to say _something_, if only to fill in the blank slot in the error
message.

Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 http.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'http.c')

diff --git a/http.c b/http.c
index 7b66519b6..5cd3ffd67 100644
--- a/http.c
+++ b/http.c
@@ -1894,7 +1894,7 @@ static size_t fwrite_sha1_file(char *ptr, size_t eltsize, size_t nmemb,
                 if (c != CURLE_OK)
                         die("BUG: curl_easy_getinfo for HTTP code failed: %s",
                             curl_easy_strerror(c));
-                if (slot->http_code >= 400)
+                if (slot->http_code >= 300)
                         return size;
         }
 
--
cgit v1.2.1
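As a rough standalone illustration of the status check (the URL is a
placeholder and the error handling is simplified compared to the actual
http-walker code): with FAILONERROR off and redirect-following disabled,
anything in the 300s or above means we did not get object content and should
be reported as an HTTP error rather than as a corrupt object.

    #include <curl/curl.h>
    #include <stdio.h>

    int main(void)
    {
        CURL *curl;
        CURLcode res;
        long code = 0;

        curl_global_init(CURL_GLOBAL_DEFAULT);
        curl = curl_easy_init();
        if (!curl)
            return 1;

        /* illustrative loose-object URL */
        curl_easy_setopt(curl, CURLOPT_URL,
                         "https://example.com/repo.git/objects/ab/cd0123");
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0L);
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 0L);

        res = curl_easy_perform(curl);
        if (res == CURLE_OK) {
            /* curl reports success; inspect the HTTP status ourselves */
            curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
            if (code >= 300)
                fprintf(stderr, "error: HTTP %ld while fetching object\n", code);
        }

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return 0;
    }

Treating 3xx as failure is what ties this commit back to the earlier ones:
once redirects are disabled for follow-up requests, a 302 on an object fetch
is no longer something curl will resolve for us, so it has to surface as an
error.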