From 2d4177c01c238071777db5b1fbd8a14efb62ce02 Mon Sep 17 00:00:00 2001 From: Daniel Barkalow Date: Mon, 10 Sep 2007 23:03:00 -0400 Subject: Make fetch-pack a builtin with an internal API Signed-off-by: Daniel Barkalow Signed-off-by: Junio C Hamano --- builtin-fetch-pack.c | 829 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 829 insertions(+) create mode 100644 builtin-fetch-pack.c (limited to 'builtin-fetch-pack.c') diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c new file mode 100644 index 000000000..3b217d96f --- /dev/null +++ b/builtin-fetch-pack.c @@ -0,0 +1,829 @@ +#include "cache.h" +#include "refs.h" +#include "pkt-line.h" +#include "commit.h" +#include "tag.h" +#include "exec_cmd.h" +#include "pack.h" +#include "sideband.h" +#include "fetch-pack.h" + +static int keep_pack; +static int transfer_unpack_limit = -1; +static int fetch_unpack_limit = -1; +static int unpack_limit = 100; +static int quiet; +static int verbose; +static int fetch_all; +static int depth; +static int no_progress; +static const char fetch_pack_usage[] = +"git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=] [--depth=] [--no-progress] [-v] [:] [...]"; +static const char *uploadpack = "git-upload-pack"; + +#define COMPLETE (1U << 0) +#define COMMON (1U << 1) +#define COMMON_REF (1U << 2) +#define SEEN (1U << 3) +#define POPPED (1U << 4) + +/* + * After sending this many "have"s if we do not get any new ACK , we + * give up traversing our history. + */ +#define MAX_IN_VAIN 256 + +static struct commit_list *rev_list; +static int non_common_revs, multi_ack, use_thin_pack, use_sideband; + +static void rev_list_push(struct commit *commit, int mark) +{ + if (!(commit->object.flags & mark)) { + commit->object.flags |= mark; + + if (!(commit->object.parsed)) + parse_commit(commit); + + insert_by_date(commit, &rev_list); + + if (!(commit->object.flags & COMMON)) + non_common_revs++; + } +} + +static int rev_list_insert_ref(const char *path, const unsigned char *sha1, int flag, void *cb_data) +{ + struct object *o = deref_tag(parse_object(sha1), path, 0); + + if (o && o->type == OBJ_COMMIT) + rev_list_push((struct commit *)o, SEEN); + + return 0; +} + +/* + This function marks a rev and its ancestors as common. + In some cases, it is desirable to mark only the ancestors (for example + when only the server does not yet know that they are common). +*/ + +static void mark_common(struct commit *commit, + int ancestors_only, int dont_parse) +{ + if (commit != NULL && !(commit->object.flags & COMMON)) { + struct object *o = (struct object *)commit; + + if (!ancestors_only) + o->flags |= COMMON; + + if (!(o->flags & SEEN)) + rev_list_push(commit, SEEN); + else { + struct commit_list *parents; + + if (!ancestors_only && !(o->flags & POPPED)) + non_common_revs--; + if (!o->parsed && !dont_parse) + parse_commit(commit); + + for (parents = commit->parents; + parents; + parents = parents->next) + mark_common(parents->item, 0, dont_parse); + } + } +} + +/* + Get the next rev to send, ignoring the common. 
+*/ + +static const unsigned char* get_rev(void) +{ + struct commit *commit = NULL; + + while (commit == NULL) { + unsigned int mark; + struct commit_list* parents; + + if (rev_list == NULL || non_common_revs == 0) + return NULL; + + commit = rev_list->item; + if (!(commit->object.parsed)) + parse_commit(commit); + commit->object.flags |= POPPED; + if (!(commit->object.flags & COMMON)) + non_common_revs--; + + parents = commit->parents; + + if (commit->object.flags & COMMON) { + /* do not send "have", and ignore ancestors */ + commit = NULL; + mark = COMMON | SEEN; + } else if (commit->object.flags & COMMON_REF) + /* send "have", and ignore ancestors */ + mark = COMMON | SEEN; + else + /* send "have", also for its ancestors */ + mark = SEEN; + + while (parents) { + if (!(parents->item->object.flags & SEEN)) + rev_list_push(parents->item, mark); + if (mark & COMMON) + mark_common(parents->item, 1, 0); + parents = parents->next; + } + + rev_list = rev_list->next; + } + + return commit->object.sha1; +} + +static int find_common(int fd[2], unsigned char *result_sha1, + struct ref *refs) +{ + int fetching; + int count = 0, flushes = 0, retval; + const unsigned char *sha1; + unsigned in_vain = 0; + int got_continue = 0; + + for_each_ref(rev_list_insert_ref, NULL); + + fetching = 0; + for ( ; refs ; refs = refs->next) { + unsigned char *remote = refs->old_sha1; + struct object *o; + + /* + * If that object is complete (i.e. it is an ancestor of a + * local ref), we tell them we have it but do not have to + * tell them about its ancestors, which they already know + * about. + * + * We use lookup_object here because we are only + * interested in the case we *know* the object is + * reachable and we have already scanned it. + */ + if (((o = lookup_object(remote)) != NULL) && + (o->flags & COMPLETE)) { + continue; + } + + if (!fetching) + packet_write(fd[1], "want %s%s%s%s%s%s%s\n", + sha1_to_hex(remote), + (multi_ack ? " multi_ack" : ""), + (use_sideband == 2 ? " side-band-64k" : ""), + (use_sideband == 1 ? " side-band" : ""), + (use_thin_pack ? " thin-pack" : ""), + (no_progress ? 
" no-progress" : ""), + " ofs-delta"); + else + packet_write(fd[1], "want %s\n", sha1_to_hex(remote)); + fetching++; + } + if (is_repository_shallow()) + write_shallow_commits(fd[1], 1); + if (depth > 0) + packet_write(fd[1], "deepen %d", depth); + packet_flush(fd[1]); + if (!fetching) + return 1; + + if (depth > 0) { + char line[1024]; + unsigned char sha1[20]; + int len; + + while ((len = packet_read_line(fd[0], line, sizeof(line)))) { + if (!prefixcmp(line, "shallow ")) { + if (get_sha1_hex(line + 8, sha1)) + die("invalid shallow line: %s", line); + register_shallow(sha1); + continue; + } + if (!prefixcmp(line, "unshallow ")) { + if (get_sha1_hex(line + 10, sha1)) + die("invalid unshallow line: %s", line); + if (!lookup_object(sha1)) + die("object not found: %s", line); + /* make sure that it is parsed as shallow */ + parse_object(sha1); + if (unregister_shallow(sha1)) + die("no shallow found: %s", line); + continue; + } + die("expected shallow/unshallow, got %s", line); + } + } + + flushes = 0; + retval = -1; + while ((sha1 = get_rev())) { + packet_write(fd[1], "have %s\n", sha1_to_hex(sha1)); + if (verbose) + fprintf(stderr, "have %s\n", sha1_to_hex(sha1)); + in_vain++; + if (!(31 & ++count)) { + int ack; + + packet_flush(fd[1]); + flushes++; + + /* + * We keep one window "ahead" of the other side, and + * will wait for an ACK only on the next one + */ + if (count == 32) + continue; + + do { + ack = get_ack(fd[0], result_sha1); + if (verbose && ack) + fprintf(stderr, "got ack %d %s\n", ack, + sha1_to_hex(result_sha1)); + if (ack == 1) { + flushes = 0; + multi_ack = 0; + retval = 0; + goto done; + } else if (ack == 2) { + struct commit *commit = + lookup_commit(result_sha1); + mark_common(commit, 0, 1); + retval = 0; + in_vain = 0; + got_continue = 1; + } + } while (ack); + flushes--; + if (got_continue && MAX_IN_VAIN < in_vain) { + if (verbose) + fprintf(stderr, "giving up\n"); + break; /* give up */ + } + } + } +done: + packet_write(fd[1], "done\n"); + if (verbose) + fprintf(stderr, "done\n"); + if (retval != 0) { + multi_ack = 0; + flushes++; + } + while (flushes || multi_ack) { + int ack = get_ack(fd[0], result_sha1); + if (ack) { + if (verbose) + fprintf(stderr, "got ack (%d) %s\n", ack, + sha1_to_hex(result_sha1)); + if (ack == 1) + return 0; + multi_ack = 1; + continue; + } + flushes--; + } + return retval; +} + +static struct commit_list *complete; + +static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data) +{ + struct object *o = parse_object(sha1); + + while (o && o->type == OBJ_TAG) { + struct tag *t = (struct tag *) o; + if (!t->tagged) + break; /* broken repository */ + o->flags |= COMPLETE; + o = parse_object(t->tagged->sha1); + } + if (o && o->type == OBJ_COMMIT) { + struct commit *commit = (struct commit *)o; + commit->object.flags |= COMPLETE; + insert_by_date(commit, &complete); + } + return 0; +} + +static void mark_recent_complete_commits(unsigned long cutoff) +{ + while (complete && cutoff <= complete->item->date) { + if (verbose) + fprintf(stderr, "Marking %s as complete\n", + sha1_to_hex(complete->item->object.sha1)); + pop_most_recent_commit(&complete, COMPLETE); + } +} + +static void filter_refs(struct ref **refs, int nr_match, char **match) +{ + struct ref **return_refs; + struct ref *newlist = NULL; + struct ref **newtail = &newlist; + struct ref *ref, *next; + struct ref *fastarray[32]; + + if (nr_match && !fetch_all) { + if (ARRAY_SIZE(fastarray) < nr_match) + return_refs = xcalloc(nr_match, sizeof(struct ref *)); + 
else { + return_refs = fastarray; + memset(return_refs, 0, sizeof(struct ref *) * nr_match); + } + } + else + return_refs = NULL; + + for (ref = *refs; ref; ref = next) { + next = ref->next; + if (!memcmp(ref->name, "refs/", 5) && + check_ref_format(ref->name + 5)) + ; /* trash */ + else if (fetch_all && + (!depth || prefixcmp(ref->name, "refs/tags/") )) { + *newtail = ref; + ref->next = NULL; + newtail = &ref->next; + continue; + } + else { + int order = path_match(ref->name, nr_match, match); + if (order) { + return_refs[order-1] = ref; + continue; /* we will link it later */ + } + } + free(ref); + } + + if (!fetch_all) { + int i; + for (i = 0; i < nr_match; i++) { + ref = return_refs[i]; + if (ref) { + *newtail = ref; + ref->next = NULL; + newtail = &ref->next; + } + } + if (return_refs != fastarray) + free(return_refs); + } + *refs = newlist; +} + +static int everything_local(struct ref **refs, int nr_match, char **match) +{ + struct ref *ref; + int retval; + unsigned long cutoff = 0; + + track_object_refs = 0; + save_commit_buffer = 0; + + for (ref = *refs; ref; ref = ref->next) { + struct object *o; + + o = parse_object(ref->old_sha1); + if (!o) + continue; + + /* We already have it -- which may mean that we were + * in sync with the other side at some time after + * that (it is OK if we guess wrong here). + */ + if (o->type == OBJ_COMMIT) { + struct commit *commit = (struct commit *)o; + if (!cutoff || cutoff < commit->date) + cutoff = commit->date; + } + } + + if (!depth) { + for_each_ref(mark_complete, NULL); + if (cutoff) + mark_recent_complete_commits(cutoff); + } + + /* + * Mark all complete remote refs as common refs. + * Don't mark them common yet; the server has to be told so first. + */ + for (ref = *refs; ref; ref = ref->next) { + struct object *o = deref_tag(lookup_object(ref->old_sha1), + NULL, 0); + + if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE)) + continue; + + if (!(o->flags & SEEN)) { + rev_list_push((struct commit *)o, COMMON_REF | SEEN); + + mark_common((struct commit *)o, 1, 1); + } + } + + filter_refs(refs, nr_match, match); + + for (retval = 1, ref = *refs; ref ; ref = ref->next) { + const unsigned char *remote = ref->old_sha1; + unsigned char local[20]; + struct object *o; + + o = lookup_object(remote); + if (!o || !(o->flags & COMPLETE)) { + retval = 0; + if (!verbose) + continue; + fprintf(stderr, + "want %s (%s)\n", sha1_to_hex(remote), + ref->name); + continue; + } + + hashcpy(ref->new_sha1, local); + if (!verbose) + continue; + fprintf(stderr, + "already have %s (%s)\n", sha1_to_hex(remote), + ref->name); + } + return retval; +} + +static pid_t setup_sideband(int fd[2], int xd[2]) +{ + pid_t side_pid; + + if (!use_sideband) { + fd[0] = xd[0]; + fd[1] = xd[1]; + return 0; + } + /* xd[] is talking with upload-pack; subprocess reads from + * xd[0], spits out band#2 to stderr, and feeds us band#1 + * through our fd[0]. 
+ */ + if (pipe(fd) < 0) + die("fetch-pack: unable to set up pipe"); + side_pid = fork(); + if (side_pid < 0) + die("fetch-pack: unable to fork off sideband demultiplexer"); + if (!side_pid) { + /* subprocess */ + close(fd[0]); + if (xd[0] != xd[1]) + close(xd[1]); + if (recv_sideband("fetch-pack", xd[0], fd[1], 2)) + exit(1); + exit(0); + } + close(xd[0]); + close(fd[1]); + fd[1] = xd[1]; + return side_pid; +} + +static int get_pack(int xd[2]) +{ + int status; + pid_t pid, side_pid; + int fd[2]; + const char *argv[20]; + char keep_arg[256]; + char hdr_arg[256]; + const char **av; + int do_keep = keep_pack; + + side_pid = setup_sideband(fd, xd); + + av = argv; + *hdr_arg = 0; + if (unpack_limit) { + struct pack_header header; + + if (read_pack_header(fd[0], &header)) + die("protocol error: bad pack header"); + snprintf(hdr_arg, sizeof(hdr_arg), "--pack_header=%u,%u", + ntohl(header.hdr_version), ntohl(header.hdr_entries)); + if (ntohl(header.hdr_entries) < unpack_limit) + do_keep = 0; + else + do_keep = 1; + } + + if (do_keep) { + *av++ = "index-pack"; + *av++ = "--stdin"; + if (!quiet && !no_progress) + *av++ = "-v"; + if (use_thin_pack) + *av++ = "--fix-thin"; + if (keep_pack > 1 || unpack_limit) { + int s = sprintf(keep_arg, + "--keep=fetch-pack %d on ", getpid()); + if (gethostname(keep_arg + s, sizeof(keep_arg) - s)) + strcpy(keep_arg + s, "localhost"); + *av++ = keep_arg; + } + } + else { + *av++ = "unpack-objects"; + if (quiet) + *av++ = "-q"; + } + if (*hdr_arg) + *av++ = hdr_arg; + *av++ = NULL; + + pid = fork(); + if (pid < 0) + die("fetch-pack: unable to fork off %s", argv[0]); + if (!pid) { + dup2(fd[0], 0); + close(fd[0]); + close(fd[1]); + execv_git_cmd(argv); + die("%s exec failed", argv[0]); + } + close(fd[0]); + close(fd[1]); + while (waitpid(pid, &status, 0) < 0) { + if (errno != EINTR) + die("waiting for %s: %s", argv[0], strerror(errno)); + } + if (WIFEXITED(status)) { + int code = WEXITSTATUS(status); + if (code) + die("%s died with error code %d", argv[0], code); + return 0; + } + if (WIFSIGNALED(status)) { + int sig = WTERMSIG(status); + die("%s died of signal %d", argv[0], sig); + } + die("%s died of unnatural causes %d", argv[0], status); +} + +static struct ref *do_fetch_pack(int fd[2], int nr_match, char **match) +{ + struct ref *ref; + unsigned char sha1[20]; + + get_remote_heads(fd[0], &ref, 0, NULL, 0); + if (is_repository_shallow() && !server_supports("shallow")) + die("Server does not support shallow clients"); + if (server_supports("multi_ack")) { + if (verbose) + fprintf(stderr, "Server supports multi_ack\n"); + multi_ack = 1; + } + if (server_supports("side-band-64k")) { + if (verbose) + fprintf(stderr, "Server supports side-band-64k\n"); + use_sideband = 2; + } + else if (server_supports("side-band")) { + if (verbose) + fprintf(stderr, "Server supports side-band\n"); + use_sideband = 1; + } + if (!ref) { + packet_flush(fd[1]); + die("no matching remote head"); + } + if (everything_local(&ref, nr_match, match)) { + packet_flush(fd[1]); + goto all_done; + } + if (find_common(fd, sha1, ref) < 0) + if (keep_pack != 1) + /* When cloning, it is not unusual to have + * no common commit. + */ + fprintf(stderr, "warning: no common commits\n"); + + if (get_pack(fd)) + die("git-fetch-pack: fetch failed."); + + all_done: + return ref; +} + +static int remove_duplicates(int nr_heads, char **heads) +{ + int src, dst; + + for (src = dst = 0; src < nr_heads; src++) { + /* If heads[src] is different from any of + * heads[0..dst], push it in. 
+ */ + int i; + for (i = 0; i < dst; i++) { + if (!strcmp(heads[i], heads[src])) + break; + } + if (i < dst) + continue; + if (src != dst) + heads[dst] = heads[src]; + dst++; + } + heads[dst] = 0; + return dst; +} + +static int fetch_pack_config(const char *var, const char *value) +{ + if (strcmp(var, "fetch.unpacklimit") == 0) { + fetch_unpack_limit = git_config_int(var, value); + return 0; + } + + if (strcmp(var, "transfer.unpacklimit") == 0) { + transfer_unpack_limit = git_config_int(var, value); + return 0; + } + + return git_default_config(var, value); +} + +static struct lock_file lock; + +void setup_fetch_pack(struct fetch_pack_args *args) +{ + uploadpack = args->uploadpack; + quiet = args->quiet; + keep_pack = args->keep_pack; + if (args->unpacklimit >= 0) + unpack_limit = args->unpacklimit; + if (args->keep_pack) + unpack_limit = 0; + use_thin_pack = args->use_thin_pack; + fetch_all = args->fetch_all; + verbose = args->verbose; + depth = args->depth; + no_progress = args->no_progress; +} + +int cmd_fetch_pack(int argc, const char **argv, const char *prefix) +{ + int i, ret, nr_heads; + struct ref *ref; + char *dest = NULL, **heads; + + git_config(fetch_pack_config); + + if (0 <= transfer_unpack_limit) + unpack_limit = transfer_unpack_limit; + else if (0 <= fetch_unpack_limit) + unpack_limit = fetch_unpack_limit; + + nr_heads = 0; + heads = NULL; + for (i = 1; i < argc; i++) { + const char *arg = argv[i]; + + if (*arg == '-') { + if (!prefixcmp(arg, "--upload-pack=")) { + uploadpack = arg + 14; + continue; + } + if (!prefixcmp(arg, "--exec=")) { + uploadpack = arg + 7; + continue; + } + if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) { + quiet = 1; + continue; + } + if (!strcmp("--keep", arg) || !strcmp("-k", arg)) { + keep_pack++; + unpack_limit = 0; + continue; + } + if (!strcmp("--thin", arg)) { + use_thin_pack = 1; + continue; + } + if (!strcmp("--all", arg)) { + fetch_all = 1; + continue; + } + if (!strcmp("-v", arg)) { + verbose = 1; + continue; + } + if (!prefixcmp(arg, "--depth=")) { + depth = strtol(arg + 8, NULL, 0); + continue; + } + if (!strcmp("--no-progress", arg)) { + no_progress = 1; + continue; + } + usage(fetch_pack_usage); + } + dest = (char *)arg; + heads = (char **)(argv + i + 1); + nr_heads = argc - i - 1; + break; + } + if (!dest) + usage(fetch_pack_usage); + + ref = fetch_pack(dest, nr_heads, heads); + + ret = !ref; + + while (ref) { + printf("%s %s\n", + sha1_to_hex(ref->old_sha1), ref->name); + ref = ref->next; + } + + return ret; +} + +struct ref *fetch_pack(const char *dest, int nr_heads, char **heads) +{ + int i, ret; + int fd[2]; + pid_t pid; + struct ref *ref; + struct stat st; + + if (depth > 0) { + if (stat(git_path("shallow"), &st)) + st.st_mtime = 0; + } + + printf("connect to %s\n", dest); + + pid = git_connect(fd, (char *)dest, uploadpack, + verbose ? CONNECT_VERBOSE : 0); + if (pid < 0) + return NULL; + if (heads && nr_heads) + nr_heads = remove_duplicates(nr_heads, heads); + ref = do_fetch_pack(fd, nr_heads, heads); + close(fd[0]); + close(fd[1]); + ret = finish_connect(pid); + + if (!ret && nr_heads) { + /* If the heads to pull were given, we should have + * consumed all of them by matching the remote. + * Otherwise, 'git-fetch remote no-such-ref' would + * silently succeed without issuing an error. 
+ */ + for (i = 0; i < nr_heads; i++) + if (heads[i] && heads[i][0]) { + error("no such remote ref %s", heads[i]); + ret = 1; + } + } + + if (!ret && depth > 0) { + struct cache_time mtime; + char *shallow = git_path("shallow"); + int fd; + + mtime.sec = st.st_mtime; +#ifdef USE_NSEC + mtime.usec = st.st_mtim.usec; +#endif + if (stat(shallow, &st)) { + if (mtime.sec) + die("shallow file was removed during fetch"); + } else if (st.st_mtime != mtime.sec +#ifdef USE_NSEC + || st.st_mtim.usec != mtime.usec +#endif + ) + die("shallow file was changed during fetch"); + + fd = hold_lock_file_for_update(&lock, shallow, 1); + if (!write_shallow_commits(fd, 0)) { + unlink(shallow); + rollback_lock_file(&lock); + } else { + close(fd); + commit_lock_file(&lock); + } + } + + if (ret) + ref = NULL; + + return ref; +} -- cgit v1.2.1 From 133296f00cd441b5525ccc3e82ee13cbfc62d246 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Fri, 14 Sep 2007 03:31:11 -0400 Subject: Remove unnecessary debugging from builtin-fetch The older git-fetch client did not produce all of this debugging information to stdout. Most end-users and Porcelain (e.g. StGIT, git-gui, qgit) do not want to see these low-level details on the console so they should be removed. Signed-off-by: Shawn O. Pearce Signed-off-by: Junio C Hamano --- builtin-fetch-pack.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'builtin-fetch-pack.c') diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c index 3b217d96f..e77cd2671 100644 --- a/builtin-fetch-pack.c +++ b/builtin-fetch-pack.c @@ -767,8 +767,6 @@ struct ref *fetch_pack(const char *dest, int nr_heads, char **heads) st.st_mtime = 0; } - printf("connect to %s\n", dest); - pid = git_connect(fd, (char *)dest, uploadpack, verbose ? CONNECT_VERBOSE : 0); if (pid < 0) -- cgit v1.2.1 From 1788c39cd0742439b9bedc28bc10bc4d105b6c0f Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Fri, 14 Sep 2007 03:31:23 -0400 Subject: Remove pack.keep after ref updates in git-fetch If we are using a native packfile to perform a git-fetch invocation and the received packfile contained more than the configured limits of fetch.unpackLimit/transfer.unpackLimit then index-pack will output a single line saying "keep\t$sha1\n" to stdout. This line needs to be captured and retained so we can delete the corresponding .keep file ("$GIT_DIR/objects/pack/pack-$sha1.keep") once all refs have been safely updated. This trick has long been in use with git-fetch.sh and its lower level helper git-fetch--tool as a way to allow index-pack to save the new packfile before the refs have been updated and yet avoid a race with any concurrently running git-repack process. It was unfortunately lost when git-fetch.sh was converted to pure C and fetch--tool was no longer being invoked. Signed-off-by: Shawn O. 
Pearce Signed-off-by: Junio C Hamano --- builtin-fetch-pack.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) (limited to 'builtin-fetch-pack.c') diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c index e77cd2671..b0936ccf0 100644 --- a/builtin-fetch-pack.c +++ b/builtin-fetch-pack.c @@ -493,7 +493,7 @@ static pid_t setup_sideband(int fd[2], int xd[2]) return side_pid; } -static int get_pack(int xd[2]) +static int get_pack(int xd[2], char **pack_lockfile) { int status; pid_t pid, side_pid; @@ -503,6 +503,7 @@ static int get_pack(int xd[2]) char hdr_arg[256]; const char **av; int do_keep = keep_pack; + int keep_pipe[2]; side_pid = setup_sideband(fd, xd); @@ -522,6 +523,8 @@ static int get_pack(int xd[2]) } if (do_keep) { + if (pack_lockfile && pipe(keep_pipe)) + die("fetch-pack: pipe setup failure: %s", strerror(errno)); *av++ = "index-pack"; *av++ = "--stdin"; if (!quiet && !no_progress) @@ -550,6 +553,11 @@ static int get_pack(int xd[2]) die("fetch-pack: unable to fork off %s", argv[0]); if (!pid) { dup2(fd[0], 0); + if (do_keep && pack_lockfile) { + dup2(keep_pipe[1], 1); + close(keep_pipe[0]); + close(keep_pipe[1]); + } close(fd[0]); close(fd[1]); execv_git_cmd(argv); @@ -557,6 +565,11 @@ static int get_pack(int xd[2]) } close(fd[0]); close(fd[1]); + if (do_keep && pack_lockfile) { + close(keep_pipe[1]); + *pack_lockfile = index_pack_lockfile(keep_pipe[0]); + close(keep_pipe[0]); + } while (waitpid(pid, &status, 0) < 0) { if (errno != EINTR) die("waiting for %s: %s", argv[0], strerror(errno)); @@ -574,7 +587,10 @@ static int get_pack(int xd[2]) die("%s died of unnatural causes %d", argv[0], status); } -static struct ref *do_fetch_pack(int fd[2], int nr_match, char **match) +static struct ref *do_fetch_pack(int fd[2], + int nr_match, + char **match, + char **pack_lockfile) { struct ref *ref; unsigned char sha1[20]; @@ -612,7 +628,7 @@ static struct ref *do_fetch_pack(int fd[2], int nr_match, char **match) */ fprintf(stderr, "warning: no common commits\n"); - if (get_pack(fd)) + if (get_pack(fd, pack_lockfile)) die("git-fetch-pack: fetch failed."); all_done: @@ -741,7 +757,7 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) if (!dest) usage(fetch_pack_usage); - ref = fetch_pack(dest, nr_heads, heads); + ref = fetch_pack(dest, nr_heads, heads, NULL); ret = !ref; @@ -754,7 +770,10 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) return ret; } -struct ref *fetch_pack(const char *dest, int nr_heads, char **heads) +struct ref *fetch_pack(const char *dest, + int nr_heads, + char **heads, + char **pack_lockfile) { int i, ret; int fd[2]; @@ -773,7 +792,7 @@ struct ref *fetch_pack(const char *dest, int nr_heads, char **heads) return NULL; if (heads && nr_heads) nr_heads = remove_duplicates(nr_heads, heads); - ref = do_fetch_pack(fd, nr_heads, heads); + ref = do_fetch_pack(fd, nr_heads, heads, pack_lockfile); close(fd[0]); close(fd[1]); ret = finish_connect(pid); -- cgit v1.2.1 From e8a37b89f7c4fd4a10104a1c4d6021a707f70613 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Fri, 14 Sep 2007 18:59:53 -0400 Subject: Fix builtin-fetch memory corruption by not overstepping array A long time ago Junio added this line to always ensure that the output array created by remove_duplicates() had a NULL as its terminating node. Today none of the downstream consumers of this array care about a NULL terminator; they only pay attention to the size of the array (as indicated by nr_heads). In (nearly?) 
all cases passing a NULL element will cause SIGSEGV failures. So this NULL terminal is not actually necessary. Unfortunately we cannot continue to NULL terminate the array at this point as the array may only have been allocated large enough to match the input of nr_heads. If there are no duplicates than we would be trying to store NULL into heads[nr_heads] and that may be outside of the array. My recent series to cleanup builtin-fetch changed the allocation of the heads array from 256 entries to exactly nr_heads thus ensuring we were always overstepping the array and causing memory corruption. Signed-off-by: Shawn O. Pearce Signed-off-by: Junio C Hamano --- builtin-fetch-pack.c | 1 - 1 file changed, 1 deletion(-) (limited to 'builtin-fetch-pack.c') diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c index b0936ccf0..2977a9419 100644 --- a/builtin-fetch-pack.c +++ b/builtin-fetch-pack.c @@ -654,7 +654,6 @@ static int remove_duplicates(int nr_heads, char **heads) heads[dst] = heads[src]; dst++; } - heads[dst] = 0; return dst; } -- cgit v1.2.1 From fa74052922cf39e5a39ad7178d1b13c2da9b4519 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Wed, 19 Sep 2007 00:49:35 -0400 Subject: Always obtain fetch-pack arguments from struct fetch_pack_args Copying the arguments from a fetch_pack_args into static globals within the builtin-fetch-pack module is error-prone and may lead rise to cases where arguments supplied via the struct from the new fetch_pack() API may not be honored by the implementation. Here we reorganize all of the static globals into a single static struct fetch_pack_args instance and use memcpy() to move the data from the caller supplied structure into the globals before we execute our pack fetching implementation. This strategy is more robust to additions and deletions of properties. As keep_pack is a single bit we have also introduced lock_pack to mean not only download and store the packfile via index-pack but also to lock it against repacking by creating a .keep file when the packfile itself is stored. The caller must remove the .keep file when it is safe to do so. Signed-off-by: Shawn O. Pearce Signed-off-by: Junio C Hamano --- builtin-fetch-pack.c | 111 +++++++++++++++++++++------------------------------ 1 file changed, 46 insertions(+), 65 deletions(-) (limited to 'builtin-fetch-pack.c') diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c index 2977a9419..77eb181b5 100644 --- a/builtin-fetch-pack.c +++ b/builtin-fetch-pack.c @@ -8,15 +8,11 @@ #include "sideband.h" #include "fetch-pack.h" -static int keep_pack; static int transfer_unpack_limit = -1; static int fetch_unpack_limit = -1; static int unpack_limit = 100; -static int quiet; -static int verbose; -static int fetch_all; -static int depth; -static int no_progress; +static struct fetch_pack_args args; + static const char fetch_pack_usage[] = "git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=] [--depth=] [--no-progress] [-v] [:] [...]"; static const char *uploadpack = "git-upload-pack"; @@ -181,7 +177,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, (use_sideband == 2 ? " side-band-64k" : ""), (use_sideband == 1 ? " side-band" : ""), (use_thin_pack ? " thin-pack" : ""), - (no_progress ? " no-progress" : ""), + (args.no_progress ? 
" no-progress" : ""), " ofs-delta"); else packet_write(fd[1], "want %s\n", sha1_to_hex(remote)); @@ -189,13 +185,13 @@ static int find_common(int fd[2], unsigned char *result_sha1, } if (is_repository_shallow()) write_shallow_commits(fd[1], 1); - if (depth > 0) - packet_write(fd[1], "deepen %d", depth); + if (args.depth > 0) + packet_write(fd[1], "deepen %d", args.depth); packet_flush(fd[1]); if (!fetching) return 1; - if (depth > 0) { + if (args.depth > 0) { char line[1024]; unsigned char sha1[20]; int len; @@ -226,7 +222,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, retval = -1; while ((sha1 = get_rev())) { packet_write(fd[1], "have %s\n", sha1_to_hex(sha1)); - if (verbose) + if (args.verbose) fprintf(stderr, "have %s\n", sha1_to_hex(sha1)); in_vain++; if (!(31 & ++count)) { @@ -244,7 +240,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, do { ack = get_ack(fd[0], result_sha1); - if (verbose && ack) + if (args.verbose && ack) fprintf(stderr, "got ack %d %s\n", ack, sha1_to_hex(result_sha1)); if (ack == 1) { @@ -263,7 +259,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, } while (ack); flushes--; if (got_continue && MAX_IN_VAIN < in_vain) { - if (verbose) + if (args.verbose) fprintf(stderr, "giving up\n"); break; /* give up */ } @@ -271,7 +267,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, } done: packet_write(fd[1], "done\n"); - if (verbose) + if (args.verbose) fprintf(stderr, "done\n"); if (retval != 0) { multi_ack = 0; @@ -280,7 +276,7 @@ done: while (flushes || multi_ack) { int ack = get_ack(fd[0], result_sha1); if (ack) { - if (verbose) + if (args.verbose) fprintf(stderr, "got ack (%d) %s\n", ack, sha1_to_hex(result_sha1)); if (ack == 1) @@ -317,7 +313,7 @@ static int mark_complete(const char *path, const unsigned char *sha1, int flag, static void mark_recent_complete_commits(unsigned long cutoff) { while (complete && cutoff <= complete->item->date) { - if (verbose) + if (args.verbose) fprintf(stderr, "Marking %s as complete\n", sha1_to_hex(complete->item->object.sha1)); pop_most_recent_commit(&complete, COMPLETE); @@ -332,7 +328,7 @@ static void filter_refs(struct ref **refs, int nr_match, char **match) struct ref *ref, *next; struct ref *fastarray[32]; - if (nr_match && !fetch_all) { + if (nr_match && !args.fetch_all) { if (ARRAY_SIZE(fastarray) < nr_match) return_refs = xcalloc(nr_match, sizeof(struct ref *)); else { @@ -348,8 +344,8 @@ static void filter_refs(struct ref **refs, int nr_match, char **match) if (!memcmp(ref->name, "refs/", 5) && check_ref_format(ref->name + 5)) ; /* trash */ - else if (fetch_all && - (!depth || prefixcmp(ref->name, "refs/tags/") )) { + else if (args.fetch_all && + (!args.depth || prefixcmp(ref->name, "refs/tags/") )) { *newtail = ref; ref->next = NULL; newtail = &ref->next; @@ -365,7 +361,7 @@ static void filter_refs(struct ref **refs, int nr_match, char **match) free(ref); } - if (!fetch_all) { + if (!args.fetch_all) { int i; for (i = 0; i < nr_match; i++) { ref = return_refs[i]; @@ -408,7 +404,7 @@ static int everything_local(struct ref **refs, int nr_match, char **match) } } - if (!depth) { + if (!args.depth) { for_each_ref(mark_complete, NULL); if (cutoff) mark_recent_complete_commits(cutoff); @@ -442,7 +438,7 @@ static int everything_local(struct ref **refs, int nr_match, char **match) o = lookup_object(remote); if (!o || !(o->flags & COMPLETE)) { retval = 0; - if (!verbose) + if (!args.verbose) continue; fprintf(stderr, "want %s (%s)\n", sha1_to_hex(remote), @@ -451,7 
+447,7 @@ static int everything_local(struct ref **refs, int nr_match, char **match) } hashcpy(ref->new_sha1, local); - if (!verbose) + if (!args.verbose) continue; fprintf(stderr, "already have %s (%s)\n", sha1_to_hex(remote), @@ -502,14 +498,14 @@ static int get_pack(int xd[2], char **pack_lockfile) char keep_arg[256]; char hdr_arg[256]; const char **av; - int do_keep = keep_pack; + int do_keep = args.keep_pack; int keep_pipe[2]; side_pid = setup_sideband(fd, xd); av = argv; *hdr_arg = 0; - if (unpack_limit) { + if (!args.keep_pack && unpack_limit) { struct pack_header header; if (read_pack_header(fd[0], &header)) @@ -527,11 +523,11 @@ static int get_pack(int xd[2], char **pack_lockfile) die("fetch-pack: pipe setup failure: %s", strerror(errno)); *av++ = "index-pack"; *av++ = "--stdin"; - if (!quiet && !no_progress) + if (!args.quiet && !args.no_progress) *av++ = "-v"; - if (use_thin_pack) + if (args.use_thin_pack) *av++ = "--fix-thin"; - if (keep_pack > 1 || unpack_limit) { + if (args.lock_pack || unpack_limit) { int s = sprintf(keep_arg, "--keep=fetch-pack %d on ", getpid()); if (gethostname(keep_arg + s, sizeof(keep_arg) - s)) @@ -541,7 +537,7 @@ static int get_pack(int xd[2], char **pack_lockfile) } else { *av++ = "unpack-objects"; - if (quiet) + if (args.quiet) *av++ = "-q"; } if (*hdr_arg) @@ -599,17 +595,17 @@ static struct ref *do_fetch_pack(int fd[2], if (is_repository_shallow() && !server_supports("shallow")) die("Server does not support shallow clients"); if (server_supports("multi_ack")) { - if (verbose) + if (args.verbose) fprintf(stderr, "Server supports multi_ack\n"); multi_ack = 1; } if (server_supports("side-band-64k")) { - if (verbose) + if (args.verbose) fprintf(stderr, "Server supports side-band-64k\n"); use_sideband = 2; } else if (server_supports("side-band")) { - if (verbose) + if (args.verbose) fprintf(stderr, "Server supports side-band\n"); use_sideband = 1; } @@ -622,7 +618,7 @@ static struct ref *do_fetch_pack(int fd[2], goto all_done; } if (find_common(fd, sha1, ref) < 0) - if (keep_pack != 1) + if (!args.keep_pack) /* When cloning, it is not unusual to have * no common commit. 
*/ @@ -674,22 +670,6 @@ static int fetch_pack_config(const char *var, const char *value) static struct lock_file lock; -void setup_fetch_pack(struct fetch_pack_args *args) -{ - uploadpack = args->uploadpack; - quiet = args->quiet; - keep_pack = args->keep_pack; - if (args->unpacklimit >= 0) - unpack_limit = args->unpacklimit; - if (args->keep_pack) - unpack_limit = 0; - use_thin_pack = args->use_thin_pack; - fetch_all = args->fetch_all; - verbose = args->verbose; - depth = args->depth; - no_progress = args->no_progress; -} - int cmd_fetch_pack(int argc, const char **argv, const char *prefix) { int i, ret, nr_heads; @@ -710,40 +690,40 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) if (*arg == '-') { if (!prefixcmp(arg, "--upload-pack=")) { - uploadpack = arg + 14; + args.uploadpack = arg + 14; continue; } if (!prefixcmp(arg, "--exec=")) { - uploadpack = arg + 7; + args.uploadpack = arg + 7; continue; } if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) { - quiet = 1; + args.quiet = 1; continue; } if (!strcmp("--keep", arg) || !strcmp("-k", arg)) { - keep_pack++; - unpack_limit = 0; + args.lock_pack = args.keep_pack; + args.keep_pack = 1; continue; } if (!strcmp("--thin", arg)) { - use_thin_pack = 1; + args.use_thin_pack = 1; continue; } if (!strcmp("--all", arg)) { - fetch_all = 1; + args.fetch_all = 1; continue; } if (!strcmp("-v", arg)) { - verbose = 1; + args.verbose = 1; continue; } if (!prefixcmp(arg, "--depth=")) { - depth = strtol(arg + 8, NULL, 0); + args.depth = strtol(arg + 8, NULL, 0); continue; } if (!strcmp("--no-progress", arg)) { - no_progress = 1; + args.no_progress = 1; continue; } usage(fetch_pack_usage); @@ -756,8 +736,7 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) if (!dest) usage(fetch_pack_usage); - ref = fetch_pack(dest, nr_heads, heads, NULL); - + ref = fetch_pack(&args, dest, nr_heads, heads, NULL); ret = !ref; while (ref) { @@ -769,7 +748,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) return ret; } -struct ref *fetch_pack(const char *dest, +struct ref *fetch_pack(struct fetch_pack_args *my_args, + const char *dest, int nr_heads, char **heads, char **pack_lockfile) @@ -780,13 +760,14 @@ struct ref *fetch_pack(const char *dest, struct ref *ref; struct stat st; - if (depth > 0) { + memcpy(&args, my_args, sizeof(args)); + if (args.depth > 0) { if (stat(git_path("shallow"), &st)) st.st_mtime = 0; } pid = git_connect(fd, (char *)dest, uploadpack, - verbose ? CONNECT_VERBOSE : 0); + args.verbose ? CONNECT_VERBOSE : 0); if (pid < 0) return NULL; if (heads && nr_heads) @@ -809,7 +790,7 @@ struct ref *fetch_pack(const char *dest, } } - if (!ret && depth > 0) { + if (!ret && args.depth > 0) { struct cache_time mtime; char *shallow = git_path("shallow"); int fd; -- cgit v1.2.1 From 50ab5fd3fc16fbe01170059977533fa2c7c4d448 Mon Sep 17 00:00:00 2001 From: "Shawn O. Pearce" Date: Wed, 19 Sep 2007 00:49:39 -0400 Subject: Ensure builtin-fetch honors {fetch,transfer}.unpackLimit The only way to configure the unpacking limit is currently through the .git/config (or ~/.gitconfig) mechanism as we have no existing command line option interface to control this threshold on a per invocation basis. This was intentional by design as the storage policy of the repository should be a repository-wide decision and should not be subject to variations made on individual command executions. 
Earlier builtin-fetch was bypassing the unpacking limit chosen by the user through the configuration file as it did not reread the configuration options through fetch_pack_config if we called the internal fetch_pack() API directly. We now ensure we always run the config file through fetch_pack_config at least once in this process, thereby setting our unpackLimit properly. Signed-off-by: Shawn O. Pearce Signed-off-by: Junio C Hamano --- builtin-fetch-pack.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'builtin-fetch-pack.c') diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c index 77eb181b5..8f25d509a 100644 --- a/builtin-fetch-pack.c +++ b/builtin-fetch-pack.c @@ -670,18 +670,24 @@ static int fetch_pack_config(const char *var, const char *value) static struct lock_file lock; -int cmd_fetch_pack(int argc, const char **argv, const char *prefix) +static void fetch_pack_setup(void) { - int i, ret, nr_heads; - struct ref *ref; - char *dest = NULL, **heads; - + static int did_setup; + if (did_setup) + return; git_config(fetch_pack_config); - if (0 <= transfer_unpack_limit) unpack_limit = transfer_unpack_limit; else if (0 <= fetch_unpack_limit) unpack_limit = fetch_unpack_limit; + did_setup = 1; +} + +int cmd_fetch_pack(int argc, const char **argv, const char *prefix) +{ + int i, ret, nr_heads; + struct ref *ref; + char *dest = NULL, **heads; nr_heads = 0; heads = NULL; @@ -760,6 +766,7 @@ struct ref *fetch_pack(struct fetch_pack_args *my_args, struct ref *ref; struct stat st; + fetch_pack_setup(); memcpy(&args, my_args, sizeof(args)); if (args.depth > 0) { if (stat(git_path("shallow"), &st)) -- cgit v1.2.1
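
The series above converts fetch-pack into a builtin and ends with an internal fetch_pack() API that takes a caller-supplied struct fetch_pack_args plus an optional pack_lockfile out-parameter. Below is a minimal sketch of how an in-process caller (such as builtin-fetch) might drive that API; the function name fetch_from, the flag choices, and the ref handling are illustrative assumptions, not part of the patches.

#include "cache.h"
#include "fetch-pack.h"

/*
 * Hypothetical caller of the internal API introduced by the patches
 * above.  'heads' is built by the caller; duplicate entries are
 * removed inside fetch_pack() itself.
 */
static struct ref *fetch_from(const char *url, int nr_heads, char **heads)
{
	struct fetch_pack_args args;
	char *pack_lockfile = NULL;
	struct ref *fetched;

	memset(&args, 0, sizeof(args));
	args.keep_pack = 1;	/* store the pack via index-pack ... */
	args.lock_pack = 1;	/* ... and hold a .keep file until refs are updated */

	fetched = fetch_pack(&args, url, nr_heads, heads, &pack_lockfile);
	if (!fetched)
		return NULL;

	/* ... caller records its local refs from 'fetched' here ... */

	if (pack_lockfile)
		unlink(pack_lockfile);	/* drop the .keep once the refs are safe */
	return fetched;
}

With lock_pack set, index-pack is run with a --keep= argument so the newly stored packfile cannot be pruned by a concurrently running git-repack; the location of the resulting .keep file is reported back through pack_lockfile (read from index-pack's stdout over the keep_pipe added in the pack.keep patch), and per that patch's commit message it is the caller's responsibility to remove the file once the fetched refs have been safely updated.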