path: root/t/lib-pack.sh
author    Jeff King <peff@peff.net>    2013-08-23 20:02:25 -0400
committer Junio C Hamano <gitster@pobox.com>    2013-08-24 22:31:20 -0700
commit    171bdaca698a69c5a43067e4d6d81abfc50f17d6 (patch)
tree      367d62dceb3a3e2a96b6f15c4557925cedfe5f51 /t/lib-pack.sh
parent    54c93cb4afc932ab125a59464bb52e04f9c36575 (diff)
sha1-lookup: handle duplicate keys with GIT_USE_LOOKUP
The sha1_entry_pos function tries to be smart about selecting the middle of a range for its binary search by looking at the value differences between the "lo" and "hi" constraints. However, it is unable to cope with entries with duplicate keys in the sorted list.

We may hit a point in the search where both our "lo" and "hi" point to the same key. In this case, the range of values between our endpoints is 0, and trying to scale the difference between our key and the endpoints over that range is undefined (i.e., divide by zero). The current code catches this with an "assert(lov < hiv)". Moreover, after seeing that the first 20 bytes of the key are the same, we will try to establish a value from the 21st byte, which is nonsensical.

Instead, we can detect the case that we are in a run of duplicates, and simply do a final comparison against any one of them (since they are all the same, it does not matter which). If the keys match, we have found our entry (or one of them, anyway). If not, then we know that we do not need to look further, as we must be in a run of the duplicate key.

Signed-off-by: Jeff King <peff@peff.net>
Acked-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
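A minimal sketch (not part of this commit) of how the helpers added below could exercise that lookup path: build a pack containing the same object twice, index it, and then repeat the lookup with GIT_USE_LOOKUP enabled. It assumes the usual test-lib environment where $TEST_DIRECTORY points at t/:

	. "$TEST_DIRECTORY"/lib-pack.sh &&
	clear_packs &&
	pack_header 2 >dup.pack &&
	pack_obj e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 >>dup.pack &&
	pack_obj e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 >>dup.pack &&
	pack_trailer dup.pack &&
	git index-pack --stdin <dup.pack &&
	GIT_USE_LOOKUP=1 git cat-file blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391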
Diffstat (limited to 't/lib-pack.sh')
-rw-r--r--  t/lib-pack.sh  78
1 file changed, 78 insertions(+), 0 deletions(-)
diff --git a/t/lib-pack.sh b/t/lib-pack.sh
new file mode 100644
index 000000000..fecd5a027
--- /dev/null
+++ b/t/lib-pack.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+# Support routines for hand-crafting weird or malicious packs.
+#
+# You can make a complete pack like:
+#
+# pack_header 2 >foo.pack &&
+# pack_obj e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 >>foo.pack &&
+# pack_obj e68fe8129b546b101aee9510c5328e7f21ca1d18 >>foo.pack &&
+# pack_trailer foo.pack
+
+# Print the big-endian 4-byte octal representation of $1
+uint32_octal () {
+ n=$1
+ printf '\\%o' $(($n / 16777216)); n=$((n % 16777216))
+ printf '\\%o' $(($n / 65536)); n=$((n % 65536))
+ printf '\\%o' $(($n / 256)); n=$((n % 256))
+ printf '\\%o' $(($n ));
+}
+
+# Print the big-endian 4-byte binary representation of $1
+uint32_binary () {
+ printf "$(uint32_octal "$1")"
+}
+
+# Print a pack header, version 2, for a pack with $1 objects
+pack_header () {
+ printf 'PACK' &&
+ printf '\0\0\0\2' &&
+ uint32_binary "$1"
+}
+
+# Print the pack data for object $1, as a delta against object $2 (or as a full
+# object if $2 is missing or empty). The output is suitable for including
+# directly in the packfile, and represents the entirety of the object entry.
+# Doing this on the fly (especially picking your deltas) is quite tricky, so we
+# have hardcoded some well-known objects. See the case statements below for the
+# complete list.
+pack_obj () {
+ case "$1" in
+ # empty blob
+ e69de29bb2d1d6434b8b29ae775ad8c2e48c5391)
+ case "$2" in
+ '')
+ printf '\060\170\234\003\0\0\0\0\1'
+ return
+ ;;
+ esac
+ ;;
+
+ # blob containing "\7\76"
+ e68fe8129b546b101aee9510c5328e7f21ca1d18)
+ case "$2" in
+ '')
+ printf '\062\170\234\143\267\3\0\0\116\0\106'
+ return
+ ;;
+ esac
+ ;;
+ esac
+
+ echo >&2 "BUG: don't know how to print $1${2:+ (from $2)}"
+ return 1
+}
+
+# Compute and append pack trailer to "$1"
+pack_trailer () {
+ test-sha1 -b <"$1" >trailer.tmp &&
+ cat trailer.tmp >>"$1" &&
+ rm -f trailer.tmp
+}
+
+# Remove any existing packs to make sure that
+# whatever we index next will be the pack that we
+# actually use.
+clear_packs () {
+ rm -f .git/objects/pack/*
+}
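
A quick sanity check of the byte-level helpers above (not part of the commit; it assumes a POSIX shell with od(1) available) shows the header bytes they produce: the literal string "PACK", the version field, and the object count, each as a big-endian 32-bit value:

	pack_header 3 | od -An -tx1
	# expected: 50 41 43 4b 00 00 00 02 00 00 00 03
	#           "PACK"      version 2   3 objects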