author	Junio C Hamano <gitster@pobox.com>	2014-02-27 14:01:48 -0800
committer	Junio C Hamano <gitster@pobox.com>	2014-02-27 14:01:48 -0800
commit	0f9e62e0847c075678a7a5a748567d1e881d16f8 (patch)
tree	8ef8989069ae40eef891b0964fc2cb8036a74e48 /compat
parent	6784fab0ac1f973f22f1d4252f0e513d61be6c6b (diff)
parent	6b5b3a27b7faf9d72efec28fa017408daf45cd00 (diff)
Merge branch 'jk/pack-bitmap'
Borrow the bitmap index into packfiles from JGit to speed up
enumeration of objects involved in a commit range without having to
fully traverse the history.

* jk/pack-bitmap: (26 commits)
  ewah: unconditionally ntohll ewah data
  ewah: support platforms that require aligned reads
  read-cache: use get_be32 instead of hand-rolled ntoh_l
  block-sha1: factor out get_be and put_be wrappers
  do not discard revindex when re-preparing packfiles
  pack-bitmap: implement optional name_hash cache
  t/perf: add tests for pack bitmaps
  t: add basic bitmap functionality tests
  count-objects: recognize .bitmap in garbage-checking
  repack: consider bitmaps when performing repacks
  repack: handle optional files created by pack-objects
  repack: turn exts array into array-of-struct
  repack: stop using magic number for ARRAY_SIZE(exts)
  pack-objects: implement bitmap writing
  rev-list: add bitmap mode to speed up object lists
  pack-objects: use bitmaps when packing objects
  pack-objects: split add_object_entry
  pack-bitmap: add support for bitmap indexes
  documentation: add documentation for the bitmap format
  ewah: compressed bitmap implementation
  ...
Diffstat (limited to 'compat')
-rw-r--r--	compat/bswap.h	112
1 file changed, 111 insertions(+), 1 deletion(-)
diff --git a/compat/bswap.h b/compat/bswap.h
index 5061214f7..120c6c1d3 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -17,7 +17,20 @@ static inline uint32_t default_swab32(uint32_t val)
((val & 0x000000ff) << 24));
}
+static inline uint64_t default_bswap64(uint64_t val)
+{
+ return (((val & (uint64_t)0x00000000000000ffULL) << 56) |
+ ((val & (uint64_t)0x000000000000ff00ULL) << 40) |
+ ((val & (uint64_t)0x0000000000ff0000ULL) << 24) |
+ ((val & (uint64_t)0x00000000ff000000ULL) << 8) |
+ ((val & (uint64_t)0x000000ff00000000ULL) >> 8) |
+ ((val & (uint64_t)0x0000ff0000000000ULL) >> 24) |
+ ((val & (uint64_t)0x00ff000000000000ULL) >> 40) |
+ ((val & (uint64_t)0xff00000000000000ULL) >> 56));
+}
+
#undef bswap32
+#undef bswap64
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
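
The portable fallback above isolates each byte with a mask and shifts it to
the mirrored position. A minimal standalone sanity check of that technique
follows; the local copy of the function and the main() harness are
illustrative sketches, not part of the patch:

#include <stdint.h>
#include <assert.h>

/* local copy of the patch's portable 64-bit swap */
static inline uint64_t default_bswap64(uint64_t val)
{
	return (((val & 0x00000000000000ffULL) << 56) |
		((val & 0x000000000000ff00ULL) << 40) |
		((val & 0x0000000000ff0000ULL) << 24) |
		((val & 0x00000000ff000000ULL) <<  8) |
		((val & 0x000000ff00000000ULL) >>  8) |
		((val & 0x0000ff0000000000ULL) >> 24) |
		((val & 0x00ff000000000000ULL) >> 40) |
		((val & 0xff00000000000000ULL) >> 56));
}

int main(void)
{
	uint64_t v = 0x0102030405060708ULL;
	assert(default_bswap64(v) == 0x0807060504030201ULL); /* bytes mirrored */
	assert(default_bswap64(default_bswap64(v)) == v);     /* swap twice = identity */
	return 0;
}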
@@ -32,15 +45,42 @@ static inline uint32_t git_bswap32(uint32_t x)
return result;
}
+#define bswap64 git_bswap64
+#if defined(__x86_64__)
+static inline uint64_t git_bswap64(uint64_t x)
+{
+ uint64_t result;
+ if (__builtin_constant_p(x))
+ result = default_bswap64(x);
+ else
+ __asm__("bswap %q0" : "=r" (result) : "0" (x));
+ return result;
+}
+#else
+static inline uint64_t git_bswap64(uint64_t x)
+{
+ union { uint64_t i64; uint32_t i32[2]; } tmp, result;
+ if (__builtin_constant_p(x))
+ result.i64 = default_bswap64(x);
+ else {
+ tmp.i64 = x;
+ result.i32[0] = git_bswap32(tmp.i32[1]);
+ result.i32[1] = git_bswap32(tmp.i32[0]);
+ }
+ return result.i64;
+}
+#endif
+
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
#include <stdlib.h>
#define bswap32(x) _byteswap_ulong(x)
+#define bswap64(x) _byteswap_uint64(x)
#endif
-#ifdef bswap32
+#if defined(bswap32)
#undef ntohl
#undef htonl
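
On 32-bit GCC targets the patch composes the 64-bit swap from two 32-bit
swaps: each half is swapped individually, then the halves are exchanged
through a union. A hedged standalone sketch of the same idea, with a plain
swap32() standing in for git_bswap32():

#include <stdint.h>
#include <assert.h>

static uint32_t swap32(uint32_t v)	/* stand-in for git_bswap32 */
{
	return ((v & 0x000000ffU) << 24) |
	       ((v & 0x0000ff00U) <<  8) |
	       ((v & 0x00ff0000U) >>  8) |
	       ((v & 0xff000000U) >> 24);
}

static uint64_t swap64_via_halves(uint64_t x)
{
	union { uint64_t i64; uint32_t i32[2]; } tmp, result;
	tmp.i64 = x;
	/* swap each 32-bit half, then exchange the halves */
	result.i32[0] = swap32(tmp.i32[1]);
	result.i32[1] = swap32(tmp.i32[0]);
	return result.i64;
}

int main(void)
{
	/* works on either host endianness: the union indexing is symmetric */
	assert(swap64_via_halves(0x0102030405060708ULL) ==
	       0x0807060504030201ULL);
	return 0;
}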
@@ -48,3 +88,73 @@ static inline uint32_t git_bswap32(uint32_t x)
#define htonl(x) bswap32(x)
#endif
+
+#if defined(bswap64)
+
+#undef ntohll
+#undef htonll
+#define ntohll(x) bswap64(x)
+#define htonll(x) bswap64(x)
+
+#else
+
+#undef ntohll
+#undef htonll
+
+#if !defined(__BYTE_ORDER)
+# if defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
+# define __BYTE_ORDER BYTE_ORDER
+# define __LITTLE_ENDIAN LITTLE_ENDIAN
+# define __BIG_ENDIAN BIG_ENDIAN
+# endif
+#endif
+
+#if !defined(__BYTE_ORDER)
+# error "Cannot determine endianness"
+#endif
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+# define ntohll(n) (n)
+# define htonll(n) (n)
+#else
+# define ntohll(n) default_bswap64(n)
+# define htonll(n) default_bswap64(n)
+#endif
+
+#endif
+
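With either branch, htonll() converts a host-order 64-bit value to network
(big-endian) order and ntohll() converts it back; both are identities on
big-endian hosts and a byte swap on little-endian ones. A hedged usage
sketch in the spirit of the "ewah: unconditionally ntohll ewah data" change
listed above; write_counter()/read_counter() are hypothetical helpers, and
the include assumes compilation inside a git source checkout:

#include <stdint.h>
#include <string.h>
#include "compat/bswap.h"	/* assumes git's source tree on the include path */

/* hypothetical: serialize a 64-bit counter in network order */
static void write_counter(unsigned char buf[8], uint64_t count)
{
	uint64_t net = htonll(count);
	memcpy(buf, &net, sizeof(net));
}

static uint64_t read_counter(const unsigned char buf[8])
{
	uint64_t net;
	memcpy(&net, buf, sizeof(net));
	return ntohll(net);
}

int main(void)
{
	unsigned char buf[8];
	write_counter(buf, 0x0102030405060708ULL);
	/* buf now holds 01 02 .. 08 regardless of host byte order */
	return read_counter(buf) == 0x0102030405060708ULL ? 0 : 1;
}
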
+/*
+ * Performance might be improved if the CPU architecture is OK with
+ * unaligned 32-bit loads and a fast ntohl() is available.
+ * Otherwise fall back to byte loads and shifts which is portable,
+ * and is faster on architectures with memory alignment issues.
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || \
+ defined(_M_IX86) || defined(_M_X64) || \
+ defined(__ppc__) || defined(__ppc64__) || \
+ defined(__powerpc__) || defined(__powerpc64__) || \
+ defined(__s390__) || defined(__s390x__)
+
+#define get_be16(p) ntohs(*(unsigned short *)(p))
+#define get_be32(p) ntohl(*(unsigned int *)(p))
+#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
+
+#else
+
+#define get_be16(p) ( \
+ (*((unsigned char *)(p) + 0) << 8) | \
+ (*((unsigned char *)(p) + 1) << 0) )
+#define get_be32(p) ( \
+ (*((unsigned char *)(p) + 0) << 24) | \
+ (*((unsigned char *)(p) + 1) << 16) | \
+ (*((unsigned char *)(p) + 2) << 8) | \
+ (*((unsigned char *)(p) + 3) << 0) )
+#define put_be32(p, v) do { \
+ unsigned int __v = (v); \
+ *((unsigned char *)(p) + 0) = __v >> 24; \
+ *((unsigned char *)(p) + 1) = __v >> 16; \
+ *((unsigned char *)(p) + 2) = __v >> 8; \
+ *((unsigned char *)(p) + 3) = __v >> 0; } while (0)
+
+#endif
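
Both branches give callers the same interface: get_be16()/get_be32() read a
big-endian value through a possibly unaligned pointer, and put_be32() writes
one. A hedged usage sketch; parse_header() is a hypothetical caller, and the
buffer layout mirrors git's on-disk index header, which read-cache.c now
reads with get_be32 per the commit listed above:

#include <stdint.h>
#include "compat/bswap.h"	/* assumes git's source tree on the include path */

/* hypothetical: validate a 4-byte magic and a big-endian version field */
static int parse_header(unsigned char *buf, uint32_t expect_magic,
			uint32_t *version)
{
	if (get_be32(buf) != expect_magic)
		return -1;
	*version = get_be32(buf + 4);
	return 0;
}

int main(void)
{
	/* "DIRC" magic followed by version 2, as in the index header */
	unsigned char buf[8] = { 'D', 'I', 'R', 'C', 0, 0, 0, 2 };
	uint32_t version;
	return parse_header(buf, 0x44495243, &version) == 0 &&
	       version == 2 ? 0 : 1;
}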