aboutsummaryrefslogtreecommitdiff
path: root/object.c
diff options
context:
space:
mode:
authorJunio C Hamano <gitster@pobox.com>2012-04-15 22:50:38 -0700
committerJunio C Hamano <gitster@pobox.com>2012-04-15 22:50:39 -0700
commit47de6b0425c79081e64756dcc019bb26344bf7ad (patch)
tree74179a74db058b14979c3e7a5a5fc97b97965ce6 /object.c
parent30fd3a54256a54e5a6006a203c923e97641fb2c2 (diff)
parentda591a7f4bbe1a208cc5f955523506eb857c45ca (diff)
downloadgit-47de6b0425c79081e64756dcc019bb26344bf7ad.tar.gz
git-47de6b0425c79081e64756dcc019bb26344bf7ad.tar.xz
Merge branch 'nd/stream-more'
Use API to read blob data in smaller chunks in more places to reduce the memory footprint.

By Nguyễn Thái Ngọc Duy (6) and Junio C Hamano (1)

* nd/stream-more:
  update-server-info: respect core.bigfilethreshold
  fsck: use streaming API for writing lost-found blobs
  show: use streaming API for showing blobs
  parse_object: avoid putting whole blob in core
  cat-file: use streaming API to print blobs
  Add more large blob test cases
  streaming: make streaming-write-entry to be more reusable
Diffstat (limited to 'object.c')
-rw-r--r--object.c11
1 file changed, 11 insertions, 0 deletions
diff --git a/object.c b/object.c
index 6b06297a5..0498b18d4 100644
--- a/object.c
+++ b/object.c
@@ -198,6 +198,17 @@ struct object *parse_object(const unsigned char *sha1)
if (obj && obj->parsed)
return obj;
+ if ((obj && obj->type == OBJ_BLOB) ||
+ (!obj && has_sha1_file(sha1) &&
+ sha1_object_info(sha1, NULL) == OBJ_BLOB)) {
+ if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
+ error("sha1 mismatch %s\n", sha1_to_hex(repl));
+ return NULL;
+ }
+ parse_blob_buffer(lookup_blob(sha1), NULL, 0);
+ return lookup_object(sha1);
+ }
+
buffer = read_sha1_file(sha1, &type, &size);
if (buffer) {
if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {