| author    | Junio C Hamano <gitster@pobox.com> | 2012-04-15 22:50:38 -0700 |
| committer | Junio C Hamano <gitster@pobox.com> | 2012-04-15 22:50:39 -0700 |
| commit    | 47de6b0425c79081e64756dcc019bb26344bf7ad (patch) | |
| tree      | 74179a74db058b14979c3e7a5a5fc97b97965ce6 /object.c | |
| parent    | 30fd3a54256a54e5a6006a203c923e97641fb2c2 (diff) | |
| parent    | da591a7f4bbe1a208cc5f955523506eb857c45ca (diff) | |
Merge branch 'nd/stream-more'
Use API to read blob data in smaller chunks in more places to reduce the
memory footprint.
By Nguyễn Thái Ngọc Duy (6) and Junio C Hamano (1)
* nd/stream-more:
update-server-info: respect core.bigfilethreshold
fsck: use streaming API for writing lost-found blobs
show: use streaming API for showing blobs
parse_object: avoid putting whole blob in core
cat-file: use streaming API to print blobs
Add more large blob test cases
streaming: make streaming-write-entry to be more reusable
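
The topic is built on the streaming API in streaming.h (open_istream(), read_istream(), close_istream()), which reads an object incrementally instead of pulling it entirely into memory with read_sha1_file(). Below is a minimal sketch of that pattern, not code from the series itself; the helper name and the 16 kB buffer size are illustrative assumptions.

```c
/*
 * Illustrative only: a hypothetical helper showing the shape of the
 * streaming API this series relies on.  The function name and buffer
 * size are assumptions, not code from the series.
 */
#include "cache.h"
#include "streaming.h"

static int stream_blob_to_stdout(const unsigned char *sha1)
{
	struct git_istream *st;
	enum object_type type;
	unsigned long size;
	char buf[16384];
	ssize_t readlen;

	/* Open an incremental reader instead of read_sha1_file(). */
	st = open_istream(sha1, &type, &size, NULL);
	if (!st)
		return error("cannot stream object %s", sha1_to_hex(sha1));
	if (type != OBJ_BLOB) {
		close_istream(st);
		return error("%s is not a blob", sha1_to_hex(sha1));
	}
	/* Copy the blob in small chunks; memory use stays bounded. */
	while ((readlen = read_istream(st, buf, sizeof(buf))) > 0)
		write_or_die(1, buf, readlen);
	close_istream(st);
	return readlen < 0 ? error("read error on %s", sha1_to_hex(sha1)) : 0;
}
```

The cat-file, show, and fsck changes listed above apply essentially this chunked-read loop wherever a blob's contents only need to be copied to an output.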
Diffstat (limited to 'object.c')
| -rw-r--r-- | object.c | 11 |

1 file changed, 11 insertions, 0 deletions
@@ -198,6 +198,17 @@ struct object *parse_object(const unsigned char *sha1)
 	if (obj && obj->parsed)
 		return obj;
 
+	if ((obj && obj->type == OBJ_BLOB) ||
+	    (!obj && has_sha1_file(sha1) &&
+	     sha1_object_info(sha1, NULL) == OBJ_BLOB)) {
+		if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
+			error("sha1 mismatch %s\n", sha1_to_hex(repl));
+			return NULL;
+		}
+		parse_blob_buffer(lookup_blob(sha1), NULL, 0);
+		return lookup_object(sha1);
+	}
+
 	buffer = read_sha1_file(sha1, &type, &size);
 	if (buffer) {
 		if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
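
In the hunk above, a blob is recognized either from an already-looked-up object or by asking sha1_object_info(), and parse_object() then skips read_sha1_file() entirely: check_sha1_signature() called with a NULL buffer verifies the SHA-1 by streaming the object off disk, and parse_blob_buffer(lookup_blob(sha1), NULL, 0) simply marks the blob as parsed without attaching its contents, so even a very large blob is never held in core just to be parsed.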