From d1b44bb76442431eca44279da32deed249dcf213 Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:32 -0400
Subject: [PATCH 1/8] finalize_object_file(): check for name collision before renaming

We prefer link()/unlink() to rename() for object files, with the idea
that we should prefer the data that is already on disk to what is
incoming. But we may fall back to rename() if the user has configured
us to do so, or if the filesystem seems not to support cross-directory
links. This loses the "prefer what is on disk" property.

We can mitigate this somewhat by trying to stat() the destination
filename before doing the rename. This is racy, since the object could
be created between the stat() and rename() calls. But in practice it
expands the definition of "what is already on disk" to the point at
which the function is called. That is enough to deal with any potential
attacks where an attacker is trying to collide hashes with what's
already in the repository.

Co-authored-by: Jeff King
Signed-off-by: Jeff King
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 object-file.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/object-file.c b/object-file.c
index c5994202ba..683e6b2a0b 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1904,6 +1904,7 @@ static void write_object_file_prepare_literally(const struct git_hash_algo *algo
  */
 int finalize_object_file(const char *tmpfile, const char *filename)
 {
+	struct stat st;
 	int ret = 0;
 
 	if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
@@ -1924,9 +1925,12 @@ int finalize_object_file(const char *tmpfile, const char *filename)
 	 */
 	if (ret && ret != EEXIST) {
 	try_rename:
-		if (!rename(tmpfile, filename))
+		if (!stat(filename, &st))
+			ret = EEXIST;
+		else if (!rename(tmpfile, filename))
 			goto out;
-		ret = errno;
+		else
+			ret = errno;
 	}
 	unlink_or_warn(tmpfile);
 	if (ret) {

From 9ca7c2c13bc26afe673172a9b417a43519381968 Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:35 -0400
Subject: [PATCH 2/8] finalize_object_file(): refactor unlink_or_warn() placement

As soon as we've tried to link() a temporary object into place, we then
unlink() the tempfile immediately, whether we were successful or not.

For the success case, this is because we no longer need the old file
(it's now linked into place).

For the error case, there are two outcomes. Either we got EEXIST, in
which case we consider the collision to be a noop. Or we got a system
error, in which case we are just cleaning up after ourselves.

Using a single line for all of these cases has some problems:

  - in the error case, our unlink() may clobber errno, which we use in
    the error message

  - for the collision case, there's a FIXME that indicates we should do
    a collision check. In preparation for implementing that, we'll need
    to actually hold on to the file.

Split these three cases into their own calls to unlink_or_warn(). This
is more verbose, but lets us do the right thing in each case.
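Taken together with the previous patch, the tail end of
finalize_object_file() then behaves roughly like the sketch below (a
condensed illustration only, not the literal diff; the Coda workaround
and the surrounding code are elided):

	if (ret && ret != EEXIST) {
	try_rename:
		/* prefer what is already on disk over the incoming file */
		if (!stat(filename, &st))
			ret = EEXIST;
		else if (!rename(tmpfile, filename))
			goto out;	/* renamed into place; no tmpfile left */
		else
			ret = errno;
	}
	if (ret) {
		if (ret != EEXIST) {
			int saved_errno = errno;
			unlink_or_warn(tmpfile);	/* plain failure: clean up */
			errno = saved_errno;
			return error_errno(_("unable to write file %s"), filename);
		}
		/* collision with existing contents; checked in the next patch */
		unlink_or_warn(tmpfile);
	}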
Co-authored-by: Jeff King
Signed-off-by: Jeff King
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 object-file.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/object-file.c b/object-file.c
index 683e6b2a0b..54a82a5f7a 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1911,6 +1911,8 @@ int finalize_object_file(const char *tmpfile, const char *filename)
 		goto try_rename;
 	else if (link(tmpfile, filename))
 		ret = errno;
+	else
+		unlink_or_warn(tmpfile);
 
 	/*
 	 * Coda hack - coda doesn't like cross-directory links,
@@ -1932,12 +1934,15 @@ int finalize_object_file(const char *tmpfile, const char *filename)
 		else
 			ret = errno;
 	}
-	unlink_or_warn(tmpfile);
 	if (ret) {
 		if (ret != EEXIST) {
+			int saved_errno = errno;
+			unlink_or_warn(tmpfile);
+			errno = saved_errno;
 			return error_errno(_("unable to write file %s"), filename);
 		}
 		/* FIXME!!! Collision check here ? */
+		unlink_or_warn(tmpfile);
 	}
 
 out:

From b1b8dfde6929ec9463eca0a858c4adb9786d7c93 Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:38 -0400
Subject: [PATCH 3/8] finalize_object_file(): implement collision check

We've had "FIXME!!! Collision check here ?" in finalize_object_file()
since aac1794132 (Improve sha1 object file writing., 2005-05-03). That
is, when we try to write a file with the same name, we assume the
on-disk contents are the same and blindly throw away the new copy.

One of the reasons we never implemented this is because the files it
moves are all named after the cryptographic hash of their contents
(either loose objects, or packs which have their hash in the name these
days). So we are unlikely to see such a collision by accident. And even
though there are weaknesses in sha1, we assume they are mitigated by
our use of sha1dc.

So while it's a theoretical concern now, it hasn't been a priority.
However, if we start using weaker hashes for pack checksums and names,
this will become a practical concern. So in preparation, let's actually
implement a byte-for-byte collision check.

The new check will cause the write of new differing content to be a
failure, rather than a silent noop, and we'll retain the temporary file
on disk. If there's no collision present, we'll clean up the temporary
file as usual after either rename()-ing or link()-ing it into place.
Note that this may cause some extra computation when the files are in
fact identical, but this should happen rarely.

Loose objects are exempt from this check, and the collision check may
be skipped by calling the _flags variant of this function with the
FOF_SKIP_COLLISION_CHECK bit set. This is done for a couple of reasons:

  - We don't treat the hash of the loose object file's contents as a
    checksum, since the same loose object can be stored using different
    bytes on disk (e.g., when adjusting core.compression, using a
    different version of zlib, etc.).

    This is fundamentally different from cases where
    finalize_object_file() is operating over a file which uses the hash
    value as a checksum of the contents. In other words, a pair of
    identical loose objects can be stored using different bytes on
    disk, and that should not be treated as a collision.

  - We already use the path of the loose object as its hash value /
    object name, so checking for collisions at the content level
    doesn't add anything.
    Adding a content-level collision check would have to happen at a
    higher level than in finalize_object_file(), since (avoiding race
    conditions) writing an object loose which already exists in the
    repository will prevent us from even reaching
    finalize_object_file() via the object freshening code.

    There is a collision check in index-pack via its `check_collision()`
    function, but there isn't an analogous function in unpack-objects,
    which just feeds the result to write_object_file().

    So skipping the collision check here does not change for better or
    worse the hardness of loose object writes.

As a small note related to the latter bullet point above, we must teach
the tmp-objdir routines to similarly skip the content-level collision
checks when calling migrate_one() on a loose object file, which we do
by setting the FOF_SKIP_COLLISION_CHECK bit when we are inside of a
loose object shard.

Co-authored-by: Jeff King
Signed-off-by: Jeff King
Helped-by: Elijah Newren
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 object-file.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++---
 object-file.h |  6 +++++
 tmp-objdir.c  | 26 ++++++++++++++------
 3 files changed, 89 insertions(+), 10 deletions(-)

diff --git a/object-file.c b/object-file.c
index 54a82a5f7a..440c5c6850 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1899,10 +1899,67 @@ static void write_object_file_prepare_literally(const struct git_hash_algo *algo
 	hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
 }
 
+static int check_collision(const char *filename_a, const char *filename_b)
+{
+	char buf_a[4096], buf_b[4096];
+	int fd_a = -1, fd_b = -1;
+	int ret = 0;
+
+	fd_a = open(filename_a, O_RDONLY);
+	if (fd_a < 0) {
+		ret = error_errno(_("unable to open %s"), filename_a);
+		goto out;
+	}
+
+	fd_b = open(filename_b, O_RDONLY);
+	if (fd_b < 0) {
+		ret = error_errno(_("unable to open %s"), filename_b);
+		goto out;
+	}
+
+	while (1) {
+		ssize_t sz_a, sz_b;
+
+		sz_a = read_in_full(fd_a, buf_a, sizeof(buf_a));
+		if (sz_a < 0) {
+			ret = error_errno(_("unable to read %s"), filename_a);
+			goto out;
+		}
+
+		sz_b = read_in_full(fd_b, buf_b, sizeof(buf_b));
+		if (sz_b < 0) {
+			ret = error_errno(_("unable to read %s"), filename_b);
+			goto out;
+		}
+
+		if (sz_a != sz_b || memcmp(buf_a, buf_b, sz_a)) {
+			ret = error(_("files '%s' and '%s' differ in contents"),
+				    filename_a, filename_b);
+			goto out;
+		}
+
+		if (sz_a < sizeof(buf_a))
+			break;
+	}
+
+out:
+	if (fd_a > -1)
+		close(fd_a);
+	if (fd_b > -1)
+		close(fd_b);
+	return ret;
+}
+
 /*
  * Move the just written object into its final resting place.
  */
 int finalize_object_file(const char *tmpfile, const char *filename)
+{
+	return finalize_object_file_flags(tmpfile, filename, 0);
+}
+
+int finalize_object_file_flags(const char *tmpfile, const char *filename,
+			       enum finalize_object_file_flags flags)
 {
 	struct stat st;
 	int ret = 0;
@@ -1941,7 +1998,9 @@ int finalize_object_file(const char *tmpfile, const char *filename)
 			errno = saved_errno;
 			return error_errno(_("unable to write file %s"), filename);
 		}
-		/* FIXME!!! Collision check here ? */
+		if (!(flags & FOF_SKIP_COLLISION_CHECK) &&
+		    check_collision(tmpfile, filename))
+			return -1;
 		unlink_or_warn(tmpfile);
 	}
 
@@ -2195,7 +2254,8 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 			warning_errno(_("failed utime() on %s"), tmp_file.buf);
 	}
 
-	return finalize_object_file(tmp_file.buf, filename.buf);
+	return finalize_object_file_flags(tmp_file.buf, filename.buf,
+					  FOF_SKIP_COLLISION_CHECK);
 }
 
 static int freshen_loose_object(const struct object_id *oid)
@@ -2317,7 +2377,8 @@ int stream_loose_object(struct input_stream *in_stream, size_t len,
 		strbuf_release(&dir);
 	}
 
-	err = finalize_object_file(tmp_file.buf, filename.buf);
+	err = finalize_object_file_flags(tmp_file.buf, filename.buf,
+					 FOF_SKIP_COLLISION_CHECK);
 	if (!err && compat)
 		err = repo_add_loose_object_map(the_repository, oid, &compat_oid);
 cleanup:
diff --git a/object-file.h b/object-file.h
index d6414610f8..81b30d269c 100644
--- a/object-file.h
+++ b/object-file.h
@@ -117,7 +117,13 @@ int check_object_signature(struct repository *r, const struct object_id *oid,
  */
 int stream_object_signature(struct repository *r, const struct object_id *oid);
 
+enum finalize_object_file_flags {
+	FOF_SKIP_COLLISION_CHECK = 1,
+};
+
 int finalize_object_file(const char *tmpfile, const char *filename);
+int finalize_object_file_flags(const char *tmpfile, const char *filename,
+			       enum finalize_object_file_flags flags);
 
 /* Helper to check and "touch" a file */
 int check_and_freshen_file(const char *fn, int freshen);
diff --git a/tmp-objdir.c b/tmp-objdir.c
index a8e4553f27..9556f8fcdb 100644
--- a/tmp-objdir.c
+++ b/tmp-objdir.c
@@ -204,9 +204,11 @@ static int read_dir_paths(struct string_list *out, const char *path)
 	return 0;
 }
 
-static int migrate_paths(struct strbuf *src, struct strbuf *dst);
+static int migrate_paths(struct strbuf *src, struct strbuf *dst,
+			 enum finalize_object_file_flags flags);
 
-static int migrate_one(struct strbuf *src, struct strbuf *dst)
+static int migrate_one(struct strbuf *src, struct strbuf *dst,
+		       enum finalize_object_file_flags flags)
 {
 	struct stat st;
 
@@ -218,12 +220,18 @@ static int migrate_one(struct strbuf *src, struct strbuf *dst)
 				return -1;
 		} else if (errno != EEXIST)
 			return -1;
-		return migrate_paths(src, dst);
+		return migrate_paths(src, dst, flags);
 	}
-	return finalize_object_file(src->buf, dst->buf);
+	return finalize_object_file_flags(src->buf, dst->buf, flags);
 }
 
-static int migrate_paths(struct strbuf *src, struct strbuf *dst)
+static int is_loose_object_shard(const char *name)
+{
+	return strlen(name) == 2 && isxdigit(name[0]) && isxdigit(name[1]);
+}
+
+static int migrate_paths(struct strbuf *src, struct strbuf *dst,
+			 enum finalize_object_file_flags flags)
 {
 	size_t src_len = src->len, dst_len = dst->len;
 	struct string_list paths = STRING_LIST_INIT_DUP;
@@ -237,11 +245,15 @@ static int migrate_paths(struct strbuf *src, struct strbuf *dst)
 
 	for (i = 0; i < paths.nr; i++) {
 		const char *name = paths.items[i].string;
+		enum finalize_object_file_flags flags_copy = flags;
 
 		strbuf_addf(src, "/%s", name);
 		strbuf_addf(dst, "/%s", name);
 
-		ret |= migrate_one(src, dst);
+		if (is_loose_object_shard(name))
+			flags_copy |= FOF_SKIP_COLLISION_CHECK;
+
+		ret |= migrate_one(src, dst, flags_copy);
 
 		strbuf_setlen(src, src_len);
 		strbuf_setlen(dst, dst_len);
@@ -269,7 +281,7 @@ int tmp_objdir_migrate(struct tmp_objdir *t)
 	strbuf_addbuf(&src, &t->path);
 	strbuf_addstr(&dst, get_object_directory());
 
-	ret = migrate_paths(&src, &dst);
+	ret = migrate_paths(&src, &dst, 0);
 
 	strbuf_release(&src);
 	strbuf_release(&dst);

From c177d3dc50d59042b1756e352e19f2dd8b01c25a Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:41 -0400
Subject: [PATCH 4/8] pack-objects: use finalize_object_file() to rename pack/idx/etc

In most places that write files to the object database (even packfiles
via index-pack or fast-import), we use finalize_object_file(). This
prefers link()/unlink() over rename(), because it means we will prefer
data that is already in the repository to data that we are newly
writing.

We should do the same thing in pack-objects. Even though we don't think
of it as accepting outside data (and thus not being susceptible to
collision attacks), in theory a determined attacker could present just
the right set of objects to cause an incremental repack to generate a
pack with their desired hash.

This has some test and real-world fallout, as seen in the adjustment to
t5303 below. That test script assumes that we can "fix" corruption by
repacking into a good state, including when the pack generated by that
repack operation collides with a (corrupted) pack with the same hash.
This violates our assumption from the previous adjustments to
finalize_object_file(): that if we're moving a new file over an
existing one, then since their checksums match, so too must their
contents.

This makes "fixing" corruption like this a more explicit operation,
since the test (and users, who may fix real-life corruption using a
similar technique) must first move the broken contents out of the way.

Note also that we now call adjust_shared_perm() twice. We already call
adjust_shared_perm() in stage_tmp_packfiles(), and now call it again in
finalize_object_file(). This is somewhat wasteful, but cleaning up the
existing calls to adjust_shared_perm() is tricky (because sometimes
we're writing to a tmpfile, and sometimes we're writing directly into
the final destination), so let's tolerate some minor waste until we can
more carefully clean up the now-redundant calls.
Co-authored-by: Jeff King
Signed-off-by: Jeff King
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 pack-write.c                          | 7 ++++---
 t/t5303-pack-corruption-resilience.sh | 7 ++++++-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/pack-write.c b/pack-write.c
index d07f03d0ab..e5beecd3a4 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -8,6 +8,7 @@
 #include "csum-file.h"
 #include "remote.h"
 #include "chunk-format.h"
+#include "object-file.h"
 #include "pack-mtimes.h"
 #include "pack-objects.h"
 #include "pack-revindex.h"
@@ -527,9 +528,9 @@ static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source,
 	size_t name_prefix_len = name_prefix->len;
 
 	strbuf_addstr(name_prefix, ext);
-	if (rename(source, name_prefix->buf))
-		die_errno("unable to rename temporary file to '%s'",
-			  name_prefix->buf);
+	if (finalize_object_file(source, name_prefix->buf))
+		die("unable to rename temporary file to '%s'",
+		    name_prefix->buf);
 	strbuf_setlen(name_prefix, name_prefix_len);
 }
 
diff --git a/t/t5303-pack-corruption-resilience.sh b/t/t5303-pack-corruption-resilience.sh
index 61469ef4a6..e6a43ec9ae 100755
--- a/t/t5303-pack-corruption-resilience.sh
+++ b/t/t5303-pack-corruption-resilience.sh
@@ -44,9 +44,14 @@ create_new_pack() {
 }
 
 do_repack() {
+    for f in $pack.*
+    do
+	mv $f "$(echo $f | sed -e 's/pack-/pack-corrupt-/')" || return 1
+    done &&
     pack=$(printf "$blob_1\n$blob_2\n$blob_3\n" |
            git pack-objects $@ .git/objects/pack/pack) &&
-    pack=".git/objects/pack/pack-${pack}"
+    pack=".git/objects/pack/pack-${pack}" &&
+    rm -f .git/objects/pack/pack-corrupt-*
 }
 
 do_corrupt_object() {

From 4c61a1d040b2b0ce4fa88d89e3169f6e766d4134 Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:44 -0400
Subject: [PATCH 5/8] sha1: do not redefine `platform_SHA_CTX` and friends

Our in-tree SHA-1 wrappers all define platform_SHA_CTX and related
macros to point at the opaque "context" type, init, update, and similar
functions for each specific implementation.

In hash.h, we use these platform_ variables to set up the function
pointers for, e.g., the_hash_algo->init_fn(), etc.

But while these header files have a header-specific macro that prevents
them declaring their structs / functions multiple times, they
unconditionally define the platform variables, making it impossible to
load multiple SHA-1 implementations at once.

As a prerequisite for loading a separate SHA-1 implementation for
non-cryptographic uses, only define the platform_ variables if they
have not already been defined.
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 block-sha1/sha1.h | 2 ++
 sha1/openssl.h    | 2 ++
 sha1dc_git.h      | 3 +++
 3 files changed, 7 insertions(+)

diff --git a/block-sha1/sha1.h b/block-sha1/sha1.h
index 9fb0441b98..47bb916636 100644
--- a/block-sha1/sha1.h
+++ b/block-sha1/sha1.h
@@ -16,7 +16,9 @@ void blk_SHA1_Init(blk_SHA_CTX *ctx);
 void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, size_t len);
 void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx);
 
+#ifndef platform_SHA_CTX
 #define platform_SHA_CTX blk_SHA_CTX
 #define platform_SHA1_Init blk_SHA1_Init
 #define platform_SHA1_Update blk_SHA1_Update
 #define platform_SHA1_Final blk_SHA1_Final
+#endif
diff --git a/sha1/openssl.h b/sha1/openssl.h
index 006c1f4ba5..1038af47da 100644
--- a/sha1/openssl.h
+++ b/sha1/openssl.h
@@ -40,10 +40,12 @@ static inline void openssl_SHA1_Clone(struct openssl_SHA1_CTX *dst,
 	EVP_MD_CTX_copy_ex(dst->ectx, src->ectx);
 }
 
+#ifndef platform_SHA_CTX
 #define platform_SHA_CTX openssl_SHA1_CTX
 #define platform_SHA1_Init openssl_SHA1_Init
 #define platform_SHA1_Clone openssl_SHA1_Clone
 #define platform_SHA1_Update openssl_SHA1_Update
 #define platform_SHA1_Final openssl_SHA1_Final
+#endif
 
 #endif /* SHA1_OPENSSL_H */
diff --git a/sha1dc_git.h b/sha1dc_git.h
index 60e3ce8439..f6f880cabe 100644
--- a/sha1dc_git.h
+++ b/sha1dc_git.h
@@ -18,7 +18,10 @@ void git_SHA1DCFinal(unsigned char [20], SHA1_CTX *);
 void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *data, unsigned long len);
 
 #define platform_SHA_IS_SHA1DC /* used by "test-tool sha1-is-sha1dc" */
+
+#ifndef platform_SHA_CTX
 #define platform_SHA_CTX SHA1_CTX
 #define platform_SHA1_Init git_SHA1DCInit
 #define platform_SHA1_Update git_SHA1DCUpdate
 #define platform_SHA1_Final git_SHA1DCFinal
+#endif

From 253ed9ecfffa3e50b95e08bb513fdf9efcc5a85f Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:47 -0400
Subject: [PATCH 6/8] hash.h: scaffolding for _unsafe hashing variants

Git's default SHA-1 implementation is collision-detecting, which
hardens us against known SHA-1 attacks against Git objects. This makes
Git object writes safer at the expense of some speed when hashing
through the collision-detecting implementation, which is slower than
non-collision detecting alternatives.

Prepare for loading a separate "unsafe" SHA-1 implementation that can
be used for non-cryptographic purposes, like computing the checksum of
files that use the hashwrite() API.

This commit does not actually introduce any new compile-time knobs to
control which implementation is used as the unsafe SHA-1 variant, but
does add scaffolding so that the "git_hash_algo" structure has five new
function pointers which are "unsafe" variants of the five existing
hashing-related function pointers:

  - git_hash_init_fn unsafe_init_fn
  - git_hash_clone_fn unsafe_clone_fn
  - git_hash_update_fn unsafe_update_fn
  - git_hash_final_fn unsafe_final_fn
  - git_hash_final_oid_fn unsafe_final_oid_fn

The following commit will introduce compile-time knobs to specify which
SHA-1 implementation is used for non-cryptographic uses.
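As a rough sketch of how these new pointers are meant to be driven (the
helper below is illustrative only and not part of this patch; it
mirrors what the csum-file.c conversion later in this series does), a
caller that only needs an integrity checksum would do something like:

	static void checksum_data(const void *buf, size_t len,
				  unsigned char *out /* >= rawsz bytes */)
	{
		git_hash_ctx ctx;

		/* non-cryptographic use: stick to the unsafe_* family throughout */
		the_hash_algo->unsafe_init_fn(&ctx);
		the_hash_algo->unsafe_update_fn(&ctx, buf, len);
		the_hash_algo->unsafe_final_fn(out, &ctx);
	}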
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 hash.h        | 42 ++++++++++++++++++++++++++++++++++++++++++
 object-file.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 84 insertions(+)

diff --git a/hash.h b/hash.h
index 72ffbc862e..96458b129f 100644
--- a/hash.h
+++ b/hash.h
@@ -44,14 +44,32 @@
 #define platform_SHA1_Final SHA1_Final
 #endif
 
+#ifndef platform_SHA_CTX_unsafe
+# define platform_SHA_CTX_unsafe platform_SHA_CTX
+# define platform_SHA1_Init_unsafe platform_SHA1_Init
+# define platform_SHA1_Update_unsafe platform_SHA1_Update
+# define platform_SHA1_Final_unsafe platform_SHA1_Final
+# ifdef platform_SHA1_Clone
+#  define platform_SHA1_Clone_unsafe platform_SHA1_Clone
+# endif
+#endif
+
 #define git_SHA_CTX platform_SHA_CTX
 #define git_SHA1_Init platform_SHA1_Init
 #define git_SHA1_Update platform_SHA1_Update
 #define git_SHA1_Final platform_SHA1_Final
 
+#define git_SHA_CTX_unsafe platform_SHA_CTX_unsafe
+#define git_SHA1_Init_unsafe platform_SHA1_Init_unsafe
+#define git_SHA1_Update_unsafe platform_SHA1_Update_unsafe
+#define git_SHA1_Final_unsafe platform_SHA1_Final_unsafe
+
 #ifdef platform_SHA1_Clone
 #define git_SHA1_Clone platform_SHA1_Clone
 #endif
+#ifdef platform_SHA1_Clone_unsafe
+# define git_SHA1_Clone_unsafe platform_SHA1_Clone_unsafe
+#endif
 
 #ifndef platform_SHA256_CTX
 #define platform_SHA256_CTX SHA256_CTX
@@ -81,6 +99,13 @@ static inline void git_SHA1_Clone(git_SHA_CTX *dst, const git_SHA_CTX *src)
 	memcpy(dst, src, sizeof(*dst));
 }
 #endif
+#ifndef SHA1_NEEDS_CLONE_HELPER_UNSAFE
+static inline void git_SHA1_Clone_unsafe(git_SHA_CTX_unsafe *dst,
+					 const git_SHA_CTX_unsafe *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+#endif
 
 #ifndef SHA256_NEEDS_CLONE_HELPER
 static inline void git_SHA256_Clone(git_SHA256_CTX *dst, const git_SHA256_CTX *src)
@@ -178,6 +203,8 @@ enum get_oid_result {
 /* A suitably aligned type for stack allocations of hash contexts. */
 union git_hash_ctx {
 	git_SHA_CTX sha1;
+	git_SHA_CTX_unsafe sha1_unsafe;
+
 	git_SHA256_CTX sha256;
 };
 typedef union git_hash_ctx git_hash_ctx;
@@ -222,6 +249,21 @@ struct git_hash_algo {
 	/* The hash finalization function for object IDs. */
 	git_hash_final_oid_fn final_oid_fn;
 
+	/* The non-cryptographic hash initialization function. */
+	git_hash_init_fn unsafe_init_fn;
+
+	/* The non-cryptographic hash context cloning function. */
+	git_hash_clone_fn unsafe_clone_fn;
+
+	/* The non-cryptographic hash update function. */
+	git_hash_update_fn unsafe_update_fn;
+
+	/* The non-cryptographic hash finalization function. */
+	git_hash_final_fn unsafe_final_fn;
+
+	/* The non-cryptographic hash finalization function for object IDs. */
+	git_hash_final_oid_fn unsafe_final_oid_fn;
+
 	/* The OID of the empty tree. */
 	const struct object_id *empty_tree;
 
diff --git a/object-file.c b/object-file.c
index 440c5c6850..206ff625d9 100644
--- a/object-file.c
+++ b/object-file.c
@@ -115,6 +115,33 @@ static void git_hash_sha1_final_oid(struct object_id *oid, git_hash_ctx *ctx)
 	oid->algo = GIT_HASH_SHA1;
 }
 
+static void git_hash_sha1_init_unsafe(git_hash_ctx *ctx)
+{
+	git_SHA1_Init_unsafe(&ctx->sha1_unsafe);
+}
+
+static void git_hash_sha1_clone_unsafe(git_hash_ctx *dst, const git_hash_ctx *src)
+{
+	git_SHA1_Clone_unsafe(&dst->sha1_unsafe, &src->sha1_unsafe);
+}
+
+static void git_hash_sha1_update_unsafe(git_hash_ctx *ctx, const void *data,
+					size_t len)
+{
+	git_SHA1_Update_unsafe(&ctx->sha1_unsafe, data, len);
+}
+
+static void git_hash_sha1_final_unsafe(unsigned char *hash, git_hash_ctx *ctx)
+{
+	git_SHA1_Final_unsafe(hash, &ctx->sha1_unsafe);
+}
+
+static void git_hash_sha1_final_oid_unsafe(struct object_id *oid, git_hash_ctx *ctx)
+{
+	git_SHA1_Final_unsafe(oid->hash, &ctx->sha1_unsafe);
+	memset(oid->hash + GIT_SHA1_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA1_RAWSZ);
+	oid->algo = GIT_HASH_SHA1;
+}
 
 static void git_hash_sha256_init(git_hash_ctx *ctx)
 {
@@ -189,6 +216,11 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
 		.update_fn = git_hash_unknown_update,
 		.final_fn = git_hash_unknown_final,
 		.final_oid_fn = git_hash_unknown_final_oid,
+		.unsafe_init_fn = git_hash_unknown_init,
+		.unsafe_clone_fn = git_hash_unknown_clone,
+		.unsafe_update_fn = git_hash_unknown_update,
+		.unsafe_final_fn = git_hash_unknown_final,
+		.unsafe_final_oid_fn = git_hash_unknown_final_oid,
 		.empty_tree = NULL,
 		.empty_blob = NULL,
 		.null_oid = NULL,
@@ -204,6 +236,11 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
 		.update_fn = git_hash_sha1_update,
 		.final_fn = git_hash_sha1_final,
 		.final_oid_fn = git_hash_sha1_final_oid,
+		.unsafe_init_fn = git_hash_sha1_init_unsafe,
+		.unsafe_clone_fn = git_hash_sha1_clone_unsafe,
+		.unsafe_update_fn = git_hash_sha1_update_unsafe,
+		.unsafe_final_fn = git_hash_sha1_final_unsafe,
+		.unsafe_final_oid_fn = git_hash_sha1_final_oid_unsafe,
 		.empty_tree = &empty_tree_oid,
 		.empty_blob = &empty_blob_oid,
 		.null_oid = &null_oid_sha1,
@@ -219,6 +256,11 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
 		.update_fn = git_hash_sha256_update,
 		.final_fn = git_hash_sha256_final,
 		.final_oid_fn = git_hash_sha256_final_oid,
+		.unsafe_init_fn = git_hash_sha256_init,
+		.unsafe_clone_fn = git_hash_sha256_clone,
+		.unsafe_update_fn = git_hash_sha256_update,
+		.unsafe_final_fn = git_hash_sha256_final,
+		.unsafe_final_oid_fn = git_hash_sha256_final_oid,
 		.empty_tree = &empty_tree_oid_sha256,
 		.empty_blob = &empty_blob_oid_sha256,
 		.null_oid = &null_oid_sha256,

From 06c92dafb86fd390285e20b743f5a61b45055546 Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:50 -0400
Subject: [PATCH 7/8] Makefile: allow specifying a SHA-1 for non-cryptographic uses

Introduce _UNSAFE variants of the OPENSSL_SHA1, BLK_SHA1, and
APPLE_COMMON_CRYPTO_SHA1 compile-time knobs which indicate which SHA-1
implementation is to be used for non-cryptographic uses.

There are a couple of small implementation notes worth mentioning:

  - There is no way to select the collision detecting SHA-1 as the
    "fast" fallback, since the fast fallback is only for
    non-cryptographic uses, and is meant to be faster than our
    collision-detecting implementation.

  - There are no similar knobs for SHA-256, since no collision attacks
    are presently known and thus no collision-detecting implementations
    actually exist.
Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 Makefile | 25 +++++++++++++++++++++++++
 hash.h   | 30 ++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/Makefile b/Makefile
index 91f65d7dc5..69b1e230a2 100644
--- a/Makefile
+++ b/Makefile
@@ -521,6 +521,10 @@ include shared.mak
 # Define APPLE_COMMON_CRYPTO_SHA1 to use Apple's CommonCrypto for
 # SHA-1.
 #
+# Define the same Makefile knobs as above, but suffixed with _UNSAFE to
+# use the corresponding implementations for unsafe SHA-1 hashing for
+# non-cryptographic purposes.
+#
 # If don't enable any of the *_SHA1 settings in this section, Git will
 # default to its built-in sha1collisiondetection library, which is a
 # collision-detecting sha1 This is slower, but may detect attempted
@@ -1987,6 +1991,27 @@ endif
 endif
 endif
 
+ifdef OPENSSL_SHA1_UNSAFE
+ifndef OPENSSL_SHA1
+	EXTLIBS += $(LIB_4_CRYPTO)
+	BASIC_CFLAGS += -DSHA1_OPENSSL_UNSAFE
+endif
+else
+ifdef BLK_SHA1_UNSAFE
+ifndef BLK_SHA1
+	LIB_OBJS += block-sha1/sha1.o
+	BASIC_CFLAGS += -DSHA1_BLK_UNSAFE
+endif
+else
+ifdef APPLE_COMMON_CRYPTO_SHA1_UNSAFE
+ifndef APPLE_COMMON_CRYPTO_SHA1
+	COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL
+	BASIC_CFLAGS += -DSHA1_APPLE_UNSAFE
+endif
+endif
+endif
+endif
+
 ifdef OPENSSL_SHA256
 	EXTLIBS += $(LIB_4_CRYPTO)
 	BASIC_CFLAGS += -DSHA256_OPENSSL
diff --git a/hash.h b/hash.h
index 96458b129f..f97f858307 100644
--- a/hash.h
+++ b/hash.h
@@ -15,6 +15,36 @@
 #include "block-sha1/sha1.h"
 #endif
 
+#if defined(SHA1_APPLE_UNSAFE)
+# include <CommonCrypto/CommonDigest.h>
+# define platform_SHA_CTX_unsafe CC_SHA1_CTX
+# define platform_SHA1_Init_unsafe CC_SHA1_Init
+# define platform_SHA1_Update_unsafe CC_SHA1_Update
+# define platform_SHA1_Final_unsafe CC_SHA1_Final
+#elif defined(SHA1_OPENSSL_UNSAFE)
+# include <openssl/sha.h>
+# if defined(OPENSSL_API_LEVEL) && OPENSSL_API_LEVEL >= 3
+#  define SHA1_NEEDS_CLONE_HELPER_UNSAFE
+#  include "sha1/openssl.h"
+#  define platform_SHA_CTX_unsafe openssl_SHA1_CTX
+#  define platform_SHA1_Init_unsafe openssl_SHA1_Init
+#  define platform_SHA1_Clone_unsafe openssl_SHA1_Clone
+#  define platform_SHA1_Update_unsafe openssl_SHA1_Update
+#  define platform_SHA1_Final_unsafe openssl_SHA1_Final
+# else
+#  define platform_SHA_CTX_unsafe SHA_CTX
+#  define platform_SHA1_Init_unsafe SHA1_Init
+#  define platform_SHA1_Update_unsafe SHA1_Update
+#  define platform_SHA1_Final_unsafe SHA1_Final
+# endif
+#elif defined(SHA1_BLK_UNSAFE)
+# include "block-sha1/sha1.h"
+# define platform_SHA_CTX_unsafe blk_SHA_CTX
+# define platform_SHA1_Init_unsafe blk_SHA1_Init
+# define platform_SHA1_Update_unsafe blk_SHA1_Update
+# define platform_SHA1_Final_unsafe blk_SHA1_Final
+#endif
+
 #if defined(SHA256_NETTLE)
 #include "sha256/nettle.h"
 #elif defined(SHA256_GCRYPT)

From 1b9e9be8b4694ea52d8aae93f311b1607c3576b7 Mon Sep 17 00:00:00 2001
From: Taylor Blau
Date: Thu, 26 Sep 2024 11:22:53 -0400
Subject: [PATCH 8/8] csum-file.c: use unsafe SHA-1 implementation when available

Update hashwrite() and friends to use the unsafe_-variants of hashing
functions, calling, e.g., "the_hash_algo->unsafe_update_fn()" instead
of "the_hash_algo->update_fn()".

These callers only use the_hash_algo to produce a checksum, which we
depend on for data integrity, but not for cryptographic purposes, so
these callers are safe to use the unsafe (non-collision detecting)
SHA-1 implementation.

To time this, I took a freshly packed copy of linux.git, and ran the
following with and without the OPENSSL_SHA1_UNSAFE=1 build-knob.
Both versions were compiled with -O3:

    $ git for-each-ref --format='%(objectname)' refs/heads refs/tags >in
    $ valgrind --tool=callgrind ~/src/git/git-pack-objects \
        --revs --stdout --all-progress --use-bitmap-index <in >/dev/null

Without OPENSSL_SHA1_UNSAFE=1 (that is, using the collision-detecting
SHA-1 implementation for both cryptographic and non-cryptographic
purposes), we spend a significant amount of our instruction count in
hashwrite():

    $ callgrind_annotate --inclusive=yes | grep hashwrite | head -n1
    159,998,868,413 (79.42%)  /home/ttaylorr/src/git/csum-file.c:hashwrite [/home/ttaylorr/src/git/git-pack-objects]

, and the resulting "clone" takes 19.219 seconds of wall clock time,
18.94 seconds of user time and 0.28 seconds of system time.

Compiling with OPENSSL_SHA1_UNSAFE=1, we spend ~60% fewer instructions
in hashwrite():

    $ callgrind_annotate --inclusive=yes | grep hashwrite | head -n1
    59,164,001,176 (58.79%)  /home/ttaylorr/src/git/csum-file.c:hashwrite [/home/ttaylorr/src/git/git-pack-objects]

, and generate the resulting "clone" much faster, in only 11.597
seconds of wall time, 11.37 seconds of user time, and 0.23 seconds of
system time, for a ~40% speed-up.

Signed-off-by: Taylor Blau
Signed-off-by: Junio C Hamano
---
 csum-file.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/csum-file.c b/csum-file.c
index bf82ad8f9f..c203ebf11b 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -50,7 +50,7 @@ void hashflush(struct hashfile *f)
 
 	if (offset) {
 		if (!f->skip_hash)
-			the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
+			the_hash_algo->unsafe_update_fn(&f->ctx, f->buffer, offset);
 		flush(f, f->buffer, offset);
 		f->offset = 0;
 	}
@@ -73,7 +73,7 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result,
 	if (f->skip_hash)
 		hashclr(f->buffer, the_repository->hash_algo);
 	else
-		the_hash_algo->final_fn(f->buffer, &f->ctx);
+		the_hash_algo->unsafe_final_fn(f->buffer, &f->ctx);
 
 	if (result)
 		hashcpy(result, f->buffer, the_repository->hash_algo);
@@ -128,7 +128,7 @@ void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
 			 * f->offset is necessarily zero.
 			 */
 			if (!f->skip_hash)
-				the_hash_algo->update_fn(&f->ctx, buf, nr);
+				the_hash_algo->unsafe_update_fn(&f->ctx, buf, nr);
 			flush(f, buf, nr);
 		} else {
 			/*
@@ -174,7 +174,7 @@ static struct hashfile *hashfd_internal(int fd, const char *name,
 	f->name = name;
 	f->do_crc = 0;
 	f->skip_hash = 0;
-	the_hash_algo->init_fn(&f->ctx);
+	the_hash_algo->unsafe_init_fn(&f->ctx);
 
 	f->buffer_len = buffer_len;
 	f->buffer = xmalloc(buffer_len);
@@ -208,7 +208,7 @@ void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpo
 {
 	hashflush(f);
 	checkpoint->offset = f->total;
-	the_hash_algo->clone_fn(&checkpoint->ctx, &f->ctx);
+	the_hash_algo->unsafe_clone_fn(&checkpoint->ctx, &f->ctx);
 }
 
 int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
@@ -219,7 +219,7 @@ int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint
 	    lseek(f->fd, offset, SEEK_SET) != offset)
 		return -1;
 	f->total = offset;
-	the_hash_algo->clone_fn(&f->ctx, &checkpoint->ctx);
+	the_hash_algo->unsafe_clone_fn(&f->ctx, &checkpoint->ctx);
 	f->offset = 0; /* hashflush() was called in checkpoint */
 	return 0;
 }
@@ -245,9 +245,9 @@ int hashfile_checksum_valid(const unsigned char *data, size_t total_len)
 	if (total_len < the_hash_algo->rawsz)
 		return 0; /* say "too short"? */
 
-	the_hash_algo->init_fn(&ctx);
-	the_hash_algo->update_fn(&ctx, data, data_len);
-	the_hash_algo->final_fn(got, &ctx);
+	the_hash_algo->unsafe_init_fn(&ctx);
+	the_hash_algo->unsafe_update_fn(&ctx, data, data_len);
+	the_hash_algo->unsafe_final_fn(got, &ctx);
 
 	return hasheq(got, data + data_len, the_repository->hash_algo);
 }
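A note on the conversion above (illustrative, not part of the patch):
git_hash_ctx is a union with separate sha1 and sha1_unsafe members, so
a given context has to be driven by one family of function pointers
from init through final; mixing the safe and unsafe variants on the
same context is not valid once the two SHA-1 implementations differ.
For example:

	git_hash_ctx ctx;
	unsigned char sum[GIT_MAX_RAWSZ];

	the_hash_algo->unsafe_init_fn(&ctx);
	the_hash_algo->unsafe_update_fn(&ctx, buf, len);
	the_hash_algo->unsafe_final_fn(sum, &ctx);	/* ok: unsafe throughout */
	/* calling final_fn() here instead would read the wrong union member */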