git/tree.c
Commit c4ad00f8cc by Jeff King: add object_as_type helper for casting objects
When we call lookup_commit, lookup_tree, etc, the logic goes
something like:

  1. Look for an existing object struct. If we don't have
     one, allocate and return a new one.

  2. Double check that any object we have is the expected
     type (and complain and return NULL otherwise).

  3. Convert an object with type OBJ_NONE (from a prior
     call to lookup_unknown_object) to the expected type.

We can encapsulate steps 2 and 3 in a helper function which
checks whether we have the expected object type, converts
OBJ_NONE as appropriate, and returns the object.

Not only does this shorten the code, but it also provides
one central location for converting OBJ_NONE objects into
objects of other types. Future patches will use that to
enforce type-specific invariants.

Since this is a refactoring, we would want it to behave
exactly as the current code. It takes a little reasoning to
see that this is the case:

  - for lookup_{commit,tree,etc} functions, we are just
    pulling steps 2 and 3 into a function that does the same
    thing.

  - for the call in peel_object, we currently only do step 3
    (but we want to consolidate it with the others, as
    mentioned above). However, step 2 is a noop here, as the
    surrounding conditional makes sure we have OBJ_NONE
    (which we want to keep to avoid an extraneous call to
    sha1_object_info).

  - for the call in lookup_commit_reference_gently, we are
    currently doing step 2 but not step 3. However, step 3
    is a noop here. The object we got will have just come
    from deref_tag, which must have figured out the type for
    each object in order to know when to stop peeling.
    Therefore the type will never be OBJ_NONE.

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-07-28 10:14:33 -07:00
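
For reference, the helper this message describes (added to object.c in the same commit) has roughly the following shape. This is only a sketch of steps 2 and 3 above, not necessarily the exact code; helpers such as error(), sha1_to_hex() and typename() are assumed to be the ones git already provides.

  void *object_as_type(struct object *obj, enum object_type type, int quiet)
  {
  	if (obj->type == type)
  		return obj;
  	if (obj->type == OBJ_NONE) {
  		/* step 3: adopt the expected type for an OBJ_NONE object */
  		obj->type = type;
  		return obj;
  	}
  	/* step 2: type mismatch; complain (unless quiet) and return NULL */
  	if (!quiet)
  		error("object %s is a %s, not a %s",
  		      sha1_to_hex(obj->sha1),
  		      typename(obj->type), typename(type));
  	return NULL;
  }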


#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree-walk.h"
const char *tree_type = "tree";
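/*
 * Turn one tree entry into an index (cache) entry at the given stage and
 * hand it to add_cache_entry() with the caller's flags.  Directories are
 * not added; returning READ_TREE_RECURSIVE tells the walker to descend.
 */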
static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	hashcpy(ce->sha1, sha1);
	return add_cache_entry(ce, opt);
}

static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_JUST_APPEND);
}

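/*
 * Walk one tree level: filter entries against the pathspec, hand each
 * match to fn, and when fn asks for READ_TREE_RECURSIVE descend into
 * subdirectories (for gitlinks, into the submodule commit's tree).
 */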
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	unsigned char sha1[20];
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.sha1, base->buf, base->len,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			hashcpy(sha1, entry.sha1);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(entry.sha1);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			hashcpy(sha1, commit->tree->object.sha1);
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(sha1),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}

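/*
 * Public entry point for the walk above: seed the path buffer with
 * "base" and recurse from there.
 */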
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}

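/* qsort() callback: order index entries by name, then by stage. */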
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

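/*
 * Read a whole tree into the index at the given stage, taking the fast
 * append-then-sort path when nothing already exists at that stage.
 */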
int read_tree(struct tree *tree, int stage, struct pathspec *match)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < active_nr; i++) {
		const struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, NULL);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&active_cache_tree);
	qsort(active_cache, active_nr, sizeof(active_cache[0]),
	      cmp_cache_name_compare);
	return 0;
}

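/*
 * Find or create the in-core object for this tree.  object_as_type()
 * makes sure an already-seen object really is (or becomes) a tree.
 */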
struct tree *lookup_tree(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		return create_object(sha1, alloc_tree_node());
	return object_as_type(obj, OBJ_TREE, 0);
}

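/* Attach an already-read buffer to the tree object and mark it parsed. */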
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;
	return 0;
}

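/* Read the tree's contents from the object database unless already parsed. */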
int parse_tree(struct tree *item)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_sha1_file(item->object.sha1, &type, &size);
	if (!buffer)
		return error("Could not read %s",
			     sha1_to_hex(item->object.sha1));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     sha1_to_hex(item->object.sha1));
	}
	return parse_tree_buffer(item, buffer, size);
}

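/* Drop the tree's buffer; the tree can be re-read and re-parsed later. */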
void free_tree_buffer(struct tree *tree)
{
	free(tree->buffer);
	tree->buffer = NULL;
	tree->size = 0;
	tree->object.parsed = 0;
}

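/*
 * Peel commits and tags until we arrive at a tree, or return NULL if
 * the chain does not lead to one.
 */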
struct tree *parse_tree_indirect(const unsigned char *sha1)
{
	struct object *obj = parse_object(sha1);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(((struct commit *) obj)->tree->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(obj->sha1);
	} while (1);
}