git.kernel.org/pub/scm/git/git.git
Diffstat (limited to 'tree.c')
-rw-r--r--  tree.c  119
1 file changed, 13 insertions(+), 106 deletions(-)
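
For orientation before the hunks: judging only from the diff below, this change removes the read_one_entry_opt()/read_one_entry()/read_one_entry_quick() helpers and the index-populating read_tree() from tree.c, promotes the file-local read_tree_1() to a public read_tree_at(), and renames read_tree_recursive() to read_tree(), dropping the base/baselen prefix and the stage parameter along the way. The before-and-after signatures, copied from the hunks:

    /* Removed by this patch */
    int read_tree_recursive(struct repository *r, struct tree *tree,
                            const char *base, int baselen,
                            int stage, const struct pathspec *pathspec,
                            read_tree_fn_t fn, void *context);
    int read_tree(struct repository *r, struct tree *tree, int stage,
                  struct pathspec *match, struct index_state *istate);

    /* Added by this patch */
    int read_tree_at(struct repository *r, struct tree *tree,
                     struct strbuf *base, const struct pathspec *pathspec,
                     read_tree_fn_t fn, void *context);
    int read_tree(struct repository *r, struct tree *tree,
                  const struct pathspec *pathspec,
                  read_tree_fn_t fn, void *context);
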
diff --git a/tree.c b/tree.c
index a52479812c..410e3b477e 100644
--- a/tree.c
+++ b/tree.c
@@ -11,58 +11,10 @@
const char *tree_type = "tree";
-static int read_one_entry_opt(struct index_state *istate,
- const struct object_id *oid,
- const char *base, int baselen,
- const char *pathname,
- unsigned mode, int stage, int opt)
-{
- int len;
- struct cache_entry *ce;
-
- if (S_ISDIR(mode))
- return READ_TREE_RECURSIVE;
-
- len = strlen(pathname);
- ce = make_empty_cache_entry(istate, baselen + len);
-
- ce->ce_mode = create_ce_mode(mode);
- ce->ce_flags = create_ce_flags(stage);
- ce->ce_namelen = baselen + len;
- memcpy(ce->name, base, baselen);
- memcpy(ce->name + baselen, pathname, len+1);
- oidcpy(&ce->oid, oid);
- return add_index_entry(istate, ce, opt);
-}
-
-static int read_one_entry(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, int stage,
- void *context)
-{
- struct index_state *istate = context;
- return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
- mode, stage,
- ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
-}
-
-/*
- * This is used when the caller knows there is no existing entries at
- * the stage that will conflict with the entry being added.
- */
-static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, int stage,
- void *context)
-{
- struct index_state *istate = context;
- return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
- mode, stage,
- ADD_CACHE_JUST_APPEND);
-}
-
-static int read_tree_1(struct repository *r,
- struct tree *tree, struct strbuf *base,
- int stage, const struct pathspec *pathspec,
- read_tree_fn_t fn, void *context)
+int read_tree_at(struct repository *r,
+ struct tree *tree, struct strbuf *base,
+ const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context)
{
struct tree_desc desc;
struct name_entry entry;
@@ -86,7 +38,7 @@ static int read_tree_1(struct repository *r,
}
switch (fn(&entry.oid, base,
- entry.path, entry.mode, stage, context)) {
+ entry.path, entry.mode, context)) {
case 0:
continue;
case READ_TREE_RECURSIVE:
@@ -119,9 +71,9 @@ static int read_tree_1(struct repository *r,
len = tree_entry_len(&entry);
strbuf_add(base, entry.path, len);
strbuf_addch(base, '/');
- retval = read_tree_1(r, lookup_tree(r, &oid),
- base, stage, pathspec,
- fn, context);
+ retval = read_tree_at(r, lookup_tree(r, &oid),
+ base, pathspec,
+ fn, context);
strbuf_setlen(base, oldlen);
if (retval)
return -1;
@@ -129,17 +81,13 @@ static int read_tree_1(struct repository *r,
return 0;
}
-int read_tree_recursive(struct repository *r,
- struct tree *tree,
- const char *base, int baselen,
- int stage, const struct pathspec *pathspec,
- read_tree_fn_t fn, void *context)
+int read_tree(struct repository *r,
+ struct tree *tree,
+ const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context)
{
struct strbuf sb = STRBUF_INIT;
- int ret;
-
- strbuf_add(&sb, base, baselen);
- ret = read_tree_1(r, tree, &sb, stage, pathspec, fn, context);
+ int ret = read_tree_at(r, tree, &sb, pathspec, fn, context);
strbuf_release(&sb);
return ret;
}
@@ -154,47 +102,6 @@ int cmp_cache_name_compare(const void *a_, const void *b_)
ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
-int read_tree(struct repository *r, struct tree *tree, int stage,
- struct pathspec *match, struct index_state *istate)
-{
- read_tree_fn_t fn = NULL;
- int i, err;
-
- /*
- * Currently the only existing callers of this function all
- * call it with stage=1 and after making sure there is nothing
- * at that stage; we could always use read_one_entry_quick().
- *
- * But when we decide to straighten out git-read-tree not to
- * use unpack_trees() in some cases, this will probably start
- * to matter.
- */
-
- /*
- * See if we have cache entry at the stage. If so,
- * do it the original slow way, otherwise, append and then
- * sort at the end.
- */
- for (i = 0; !fn && i < istate->cache_nr; i++) {
- const struct cache_entry *ce = istate->cache[i];
- if (ce_stage(ce) == stage)
- fn = read_one_entry;
- }
-
- if (!fn)
- fn = read_one_entry_quick;
- err = read_tree_recursive(r, tree, "", 0, stage, match, fn, istate);
- if (fn == read_one_entry || err)
- return err;
-
- /*
- * Sort the cache entry -- we need to nuke the cache tree, though.
- */
- cache_tree_free(&istate->cache_tree);
- QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
- return 0;
-}
-
struct tree *lookup_tree(struct repository *r, const struct object_id *oid)
{
struct object *obj = lookup_object(r, oid);
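
A minimal, hypothetical caller is sketched below to show how the calling convention changes. The callback signature is inferred from the fn(&entry.oid, base, entry.path, entry.mode, context) call site in the hunks above; names such as show_entry() and list_tree(), and the exact header set, are assumptions for illustration and are not part of this patch.

    #include "cache.h"
    #include "tree.h"
    #include "pathspec.h"

    /* A read_tree_fn_t callback in the new style: no "stage" argument. */
    static int show_entry(const struct object_id *oid, struct strbuf *base,
                          const char *pathname, unsigned mode, void *context)
    {
            printf("%06o %s %s%s\n", mode, oid_to_hex(oid), base->buf, pathname);
            /* Descend into subtrees, mirroring the READ_TREE_RECURSIVE case above. */
            return S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0;
    }

    static void list_tree(struct repository *r, struct tree *tree)
    {
            struct pathspec pathspec;
            memset(&pathspec, 0, sizeof(pathspec));

            /* Old call: read_tree_recursive(r, tree, "", 0, 0, &pathspec, show_entry, NULL); */
            read_tree(r, tree, &pathspec, show_entry, NULL);
    }

Callers that still need a non-empty starting prefix can build the strbuf themselves and call read_tree_at() directly; that is what the new read_tree() wrapper does with an empty STRBUF_INIT buffer, as seen in the hunk above.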