Squashed 'third_party/git/' content from commit cb71568594
git-subtree-dir: third_party/git git-subtree-split: cb715685942260375e1eb8153b0768a376e4ece7
This commit is contained in:
commit
1b593e1ea4
3629 changed files with 1139935 additions and 0 deletions
3201
refs/files-backend.c
Normal file
3201
refs/files-backend.c
Normal file
File diff suppressed because it is too large
Load diff
435
refs/iterator.c
Normal file
435
refs/iterator.c
Normal file
|
|
@@ -0,0 +1,435 @@
|
|||
/*
|
||||
* Generic reference iterator infrastructure. See refs-internal.h for
|
||||
* documentation about the design and use of reference iterators.
|
||||
*/
|
||||
|
||||
#include "cache.h"
|
||||
#include "refs.h"
|
||||
#include "refs/refs-internal.h"
|
||||
#include "iterator.h"
|
||||
|
||||
int ref_iterator_advance(struct ref_iterator *ref_iterator)
|
||||
{
|
||||
return ref_iterator->vtable->advance(ref_iterator);
|
||||
}
|
||||
|
||||
int ref_iterator_peel(struct ref_iterator *ref_iterator,
|
||||
struct object_id *peeled)
|
||||
{
|
||||
return ref_iterator->vtable->peel(ref_iterator, peeled);
|
||||
}
|
||||
|
||||
int ref_iterator_abort(struct ref_iterator *ref_iterator)
|
||||
{
|
||||
return ref_iterator->vtable->abort(ref_iterator);
|
||||
}
|
||||
|
||||
void base_ref_iterator_init(struct ref_iterator *iter,
|
||||
struct ref_iterator_vtable *vtable,
|
||||
int ordered)
|
||||
{
|
||||
iter->vtable = vtable;
|
||||
iter->ordered = !!ordered;
|
||||
iter->refname = NULL;
|
||||
iter->oid = NULL;
|
||||
iter->flags = 0;
|
||||
}
|
||||
|
||||
/*
 * Free the iterator object itself. Concrete iterators call this after
 * releasing their own resources.
 */
void base_ref_iterator_free(struct ref_iterator *iter)
{
	/* Help make use-after-free bugs fail quickly: */
	iter->vtable = NULL;
	free(iter);
}
|
||||
|
||||
/*
 * An iterator over the empty set of references; it yields nothing and
 * is done as soon as it is advanced.
 */
struct empty_ref_iterator {
	struct ref_iterator base;
};
|
||||
|
||||
/* An empty iterator has no entries, so advancing just ends the iteration. */
static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	return ref_iterator_abort(ref_iterator);
}
|
||||
|
||||
/*
 * peel() is only valid while the iterator is positioned on an entry;
 * an empty iterator never is, so calling this is a caller bug.
 */
static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	BUG("peel called for empty iterator");
}
|
||||
|
||||
/* Nothing to release besides the iterator object itself. */
static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}
|
||||
|
||||
/* Method table for empty_ref_iterator (advance, peel, abort). */
static struct ref_iterator_vtable empty_ref_iterator_vtable = {
	empty_ref_iterator_advance,
	empty_ref_iterator_peel,
	empty_ref_iterator_abort
};
|
||||
|
||||
struct ref_iterator *empty_ref_iterator_begin(void)
|
||||
{
|
||||
struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
|
||||
struct ref_iterator *ref_iterator = &iter->base;
|
||||
|
||||
base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable, 1);
|
||||
return ref_iterator;
|
||||
}
|
||||
|
||||
/*
 * Return true if ref_iterator is one created by
 * empty_ref_iterator_begin(), identified by its vtable pointer.
 */
int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable == &empty_ref_iterator_vtable;
}
|
||||
|
||||
/*
 * An iterator that merges the output of two sub-iterators, letting a
 * caller-supplied select function decide, at each step, which entry
 * to yield next and which sub-iterator(s) to advance.
 */
struct merge_ref_iterator {
	struct ref_iterator base;

	/* The two sub-iterators; set to NULL once exhausted. */
	struct ref_iterator *iter0, *iter1;

	/* Decides which sub-iterator supplies the next entry. */
	ref_iterator_select_fn *select;
	void *cb_data;

	/*
	 * A pointer to iter0 or iter1 (whichever is supplying the
	 * current value), or NULL if advance has not yet been called.
	 */
	struct ref_iterator **current;
};
|
||||
|
||||
/*
 * Advance the merge iterator: on the first call, prime both
 * sub-iterators; afterwards, advance whichever one supplied the
 * previous entry. Then repeatedly consult iter->select until it
 * yields an entry, reports done, or errors out. Exhausted
 * sub-iterators are recorded as NULL so select can see their state.
 */
static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok;

	if (!iter->current) {
		/* Initialize: advance both iterators to their first entries */
		if ((ok = ref_iterator_advance(iter->iter0)) != ITER_OK) {
			iter->iter0 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
		if ((ok = ref_iterator_advance(iter->iter1)) != ITER_OK) {
			iter->iter1 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	} else {
		/*
		 * Advance the current iterator past the just-used
		 * entry:
		 */
		if ((ok = ref_iterator_advance(*iter->current)) != ITER_OK) {
			*iter->current = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	}

	/* Loop until we find an entry that we can yield. */
	while (1) {
		struct ref_iterator **secondary;
		enum iterator_selection selection =
			iter->select(iter->iter0, iter->iter1, iter->cb_data);

		if (selection == ITER_SELECT_DONE) {
			return ref_iterator_abort(ref_iterator);
		} else if (selection == ITER_SELECT_ERROR) {
			ref_iterator_abort(ref_iterator);
			return ITER_ERROR;
		}

		/* Which sub-iterator was selected, and which is the other? */
		if ((selection & ITER_CURRENT_SELECTION_MASK) == 0) {
			iter->current = &iter->iter0;
			secondary = &iter->iter1;
		} else {
			iter->current = &iter->iter1;
			secondary = &iter->iter0;
		}

		/* Optionally skip the other iterator's matching entry: */
		if (selection & ITER_SKIP_SECONDARY) {
			if ((ok = ref_iterator_advance(*secondary)) != ITER_OK) {
				*secondary = NULL;
				if (ok == ITER_ERROR)
					goto error;
			}
		}

		/* Expose the selected entry through our base fields: */
		if (selection & ITER_YIELD_CURRENT) {
			iter->base.refname = (*iter->current)->refname;
			iter->base.oid = (*iter->current)->oid;
			iter->base.flags = (*iter->current)->flags;
			return ITER_OK;
		}
	}

error:
	ref_iterator_abort(ref_iterator);
	return ITER_ERROR;
}
|
||||
|
||||
static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator,
|
||||
struct object_id *peeled)
|
||||
{
|
||||
struct merge_ref_iterator *iter =
|
||||
(struct merge_ref_iterator *)ref_iterator;
|
||||
|
||||
if (!iter->current) {
|
||||
BUG("peel called before advance for merge iterator");
|
||||
}
|
||||
return ref_iterator_peel(*iter->current, peeled);
|
||||
}
|
||||
|
||||
static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator)
|
||||
{
|
||||
struct merge_ref_iterator *iter =
|
||||
(struct merge_ref_iterator *)ref_iterator;
|
||||
int ok = ITER_DONE;
|
||||
|
||||
if (iter->iter0) {
|
||||
if (ref_iterator_abort(iter->iter0) != ITER_DONE)
|
||||
ok = ITER_ERROR;
|
||||
}
|
||||
if (iter->iter1) {
|
||||
if (ref_iterator_abort(iter->iter1) != ITER_DONE)
|
||||
ok = ITER_ERROR;
|
||||
}
|
||||
base_ref_iterator_free(ref_iterator);
|
||||
return ok;
|
||||
}
|
||||
|
||||
/* Method table for merge_ref_iterator (advance, peel, abort). */
static struct ref_iterator_vtable merge_ref_iterator_vtable = {
	merge_ref_iterator_advance,
	merge_ref_iterator_peel,
	merge_ref_iterator_abort
};
|
||||
|
||||
struct ref_iterator *merge_ref_iterator_begin(
|
||||
int ordered,
|
||||
struct ref_iterator *iter0, struct ref_iterator *iter1,
|
||||
ref_iterator_select_fn *select, void *cb_data)
|
||||
{
|
||||
struct merge_ref_iterator *iter = xcalloc(1, sizeof(*iter));
|
||||
struct ref_iterator *ref_iterator = &iter->base;
|
||||
|
||||
/*
|
||||
* We can't do the same kind of is_empty_ref_iterator()-style
|
||||
* optimization here as overlay_ref_iterator_begin() does,
|
||||
* because we don't know the semantics of the select function.
|
||||
* It might, for example, implement "intersect" by passing
|
||||
* references through only if they exist in both iterators.
|
||||
*/
|
||||
|
||||
base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable, ordered);
|
||||
iter->iter0 = iter0;
|
||||
iter->iter1 = iter1;
|
||||
iter->select = select;
|
||||
iter->cb_data = cb_data;
|
||||
iter->current = NULL;
|
||||
return ref_iterator;
|
||||
}
|
||||
|
||||
/*
|
||||
* A ref_iterator_select_fn that overlays the items from front on top
|
||||
* of those from back (like loose refs over packed refs). See
|
||||
* overlay_ref_iterator_begin().
|
||||
*/
|
||||
static enum iterator_selection overlay_iterator_select(
|
||||
struct ref_iterator *front, struct ref_iterator *back,
|
||||
void *cb_data)
|
||||
{
|
||||
int cmp;
|
||||
|
||||
if (!back)
|
||||
return front ? ITER_SELECT_0 : ITER_SELECT_DONE;
|
||||
else if (!front)
|
||||
return ITER_SELECT_1;
|
||||
|
||||
cmp = strcmp(front->refname, back->refname);
|
||||
|
||||
if (cmp < 0)
|
||||
return ITER_SELECT_0;
|
||||
else if (cmp > 0)
|
||||
return ITER_SELECT_1;
|
||||
else
|
||||
return ITER_SELECT_0_SKIP_1;
|
||||
}
|
||||
|
||||
struct ref_iterator *overlay_ref_iterator_begin(
|
||||
struct ref_iterator *front, struct ref_iterator *back)
|
||||
{
|
||||
/*
|
||||
* Optimization: if one of the iterators is empty, return the
|
||||
* other one rather than incurring the overhead of wrapping
|
||||
* them.
|
||||
*/
|
||||
if (is_empty_ref_iterator(front)) {
|
||||
ref_iterator_abort(front);
|
||||
return back;
|
||||
} else if (is_empty_ref_iterator(back)) {
|
||||
ref_iterator_abort(back);
|
||||
return front;
|
||||
} else if (!front->ordered || !back->ordered) {
|
||||
BUG("overlay_ref_iterator requires ordered inputs");
|
||||
}
|
||||
|
||||
return merge_ref_iterator_begin(1, front, back,
|
||||
overlay_iterator_select, NULL);
|
||||
}
|
||||
|
||||
/*
 * An iterator that filters the output of a wrapped iterator down to
 * refnames starting with `prefix`, optionally trimming `trim`
 * characters from the front of each yielded refname.
 */
struct prefix_ref_iterator {
	struct ref_iterator base;

	/* The wrapped iterator (NULL once exhausted). */
	struct ref_iterator *iter0;
	/* Owned copy of the prefix string. */
	char *prefix;
	/* Number of leading characters to strip from yielded refnames. */
	int trim;
};
|
||||
|
||||
/* Return -1, 0, 1 if refname is before, inside, or after the prefix. */
static int compare_prefix(const char *refname, const char *prefix)
{
	/* Walk both strings until the prefix is exhausted: */
	for (; *prefix; refname++, prefix++) {
		if (*refname != *prefix)
			return ((unsigned char)*refname < (unsigned char)*prefix) ? -1 : +1;
	}
	return 0;
}
|
||||
|
||||
static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
|
||||
{
|
||||
struct prefix_ref_iterator *iter =
|
||||
(struct prefix_ref_iterator *)ref_iterator;
|
||||
int ok;
|
||||
|
||||
while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
|
||||
int cmp = compare_prefix(iter->iter0->refname, iter->prefix);
|
||||
|
||||
if (cmp < 0)
|
||||
continue;
|
||||
|
||||
if (cmp > 0) {
|
||||
/*
|
||||
* If the source iterator is ordered, then we
|
||||
* can stop the iteration as soon as we see a
|
||||
* refname that comes after the prefix:
|
||||
*/
|
||||
if (iter->iter0->ordered) {
|
||||
ok = ref_iterator_abort(iter->iter0);
|
||||
break;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (iter->trim) {
|
||||
/*
|
||||
* It is nonsense to trim off characters that
|
||||
* you haven't already checked for via a
|
||||
* prefix check, whether via this
|
||||
* `prefix_ref_iterator` or upstream in
|
||||
* `iter0`). So if there wouldn't be at least
|
||||
* one character left in the refname after
|
||||
* trimming, report it as a bug:
|
||||
*/
|
||||
if (strlen(iter->iter0->refname) <= iter->trim)
|
||||
BUG("attempt to trim too many characters");
|
||||
iter->base.refname = iter->iter0->refname + iter->trim;
|
||||
} else {
|
||||
iter->base.refname = iter->iter0->refname;
|
||||
}
|
||||
|
||||
iter->base.oid = iter->iter0->oid;
|
||||
iter->base.flags = iter->iter0->flags;
|
||||
return ITER_OK;
|
||||
}
|
||||
|
||||
iter->iter0 = NULL;
|
||||
if (ref_iterator_abort(ref_iterator) != ITER_DONE)
|
||||
return ITER_ERROR;
|
||||
return ok;
|
||||
}
|
||||
|
||||
/* Peel the current entry by delegating to the wrapped iterator. */
static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}
|
||||
|
||||
/*
 * Abort the wrapped iterator (if still live), free the owned prefix
 * string, and free this iterator. Propagates the wrapped iterator's
 * abort status.
 */
static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);
	free(iter->prefix);
	base_ref_iterator_free(ref_iterator);
	return ok;
}
|
||||
|
||||
/* Method table for prefix_ref_iterator (advance, peel, abort). */
static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
	prefix_ref_iterator_advance,
	prefix_ref_iterator_peel,
	prefix_ref_iterator_abort
};
|
||||
|
||||
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
|
||||
const char *prefix,
|
||||
int trim)
|
||||
{
|
||||
struct prefix_ref_iterator *iter;
|
||||
struct ref_iterator *ref_iterator;
|
||||
|
||||
if (!*prefix && !trim)
|
||||
return iter0; /* optimization: no need to wrap iterator */
|
||||
|
||||
iter = xcalloc(1, sizeof(*iter));
|
||||
ref_iterator = &iter->base;
|
||||
|
||||
base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable, iter0->ordered);
|
||||
|
||||
iter->iter0 = iter0;
|
||||
iter->prefix = xstrdup(prefix);
|
||||
iter->trim = trim;
|
||||
|
||||
return ref_iterator;
|
||||
}
|
||||
|
||||
/*
 * The iterator currently being driven by
 * do_for_each_repo_ref_iterator(), or NULL if none is active.
 */
struct ref_iterator *current_ref_iter = NULL;
|
||||
|
||||
/*
 * Drive iter to completion, calling fn for each yielded reference.
 * While running, current_ref_iter points at iter (the previous value
 * is restored on exit). Returns -1 if the iteration itself failed;
 * otherwise returns the first nonzero value returned by fn (which
 * stops the iteration early), or 0 if fn always returned 0.
 */
int do_for_each_repo_ref_iterator(struct repository *r, struct ref_iterator *iter,
				  each_repo_ref_fn fn, void *cb_data)
{
	int retval = 0, ok;
	struct ref_iterator *old_ref_iter = current_ref_iter;

	current_ref_iter = iter;
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		retval = fn(r, iter->refname, iter->oid, iter->flags, cb_data);
		if (retval) {
			/*
			 * If ref_iterator_abort() returns ITER_ERROR,
			 * we ignore that error in deference to the
			 * callback function's return value.
			 */
			ref_iterator_abort(iter);
			goto out;
		}
	}

out:
	current_ref_iter = old_ref_iter;
	if (ok == ITER_ERROR)
		return -1;
	return retval;
}
|
||||
1660
refs/packed-backend.c
Normal file
1660
refs/packed-backend.c
Normal file
File diff suppressed because it is too large
Load diff
37
refs/packed-backend.h
Normal file
37
refs/packed-backend.h
Normal file
|
|
@@ -0,0 +1,37 @@
|
|||
#ifndef REFS_PACKED_BACKEND_H
#define REFS_PACKED_BACKEND_H

struct ref_transaction;

/*
 * Support for storing references in a `packed-refs` file.
 *
 * Note that this backend doesn't check for D/F conflicts, because it
 * doesn't care about them. But usually it should be wrapped in a
 * `files_ref_store` that prevents D/F conflicts from being created,
 * even among packed refs.
 */

/* Create a packed-refs ref_store reading/writing the file at `path`. */
struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags);

/*
 * Lock the packed-refs file for writing. Flags is passed to
 * hold_lock_file_for_update(). Return 0 on success. On errors, write
 * an error message to `err` and return a nonzero value.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err);

/* Release (or query) the lock taken by packed_refs_lock(). */
void packed_refs_unlock(struct ref_store *ref_store);
int packed_refs_is_locked(struct ref_store *ref_store);

/*
 * Return true if `transaction` really needs to be carried out against
 * the specified packed_ref_store, or false if it can be skipped
 * (i.e., because it is an obvious NOOP). `ref_store` must be locked
 * before calling this function.
 */
int is_packed_transaction_needed(struct ref_store *ref_store,
				 struct ref_transaction *transaction);

#endif /* REFS_PACKED_BACKEND_H */
|
||||
551
refs/ref-cache.c
Normal file
551
refs/ref-cache.c
Normal file
|
|
@@ -0,0 +1,551 @@
|
|||
#include "../cache.h"
|
||||
#include "../refs.h"
|
||||
#include "refs-internal.h"
|
||||
#include "ref-cache.h"
|
||||
#include "../iterator.h"
|
||||
|
||||
/*
 * Append entry to dir, growing the entries array as necessary. If the
 * append happens to preserve sorted order, advance dir->sorted so a
 * later sort_ref_dir() can be skipped.
 */
void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
{
	ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
	dir->entries[dir->nr++] = entry;
	/* optimize for the case that entries are added in order */
	if (dir->nr == 1 ||
	    (dir->nr == dir->sorted + 1 &&
	     strcmp(dir->entries[dir->nr - 2]->name,
		    dir->entries[dir->nr - 1]->name) < 0))
		dir->sorted = dir->nr;
}
|
||||
|
||||
/*
 * Return the ref_dir of entry, which must have the REF_DIR flag set.
 * If the directory is still REF_INCOMPLETE, fill it first via the
 * cache's fill_ref_dir callback and clear the flag.
 */
struct ref_dir *get_ref_dir(struct ref_entry *entry)
{
	struct ref_dir *dir;
	assert(entry->flag & REF_DIR);
	dir = &entry->u.subdir;
	if (entry->flag & REF_INCOMPLETE) {
		if (!dir->cache->fill_ref_dir)
			BUG("incomplete ref_store without fill_ref_dir function");

		dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name);
		entry->flag &= ~REF_INCOMPLETE;
	}
	return dir;
}
|
||||
|
||||
/*
 * Allocate and return a new non-directory ref_entry for refname,
 * recording oid and flag. The caller owns the returned entry.
 */
struct ref_entry *create_ref_entry(const char *refname,
				   const struct object_id *oid, int flag)
{
	struct ref_entry *ref;

	FLEX_ALLOC_STR(ref, name, refname);
	oidcpy(&ref->u.value.oid, oid);
	ref->flag = flag;
	return ref;
}
|
||||
|
||||
/*
 * Allocate a new ref_cache for refs, with an (incomplete) root
 * directory entry. fill_ref_dir (which may be NULL) is used to lazily
 * populate directories on demand.
 */
struct ref_cache *create_ref_cache(struct ref_store *refs,
				   fill_ref_dir_fn *fill_ref_dir)
{
	struct ref_cache *ret = xcalloc(1, sizeof(*ret));

	ret->ref_store = refs;
	ret->fill_ref_dir = fill_ref_dir;
	ret->root = create_dir_entry(ret, "", 0, 1);
	return ret;
}
|
||||
|
||||
static void clear_ref_dir(struct ref_dir *dir);
|
||||
|
||||
/*
 * Free entry, recursively freeing its contents first if it is a
 * directory.
 */
static void free_ref_entry(struct ref_entry *entry)
{
	if (entry->flag & REF_DIR) {
		/*
		 * Do not use get_ref_dir() here, as that might
		 * trigger the reading of loose refs.
		 */
		clear_ref_dir(&entry->u.subdir);
	}
	free(entry);
}
|
||||
|
||||
/* Free the whole cache, including its tree of entries. */
void free_ref_cache(struct ref_cache *cache)
{
	free_ref_entry(cache->root);
	free(cache);
}
|
||||
|
||||
/*
 * Clear and free all entries in dir, recursively. dir itself is reset
 * to an empty state but not freed.
 */
static void clear_ref_dir(struct ref_dir *dir)
{
	int i;
	for (i = 0; i < dir->nr; i++)
		free_ref_entry(dir->entries[i]);
	FREE_AND_NULL(dir->entries);
	dir->sorted = dir->nr = dir->alloc = 0;
}
|
||||
|
||||
/*
 * Allocate a new directory entry named by the first len bytes of
 * dirname, belonging to cache. If incomplete is set, the entry is
 * marked REF_INCOMPLETE so it will be lazily filled on first access.
 */
struct ref_entry *create_dir_entry(struct ref_cache *cache,
				   const char *dirname, size_t len,
				   int incomplete)
{
	struct ref_entry *direntry;

	FLEX_ALLOC_MEM(direntry, name, dirname, len);
	direntry->u.subdir.cache = cache;
	direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
	return direntry;
}
|
||||
|
||||
/* qsort() comparator: order ref_entry pointers by entry name. */
static int ref_entry_cmp(const void *a, const void *b)
{
	struct ref_entry *one = *(struct ref_entry **)a;
	struct ref_entry *two = *(struct ref_entry **)b;
	return strcmp(one->name, two->name);
}
|
||||
|
||||
static void sort_ref_dir(struct ref_dir *dir);
|
||||
|
||||
/* A (not necessarily NUL-terminated) string with an explicit length. */
struct string_slice {
	size_t len;
	const char *str;
};
|
||||
|
||||
/*
 * bsearch() comparator: compare a string_slice key against a
 * ref_entry's name, treating the key as exactly key->len characters
 * long.
 */
static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
{
	const struct string_slice *key = key_;
	const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
	int cmp = strncmp(key->str, ent->name, key->len);
	if (cmp)
		return cmp;
	/* Equal up to key->len; the entry name must also end there: */
	return '\0' - (unsigned char)ent->name[key->len];
}
|
||||
|
||||
/*
 * Binary-search dir (sorting it first if necessary) for an entry
 * whose name equals the first len bytes of refname. Return its index,
 * or -1 if no such entry exists.
 */
int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
{
	struct ref_entry **r;
	struct string_slice key;

	if (refname == NULL || !dir->nr)
		return -1;

	sort_ref_dir(dir);
	key.len = len;
	key.str = refname;
	r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
		    ref_entry_cmp_sslice);

	if (r == NULL)
		return -1;

	return r - dir->entries;
}
|
||||
|
||||
/*
 * Search for a directory entry directly within dir (without
 * recursing). Sort dir if necessary. subdirname must be a directory
 * name (i.e., end in '/'). If mkdir is set, then create the
 * directory if it is missing; otherwise, return NULL if the desired
 * directory cannot be found. dir must already be complete.
 */
static struct ref_dir *search_for_subdir(struct ref_dir *dir,
					 const char *subdirname, size_t len,
					 int mkdir)
{
	int entry_index = search_ref_dir(dir, subdirname, len);
	struct ref_entry *entry;
	if (entry_index == -1) {
		if (!mkdir)
			return NULL;
		/*
		 * Since dir is complete, the absence of a subdir
		 * means that the subdir really doesn't exist;
		 * therefore, create an empty record for it but mark
		 * the record complete.
		 */
		entry = create_dir_entry(dir->cache, subdirname, len, 0);
		add_entry_to_dir(dir, entry);
	} else {
		entry = dir->entries[entry_index];
	}
	return get_ref_dir(entry);
}
|
||||
|
||||
/*
 * If refname is a reference name, find the ref_dir within the dir
 * tree that should hold refname. If refname is a directory name
 * (i.e., it ends in '/'), then return that ref_dir itself. dir must
 * represent the top-level directory and must already be complete.
 * Sort ref_dirs and recurse into subdirectories as necessary. If
 * mkdir is set, then create any missing directories; otherwise,
 * return NULL if the desired directory cannot be found.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname, int mkdir)
{
	const char *slash;
	/* Descend one level per '/'-terminated path component: */
	for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
		size_t dirnamelen = slash - refname + 1;
		struct ref_dir *subdir;
		subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
		if (!subdir) {
			dir = NULL;
			break;
		}
		dir = subdir;
	}

	return dir;
}
|
||||
|
||||
/*
 * Find the non-directory entry for refname within the tree rooted at
 * dir, or NULL if it does not exist (or names a directory).
 */
struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
{
	int entry_index;
	struct ref_entry *entry;
	dir = find_containing_dir(dir, refname, 0);
	if (!dir)
		return NULL;
	entry_index = search_ref_dir(dir, refname, strlen(refname));
	if (entry_index == -1)
		return NULL;
	entry = dir->entries[entry_index];
	return (entry->flag & REF_DIR) ? NULL : entry;
}
|
||||
|
||||
/*
 * Remove the entry for refname (which may name either a reference or,
 * with a trailing '/', a whole directory) from the tree rooted at
 * dir, freeing it. Return the number of entries remaining in the
 * containing directory, or -1 if the entry was not found.
 */
int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
{
	int refname_len = strlen(refname);
	int entry_index;
	struct ref_entry *entry;
	int is_dir = refname[refname_len - 1] == '/';
	if (is_dir) {
		/*
		 * refname represents a reference directory. Remove
		 * the trailing slash; otherwise we will get the
		 * directory *representing* refname rather than the
		 * one *containing* it.
		 */
		char *dirname = xmemdupz(refname, refname_len - 1);
		dir = find_containing_dir(dir, dirname, 0);
		free(dirname);
	} else {
		dir = find_containing_dir(dir, refname, 0);
	}
	if (!dir)
		return -1;
	entry_index = search_ref_dir(dir, refname, refname_len);
	if (entry_index == -1)
		return -1;
	entry = dir->entries[entry_index];

	/* Close the gap left by the removed entry: */
	MOVE_ARRAY(&dir->entries[entry_index],
		   &dir->entries[entry_index + 1], dir->nr - entry_index - 1);
	dir->nr--;
	if (dir->sorted > entry_index)
		dir->sorted--;
	free_ref_entry(entry);
	return dir->nr;
}
|
||||
|
||||
/*
 * Add ref to the tree rooted at dir, creating any missing
 * intermediate directories. Return 0 on success, -1 if the containing
 * directory could not be found or created.
 */
int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
{
	dir = find_containing_dir(dir, ref->name, 1);
	if (!dir)
		return -1;
	add_entry_to_dir(dir, ref);
	return 0;
}
|
||||
|
||||
/*
 * Emit a warning and return true iff ref1 and ref2 have the same name
 * and the same oid. Die if they have the same name but different
 * oids.
 */
static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
{
	if (strcmp(ref1->name, ref2->name))
		return 0;

	/* Duplicate name; make sure that they don't conflict: */

	if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
		/* This is impossible by construction */
		die("Reference directory conflict: %s", ref1->name);

	if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid))
		die("Duplicated ref, and SHA1s don't match: %s", ref1->name);

	warning("Duplicated ref: %s", ref1->name);
	return 1;
}
|
||||
|
||||
/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
	int i, j;
	struct ref_entry *last = NULL;

	/*
	 * This check also prevents passing a zero-length array to qsort(),
	 * which is a problem on some platforms.
	 */
	if (dir->sorted == dir->nr)
		return;

	QSORT(dir->entries, dir->nr, ref_entry_cmp);

	/* Remove any duplicates: */
	for (i = 0, j = 0; j < dir->nr; j++) {
		struct ref_entry *entry = dir->entries[j];
		if (last && is_dup_ref(last, entry))
			free_ref_entry(entry);
		else
			last = dir->entries[i++] = entry;
	}
	dir->sorted = dir->nr = i;
}
|
||||
|
||||
enum prefix_state {
	/* All refs within the directory would match prefix: */
	PREFIX_CONTAINS_DIR,

	/* Some, but not all, refs within the directory might match prefix: */
	PREFIX_WITHIN_DIR,

	/* No refs within the directory could possibly match prefix: */
	PREFIX_EXCLUDES_DIR
};

/*
 * Return a `prefix_state` constant describing the relationship
 * between the directory with the specified `dirname` and `prefix`.
 */
static enum prefix_state overlaps_prefix(const char *dirname,
					 const char *prefix)
{
	/* Skip past the longest common prefix of the two strings: */
	for (; *prefix && *dirname == *prefix; dirname++, prefix++)
		;

	if (!*prefix)
		return PREFIX_CONTAINS_DIR;
	if (!*dirname)
		return PREFIX_WITHIN_DIR;
	return PREFIX_EXCLUDES_DIR;
}
|
||||
|
||||
/*
 * Load all of the refs from `dir` (recursively) that could possibly
 * contain references matching `prefix` into our in-memory cache. If
 * `prefix` is NULL, prime unconditionally.
 */
static void prime_ref_dir(struct ref_dir *dir, const char *prefix)
{
	/*
	 * The hard work of loading loose refs is done by get_ref_dir(), so we
	 * just need to recurse through all of the sub-directories. We do not
	 * even need to care about sorting, as traversal order does not matter
	 * to us.
	 */
	int i;
	for (i = 0; i < dir->nr; i++) {
		struct ref_entry *entry = dir->entries[i];
		if (!(entry->flag & REF_DIR)) {
			/* Not a directory; no need to recurse. */
		} else if (!prefix) {
			/* Recurse in any case: */
			prime_ref_dir(get_ref_dir(entry), NULL);
		} else {
			switch (overlaps_prefix(entry->name, prefix)) {
			case PREFIX_CONTAINS_DIR:
				/*
				 * Recurse, and from here down we
				 * don't have to check the prefix
				 * anymore:
				 */
				prime_ref_dir(get_ref_dir(entry), NULL);
				break;
			case PREFIX_WITHIN_DIR:
				prime_ref_dir(get_ref_dir(entry), prefix);
				break;
			case PREFIX_EXCLUDES_DIR:
				/* No need to prime this directory. */
				break;
			}
		}
	}
}
|
||||
|
||||
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/* How this directory relates to the iteration's prefix. */
	enum prefix_state prefix_state;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
|
||||
|
||||
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack. This is always
	 * at least 1, because when it becomes zero the iteration is
	 * ended and this struct is freed.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * Only include references with this prefix in the iteration.
	 * The prefix is matched textually, without regard for path
	 * component boundaries.
	 */
	const char *prefix;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessary the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;
};
|
||||
|
||||
/*
 * Advance to the next cached reference, doing an iterative
 * depth-first walk of the directory tree via the explicit `levels`
 * stack: directories push a new level, exhausted levels pop, and
 * non-directory entries are yielded.
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;
		enum prefix_state entry_prefix_state;

		/* First visit of this level: make sure it is sorted. */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ref_iterator_abort(ref_iterator);

			continue;
		}

		entry = dir->entries[level->index];

		/* Only re-check the prefix when the level demands it: */
		if (level->prefix_state == PREFIX_WITHIN_DIR) {
			entry_prefix_state = overlaps_prefix(entry->name, iter->prefix);
			if (entry_prefix_state == PREFIX_EXCLUDES_DIR)
				continue;
		} else {
			entry_prefix_state = level->prefix_state;
		}

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->prefix_state = entry_prefix_state;
			level->index = -1;
		} else {
			iter->base.refname = entry->name;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}
|
||||
|
||||
/* Peel the current entry's oid directly via peel_object(). */
static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	return peel_object(ref_iterator->oid, peeled);
}
|
||||
|
||||
/* Free the iterator's owned prefix copy, level stack, and itself. */
static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	/* prefix is an owned copy (see cache_ref_iterator_begin()): */
	free((char *)iter->prefix);
	free(iter->levels);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}
|
||||
|
||||
/* Method table for cache_ref_iterator (advance, peel, abort). */
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	cache_ref_iterator_advance,
	cache_ref_iterator_peel,
	cache_ref_iterator_abort
};
|
||||
|
||||
/*
 * Begin iterating over the cached references whose names start with
 * prefix (all of them if prefix is NULL or empty). If prime_dir is
 * set, eagerly load the relevant part of the cache first. Returns an
 * empty iterator if no directory could contain matching refs.
 */
struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
					      const char *prefix,
					      int prime_dir)
{
	struct ref_dir *dir;
	struct cache_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	struct cache_ref_iterator_level *level;

	dir = get_ref_dir(cache->root);
	if (prefix && *prefix)
		dir = find_containing_dir(dir, prefix, 0);
	if (!dir)
		/* There's nothing to iterate over. */
		return empty_ref_iterator_begin();

	if (prime_dir)
		prime_ref_dir(dir, prefix);

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
	ALLOC_GROW(iter->levels, 10, iter->levels_alloc);

	iter->levels_nr = 1;
	level = &iter->levels[0];
	level->index = -1;
	level->dir = dir;

	if (prefix && *prefix) {
		/* Keep an owned copy; freed in cache_ref_iterator_abort(). */
		iter->prefix = xstrdup(prefix);
		level->prefix_state = PREFIX_WITHIN_DIR;
	} else {
		level->prefix_state = PREFIX_CONTAINS_DIR;
	}

	return ref_iterator;
}
|
||||
243
refs/ref-cache.h
Normal file
243
refs/ref-cache.h
Normal file
|
|
@ -0,0 +1,243 @@
|
|||
#ifndef REFS_REF_CACHE_H
|
||||
#define REFS_REF_CACHE_H
|
||||
|
||||
#include "cache.h"
|
||||
|
||||
struct ref_dir;
|
||||
struct ref_store;
|
||||
|
||||
/*
|
||||
* If this ref_cache is filled lazily, this function is used to load
|
||||
* information into the specified ref_dir (shallow or deep, at the
|
||||
* option of the ref_store). dirname includes a trailing slash.
|
||||
*/
|
||||
typedef void fill_ref_dir_fn(struct ref_store *ref_store,
|
||||
struct ref_dir *dir, const char *dirname);
|
||||
|
||||
struct ref_cache {
	/*
	 * The top-level directory entry of the cache (a REF_DIR
	 * entry); its ref_dir is obtained via get_ref_dir().
	 */
	struct ref_entry *root;

	/* A pointer to the ref_store whose cache this is: */
	struct ref_store *ref_store;

	/*
	 * Function used (if necessary) to lazily-fill cache. May be
	 * NULL.
	 */
	fill_ref_dir_fn *fill_ref_dir;
};
|
||||
|
||||
/*
|
||||
* Information used (along with the information in ref_entry) to
|
||||
* describe a single cached reference. This data structure only
|
||||
* occurs embedded in a union in struct ref_entry, and only when
|
||||
* (ref_entry->flag & REF_DIR) is zero.
|
||||
*/
|
||||
struct ref_value {
	/*
	 * The name of the object to which this reference resolves
	 * (which may be a tag object). If REF_ISBROKEN, this is
	 * null. If REF_ISSYMREF, then this is the name of the object
	 * referred to by the last reference in the symlink chain.
	 */
	struct object_id oid;
};
|
||||
|
||||
/*
|
||||
* Information used (along with the information in ref_entry) to
|
||||
* describe a level in the hierarchy of references. This data
|
||||
* structure only occurs embedded in a union in struct ref_entry, and
|
||||
* only when (ref_entry.flag & REF_DIR) is set. In that case,
|
||||
* (ref_entry.flag & REF_INCOMPLETE) determines whether the references
|
||||
* in the directory have already been read:
|
||||
*
|
||||
* (ref_entry.flag & REF_INCOMPLETE) unset -- a directory of loose
|
||||
* or packed references, already read.
|
||||
*
|
||||
* (ref_entry.flag & REF_INCOMPLETE) set -- a directory of loose
|
||||
* references that hasn't been read yet (nor has any of its
|
||||
* subdirectories).
|
||||
*
|
||||
* Entries within a directory are stored within a growable array of
|
||||
* pointers to ref_entries (entries, nr, alloc). Entries 0 <= i <
|
||||
* sorted are sorted by their component name in strcmp() order and the
|
||||
* remaining entries are unsorted.
|
||||
*
|
||||
* Loose references are read lazily, one directory at a time. When a
|
||||
* directory of loose references is read, then all of the references
|
||||
* in that directory are stored, and REF_INCOMPLETE stubs are created
|
||||
* for any subdirectories, but the subdirectories themselves are not
|
||||
* read. The reading is triggered by get_ref_dir().
|
||||
*/
|
||||
struct ref_dir {
	/* Number of entries in use, and number of slots allocated: */
	int nr, alloc;

	/*
	 * Entries with index 0 <= i < sorted are sorted by name. New
	 * entries are appended to the list unsorted, and are sorted
	 * only when required; thus we avoid the need to sort the list
	 * after the addition of every reference.
	 */
	int sorted;

	/* The ref_cache containing this entry: */
	struct ref_cache *cache;

	/* Growable array of pointers to the entries in this directory: */
	struct ref_entry **entries;
};
|
||||
|
||||
/*
|
||||
* Bit values for ref_entry::flag. REF_ISSYMREF=0x01,
|
||||
* REF_ISPACKED=0x02, REF_ISBROKEN=0x04 and REF_BAD_NAME=0x08 are
|
||||
* public values; see refs.h.
|
||||
*/
|
||||
|
||||
/* ref_entry represents a directory of references */
|
||||
#define REF_DIR 0x10
|
||||
|
||||
/*
|
||||
* Entry has not yet been read from disk (used only for REF_DIR
|
||||
* entries representing loose references)
|
||||
*/
|
||||
#define REF_INCOMPLETE 0x20
|
||||
|
||||
/*
|
||||
* A ref_entry represents either a reference or a "subdirectory" of
|
||||
* references.
|
||||
*
|
||||
* Each directory in the reference namespace is represented by a
|
||||
* ref_entry with (flags & REF_DIR) set and containing a subdir member
|
||||
* that holds the entries in that directory that have been read so
|
||||
* far. If (flags & REF_INCOMPLETE) is set, then the directory and
|
||||
* its subdirectories haven't been read yet. REF_INCOMPLETE is only
|
||||
* used for loose reference directories.
|
||||
*
|
||||
* References are represented by a ref_entry with (flags & REF_DIR)
|
||||
* unset and a value member that describes the reference's value. The
|
||||
* flag member is at the ref_entry level, but it is also needed to
|
||||
* interpret the contents of the value field (in other words, a
|
||||
* ref_value object is not very much use without the enclosing
|
||||
* ref_entry).
|
||||
*
|
||||
* Reference names cannot end with slash and directories' names are
|
||||
* always stored with a trailing slash (except for the top-level
|
||||
* directory, which is always denoted by ""). This has two nice
|
||||
* consequences: (1) when the entries in each subdir are sorted
|
||||
* lexicographically by name (as they usually are), the references in
|
||||
* a whole tree can be generated in lexicographic order by traversing
|
||||
* the tree in left-to-right, depth-first order; (2) the names of
|
||||
* references and subdirectories cannot conflict, and therefore the
|
||||
* presence of an empty subdirectory does not block the creation of a
|
||||
* similarly-named reference. (The fact that reference names with the
|
||||
* same leading components can conflict *with each other* is a
|
||||
* separate issue that is regulated by refs_verify_refname_available().)
|
||||
*
|
||||
* Please note that the name field contains the fully-qualified
|
||||
* reference (or subdirectory) name. Space could be saved by only
|
||||
* storing the relative names. But that would require the full names
|
||||
* to be generated on the fly when iterating in do_for_each_ref(), and
|
||||
* would break callback functions, who have always been able to assume
|
||||
* that the name strings that they are passed will not be freed during
|
||||
* the iteration.
|
||||
*/
|
||||
struct ref_entry {
	/* REF_ISSYMREF? REF_ISPACKED? REF_DIR? REF_INCOMPLETE? */
	unsigned char flag;
	union {
		struct ref_value value; /* if not (flags&REF_DIR) */
		struct ref_dir subdir; /* if (flags&REF_DIR) */
	} u;
	/*
	 * The full name of the reference (e.g., "refs/heads/master")
	 * or the full name of the directory with a trailing slash
	 * (e.g., "refs/heads/"):
	 */
	char name[FLEX_ARRAY];
};
|
||||
|
||||
/*
|
||||
* Return the index of the entry with the given refname from the
|
||||
* ref_dir (non-recursively), sorting dir if necessary. Return -1 if
|
||||
* no such entry is found. dir must already be complete.
|
||||
*/
|
||||
int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len);
|
||||
|
||||
struct ref_dir *get_ref_dir(struct ref_entry *entry);
|
||||
|
||||
/*
|
||||
* Create a struct ref_entry object for the specified dirname.
|
||||
* dirname is the name of the directory with a trailing slash (e.g.,
|
||||
* "refs/heads/") or "" for the top-level directory.
|
||||
*/
|
||||
struct ref_entry *create_dir_entry(struct ref_cache *cache,
|
||||
const char *dirname, size_t len,
|
||||
int incomplete);
|
||||
|
||||
struct ref_entry *create_ref_entry(const char *refname,
|
||||
const struct object_id *oid, int flag);
|
||||
|
||||
/*
|
||||
* Return a pointer to a new `ref_cache`. Its top-level starts out
|
||||
* marked incomplete. If `fill_ref_dir` is non-NULL, it is the
|
||||
* function called to fill in incomplete directories in the
|
||||
* `ref_cache` when they are accessed. If it is NULL, then the whole
|
||||
* `ref_cache` must be filled (including clearing its directories'
|
||||
* `REF_INCOMPLETE` bits) before it is used, and `refs` can be NULL,
|
||||
* too.
|
||||
*/
|
||||
struct ref_cache *create_ref_cache(struct ref_store *refs,
|
||||
fill_ref_dir_fn *fill_ref_dir);
|
||||
|
||||
/*
|
||||
* Free the `ref_cache` and all of its associated data.
|
||||
*/
|
||||
void free_ref_cache(struct ref_cache *cache);
|
||||
|
||||
/*
|
||||
* Add a ref_entry to the end of dir (unsorted). Entry is always
|
||||
* stored directly in dir; no recursion into subdirectories is
|
||||
* done.
|
||||
*/
|
||||
void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry);
|
||||
|
||||
/*
|
||||
* Remove the entry with the given name from dir, recursing into
|
||||
* subdirectories as necessary. If refname is the name of a directory
|
||||
* (i.e., ends with '/'), then remove the directory and its contents.
|
||||
* If the removal was successful, return the number of entries
|
||||
* remaining in the directory entry that contained the deleted entry.
|
||||
* If the name was not found, return -1. Please note that this
|
||||
* function only deletes the entry from the cache; it does not delete
|
||||
* it from the filesystem or ensure that other cache entries (which
|
||||
* might be symbolic references to the removed entry) are updated.
|
||||
* Nor does it remove any containing dir entries that might be made
|
||||
* empty by the removal. dir must represent the top-level directory
|
||||
* and must already be complete.
|
||||
*/
|
||||
int remove_entry_from_dir(struct ref_dir *dir, const char *refname);
|
||||
|
||||
/*
|
||||
* Add a ref_entry to the ref_dir (unsorted), recursing into
|
||||
* subdirectories as necessary. dir must represent the top-level
|
||||
* directory. Return 0 on success.
|
||||
*/
|
||||
int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref);
|
||||
|
||||
/*
|
||||
* Find the value entry with the given name in dir, sorting ref_dirs
|
||||
* and recursing into subdirectories as necessary. If the name is not
|
||||
* found or it corresponds to a directory entry, return NULL.
|
||||
*/
|
||||
struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname);
|
||||
|
||||
/*
|
||||
* Start iterating over references in `cache`. If `prefix` is
|
||||
* specified, only include references whose names start with that
|
||||
* prefix. If `prime_dir` is true, then fill any incomplete
|
||||
* directories before beginning the iteration. The output is ordered
|
||||
* by refname.
|
||||
*/
|
||||
struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
|
||||
const char *prefix,
|
||||
int prime_dir);
|
||||
|
||||
#endif /* REFS_REF_CACHE_H */
|
||||
678
refs/refs-internal.h
Normal file
678
refs/refs-internal.h
Normal file
|
|
@ -0,0 +1,678 @@
|
|||
#ifndef REFS_REFS_INTERNAL_H
|
||||
#define REFS_REFS_INTERNAL_H
|
||||
|
||||
#include "cache.h"
|
||||
#include "refs.h"
|
||||
#include "iterator.h"
|
||||
|
||||
struct ref_transaction;
|
||||
|
||||
/*
|
||||
* Data structures and functions for the internal use of the refs
|
||||
* module. Code outside of the refs module should use only the public
|
||||
* functions defined in "refs.h", and should *not* include this file.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The following flags can appear in `ref_update::flags`. Their
|
||||
* numerical values must not conflict with those of REF_NO_DEREF and
|
||||
* REF_FORCE_CREATE_REFLOG, which are also stored in
|
||||
* `ref_update::flags`.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The reference should be updated to new_oid.
|
||||
*/
|
||||
#define REF_HAVE_NEW (1 << 2)
|
||||
|
||||
/*
|
||||
* The current reference's value should be checked to make sure that
|
||||
* it agrees with old_oid.
|
||||
*/
|
||||
#define REF_HAVE_OLD (1 << 3)
|
||||
|
||||
/*
|
||||
* Return the length of time to retry acquiring a loose reference lock
|
||||
* before giving up, in milliseconds:
|
||||
*/
|
||||
long get_files_ref_lock_timeout_ms(void);
|
||||
|
||||
/*
|
||||
* Return true iff refname is minimally safe. "Safe" here means that
|
||||
* deleting a loose reference by this name will not do any damage, for
|
||||
* example by causing a file that is not a reference to be deleted.
|
||||
* This function does not check that the reference name is legal; for
|
||||
* that, use check_refname_format().
|
||||
*
|
||||
* A refname that starts with "refs/" is considered safe iff it
|
||||
* doesn't contain any "." or ".." components or consecutive '/'
|
||||
* characters, end with '/', or (on Windows) contain any '\'
|
||||
* characters. Names that do not start with "refs/" are considered
|
||||
* safe iff they consist entirely of upper case characters and '_'
|
||||
* (like "HEAD" and "MERGE_HEAD" but not "config" or "FOO/BAR").
|
||||
*/
|
||||
int refname_is_safe(const char *refname);
|
||||
|
||||
/*
|
||||
* Helper function: return true if refname, which has the specified
|
||||
* oid and flags, can be resolved to an object in the database. If the
|
||||
* referred-to object does not exist, emit a warning and return false.
|
||||
*/
|
||||
int ref_resolves_to_object(const char *refname,
|
||||
const struct object_id *oid,
|
||||
unsigned int flags);
|
||||
|
||||
/* Result of attempting to peel a (possibly cached) reference: */
enum peel_status {
	/* object was peeled successfully: */
	PEEL_PEELED = 0,

	/*
	 * object cannot be peeled because the named object (or an
	 * object referred to by a tag in the peel chain), does not
	 * exist.
	 */
	PEEL_INVALID = -1,

	/* object cannot be peeled because it is not a tag: */
	PEEL_NON_TAG = -2,

	/* ref_entry contains no peeled value because it is a symref: */
	PEEL_IS_SYMREF = -3,

	/*
	 * ref_entry cannot be peeled because it is broken (i.e., the
	 * symbolic reference cannot even be resolved to an object
	 * name):
	 */
	PEEL_BROKEN = -4
};
|
||||
|
||||
/*
|
||||
* Peel the named object; i.e., if the object is a tag, resolve the
|
||||
* tag recursively until a non-tag is found. If successful, store the
|
||||
* result to oid and return PEEL_PEELED. If the object is not a tag
|
||||
* or is not valid, return PEEL_NON_TAG or PEEL_INVALID, respectively,
|
||||
* and leave oid unchanged.
|
||||
*/
|
||||
enum peel_status peel_object(const struct object_id *name, struct object_id *oid);
|
||||
|
||||
/*
|
||||
* Copy the reflog message msg to sb while cleaning up the whitespaces.
|
||||
* Especially, convert LF to space, because reflog file is one line per entry.
|
||||
*/
|
||||
void copy_reflog_msg(struct strbuf *sb, const char *msg);
|
||||
|
||||
/**
|
||||
* Information needed for a single ref update. Set new_oid to the new
|
||||
* value or to null_oid to delete the ref. To check the old value
|
||||
* while the ref is locked, set (flags & REF_HAVE_OLD) and set old_oid
|
||||
* to the old value, or to null_oid to ensure the ref does not exist
|
||||
* before update.
|
||||
*/
|
||||
struct ref_update {
	/*
	 * If (flags & REF_HAVE_NEW), set the reference to this value
	 * (or delete it, if `new_oid` is `null_oid`).
	 */
	struct object_id new_oid;

	/*
	 * If (flags & REF_HAVE_OLD), check that the reference
	 * previously had this value (or didn't previously exist, if
	 * `old_oid` is `null_oid`).
	 */
	struct object_id old_oid;

	/*
	 * One or more of REF_NO_DEREF, REF_FORCE_CREATE_REFLOG,
	 * REF_HAVE_NEW, REF_HAVE_OLD, or backend-specific flags.
	 */
	unsigned int flags;

	/* Scratch space private to the ref backend handling the update: */
	void *backend_data;

	/*
	 * NOTE(review): presumably the REF_ISSYMREF-style type bits of
	 * the reference as read while locked — confirm against backend.
	 */
	unsigned int type;

	/* Reflog message for this update (owned by the transaction): */
	char *msg;

	/*
	 * If this ref_update was split off of a symref update via
	 * split_symref_update(), then this member points at that
	 * update. This is used for two purposes:
	 * 1. When reporting errors, we report the refname under which
	 *    the update was originally requested.
	 * 2. When we read the old value of this reference, we
	 *    propagate it back to its parent update for recording in
	 *    the latter's reflog.
	 */
	struct ref_update *parent_update;

	/* The full name of the reference to be updated: */
	const char refname[FLEX_ARRAY];
};
|
||||
|
||||
int refs_read_raw_ref(struct ref_store *ref_store,
|
||||
const char *refname, struct object_id *oid,
|
||||
struct strbuf *referent, unsigned int *type);
|
||||
|
||||
/*
|
||||
* Write an error to `err` and return a nonzero value iff the same
|
||||
* refname appears multiple times in `refnames`. `refnames` must be
|
||||
* sorted on entry to this function.
|
||||
*/
|
||||
int ref_update_reject_duplicates(struct string_list *refnames,
|
||||
struct strbuf *err);
|
||||
|
||||
/*
|
||||
* Add a ref_update with the specified properties to transaction, and
|
||||
* return a pointer to the new object. This function does not verify
|
||||
* that refname is well-formed. new_oid and old_oid are only
|
||||
* dereferenced if the REF_HAVE_NEW and REF_HAVE_OLD bits,
|
||||
* respectively, are set in flags.
|
||||
*/
|
||||
struct ref_update *ref_transaction_add_update(
|
||||
struct ref_transaction *transaction,
|
||||
const char *refname, unsigned int flags,
|
||||
const struct object_id *new_oid,
|
||||
const struct object_id *old_oid,
|
||||
const char *msg);
|
||||
|
||||
/*
|
||||
* Transaction states.
|
||||
*
|
||||
* OPEN: The transaction is initialized and new updates can still be
|
||||
* added to it. An OPEN transaction can be prepared,
|
||||
* committed, freed, or aborted (freeing and aborting an open
|
||||
* transaction are equivalent).
|
||||
*
|
||||
* PREPARED: ref_transaction_prepare(), which locks all of the
|
||||
* references involved in the update and checks that the
|
||||
* update has no errors, has been called successfully for the
|
||||
* transaction. A PREPARED transaction can be committed or
|
||||
* aborted.
|
||||
*
|
||||
* CLOSED: The transaction is no longer active. A transaction becomes
|
||||
* CLOSED if there is a failure while building the transaction
|
||||
* or if a transaction is committed or aborted. A CLOSED
|
||||
* transaction can only be freed.
|
||||
*/
|
||||
enum ref_transaction_state {
|
||||
REF_TRANSACTION_OPEN = 0,
|
||||
REF_TRANSACTION_PREPARED = 1,
|
||||
REF_TRANSACTION_CLOSED = 2
|
||||
};
|
||||
|
||||
/*
|
||||
* Data structure for holding a reference transaction, which can
|
||||
* consist of checks and updates to multiple references, carried out
|
||||
* as atomically as possible. This structure is opaque to callers.
|
||||
*/
|
||||
struct ref_transaction {
	/* The ref_store that this transaction applies to: */
	struct ref_store *ref_store;

	/* The queued updates; see struct ref_update: */
	struct ref_update **updates;

	/* Number of slots allocated / in use in `updates`: */
	size_t alloc;
	size_t nr;

	/* OPEN, PREPARED, or CLOSED; see enum ref_transaction_state: */
	enum ref_transaction_state state;

	/* Scratch space private to the ref backend: */
	void *backend_data;
};
|
||||
|
||||
/*
|
||||
* Check for entries in extras that are within the specified
|
||||
* directory, where dirname is a reference directory name including
|
||||
* the trailing slash (e.g., "refs/heads/foo/"). Ignore any
|
||||
* conflicting references that are found in skip. If there is a
|
||||
* conflicting reference, return its name.
|
||||
*
|
||||
* extras and skip must be sorted lists of reference names. Either one
|
||||
* can be NULL, signifying the empty list.
|
||||
*/
|
||||
const char *find_descendant_ref(const char *dirname,
|
||||
const struct string_list *extras,
|
||||
const struct string_list *skip);
|
||||
|
||||
/*
|
||||
* Check whether an attempt to rename old_refname to new_refname would
|
||||
* cause a D/F conflict with any existing reference (other than
|
||||
* possibly old_refname). If there would be a conflict, emit an error
|
||||
* message and return false; otherwise, return true.
|
||||
*
|
||||
* Note that this function is not safe against all races with other
|
||||
* processes (though rename_ref() catches some races that might get by
|
||||
* this check).
|
||||
*/
|
||||
int refs_rename_ref_available(struct ref_store *refs,
|
||||
const char *old_refname,
|
||||
const char *new_refname);
|
||||
|
||||
/* We allow "recursive" symbolic refs. Only within reason, though */
|
||||
#define SYMREF_MAXDEPTH 5
|
||||
|
||||
/* Include broken references in a do_for_each_ref*() iteration: */
|
||||
#define DO_FOR_EACH_INCLUDE_BROKEN 0x01
|
||||
|
||||
/*
|
||||
* Reference iterators
|
||||
*
|
||||
* A reference iterator encapsulates the state of an in-progress
|
||||
* iteration over references. Create an instance of `struct
|
||||
* ref_iterator` via one of the functions in this module.
|
||||
*
|
||||
* A freshly-created ref_iterator doesn't yet point at a reference. To
|
||||
* advance the iterator, call ref_iterator_advance(). If successful,
|
||||
* this sets the iterator's refname, oid, and flags fields to describe
|
||||
* the next reference and returns ITER_OK. The data pointed at by
|
||||
* refname and oid belong to the iterator; if you want to retain them
|
||||
* after calling ref_iterator_advance() again or calling
|
||||
* ref_iterator_abort(), you must make a copy. When the iteration has
|
||||
* been exhausted, ref_iterator_advance() releases any resources
|
||||
* assocated with the iteration, frees the ref_iterator object, and
|
||||
* returns ITER_DONE. If you want to abort the iteration early, call
|
||||
* ref_iterator_abort(), which also frees the ref_iterator object and
|
||||
* any associated resources. If there was an internal error advancing
|
||||
* to the next entry, ref_iterator_advance() aborts the iteration,
|
||||
* frees the ref_iterator, and returns ITER_ERROR.
|
||||
*
|
||||
* The reference currently being looked at can be peeled by calling
|
||||
* ref_iterator_peel(). This function is often faster than peel_ref(),
|
||||
* so it should be preferred when iterating over references.
|
||||
*
|
||||
* Putting it all together, a typical iteration looks like this:
|
||||
*
|
||||
* int ok;
|
||||
* struct ref_iterator *iter = ...;
|
||||
*
|
||||
* while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
|
||||
* if (want_to_stop_iteration()) {
|
||||
* ok = ref_iterator_abort(iter);
|
||||
* break;
|
||||
* }
|
||||
*
|
||||
* // Access information about the current reference:
|
||||
* if (!(iter->flags & REF_ISSYMREF))
|
||||
* printf("%s is %s\n", iter->refname, oid_to_hex(iter->oid));
|
||||
*
|
||||
* // If you need to peel the reference:
|
||||
* ref_iterator_peel(iter, &oid);
|
||||
* }
|
||||
*
|
||||
* if (ok != ITER_DONE)
|
||||
* handle_error();
|
||||
*/
|
||||
struct ref_iterator {
	/* Virtual methods (advance/peel/abort) for this iterator: */
	struct ref_iterator_vtable *vtable;

	/*
	 * Does this `ref_iterator` iterate over references in order
	 * by refname?
	 */
	unsigned int ordered : 1;

	/*
	 * The current reference; valid only after ref_iterator_advance()
	 * has returned ITER_OK. The strings/objects pointed to belong
	 * to the iterator.
	 */
	const char *refname;
	const struct object_id *oid;
	unsigned int flags;
};
|
||||
|
||||
/*
|
||||
* Advance the iterator to the first or next item and return ITER_OK.
|
||||
* If the iteration is exhausted, free the resources associated with
|
||||
* the ref_iterator and return ITER_DONE. On errors, free the iterator
|
||||
* resources and return ITER_ERROR. It is a bug to use ref_iterator or
|
||||
* call this function again after it has returned ITER_DONE or
|
||||
* ITER_ERROR.
|
||||
*/
|
||||
int ref_iterator_advance(struct ref_iterator *ref_iterator);
|
||||
|
||||
/*
|
||||
* If possible, peel the reference currently being viewed by the
|
||||
* iterator. Return 0 on success.
|
||||
*/
|
||||
int ref_iterator_peel(struct ref_iterator *ref_iterator,
|
||||
struct object_id *peeled);
|
||||
|
||||
/*
|
||||
* End the iteration before it has been exhausted, freeing the
|
||||
* reference iterator and any associated resources and returning
|
||||
* ITER_DONE. If the abort itself failed, return ITER_ERROR.
|
||||
*/
|
||||
int ref_iterator_abort(struct ref_iterator *ref_iterator);
|
||||
|
||||
/*
|
||||
* An iterator over nothing (its first ref_iterator_advance() call
|
||||
* returns ITER_DONE).
|
||||
*/
|
||||
struct ref_iterator *empty_ref_iterator_begin(void);
|
||||
|
||||
/*
|
||||
* Return true iff ref_iterator is an empty_ref_iterator.
|
||||
*/
|
||||
int is_empty_ref_iterator(struct ref_iterator *ref_iterator);
|
||||
|
||||
/*
|
||||
* Return an iterator that goes over each reference in `refs` for
|
||||
* which the refname begins with prefix. If trim is non-zero, then
|
||||
* trim that many characters off the beginning of each refname. flags
|
||||
* can be DO_FOR_EACH_INCLUDE_BROKEN to include broken references in
|
||||
* the iteration. The output is ordered by refname.
|
||||
*/
|
||||
struct ref_iterator *refs_ref_iterator_begin(
|
||||
struct ref_store *refs,
|
||||
const char *prefix, int trim, int flags);
|
||||
|
||||
/*
|
||||
* A callback function used to instruct merge_ref_iterator how to
|
||||
* interleave the entries from iter0 and iter1. The function should
|
||||
* return one of the constants defined in enum iterator_selection. It
|
||||
* must not advance either of the iterators itself.
|
||||
*
|
||||
* The function must be prepared to handle the case that iter0 and/or
|
||||
* iter1 is NULL, which indicates that the corresponding sub-iterator
|
||||
* has been exhausted. Its return value must be consistent with the
|
||||
* current states of the iterators; e.g., it must not return
|
||||
* ITER_SKIP_1 if iter1 has already been exhausted.
|
||||
*/
|
||||
typedef enum iterator_selection ref_iterator_select_fn(
|
||||
struct ref_iterator *iter0, struct ref_iterator *iter1,
|
||||
void *cb_data);
|
||||
|
||||
/*
|
||||
* Iterate over the entries from iter0 and iter1, with the values
|
||||
* interleaved as directed by the select function. The iterator takes
|
||||
* ownership of iter0 and iter1 and frees them when the iteration is
|
||||
* over. A derived class should set `ordered` to 1 or 0 based on
|
||||
* whether it generates its output in order by reference name.
|
||||
*/
|
||||
struct ref_iterator *merge_ref_iterator_begin(
|
||||
int ordered,
|
||||
struct ref_iterator *iter0, struct ref_iterator *iter1,
|
||||
ref_iterator_select_fn *select, void *cb_data);
|
||||
|
||||
/*
|
||||
* An iterator consisting of the union of the entries from front and
|
||||
* back. If there are entries common to the two sub-iterators, use the
|
||||
* one from front. Each iterator must iterate over its entries in
|
||||
* strcmp() order by refname for this to work.
|
||||
*
|
||||
* The new iterator takes ownership of its arguments and frees them
|
||||
* when the iteration is over. As a convenience to callers, if front
|
||||
* or back is an empty_ref_iterator, then abort that one immediately
|
||||
* and return the other iterator directly, without wrapping it.
|
||||
*/
|
||||
struct ref_iterator *overlay_ref_iterator_begin(
|
||||
struct ref_iterator *front, struct ref_iterator *back);
|
||||
|
||||
/*
|
||||
* Wrap iter0, only letting through the references whose names start
|
||||
* with prefix. If trim is set, set iter->refname to the name of the
|
||||
* reference with that many characters trimmed off the front;
|
||||
* otherwise set it to the full refname. The new iterator takes over
|
||||
* ownership of iter0 and frees it when iteration is over. It makes
|
||||
* its own copy of prefix.
|
||||
*
|
||||
* As an convenience to callers, if prefix is the empty string and
|
||||
* trim is zero, this function returns iter0 directly, without
|
||||
* wrapping it.
|
||||
*
|
||||
* The resulting ref_iterator is ordered if iter0 is.
|
||||
*/
|
||||
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
|
||||
const char *prefix,
|
||||
int trim);
|
||||
|
||||
/* Internal implementation of reference iteration: */
|
||||
|
||||
/*
|
||||
* Base class constructor for ref_iterators. Initialize the
|
||||
* ref_iterator part of iter, setting its vtable pointer as specified.
|
||||
* `ordered` should be set to 1 if the iterator will iterate over
|
||||
* references in order by refname; otherwise it should be set to 0.
|
||||
* This is meant to be called only by the initializers of derived
|
||||
* classes.
|
||||
*/
|
||||
void base_ref_iterator_init(struct ref_iterator *iter,
|
||||
struct ref_iterator_vtable *vtable,
|
||||
int ordered);
|
||||
|
||||
/*
|
||||
* Base class destructor for ref_iterators. Destroy the ref_iterator
|
||||
* part of iter and shallow-free the object. This is meant to be
|
||||
* called only by the destructors of derived classes.
|
||||
*/
|
||||
void base_ref_iterator_free(struct ref_iterator *iter);
|
||||
|
||||
/* Virtual function declarations for ref_iterators: */
|
||||
|
||||
typedef int ref_iterator_advance_fn(struct ref_iterator *ref_iterator);
|
||||
|
||||
typedef int ref_iterator_peel_fn(struct ref_iterator *ref_iterator,
|
||||
struct object_id *peeled);
|
||||
|
||||
/*
|
||||
* Implementations of this function should free any resources specific
|
||||
* to the derived class, then call base_ref_iterator_free() to clean
|
||||
* up and free the ref_iterator object.
|
||||
*/
|
||||
typedef int ref_iterator_abort_fn(struct ref_iterator *ref_iterator);
|
||||
|
||||
/*
 * The virtual method table for a ref_iterator implementation; filled
 * in by each concrete iterator (cache, files, merge, ...).
 */
struct ref_iterator_vtable {
	ref_iterator_advance_fn *advance;
	ref_iterator_peel_fn *peel;
	ref_iterator_abort_fn *abort;
};
|
||||
|
||||
/*
|
||||
* current_ref_iter is a performance hack: when iterating over
|
||||
* references using the for_each_ref*() functions, current_ref_iter is
|
||||
* set to the reference iterator before calling the callback function.
|
||||
* If the callback function calls peel_ref(), then peel_ref() first
|
||||
* checks whether the reference to be peeled is the one referred to by
|
||||
* the iterator (it usually is) and if so, asks the iterator for the
|
||||
* peeled version of the reference if it is available. This avoids a
|
||||
* refname lookup in a common case. current_ref_iter is set to NULL
|
||||
* when the iteration is over.
|
||||
*/
|
||||
extern struct ref_iterator *current_ref_iter;
|
||||
|
||||
/*
|
||||
* The common backend for the for_each_*ref* functions. Call fn for
|
||||
* each reference in iter. If the iterator itself ever returns
|
||||
* ITER_ERROR, return -1. If fn ever returns a non-zero value, stop
|
||||
* the iteration and return that value. Otherwise, return 0. In any
|
||||
* case, free the iterator when done. This function is basically an
|
||||
* adapter between the callback style of reference iteration and the
|
||||
* iterator style.
|
||||
*/
|
||||
int do_for_each_repo_ref_iterator(struct repository *r,
|
||||
struct ref_iterator *iter,
|
||||
each_repo_ref_fn fn, void *cb_data);
|
||||
|
||||
/*
|
||||
* Only include per-worktree refs in a do_for_each_ref*() iteration.
|
||||
* Normally this will be used with a files ref_store, since that's
|
||||
* where all reference backends will presumably store their
|
||||
* per-worktree refs.
|
||||
*/
|
||||
#define DO_FOR_EACH_PER_WORKTREE_ONLY 0x02
|
||||
|
||||
struct ref_store;
|
||||
|
||||
/* refs backends */

/* ref_store_init flags: capabilities a caller may request of a store */
#define REF_STORE_READ		(1 << 0)
#define REF_STORE_WRITE		(1 << 1) /* can perform update operations */
#define REF_STORE_ODB		(1 << 2) /* has access to object database */
#define REF_STORE_MAIN		(1 << 3)
#define REF_STORE_ALL_CAPS	(REF_STORE_READ | \
				 REF_STORE_WRITE | \
				 REF_STORE_ODB | \
				 REF_STORE_MAIN)
|
||||
|
||||
/*
 * Initialize the ref_store for the specified gitdir. These functions
 * should call base_ref_store_init() to initialize the shared part of
 * the ref_store and to record the ref_store for later lookup.
 */
typedef struct ref_store *ref_store_init_fn(const char *gitdir,
					    unsigned int flags);

/*
 * Set up an empty ref database for a new repository, reporting any
 * failure message via err. NOTE(review): semantics inferred from the
 * name — confirm against the backend implementations.
 */
typedef int ref_init_db_fn(struct ref_store *refs, struct strbuf *err);
|
||||
|
||||
/*
 * The individual phases of a reference transaction (prepare, finish,
 * abort, and the special-case initial commit), as implemented by a
 * backend. On failure, an explanation is presumably written to err.
 */
typedef int ref_transaction_prepare_fn(struct ref_store *refs,
				       struct ref_transaction *transaction,
				       struct strbuf *err);

typedef int ref_transaction_finish_fn(struct ref_store *refs,
				      struct ref_transaction *transaction,
				      struct strbuf *err);

typedef int ref_transaction_abort_fn(struct ref_store *refs,
				     struct ref_transaction *transaction,
				     struct strbuf *err);

typedef int ref_transaction_commit_fn(struct ref_store *refs,
				      struct ref_transaction *transaction,
				      struct strbuf *err);
|
||||
|
||||
/* Reference update operations provided by a backend. */
typedef int pack_refs_fn(struct ref_store *ref_store, unsigned int flags);
typedef int create_symref_fn(struct ref_store *ref_store,
			     const char *ref_target,
			     const char *refs_heads_master,
			     const char *logmsg);
typedef int delete_refs_fn(struct ref_store *ref_store, const char *msg,
			   struct string_list *refnames, unsigned int flags);
typedef int rename_ref_fn(struct ref_store *ref_store,
			  const char *oldref, const char *newref,
			  const char *logmsg);
typedef int copy_ref_fn(struct ref_store *ref_store,
			const char *oldref, const char *newref,
			const char *logmsg);
|
||||
|
||||
/*
 * Iterate over the references in `ref_store` whose names start with
 * `prefix`. `prefix` is matched as a literal string, without regard
 * for path separators. If prefix is NULL or the empty string, iterate
 * over all references in `ref_store`. The output is ordered by
 * refname.
 */
typedef struct ref_iterator *ref_iterator_begin_fn(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags);
|
||||
|
||||
/* reflog functions */

/*
 * Iterate over the references in the specified ref_store that have a
 * reflog. The refs are iterated over in arbitrary order.
 */
typedef struct ref_iterator *reflog_iterator_begin_fn(
		struct ref_store *ref_store);

/* Invoke fn for each entry of refname's reflog, oldest first. */
typedef int for_each_reflog_ent_fn(struct ref_store *ref_store,
				   const char *refname,
				   each_reflog_ent_fn fn,
				   void *cb_data);
/* As above, but visit entries newest first. */
typedef int for_each_reflog_ent_reverse_fn(struct ref_store *ref_store,
					   const char *refname,
					   each_reflog_ent_fn fn,
					   void *cb_data);
typedef int reflog_exists_fn(struct ref_store *ref_store, const char *refname);
typedef int create_reflog_fn(struct ref_store *ref_store, const char *refname,
			     int force_create, struct strbuf *err);
typedef int delete_reflog_fn(struct ref_store *ref_store, const char *refname);
/*
 * Expire reflog entries for refname/oid, using the supplied policy
 * callbacks to decide what to prune; policy_cb_data is passed through
 * to them.
 */
typedef int reflog_expire_fn(struct ref_store *ref_store,
			     const char *refname, const struct object_id *oid,
			     unsigned int flags,
			     reflog_expiry_prepare_fn prepare_fn,
			     reflog_expiry_should_prune_fn should_prune_fn,
			     reflog_expiry_cleanup_fn cleanup_fn,
			     void *policy_cb_data);
|
||||
|
||||
/*
 * Read a reference from the specified reference store, non-recursively.
 * Set type to describe the reference, and:
 *
 * - If refname is the name of a normal reference, fill in oid
 *   (leaving referent unchanged).
 *
 * - If refname is the name of a symbolic reference, write the full
 *   name of the reference to which it refers (e.g.
 *   "refs/heads/master") to referent and set the REF_ISSYMREF bit in
 *   type (leaving oid unchanged). The caller is responsible for
 *   validating that referent is a valid reference name.
 *
 * WARNING: refname might be used as part of a filename, so it is
 * important from a security standpoint that it be safe in the sense
 * of refname_is_safe(). Moreover, for symrefs this function sets
 * referent to whatever the repository says, which might not be a
 * properly-formatted or even safe reference name. NEITHER INPUT NOR
 * OUTPUT REFERENCE NAMES ARE VALIDATED WITHIN THIS FUNCTION.
 *
 * Return 0 on success. If the ref doesn't exist, set errno to ENOENT
 * and return -1. If the ref exists but is neither a symbolic ref nor
 * an object ID, it is broken; set REF_ISBROKEN in type, set errno to
 * EINVAL, and return -1. If there is another error reading the ref,
 * set errno appropriately and return -1.
 *
 * Backend-specific flags might be set in type as well, regardless of
 * outcome.
 *
 * It is OK for refname to point into referent. If so:
 *
 * - if the function succeeds with REF_ISSYMREF, referent will be
 *   overwritten and the memory formerly pointed to by it might be
 *   changed or even freed.
 *
 * - in all other cases, referent will be untouched, and therefore
 *   refname will still be valid and unchanged.
 */
typedef int read_raw_ref_fn(struct ref_store *ref_store,
			    const char *refname, struct object_id *oid,
			    struct strbuf *referent, unsigned int *type);
|
||||
|
||||
/*
 * A reference storage backend: the table of operations implementing
 * ref and reflog access for one storage scheme.
 */
struct ref_storage_be {
	/* Next registered backend, if any. */
	struct ref_storage_be *next;
	/* Backend name, e.g. "files" or "packed". */
	const char *name;
	ref_store_init_fn *init;
	ref_init_db_fn *init_db;

	/* Transaction phases. */
	ref_transaction_prepare_fn *transaction_prepare;
	ref_transaction_finish_fn *transaction_finish;
	ref_transaction_abort_fn *transaction_abort;
	ref_transaction_commit_fn *initial_transaction_commit;

	/* Ref update operations. */
	pack_refs_fn *pack_refs;
	create_symref_fn *create_symref;
	delete_refs_fn *delete_refs;
	rename_ref_fn *rename_ref;
	copy_ref_fn *copy_ref;

	/* Ref reading / iteration. */
	ref_iterator_begin_fn *iterator_begin;
	read_raw_ref_fn *read_raw_ref;

	/* Reflog operations. */
	reflog_iterator_begin_fn *reflog_iterator_begin;
	for_each_reflog_ent_fn *for_each_reflog_ent;
	for_each_reflog_ent_reverse_fn *for_each_reflog_ent_reverse;
	reflog_exists_fn *reflog_exists;
	create_reflog_fn *create_reflog;
	delete_reflog_fn *delete_reflog;
	reflog_expire_fn *reflog_expire;
};
|
||||
|
||||
/* The built-in reference storage backends. */
extern struct ref_storage_be refs_be_files;
extern struct ref_storage_be refs_be_packed;
|
||||
|
||||
/*
 * A representation of the reference store for the main repository or
 * a submodule. The ref_store instances for submodules are kept in a
 * linked list.
 */
struct ref_store {
	/* The backend describing this ref_store's storage scheme: */
	const struct ref_storage_be *be;
};
|
||||
|
||||
/*
 * Fill in the generic part of refs and add it to our collection of
 * reference stores.
 */
void base_ref_store_init(struct ref_store *refs,
			 const struct ref_storage_be *be);

#endif /* REFS_REFS_INTERNAL_H */
|
||||
Loading…
Add table
Add a link
Reference in a new issue