@@ -34,24 +34,111 @@
  */
 #define REF_UPDATE_VIA_HEAD (1 << 8)
 
+struct reftable_backend {
+	struct reftable_stack *stack;
+	struct reftable_iterator it;
+};
+
+static void reftable_backend_on_reload(void *payload)
+{
+	struct reftable_backend *be = payload;
+	reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_init(struct reftable_backend *be,
+				 const char *path,
+				 const struct reftable_write_options *_opts)
+{
+	struct reftable_write_options opts = *_opts;
+	opts.on_reload = reftable_backend_on_reload;
+	opts.on_reload_payload = be;
+	return reftable_new_stack(&be->stack, path, &opts);
+}
+
+static void reftable_backend_release(struct reftable_backend *be)
+{
+	reftable_stack_destroy(be->stack);
+	be->stack = NULL;
+	reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_read_ref(struct reftable_backend *be,
+				     const char *refname,
+				     struct object_id *oid,
+				     struct strbuf *referent,
+				     unsigned int *type)
+{
+	struct reftable_ref_record ref = {0};
+	int ret;
+
+	if (!be->it.ops) {
+		ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
+		if (ret)
+			goto done;
+	}
+
+	ret = reftable_iterator_seek_ref(&be->it, refname);
+	if (ret)
+		goto done;
+
+	ret = reftable_iterator_next_ref(&be->it, &ref);
+	if (ret)
+		goto done;
+
+	if (strcmp(ref.refname, refname)) {
+		ret = 1;
+		goto done;
+	}
+
+	if (ref.value_type == REFTABLE_REF_SYMREF) {
+		strbuf_reset(referent);
+		strbuf_addstr(referent, ref.value.symref);
+		*type |= REF_ISSYMREF;
+	} else if (reftable_ref_record_val1(&ref)) {
+		unsigned int hash_id;
+
+		switch (reftable_stack_hash_id(be->stack)) {
+		case REFTABLE_HASH_SHA1:
+			hash_id = GIT_HASH_SHA1;
+			break;
+		case REFTABLE_HASH_SHA256:
+			hash_id = GIT_HASH_SHA256;
+			break;
+		default:
+			BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
+		}
+
+		oidread(oid, reftable_ref_record_val1(&ref),
+			&hash_algos[hash_id]);
+	} else {
+		/* We got a tombstone, which should not happen. */
+		BUG("unhandled reference value type %d", ref.value_type);
+	}
+
+done:
+	assert(ret != REFTABLE_API_ERROR);
+	reftable_ref_record_release(&ref);
+	return ret;
+}
+
 struct reftable_ref_store {
 	struct ref_store base;
 
 	/*
-	 * The main stack refers to the common dir and thus contains common
+	 * The main backend refers to the common dir and thus contains common
 	 * refs as well as refs of the main repository.
 	 */
-	struct reftable_stack *main_stack;
+	struct reftable_backend main_backend;
 	/*
-	 * The worktree stack refers to the gitdir in case the refdb is opened
+	 * The worktree backend refers to the gitdir in case the refdb is opened
 	 * via a worktree. It thus contains the per-worktree refs.
 	 */
-	struct reftable_stack *worktree_stack;
+	struct reftable_backend worktree_backend;
 	/*
-	 * Map of worktree stacks by their respective worktree names. The map
+	 * Map of worktree backends by their respective worktree names. The map
 	 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
 	 */
-	struct strmap worktree_stacks;
+	struct strmap worktree_backends;
 	struct reftable_write_options write_options;
 
 	unsigned int store_flags;
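
Note: the following is a usage sketch, not part of the patch. It assumes a
caller that owns a `struct reftable_backend` by value and a populated `struct
reftable_write_options opts`; the path is hypothetical. The `on_reload`
callback wired up in reftable_backend_init() discards the cached ref iterator
whenever the stack reloads, so reads can never be served from a stale `be->it`:

	struct reftable_backend be = {0};
	struct strbuf referent = STRBUF_INIT;
	struct object_id oid;
	unsigned int type = 0;
	int ret;

	ret = reftable_backend_init(&be, "/repo/.git/reftable", &opts);
	if (!ret)
		/* 0 on success, > 0 if the ref is missing, < 0 on error. */
		ret = reftable_backend_read_ref(&be, "refs/heads/main",
						&oid, &referent, &type);
	reftable_backend_release(&be);
	strbuf_release(&referent);
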
@@ -97,21 +184,25 @@ static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
  * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
  * those references in their normalized form.
  */
-static struct reftable_stack *stack_for(struct reftable_ref_store *store,
-					const char *refname,
-					const char **rewritten_ref)
+static int backend_for(struct reftable_backend **out,
+		       struct reftable_ref_store *store,
+		       const char *refname,
+		       const char **rewritten_ref,
+		       int reload)
 {
+	struct reftable_backend *be;
 	const char *wtname;
 	int wtname_len;
 
-	if (!refname)
-		return store->main_stack;
+	if (!refname) {
+		be = &store->main_backend;
+		goto out;
+	}
 
 	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
 	case REF_WORKTREE_OTHER: {
 		static struct strbuf wtname_buf = STRBUF_INIT;
 		struct strbuf wt_dir = STRBUF_INIT;
-		struct reftable_stack *stack;
 
 		/*
 		 * We're using a static buffer here so that we don't need to
@@ -125,40 +216,55 @@ static struct reftable_stack *stack_for(struct reftable_ref_store *store,
 		/*
 		 * There is an edge case here: when the worktree references the
 		 * current worktree, then we set up the stack once via
-		 * `worktree_stacks` and once via `worktree_stack`. This is
+		 * `worktree_backends` and once via `worktree_backend`. This is
 		 * wasteful, but in the reading case it shouldn't matter. And
 		 * in the writing case we would notice that the stack is locked
 		 * already and error out when trying to write a reference via
 		 * both stacks.
 		 */
-		stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
-		if (!stack) {
+		be = strmap_get(&store->worktree_backends, wtname_buf.buf);
+		if (!be) {
 			strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
 				    store->base.repo->commondir, wtname_buf.buf);
 
-			store->err = reftable_new_stack(&stack, wt_dir.buf,
-							&store->write_options);
+			CALLOC_ARRAY(be, 1);
+			store->err = reftable_backend_init(be, wt_dir.buf,
+							   &store->write_options);
 			assert(store->err != REFTABLE_API_ERROR);
-			strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
+
+			strmap_put(&store->worktree_backends, wtname_buf.buf, be);
 		}
 
 		strbuf_release(&wt_dir);
-		return stack;
+		goto out;
 	}
 	case REF_WORKTREE_CURRENT:
 		/*
 		 * If there is no worktree stack then we're currently in the
 		 * main worktree. We thus return the main stack in that case.
 		 */
-		if (!store->worktree_stack)
-			return store->main_stack;
-		return store->worktree_stack;
+		if (!store->worktree_backend.stack)
+			be = &store->main_backend;
+		else
+			be = &store->worktree_backend;
+		goto out;
 	case REF_WORKTREE_MAIN:
 	case REF_WORKTREE_SHARED:
-		return store->main_stack;
+		be = &store->main_backend;
+		goto out;
 	default:
 		BUG("unhandled worktree reference type");
 	}
+
+out:
+	if (reload) {
+		int ret = reftable_stack_reload(be->stack);
+		if (ret)
+			return ret;
+	}
+	*out = be;
+
+	return 0;
 }
 
 static int should_write_log(struct reftable_ref_store *refs, const char *refname)
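
Note: a sketch of the calling convention, not part of the patch; most call
sites below follow this shape. Passing reload=1 refreshes the stack before the
caller reads from it, reload=0 leaves that responsibility with the caller:

	struct reftable_backend *be;
	int ret;

	/* Resolve the backend for the ref and reload its stack in one go. */
	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret)
		return ret;

	/*
	 * `refname` now holds the normalized name: a hypothetical
	 * "worktrees/wt/refs/heads/foo" has become "refs/heads/foo".
	 */
	ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
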
@@ -207,38 +313,6 @@ static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
 	log->value.update.tz_offset = sign * atoi(tz_begin);
 }
 
-static int read_ref_without_reload(struct reftable_ref_store *refs,
-				   struct reftable_stack *stack,
-				   const char *refname,
-				   struct object_id *oid,
-				   struct strbuf *referent,
-				   unsigned int *type)
-{
-	struct reftable_ref_record ref = {0};
-	int ret;
-
-	ret = reftable_stack_read_ref(stack, refname, &ref);
-	if (ret)
-		goto done;
-
-	if (ref.value_type == REFTABLE_REF_SYMREF) {
-		strbuf_reset(referent);
-		strbuf_addstr(referent, ref.value.symref);
-		*type |= REF_ISSYMREF;
-	} else if (reftable_ref_record_val1(&ref)) {
-		oidread(oid, reftable_ref_record_val1(&ref),
-			refs->base.repo->hash_algo);
-	} else {
-		/* We got a tombstone, which should not happen. */
-		BUG("unhandled reference value type %d", ref.value_type);
-	}
-
-done:
-	assert(ret != REFTABLE_API_ERROR);
-	reftable_ref_record_release(&ref);
-	return ret;
-}
-
 static int reftable_be_config(const char *var, const char *value,
 			      const struct config_context *ctx,
 			      void *_opts)
@@ -292,7 +366,7 @@ static struct ref_store *reftable_be_init(struct repository *repo,
 	umask(mask);
 
 	base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
-	strmap_init(&refs->worktree_stacks);
+	strmap_init(&refs->worktree_backends);
 	refs->store_flags = store_flags;
 	refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
 
@@ -337,8 +411,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
 		strbuf_realpath(&path, gitdir, 0);
 	}
 	strbuf_addstr(&path, "/reftable");
-	refs->err = reftable_new_stack(&refs->main_stack, path.buf,
-				       &refs->write_options);
+	refs->err = reftable_backend_init(&refs->main_backend, path.buf,
+					  &refs->write_options);
 	if (refs->err)
 		goto done;
 
@@ -354,8 +428,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
 		strbuf_reset(&path);
 		strbuf_addf(&path, "%s/reftable", gitdir);
 
-		refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
-					       &refs->write_options);
+		refs->err = reftable_backend_init(&refs->worktree_backend, path.buf,
+						  &refs->write_options);
 		if (refs->err)
 			goto done;
 	}
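
Note: both backends now go through reftable_backend_init(), so the reload
callback is wired up uniformly. A sketch of the on-disk layout this resolves
to, assuming a hypothetical repository at /repo with one linked worktree "wt":

	/repo/.git/reftable                -> refs->main_backend
	/repo/.git/worktrees/wt/reftable   -> refs->worktree_backend (when the
	                                      refdb is opened via that worktree)
	                                      or the "wt" entry in
	                                      refs->worktree_backends
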
@@ -374,19 +448,17 @@ static void reftable_be_release(struct ref_store *ref_store)
 	struct strmap_entry *entry;
 	struct hashmap_iter iter;
 
-	if (refs->main_stack) {
-		reftable_stack_destroy(refs->main_stack);
-		refs->main_stack = NULL;
-	}
+	if (refs->main_backend.stack)
+		reftable_backend_release(&refs->main_backend);
+	if (refs->worktree_backend.stack)
+		reftable_backend_release(&refs->worktree_backend);
 
-	if (refs->worktree_stack) {
-		reftable_stack_destroy(refs->worktree_stack);
-		refs->worktree_stack = NULL;
+	strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
+		struct reftable_backend *be = entry->value;
+		reftable_backend_release(be);
+		free(be);
 	}
-
-	strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
-		reftable_stack_destroy(entry->value);
-	strmap_clear(&refs->worktree_stacks, 0);
+	strmap_clear(&refs->worktree_backends, 0);
 }
 
 static int reftable_be_create_on_disk(struct ref_store *ref_store,
@@ -781,7 +853,7 @@ static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
 	required_flags |= REF_STORE_ODB;
 	refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
 
-	main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix,
+	main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
 					   exclude_patterns, flags);
 
 	/*
@@ -789,14 +861,14 @@ static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
 	 * right now. If we aren't, then we return the common reftable
 	 * iterator, only.
 	 */
-	if (!refs->worktree_stack)
+	if (!refs->worktree_backend.stack)
 		return &main_iter->base;
 
 	/*
 	 * Otherwise we merge both the common and the per-worktree refs into a
 	 * single iterator.
 	 */
-	worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix,
+	worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
 					       exclude_patterns, flags);
 	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
 					ref_iterator_select, NULL);
@@ -811,17 +883,17 @@ static int reftable_be_read_raw_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
+	struct reftable_backend *be;
 	int ret;
 
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		return ret;
 
-	ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
+	ret = reftable_backend_read_ref(be, refname, oid, referent, type);
 	if (ret < 0)
 		return ret;
 	if (ret > 0) {
@@ -838,21 +910,18 @@ static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
-	struct reftable_ref_record ref = {0};
+	struct reftable_backend *be;
+	struct object_id oid;
+	unsigned int type = 0;
 	int ret;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		return ret;
 
-	ret = reftable_stack_read_ref(stack, refname, &ref);
-	if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
-		strbuf_addstr(referent, ref.value.symref);
-	else
+	ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
+	if (type != REF_ISSYMREF)
 		ret = -1;
-
-	reftable_ref_record_release(&ref);
 	return ret;
 }
 
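
Note: a sketch, not part of the patch, of the contract that
reftable_backend_read_ref() gives its callers, as derived from the
implementation further up:

	ret = reftable_backend_read_ref(be, "HEAD", &oid, &referent, &type);
	if (ret < 0) {
		/* hard error */
	} else if (ret > 0) {
		/* the reference does not exist */
	} else if (type & REF_ISSYMREF) {
		/* `referent` holds the target, e.g. "refs/heads/main" */
	} else {
		/* direct reference, `oid` has been populated */
	}
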
@@ -863,7 +932,7 @@ struct reftable_transaction_update {
 
 struct write_transaction_table_arg {
 	struct reftable_ref_store *refs;
-	struct reftable_stack *stack;
+	struct reftable_backend *be;
 	struct reftable_addition *addition;
 	struct reftable_transaction_update *updates;
 	size_t updates_nr;
@@ -898,27 +967,37 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
 				      struct ref_update *update,
 				      struct strbuf *err)
 {
-	struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
 	struct write_transaction_table_arg *arg = NULL;
+	struct reftable_backend *be;
 	size_t i;
 	int ret;
 
+	/*
+	 * This function gets called in a loop, and we don't want to repeatedly
+	 * reload the stack for every single ref update. Instead, we manually
+	 * reload further down in the case where we haven't yet prepared the
+	 * specific `reftable_backend`.
+	 */
+	ret = backend_for(&be, refs, update->refname, NULL, 0);
+	if (ret)
+		return ret;
+
 	/*
 	 * Search for a preexisting stack update. If there is one then we add
 	 * the update to it, otherwise we set up a new stack update.
 	 */
 	for (i = 0; !arg && i < tx_data->args_nr; i++)
-		if (tx_data->args[i].stack == stack)
+		if (tx_data->args[i].be == be)
 			arg = &tx_data->args[i];
 
 	if (!arg) {
 		struct reftable_addition *addition;
 
-		ret = reftable_stack_reload(stack);
+		ret = reftable_stack_reload(be->stack);
 		if (ret)
 			return ret;
 
-		ret = reftable_stack_new_addition(&addition, stack,
+		ret = reftable_stack_new_addition(&addition, be->stack,
 						  REFTABLE_STACK_NEW_ADDITION_RELOAD);
 		if (ret) {
 			if (ret == REFTABLE_LOCK_ERROR)
@@ -930,7 +1009,7 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
 					tx_data->args_alloc);
 		arg = &tx_data->args[tx_data->args_nr++];
 		arg->refs = refs;
-		arg->stack = stack;
+		arg->be = be;
 		arg->addition = addition;
 		arg->updates = NULL;
 		arg->updates_nr = 0;
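
Note: comparing `tx_data->args[i].be == be` by pointer is sound because
backend_for() hands out stable addresses: `&store->main_backend`,
`&store->worktree_backend`, or a CALLOC_ARRAY()-allocated entry owned by the
strmap. A sketch of the resulting dedup behaviour, assuming two hypothetical
branches that both live in the common refdb:

	struct reftable_backend *a, *b;

	backend_for(&a, refs, "refs/heads/foo", NULL, 0);
	backend_for(&b, refs, "refs/heads/bar", NULL, 0);
	/*
	 * a == b, so both updates join the same write_transaction_table_arg
	 * and thus the same reftable addition.
	 */
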
@@ -985,6 +1064,7 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 	struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
 	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
 	struct reftable_transaction_data *tx_data = NULL;
+	struct reftable_backend *be;
 	struct object_id head_oid;
 	unsigned int head_type = 0;
 	size_t i;
@@ -1031,8 +1111,23 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 		goto done;
 	}
 
-	ret = read_ref_without_reload(refs, stack_for(refs, "HEAD", NULL), "HEAD",
-				      &head_oid, &head_referent, &head_type);
+	/*
+	 * TODO: it's dubious whether we should reload the stack that "HEAD"
+	 * belongs to or not. In theory, it may happen that we only modify
+	 * stacks which are _not_ part of the "HEAD" stack. In that case we
+	 * wouldn't have prepared any transaction for its stack and would not
+	 * have reloaded it, which may mean that it is stale.
+	 *
+	 * On the other hand, reloading that stack without locking it feels
+	 * wrong, too, as the value of "HEAD" could be modified concurrently at
+	 * any point in time.
+	 */
+	ret = backend_for(&be, refs, "HEAD", NULL, 0);
+	if (ret)
+		goto done;
+
+	ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
+					&head_referent, &head_type);
 	if (ret < 0)
 		goto done;
 	ret = 0;
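
Note: a hedged illustration of the trade-off spelled out in the TODO above.
Because the lookup passes reload=0, a concurrent writer can race with us:

	ret = backend_for(&be, refs, "HEAD", NULL, 0);
	/*
	 * If another process updates "HEAD" and rewrites its stack at this
	 * point, the read below may still be served from our older, unlocked
	 * snapshot of that stack.
	 */
	ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
					&head_referent, &head_type);
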
@@ -1040,10 +1135,18 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 	for (i = 0; i < transaction->nr; i++) {
 		struct ref_update *u = transaction->updates[i];
 		struct object_id current_oid = {0};
-		struct reftable_stack *stack;
 		const char *rewritten_ref;
 
-		stack = stack_for(refs, u->refname, &rewritten_ref);
+		/*
+		 * There is no need to reload the respective backends here as
+		 * we have already reloaded them when preparing the transaction
+		 * update. And given that the stacks have been locked there
+		 * shouldn't have been any concurrent modifications of the
+		 * stack.
+		 */
+		ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
+		if (ret)
+			goto done;
 
 		/* Verify that the new object ID is valid. */
 		if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
@@ -1099,8 +1202,8 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 			string_list_insert(&affected_refnames, new_update->refname);
 		}
 
-		ret = read_ref_without_reload(refs, stack, rewritten_ref,
-					      &current_oid, &referent, &u->type);
+		ret = reftable_backend_read_ref(be, rewritten_ref,
+						&current_oid, &referent, &u->type);
 		if (ret < 0)
 			goto done;
 		if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
@@ -1303,7 +1406,7 @@ static int transaction_update_cmp(const void *a, const void *b)
 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
 {
 	struct write_transaction_table_arg *arg = cb_data;
-	uint64_t ts = reftable_stack_next_update_index(arg->stack);
+	uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
 	struct reftable_log_record *logs = NULL;
 	struct ident_split committer_ident = {0};
 	size_t logs_nr = 0, logs_alloc = 0, i;
@@ -1339,7 +1442,7 @@ static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
 			struct reftable_log_record log = {0};
 			struct reftable_iterator it = {0};
 
-			ret = reftable_stack_init_log_iterator(arg->stack, &it);
+			ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
 			if (ret < 0)
 				goto done;
 
@@ -1520,9 +1623,9 @@ static int reftable_be_pack_refs(struct ref_store *ref_store,
 	if (refs->err)
 		return refs->err;
 
-	stack = refs->worktree_stack;
+	stack = refs->worktree_backend.stack;
 	if (!stack)
-		stack = refs->main_stack;
+		stack = refs->main_backend.stack;
 
 	if (opts->flags & PACK_REFS_AUTO)
 		ret = reftable_stack_auto_compact(stack);
@@ -1553,7 +1656,7 @@ struct write_create_symref_arg {
 
 struct write_copy_arg {
 	struct reftable_ref_store *refs;
-	struct reftable_stack *stack;
+	struct reftable_backend *be;
 	const char *oldname;
 	const char *newname;
 	const char *logmsg;
@@ -1578,7 +1681,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 	if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
 		BUG("failed splitting committer info");
 
-	if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
+	if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
 		ret = error(_("refname %s not found"), arg->oldname);
 		goto done;
 	}
@@ -1617,7 +1720,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 	 * the old branch and the creation of the new branch, and we cannot do
 	 * two changes to a reflog in a single update.
 	 */
-	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
+	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
 	if (arg->delete_old)
 		creation_ts++;
 	reftable_writer_set_limits(writer, deletion_ts, creation_ts);
@@ -1660,8 +1763,8 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 		memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
 		logs_nr++;
 
-		ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
-					      &head_referent, &head_type);
+		ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
+						&head_referent, &head_type);
 		if (ret < 0)
 			goto done;
 		append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
@@ -1704,7 +1807,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 	 * copy over all log entries from the old reflog. Last but not least,
 	 * when renaming we also have to delete all the old reflog entries.
 	 */
-	ret = reftable_stack_init_log_iterator(arg->stack, &it);
+	ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -1777,10 +1880,8 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
-	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
 	struct write_copy_arg arg = {
 		.refs = refs,
-		.stack = stack,
 		.oldname = oldrefname,
 		.newname = newrefname,
 		.logmsg = logmsg,
@@ -1792,10 +1893,10 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
 	if (ret)
 		goto done;
-	ret = reftable_stack_add(stack, &write_copy_table, &arg);
+	ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
 
 done:
 	assert(ret != REFTABLE_API_ERROR);
@@ -1809,10 +1910,8 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
-	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
 	struct write_copy_arg arg = {
 		.refs = refs,
-		.stack = stack,
 		.oldname = oldrefname,
 		.newname = newrefname,
 		.logmsg = logmsg,
@@ -1823,10 +1922,10 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
 	if (ret)
 		goto done;
-	ret = reftable_stack_add(stack, &write_copy_table, &arg);
+	ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
 
 done:
 	assert(ret != REFTABLE_API_ERROR);
@@ -1947,11 +2046,11 @@ static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
 		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
 	struct reftable_reflog_iterator *main_iter, *worktree_iter;
 
-	main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
-	if (!refs->worktree_stack)
+	main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
+	if (!refs->worktree_backend.stack)
 		return &main_iter->base;
 
-	worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
+	worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
 
 	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
 					ref_iterator_select, NULL);
@@ -1990,15 +2089,23 @@ static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
 	struct reftable_log_record log = {0};
 	struct reftable_iterator it = {0};
+	struct reftable_backend *be;
 	int ret;
 
	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	/*
+	 * TODO: we should adapt this callsite to reload the stack. There is no
+	 * obvious reason why we shouldn't.
+	 */
+	ret = backend_for(&be, refs, refname, &refname, 0);
+	if (ret)
+		goto done;
+
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2030,16 +2137,24 @@ static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
 	struct reftable_log_record *logs = NULL;
 	struct reftable_iterator it = {0};
+	struct reftable_backend *be;
 	size_t logs_alloc = 0, logs_nr = 0, i;
 	int ret;
 
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	/*
+	 * TODO: we should adapt this callsite to reload the stack. There is no
+	 * obvious reason why we shouldn't.
+	 */
+	ret = backend_for(&be, refs, refname, &refname, 0);
+	if (ret)
+		goto done;
+
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2079,20 +2194,20 @@ static int reftable_be_reflog_exists(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
 	struct reftable_log_record log = {0};
 	struct reftable_iterator it = {0};
+	struct reftable_backend *be;
 	int ret;
 
 	ret = refs->err;
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2164,10 +2279,9 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
+	struct reftable_backend *be;
 	struct write_reflog_existence_arg arg = {
 		.refs = refs,
-		.stack = stack,
 		.refname = refname,
 	};
 	int ret;
@@ -2176,11 +2290,12 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		goto done;
+	arg.stack = be->stack;
 
-	ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
+	ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
 
 done:
 	return ret;
@@ -2238,17 +2353,18 @@ static int reftable_be_delete_reflog(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
+	struct reftable_backend *be;
 	struct write_reflog_delete_arg arg = {
-		.stack = stack,
 		.refname = refname,
 	};
 	int ret;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		return ret;
-	ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
+	arg.stack = be->stack;
+
+	ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
 
 	assert(ret != REFTABLE_API_ERROR);
 	return ret;
@@ -2347,26 +2463,27 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
 	 */
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
-	struct reftable_stack *stack = stack_for(refs, refname, &refname);
 	struct reftable_log_record *logs = NULL;
 	struct reftable_log_record *rewritten = NULL;
-	struct reftable_ref_record ref_record = {0};
 	struct reftable_iterator it = {0};
 	struct reftable_addition *add = NULL;
 	struct reflog_expiry_arg arg = {0};
+	struct reftable_backend *be;
 	struct object_id oid = {0};
+	struct strbuf referent = STRBUF_INIT;
 	uint8_t *last_hash = NULL;
 	size_t logs_nr = 0, logs_alloc = 0, i;
+	unsigned int type = 0;
 	int ret;
 
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2374,16 +2491,13 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_new_addition(&add, stack, 0);
+	ret = reftable_stack_new_addition(&add, be->stack, 0);
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_read_ref(stack, refname, &ref_record);
+	ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
 	if (ret < 0)
 		goto done;
-	if (reftable_ref_record_val1(&ref_record))
-		oidread(&oid, reftable_ref_record_val1(&ref_record),
-			ref_store->repo->hash_algo);
 	prepare_fn(refname, &oid, policy_cb_data);
 
 	while (1) {
@@ -2450,15 +2564,14 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
 		}
 	}
 
-	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
-	    reftable_ref_record_val1(&ref_record))
+	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
 		oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
 
 	arg.refs = refs;
 	arg.records = rewritten;
 	arg.len = logs_nr;
-	arg.stack = stack,
-	arg.refname = refname,
+	arg.stack = be->stack;
+	arg.refname = refname;
 
 	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
 	if (ret < 0)
@@ -2476,11 +2589,11 @@ done:
 	cleanup_fn(policy_cb_data);
 	assert(ret != REFTABLE_API_ERROR);
 
-	reftable_ref_record_release(&ref_record);
 	reftable_iterator_destroy(&it);
 	reftable_addition_destroy(add);
 	for (i = 0; i < logs_nr; i++)
 		reftable_log_record_release(&logs[i]);
+	strbuf_release(&referent);
 	free(logs);
 	free(rewritten);
 	return ret;