Merge branch 'ps/remove-packfile-store-get-packs' into ps/packed-git-in-object-store

* ps/remove-packfile-store-get-packs: (55 commits)
  packfile: rename `packfile_store_get_all_packs()`
  packfile: introduce macro to iterate through packs
  packfile: drop `packfile_store_get_packs()`
  builtin/grep: simplify how we preload packs
  builtin/gc: convert to use `packfile_store_get_all_packs()`
  object-name: convert to use `packfile_store_get_all_packs()`
  builtin/repack.c: clean up unused `#include`s
  repack: move `write_cruft_pack()` out of the builtin
  repack: move `write_filtered_pack()` out of the builtin
  repack: move `pack_kept_objects` to `struct pack_objects_args`
  repack: move `finish_pack_objects_cmd()` out of the builtin
  builtin/repack.c: pass `write_pack_opts` to `finish_pack_objects_cmd()`
  repack: extract `write_pack_opts_is_local()`
  repack: move `find_pack_prefix()` out of the builtin
  builtin/repack.c: use `write_pack_opts` within `write_cruft_pack()`
  builtin/repack.c: introduce `struct write_pack_opts`
  repack: 'write_midx_included_packs' API from the builtin
  builtin/repack.c: inline packs within `write_midx_included_packs()`
  builtin/repack.c: pass `repack_write_midx_opts` to `midx_included_packs`
  builtin/repack.c: inline `remove_redundant_bitmaps()`
  ...
Junio C Hamano, 2025-10-28 10:00:56 -07:00
30 changed files with 1924 additions and 1348 deletions
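At the core of the series, open-coded walks over the pack list are replaced by the new `repo_for_each_pack()` iterator, the old `packfile_store_get_packs()` is dropped, and `packfile_store_get_all_packs()` is renamed to take its place. A minimal before/after sketch of a caller (editorial illustration only; `count_local_packs()` is a made-up helper, not part of the series):

static unsigned long count_local_packs(struct repository *repo)
{
	struct packed_git *p;
	unsigned long nr = 0;

	/*
	 * Before this series a caller would walk the list by hand:
	 *
	 *	for (p = packfile_store_get_all_packs(repo->objects->packfiles);
	 *	     p; p = p->next)
	 *		...
	 */
	repo_for_each_pack(repo, p)
		if (p->pack_local)
			nr++;

	return nr;
}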


@@ -1260,6 +1260,12 @@ LIB_OBJS += reftable/table.o
LIB_OBJS += reftable/tree.o
LIB_OBJS += reftable/writer.o
LIB_OBJS += remote.o
LIB_OBJS += repack.o
LIB_OBJS += repack-cruft.o
LIB_OBJS += repack-filtered.o
LIB_OBJS += repack-geometry.o
LIB_OBJS += repack-midx.o
LIB_OBJS += repack-promisor.o
LIB_OBJS += replace-object.o
LIB_OBJS += repo-settings.o
LIB_OBJS += repository.o


@@ -852,10 +852,9 @@ static void batch_each_object(struct batch_options *opt,
if (bitmap && !for_each_bitmapped_object(bitmap, &opt->objects_filter,
batch_one_object_bitmapped, &payload)) {
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *pack;
for (pack = packfile_store_get_all_packs(packs); pack; pack = pack->next) {
repo_for_each_pack(the_repository, pack) {
if (bitmap_index_contains_pack(bitmap, pack) ||
open_pack_index(pack))
continue;


@@ -122,7 +122,6 @@ int cmd_count_objects(int argc,
count_loose, count_cruft, NULL, NULL);
if (verbose) {
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
unsigned long num_pack = 0;
off_t size_pack = 0;
@@ -130,7 +129,7 @@ int cmd_count_objects(int argc,
struct strbuf pack_buf = STRBUF_INIT;
struct strbuf garbage_buf = STRBUF_INIT;
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (!p->pack_local)
continue;
if (open_pack_index(p))


@@ -978,7 +978,7 @@ static int store_object(
if (e->idx.offset) {
duplicate_count_by_type[type]++;
return 1;
} else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
} else if (find_oid_pack(&oid, packfile_store_get_packs(packs))) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1179,7 +1179,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
duplicate_count_by_type[OBJ_BLOB]++;
truncate_pack(&checkpoint);
} else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
} else if (find_oid_pack(&oid, packfile_store_get_packs(packs))) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */


@@ -867,20 +867,20 @@ static int mark_packed_for_connectivity(const struct object_id *oid,
static int check_pack_rev_indexes(struct repository *r, int show_progress)
{
struct packfile_store *packs = r->objects->packfiles;
struct progress *progress = NULL;
struct packed_git *p;
uint32_t pack_count = 0;
int res = 0;
if (show_progress) {
for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next)
repo_for_each_pack(r, p)
pack_count++;
progress = start_delayed_progress(the_repository,
"Verifying reverse pack-indexes", pack_count);
pack_count = 0;
}
for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(r, p) {
int load_error = load_pack_revindex_from_disk(p);
if (load_error < 0) {
@@ -1000,8 +1000,6 @@ int cmd_fsck(int argc,
for_each_packed_object(the_repository,
mark_packed_for_connectivity, NULL, 0);
} else {
struct packfile_store *packs = the_repository->objects->packfiles;
odb_prepare_alternates(the_repository->objects);
for (source = the_repository->objects->sources; source; source = source->next)
fsck_source(source);
@@ -1012,8 +1010,7 @@ int cmd_fsck(int argc,
struct progress *progress = NULL;
if (show_progress) {
for (p = packfile_store_get_all_packs(packs); p;
p = p->next) {
repo_for_each_pack(the_repository, p) {
if (open_pack_index(p))
continue;
total += p->num_objects;
@@ -1022,8 +1019,8 @@ int cmd_fsck(int argc,
progress = start_progress(the_repository,
_("Checking objects"), total);
}
for (p = packfile_store_get_all_packs(packs); p;
p = p->next) {
repo_for_each_pack(the_repository, p) {
/* verify gives error messages itself */
if (verify_pack(the_repository,
p, fsck_obj_buffer,


@@ -487,10 +487,9 @@ static int too_many_loose_objects(struct gc_config *cfg)
static struct packed_git *find_base_packs(struct string_list *packs,
unsigned long limit)
{
struct packfile_store *packfiles = the_repository->objects->packfiles;
struct packed_git *p, *base = NULL;
for (p = packfile_store_get_all_packs(packfiles); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (!p->pack_local || p->is_cruft)
continue;
if (limit) {
@@ -509,14 +508,13 @@ static struct packed_git *find_base_packs(struct string_list *packs,
static int too_many_packs(struct gc_config *cfg)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
int cnt;
int cnt = 0;
if (cfg->gc_auto_pack_limit <= 0)
return 0;
for (cnt = 0, p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (!p->pack_local)
continue;
if (p->pack_keep)
@@ -1425,9 +1423,9 @@ static int incremental_repack_auto_condition(struct gc_config *cfg UNUSED)
if (incremental_repack_auto_limit < 0)
return 1;
for (p = packfile_store_get_packs(the_repository->objects->packfiles);
count < incremental_repack_auto_limit && p;
p = p->next) {
repo_for_each_pack(the_repository, p) {
if (count >= incremental_repack_auto_limit)
break;
if (!p->multi_pack_index)
count++;
}
@@ -1494,7 +1492,7 @@ static off_t get_auto_pack_size(void)
struct repository *r = the_repository;
odb_reprepare(r->objects);
for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
repo_for_each_pack(r, p) {
if (p->pack_size > max_size) {
second_largest_size = max_size;
max_size = p->pack_size;


@@ -1214,7 +1214,7 @@ int cmd_grep(int argc,
if (recurse_submodules)
repo_read_gitmodules(the_repository, 1);
if (startup_info->have_repository)
(void)packfile_store_get_packs(the_repository->objects->packfiles);
packfile_store_prepare(the_repository->objects->packfiles);
start_threads(&opt);
} else {


@@ -3831,12 +3831,10 @@ static int pack_mtime_cmp(const void *_a, const void *_b)
static void read_packs_list_from_stdin(struct rev_info *revs)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct string_list include_packs = STRING_LIST_INIT_DUP;
struct string_list exclude_packs = STRING_LIST_INIT_DUP;
struct string_list_item *item = NULL;
struct packed_git *p;
while (strbuf_getline(&buf, stdin) != EOF) {
@@ -3856,7 +3854,7 @@ static void read_packs_list_from_stdin(struct rev_info *revs)
string_list_sort(&exclude_packs);
string_list_remove_duplicates(&exclude_packs, 0);
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
const char *pack_name = pack_basename(p);
if ((item = string_list_lookup(&include_packs, pack_name)))
@@ -4077,7 +4075,6 @@ static void enumerate_cruft_objects(void)
static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
struct rev_info revs;
int ret;
@@ -4107,7 +4104,7 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
* Re-mark only the fresh packs as kept so that objects in
* unknown packs do not halt the reachability traversal early.
*/
for (p = packfile_store_get_all_packs(packs); p; p = p->next)
repo_for_each_pack(the_repository, p)
p->pack_keep_in_core = 0;
mark_pack_kept_in_core(fresh_packs, 1);
@@ -4124,7 +4121,6 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
static void read_cruft_objects(void)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct string_list discard_packs = STRING_LIST_INIT_DUP;
struct string_list fresh_packs = STRING_LIST_INIT_DUP;
@@ -4145,7 +4141,7 @@ static void read_cruft_objects(void)
string_list_sort(&discard_packs);
string_list_sort(&fresh_packs);
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
const char *pack_name = pack_basename(p);
struct string_list_item *item;
@@ -4398,7 +4394,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
struct packed_git *p;
p = (last_found != (void *)1) ? last_found :
packfile_store_get_all_packs(packs);
packfile_store_get_packs(packs);
while (p) {
if ((!p->pack_local || p->pack_keep ||
@@ -4408,7 +4404,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
return 1;
}
if (p == last_found)
p = packfile_store_get_all_packs(packs);
p = packfile_store_get_packs(packs);
else
p = p->next;
if (p == last_found)
@@ -4440,13 +4436,12 @@ static int loosened_object_can_be_discarded(const struct object_id *oid,
static void loosen_unused_packed_objects(void)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
uint32_t i;
uint32_t loosened_objects_nr = 0;
struct object_id oid;
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
@@ -4747,13 +4742,12 @@ static void get_object_list(struct rev_info *revs, struct strvec *argv)
static void add_extra_kept_packs(const struct string_list *names)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
if (!names->nr)
return;
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
const char *name = basename(p->pack_name);
int i;
@@ -5191,10 +5185,9 @@ int cmd_pack_objects(int argc,
add_extra_kept_packs(&keep_pack_list);
if (ignore_packed_keep_on_disk) {
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
for (p = packfile_store_get_all_packs(packs); p; p = p->next)
repo_for_each_pack(the_repository, p)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
@@ -5206,10 +5199,9 @@ int cmd_pack_objects(int argc,
* want to unset "local" based on looking at packs, as
* it also covers non-local objects
*/
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (!p->pack_local) {
have_non_local_packs = 1;
break;


@@ -566,29 +566,23 @@ static struct pack_list * add_pack(struct packed_git *p)
static struct pack_list * add_pack_file(const char *filename)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p = packfile_store_get_all_packs(packs);
struct packed_git *p;
if (strlen(filename) < 40)
die("Bad pack filename: %s", filename);
while (p) {
repo_for_each_pack(the_repository, p)
if (strstr(p->pack_name, filename))
return add_pack(p);
p = p->next;
}
die("Filename %s not found in packed_git", filename);
}
static void load_all(void)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p = packfile_store_get_all_packs(packs);
struct packed_git *p;
while (p) {
repo_for_each_pack(the_repository, p)
add_pack(p);
p = p->next;
}
}
int cmd_pack_redundant(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) {

File diff suppressed because it is too large.

bulk-checkin.c (new file)

@@ -0,0 +1,403 @@
/*
* Copyright (c) 2011, Google Inc.
*/
#define USE_THE_REPOSITORY_VARIABLE
#include "git-compat-util.h"
#include "bulk-checkin.h"
#include "environment.h"
#include "gettext.h"
#include "hex.h"
#include "lockfile.h"
#include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
#include "tmp-objdir.h"
#include "packfile.h"
#include "object-file.h"
#include "odb.h"
struct bulk_checkin_packfile {
char *pack_tmp_name;
struct hashfile *f;
off_t offset;
struct pack_idx_option pack_idx_opts;
struct pack_idx_entry **written;
uint32_t alloc_written;
uint32_t nr_written;
};
struct odb_transaction {
struct object_database *odb;
int nesting;
struct tmp_objdir *objdir;
struct bulk_checkin_packfile packfile;
};
static void finish_tmp_packfile(struct odb_transaction *transaction,
struct strbuf *basename,
unsigned char hash[])
{
struct bulk_checkin_packfile *state = &transaction->packfile;
struct repository *repo = transaction->odb->repo;
char *idx_tmp_name = NULL;
stage_tmp_packfiles(repo, basename, state->pack_tmp_name,
state->written, state->nr_written, NULL,
&state->pack_idx_opts, hash, &idx_tmp_name);
rename_tmp_packfile_idx(repo, basename, &idx_tmp_name);
free(idx_tmp_name);
}
static void flush_bulk_checkin_packfile(struct odb_transaction *transaction)
{
struct bulk_checkin_packfile *state = &transaction->packfile;
struct repository *repo = transaction->odb->repo;
unsigned char hash[GIT_MAX_RAWSZ];
struct strbuf packname = STRBUF_INIT;
if (!state->f)
return;
if (state->nr_written == 0) {
close(state->f->fd);
free_hashfile(state->f);
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
state->nr_written, hash,
state->offset);
close(fd);
}
strbuf_addf(&packname, "%s/pack/pack-%s.",
repo_get_object_directory(transaction->odb->repo),
hash_to_hex_algop(hash, repo->hash_algo));
finish_tmp_packfile(transaction, &packname, hash);
for (uint32_t i = 0; i < state->nr_written; i++)
free(state->written[i]);
clear_exit:
free(state->pack_tmp_name);
free(state->written);
memset(state, 0, sizeof(*state));
strbuf_release(&packname);
/* Make objects we just wrote available to ourselves */
odb_reprepare(repo->objects);
}
/*
* Cleanup after batch-mode fsync_object_files.
*/
static void flush_batch_fsync(struct odb_transaction *transaction)
{
struct strbuf temp_path = STRBUF_INIT;
struct tempfile *temp;
if (!transaction->objdir)
return;
/*
* Issue a full hardware flush against a temporary file to ensure
* that all objects are durable before any renames occur. The code in
* fsync_loose_object_bulk_checkin has already issued a writeout
* request, but it has not flushed any writeback cache in the storage
* hardware or any filesystem logs. This fsync call acts as a barrier
* to ensure that the data in each new object file is durable before
* the final name is visible.
*/
strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
repo_get_object_directory(transaction->odb->repo));
temp = xmks_tempfile(temp_path.buf);
fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
delete_tempfile(&temp);
strbuf_release(&temp_path);
/*
* Make the object files visible in the primary ODB after their data is
* fully durable.
*/
tmp_objdir_migrate(transaction->objdir);
transaction->objdir = NULL;
}
static int already_written(struct odb_transaction *transaction,
struct object_id *oid)
{
/* The object may already exist in the repository */
if (odb_has_object(transaction->odb, oid,
HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return 1;
/* Might want to keep the list sorted */
for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
if (oideq(&transaction->packfile.written[i]->oid, oid))
return 1;
/* This is a new object we need to keep */
return 0;
}
/*
* Read the contents from fd for size bytes, streaming it to the
* packfile in state while updating the hash in ctx. Signal a failure
* by returning a negative value when the resulting pack would exceed
* the pack size limit and this is not the first object in the pack,
* so that the caller can discard what we wrote from the current pack
* by truncating it and opening a new one. The caller will then call
* us again after rewinding the input fd.
*
* The already_hashed_to pointer is kept untouched by the caller to
* make sure we do not hash the same byte when we are called
* again. This way, the caller does not have to checkpoint its hash
* status before calling us just in case we ask it to call us again
* with a new pack.
*/
static int stream_blob_to_pack(struct bulk_checkin_packfile *state,
struct git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, const char *path,
unsigned flags)
{
git_zstream s;
unsigned char ibuf[16384];
unsigned char obuf[16384];
unsigned hdrlen;
int status = Z_OK;
int write_object = (flags & INDEX_WRITE_OBJECT);
off_t offset = 0;
git_deflate_init(&s, pack_compression_level);
hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
s.next_out = obuf + hdrlen;
s.avail_out = sizeof(obuf) - hdrlen;
while (status != Z_STREAM_END) {
if (size && !s.avail_in) {
size_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
ssize_t read_result = read_in_full(fd, ibuf, rsize);
if (read_result < 0)
die_errno("failed to read from '%s'", path);
if ((size_t)read_result != rsize)
die("failed to read %u bytes from '%s'",
(unsigned)rsize, path);
offset += rsize;
if (*already_hashed_to < offset) {
size_t hsize = offset - *already_hashed_to;
if (rsize < hsize)
hsize = rsize;
if (hsize)
git_hash_update(ctx, ibuf, hsize);
*already_hashed_to = offset;
}
s.next_in = ibuf;
s.avail_in = rsize;
size -= rsize;
}
status = git_deflate(&s, size ? 0 : Z_FINISH);
if (!s.avail_out || status == Z_STREAM_END) {
if (write_object) {
size_t written = s.next_out - obuf;
/* would we bust the size limit? */
if (state->nr_written &&
pack_size_limit_cfg &&
pack_size_limit_cfg < state->offset + written) {
git_deflate_abort(&s);
return -1;
}
hashwrite(state->f, obuf, written);
state->offset += written;
}
s.next_out = obuf;
s.avail_out = sizeof(obuf);
}
switch (status) {
case Z_OK:
case Z_BUF_ERROR:
case Z_STREAM_END:
continue;
default:
die("unexpected deflate failure: %d", status);
}
}
git_deflate_end(&s);
return 0;
}
/* Lazily create backing packfile for the state */
static void prepare_to_stream(struct odb_transaction *transaction,
unsigned flags)
{
struct bulk_checkin_packfile *state = &transaction->packfile;
if (!(flags & INDEX_WRITE_OBJECT) || state->f)
return;
state->f = create_tmp_packfile(transaction->odb->repo,
&state->pack_tmp_name);
reset_pack_idx_option(&state->pack_idx_opts);
/* Pretend we are going to write only one object */
state->offset = write_pack_header(state->f, 1);
if (!state->offset)
die_errno("unable to write pack header");
}
int index_blob_bulk_checkin(struct odb_transaction *transaction,
struct object_id *result_oid, int fd, size_t size,
const char *path, unsigned flags)
{
struct bulk_checkin_packfile *state = &transaction->packfile;
off_t seekback, already_hashed_to;
struct git_hash_ctx ctx;
unsigned char obuf[16384];
unsigned header_len;
struct hashfile_checkpoint checkpoint;
struct pack_idx_entry *idx = NULL;
seekback = lseek(fd, 0, SEEK_CUR);
if (seekback == (off_t) -1)
return error("cannot find the current offset");
header_len = format_object_header((char *)obuf, sizeof(obuf),
OBJ_BLOB, size);
transaction->odb->repo->hash_algo->init_fn(&ctx);
git_hash_update(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & INDEX_WRITE_OBJECT) != 0) {
CALLOC_ARRAY(idx, 1);
prepare_to_stream(transaction, flags);
hashfile_checkpoint_init(state->f, &checkpoint);
}
already_hashed_to = 0;
while (1) {
prepare_to_stream(transaction, flags);
if (idx) {
hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
crc32_begin(state->f);
}
if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
fd, size, path, flags))
break;
/*
* Writing this object to the current pack will make
* it too big; we need to truncate it, start a new
* pack, and write into it.
*/
if (!idx)
BUG("should not happen");
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
flush_bulk_checkin_packfile(transaction);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
git_hash_final_oid(result_oid, &ctx);
if (!idx)
return 0;
idx->crc32 = crc32_end(state->f);
if (already_written(transaction, result_oid)) {
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
} else {
oidcpy(&idx->oid, result_oid);
ALLOC_GROW(state->written,
state->nr_written + 1,
state->alloc_written);
state->written[state->nr_written++] = idx;
}
return 0;
}
void prepare_loose_object_bulk_checkin(struct odb_transaction *transaction)
{
/*
* We lazily create the temporary object directory
* the first time an object might be added, since
* callers may not know whether any objects will be
* added at the time they call begin_odb_transaction.
*/
if (!transaction || transaction->objdir)
return;
transaction->objdir = tmp_objdir_create(transaction->odb->repo, "bulk-fsync");
if (transaction->objdir)
tmp_objdir_replace_primary_odb(transaction->objdir, 0);
}
void fsync_loose_object_bulk_checkin(struct odb_transaction *transaction,
int fd, const char *filename)
{
/*
* If we have an active ODB transaction, we issue a call that
* cleans the filesystem page cache but avoids a hardware flush
* command. Later on we will issue a single hardware flush
* before renaming the objects to their final names as part of
* flush_batch_fsync.
*/
if (!transaction || !transaction->objdir ||
git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
if (errno == ENOSYS)
warning(_("core.fsyncMethod = batch is unsupported on this platform"));
fsync_or_die(fd, filename);
}
}
struct odb_transaction *begin_odb_transaction(struct object_database *odb)
{
if (!odb->transaction) {
CALLOC_ARRAY(odb->transaction, 1);
odb->transaction->odb = odb;
}
odb->transaction->nesting += 1;
return odb->transaction;
}
void flush_odb_transaction(struct odb_transaction *transaction)
{
if (!transaction)
return;
flush_batch_fsync(transaction);
flush_bulk_checkin_packfile(transaction);
}
void end_odb_transaction(struct odb_transaction *transaction)
{
if (!transaction || transaction->nesting == 0)
BUG("Unbalanced ODB transaction nesting");
transaction->nesting -= 1;
if (transaction->nesting)
return;
flush_odb_transaction(transaction);
transaction->odb->transaction = NULL;
free(transaction);
}
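
A minimal usage sketch of the transaction API above (editorial illustration, not part of bulk-checkin.c; `repo`, `fd`, `size` and `path` stand for caller-provided values). Transactions nest, and only the outermost `end_odb_transaction()` flushes the bulk-checkin packfile and the batched fsyncs:

	struct object_id oid;
	struct odb_transaction *tx = begin_odb_transaction(repo->objects);

	/* Streams the blob into the transaction's bulk-checkin packfile. */
	if (index_blob_bulk_checkin(tx, &oid, fd, size, path, INDEX_WRITE_OBJECT))
		die("bulk check-in of '%s' failed", path);

	/* The outermost end triggers flush_odb_transaction(). */
	end_odb_transaction(tx);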


@@ -74,10 +74,9 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
*/
odb_reprepare(the_repository->objects);
do {
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (!p->pack_promisor)
continue;
if (find_pack_entry_one(oid, p))


@@ -603,19 +603,18 @@ static void get_head(struct strbuf *hdr, char *arg UNUSED)
static void get_info_packs(struct strbuf *hdr, char *arg UNUSED)
{
size_t objdirlen = strlen(repo_get_object_directory(the_repository));
struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct packed_git *p;
size_t cnt = 0;
select_getanyfile(hdr);
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (p->pack_local)
cnt++;
}
strbuf_grow(&buf, cnt * 53 + 2);
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (p->pack_local)
strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6);
}

http.c

@@ -2416,7 +2416,6 @@ static char *fetch_pack_index(unsigned char *hash, const char *base_url)
static int fetch_and_setup_pack_index(struct packed_git **packs_head,
unsigned char *sha1, const char *base_url)
{
struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *new_pack, *p;
char *tmp_idx = NULL;
int ret;
@@ -2425,7 +2424,7 @@ static int fetch_and_setup_pack_index(struct packed_git **packs_head,
* If we already have the pack locally, no need to fetch its index or
* even add it to list; we already have all of its objects.
*/
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
if (hasheq(p->hash, sha1, the_repository->hash_algo))
return 0;
}


@@ -463,6 +463,12 @@ libgit_sources = [
'reftable/tree.c',
'reftable/writer.c',
'remote.c',
'repack.c',
'repack-cruft.c',
'repack-filtered.c',
'repack-geometry.c',
'repack-midx.c',
'repack-promisor.c',
'replace-object.c',
'repo-settings.c',
'repository.c',


@@ -213,9 +213,11 @@ static void find_short_packed_object(struct disambiguate_state *ds)
unique_in_midx(m, ds);
}
for (p = packfile_store_get_packs(ds->repo->objects->packfiles); p && !ds->ambiguous;
p = p->next)
repo_for_each_pack(ds->repo, p) {
if (ds->ambiguous)
break;
unique_in_pack(p, ds);
}
}
static int finish_object_disambiguation(struct disambiguate_state *ds,
@@ -805,7 +807,7 @@ static void find_abbrev_len_packed(struct min_abbrev_data *mad)
find_abbrev_len_for_midx(m, mad);
}
for (p = packfile_store_get_packs(mad->repo->objects->packfiles); p; p = p->next)
repo_for_each_pack(mad->repo, p)
find_abbrev_len_for_pack(p, mad);
}


@@ -664,7 +664,7 @@ static int open_pack_bitmap(struct repository *r,
struct packed_git *p;
int ret = -1;
for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
repo_for_each_pack(r, p) {
if (open_pack_bitmap_1(bitmap_git, p) == 0) {
ret = 0;
/*
@@ -3347,6 +3347,7 @@ static int verify_bitmap_file(const struct git_hash_algo *algop,
int verify_bitmap_files(struct repository *r)
{
struct odb_source *source;
struct packed_git *p;
int res = 0;
odb_prepare_alternates(r->objects);
@@ -3362,8 +3363,7 @@ int verify_bitmap_files(struct repository *r)
free(midx_bitmap_name);
}
for (struct packed_git *p = packfile_store_get_all_packs(r->objects->packfiles);
p; p = p->next) {
repo_for_each_pack(r, p) {
char *pack_bitmap_name = pack_bitmap_filename(p);
res |= verify_bitmap_file(r->hash_algo, pack_bitmap_name);
free(pack_bitmap_name);


@@ -87,7 +87,6 @@ struct object_entry *packlist_find(struct packing_data *pdata,
static void prepare_in_pack_by_idx(struct packing_data *pdata)
{
struct packfile_store *packs = pdata->repo->objects->packfiles;
struct packed_git **mapping, *p;
int cnt = 0, nr = 1U << OE_IN_PACK_BITS;
@@ -97,13 +96,13 @@ static void prepare_in_pack_by_idx(struct packing_data *pdata)
* (i.e. in_pack_idx also zero) should return NULL.
*/
mapping[cnt++] = NULL;
for (p = packfile_store_get_all_packs(packs); p; p = p->next, cnt++) {
repo_for_each_pack(pdata->repo, p) {
if (cnt == nr) {
free(mapping);
return;
}
p->index = cnt;
mapping[cnt] = p;
mapping[cnt++] = p;
}
pdata->in_pack_by_idx = mapping;
}


@@ -1028,12 +1028,6 @@ void packfile_store_reprepare(struct packfile_store *store)
}
struct packed_git *packfile_store_get_packs(struct packfile_store *store)
{
packfile_store_prepare(store);
return store->packs;
}
struct packed_git *packfile_store_get_all_packs(struct packfile_store *store)
{
packfile_store_prepare(store);
@@ -2105,7 +2099,7 @@ struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
* covers, one kept and one not kept, but the midx returns only
* the non-kept version.
*/
for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
repo_for_each_pack(r, p) {
if ((p->pack_keep && (flags & ON_DISK_KEEP_PACKS)) ||
(p->pack_keep_in_core && (flags & IN_CORE_KEEP_PACKS))) {
ALLOC_GROW(packs, nr + 1, alloc);
@@ -2202,7 +2196,7 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
int r = 0;
int pack_errors = 0;
for (p = packfile_store_get_all_packs(repo->objects->packfiles); p; p = p->next) {
repo_for_each_pack(repo, p) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&


@@ -137,16 +137,18 @@ void packfile_store_add_pack(struct packfile_store *store,
struct packed_git *pack);
/*
* Get packs managed by the given store. Does not load the MIDX or any packs
* referenced by it.
* Load and iterate through all packs of the given repository. This helper
* function will yield packfiles from all object sources connected to the
* repository.
*/
struct packed_git *packfile_store_get_packs(struct packfile_store *store);
#define repo_for_each_pack(repo, p) \
for (p = packfile_store_get_packs(repo->objects->packfiles); p; p = p->next)
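/*
 * Illustrative usage (editorial note, not part of this header): the caller
 * declares the iteration variable itself, and the macro lazily prepares the
 * store, yielding packs that are only reachable via a multi-pack index too:
 *
 *	struct packed_git *p;
 *	repo_for_each_pack(repo, p)
 *		if (p->pack_local)
 *			...
 */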
/*
* Get all packs managed by the given store, including packfiles that are
* referenced by multi-pack indices.
*/
struct packed_git *packfile_store_get_all_packs(struct packfile_store *store);
struct packed_git *packfile_store_get_packs(struct packfile_store *store);
/*
* Get all packs in most-recently-used order.

repack-cruft.c (new file)

@@ -0,0 +1,98 @@
#include "git-compat-util.h"
#include "repack.h"
#include "packfile.h"
#include "repository.h"
#include "run-command.h"
static void combine_small_cruft_packs(FILE *in, off_t combine_cruft_below_size,
struct existing_packs *existing)
{
struct packed_git *p;
struct strbuf buf = STRBUF_INIT;
size_t i;
repo_for_each_pack(existing->repo, p) {
if (!(p->is_cruft && p->pack_local))
continue;
strbuf_reset(&buf);
strbuf_addstr(&buf, pack_basename(p));
strbuf_strip_suffix(&buf, ".pack");
if (!string_list_has_string(&existing->cruft_packs, buf.buf))
continue;
if (p->pack_size < combine_cruft_below_size) {
fprintf(in, "-%s\n", pack_basename(p));
} else {
existing_packs_retain_cruft(existing, p);
fprintf(in, "%s\n", pack_basename(p));
}
}
for (i = 0; i < existing->non_kept_packs.nr; i++)
fprintf(in, "-%s.pack\n",
existing->non_kept_packs.items[i].string);
strbuf_release(&buf);
}
int write_cruft_pack(const struct write_pack_opts *opts,
const char *cruft_expiration,
unsigned long combine_cruft_below_size,
struct string_list *names,
struct existing_packs *existing)
{
struct child_process cmd = CHILD_PROCESS_INIT;
struct string_list_item *item;
FILE *in;
int ret;
const char *pack_prefix = write_pack_opts_pack_prefix(opts);
prepare_pack_objects(&cmd, opts->po_args, opts->destination);
strvec_push(&cmd.args, "--cruft");
if (cruft_expiration)
strvec_pushf(&cmd.args, "--cruft-expiration=%s",
cruft_expiration);
strvec_push(&cmd.args, "--non-empty");
cmd.in = -1;
ret = start_command(&cmd);
if (ret)
return ret;
/*
* names has a confusing double use: it both provides the list
* of just-written new packs, and accepts the name of the cruft
* pack we are writing.
*
* By the time it is read here, it contains only the pack(s)
* that were just written, which is exactly the set of packs we
* want to consider kept.
*
* If `--expire-to` is given, the double-use served by `names`
* ensures that the pack written to `--expire-to` excludes any
* objects contained in the cruft pack.
*/
in = xfdopen(cmd.in, "w");
for_each_string_list_item(item, names)
fprintf(in, "%s-%s.pack\n", pack_prefix, item->string);
if (combine_cruft_below_size && !cruft_expiration) {
combine_small_cruft_packs(in, combine_cruft_below_size,
existing);
} else {
for_each_string_list_item(item, &existing->non_kept_packs)
fprintf(in, "-%s.pack\n", item->string);
for_each_string_list_item(item, &existing->cruft_packs)
fprintf(in, "-%s.pack\n", item->string);
}
for_each_string_list_item(item, &existing->kept_packs)
fprintf(in, "%s.pack\n", item->string);
fclose(in);
return finish_pack_objects_cmd(existing->repo->hash_algo, opts, &cmd,
names);
}

repack-filtered.c (new file)

@@ -0,0 +1,51 @@
#include "git-compat-util.h"
#include "repack.h"
#include "repository.h"
#include "run-command.h"
#include "string-list.h"
int write_filtered_pack(const struct write_pack_opts *opts,
struct existing_packs *existing,
struct string_list *names)
{
struct child_process cmd = CHILD_PROCESS_INIT;
struct string_list_item *item;
FILE *in;
int ret;
const char *caret;
const char *pack_prefix = write_pack_opts_pack_prefix(opts);
prepare_pack_objects(&cmd, opts->po_args, opts->destination);
strvec_push(&cmd.args, "--stdin-packs");
for_each_string_list_item(item, &existing->kept_packs)
strvec_pushf(&cmd.args, "--keep-pack=%s", item->string);
cmd.in = -1;
ret = start_command(&cmd);
if (ret)
return ret;
/*
* Here 'names' contains only the pack(s) that were just
* written, which is exactly the packs we want to keep. Also
* 'existing_kept_packs' already contains the packs in
* 'keep_pack_list'.
*/
in = xfdopen(cmd.in, "w");
for_each_string_list_item(item, names)
fprintf(in, "^%s-%s.pack\n", pack_prefix, item->string);
for_each_string_list_item(item, &existing->non_kept_packs)
fprintf(in, "%s.pack\n", item->string);
for_each_string_list_item(item, &existing->cruft_packs)
fprintf(in, "%s.pack\n", item->string);
caret = opts->po_args->pack_kept_objects ? "" : "^";
for_each_string_list_item(item, &existing->kept_packs)
fprintf(in, "%s%s.pack\n", caret, item->string);
fclose(in);
return finish_pack_objects_cmd(existing->repo->hash_algo, opts, &cmd,
names);
}

repack-geometry.c (new file)

@@ -0,0 +1,232 @@
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "git-compat-util.h"
#include "repack.h"
#include "repository.h"
#include "hex.h"
#include "packfile.h"
static uint32_t pack_geometry_weight(struct packed_git *p)
{
if (open_pack_index(p))
die(_("cannot open index for %s"), p->pack_name);
return p->num_objects;
}
static int pack_geometry_cmp(const void *va, const void *vb)
{
uint32_t aw = pack_geometry_weight(*(struct packed_git **)va),
bw = pack_geometry_weight(*(struct packed_git **)vb);
if (aw < bw)
return -1;
if (aw > bw)
return 1;
return 0;
}
void pack_geometry_init(struct pack_geometry *geometry,
struct existing_packs *existing,
const struct pack_objects_args *args)
{
struct packed_git *p;
struct strbuf buf = STRBUF_INIT;
repo_for_each_pack(existing->repo, p) {
if (args->local && !p->pack_local)
/*
* When asked to only repack local packfiles we skip
* over any packfiles that are borrowed from alternate
* object directories.
*/
continue;
if (!args->pack_kept_objects) {
/*
* Any pack that has its pack_keep bit set will
* appear in existing->kept_packs below, but
* this saves us from doing a more expensive
* check.
*/
if (p->pack_keep)
continue;
/*
* The pack may be kept via the --keep-pack
* option; check 'existing->kept_packs' to
* determine whether to ignore it.
*/
strbuf_reset(&buf);
strbuf_addstr(&buf, pack_basename(p));
strbuf_strip_suffix(&buf, ".pack");
if (string_list_has_string(&existing->kept_packs, buf.buf))
continue;
}
if (p->is_cruft)
continue;
ALLOC_GROW(geometry->pack,
geometry->pack_nr + 1,
geometry->pack_alloc);
geometry->pack[geometry->pack_nr] = p;
geometry->pack_nr++;
}
QSORT(geometry->pack, geometry->pack_nr, pack_geometry_cmp);
strbuf_release(&buf);
}
void pack_geometry_split(struct pack_geometry *geometry)
{
uint32_t i;
uint32_t split;
off_t total_size = 0;
if (!geometry->pack_nr) {
geometry->split = geometry->pack_nr;
return;
}
/*
* First, count the number of packs (in descending order of size) which
* already form a geometric progression.
*/
for (i = geometry->pack_nr - 1; i > 0; i--) {
struct packed_git *ours = geometry->pack[i];
struct packed_git *prev = geometry->pack[i - 1];
if (unsigned_mult_overflows(geometry->split_factor,
pack_geometry_weight(prev)))
die(_("pack %s too large to consider in geometric "
"progression"),
prev->pack_name);
if (pack_geometry_weight(ours) <
geometry->split_factor * pack_geometry_weight(prev))
break;
}
split = i;
if (split) {
/*
* Move the split one to the right, since the top element in the
* last-compared pair can't be in the progression. Only do this
* when we split in the middle of the array (otherwise if we got
* to the end, then the split is in the right place).
*/
split++;
}
/*
* Then, anything to the left of 'split' must be in a new pack. But,
* creating that new pack may cause packs in the heavy half to no longer
* form a geometric progression.
*
* Compute an expected size of the new pack, and then determine how many
* packs in the heavy half need to be joined into it (if any) to restore
* the geometric progression.
*/
for (i = 0; i < split; i++) {
struct packed_git *p = geometry->pack[i];
if (unsigned_add_overflows(total_size, pack_geometry_weight(p)))
die(_("pack %s too large to roll up"), p->pack_name);
total_size += pack_geometry_weight(p);
}
for (i = split; i < geometry->pack_nr; i++) {
struct packed_git *ours = geometry->pack[i];
if (unsigned_mult_overflows(geometry->split_factor,
total_size))
die(_("pack %s too large to roll up"), ours->pack_name);
if (pack_geometry_weight(ours) <
geometry->split_factor * total_size) {
if (unsigned_add_overflows(total_size,
pack_geometry_weight(ours)))
die(_("pack %s too large to roll up"),
ours->pack_name);
split++;
total_size += pack_geometry_weight(ours);
} else
break;
}
geometry->split = split;
}
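/*
 * Worked example (editorial note, not part of this file): with
 * split_factor = 2 and five packs of 1, 1, 2, 10 and 40 objects (sorted
 * ascending), the first loop stops at the 1/1 pair (1 < 2 * 1), leaving
 * split = 2 after the adjustment.  The roll-up loop then absorbs the
 * 2-object pack (2 < 2 * 2) but not the 10-object pack (10 >= 2 * 4), so
 * split ends up at 3: the three smallest packs are combined into one new
 * pack of roughly 4 objects, and 4, 10, 40 again form a factor-2
 * progression.
 */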
struct packed_git *pack_geometry_preferred_pack(struct pack_geometry *geometry)
{
uint32_t i;
if (!geometry) {
/*
* No geometry means either an all-into-one repack (in which
* case there is only one pack left and it is the largest) or an
* incremental one.
*
* If repacking incrementally, then we could check the size of
* all packs to determine which should be preferred, but leave
* this for later.
*/
return NULL;
}
if (geometry->split == geometry->pack_nr)
return NULL;
/*
* The preferred pack is the largest pack above the split line. In
* other words, it is the largest pack that does not get rolled up in
* the geometric repack.
*/
for (i = geometry->pack_nr; i > geometry->split; i--)
/*
* A pack that is not local would never be included in a
* multi-pack index. We thus skip over any non-local packs.
*/
if (geometry->pack[i - 1]->pack_local)
return geometry->pack[i - 1];
return NULL;
}
void pack_geometry_remove_redundant(struct pack_geometry *geometry,
struct string_list *names,
struct existing_packs *existing,
const char *packdir)
{
const struct git_hash_algo *algop = existing->repo->hash_algo;
struct strbuf buf = STRBUF_INIT;
uint32_t i;
for (i = 0; i < geometry->split; i++) {
struct packed_git *p = geometry->pack[i];
if (string_list_has_string(names, hash_to_hex_algop(p->hash,
algop)))
continue;
strbuf_reset(&buf);
strbuf_addstr(&buf, pack_basename(p));
strbuf_strip_suffix(&buf, ".pack");
if ((p->pack_keep) ||
(string_list_has_string(&existing->kept_packs, buf.buf)))
continue;
repack_remove_redundant_pack(existing->repo, packdir, buf.buf);
}
strbuf_release(&buf);
}
void pack_geometry_release(struct pack_geometry *geometry)
{
if (!geometry)
return;
free(geometry->pack);
}

repack-midx.c (new file)

@@ -0,0 +1,372 @@
#include "git-compat-util.h"
#include "repack.h"
#include "hash.h"
#include "hex.h"
#include "odb.h"
#include "oidset.h"
#include "pack-bitmap.h"
#include "refs.h"
#include "run-command.h"
#include "tempfile.h"
struct midx_snapshot_ref_data {
struct repository *repo;
struct tempfile *f;
struct oidset seen;
int preferred;
};
static int midx_snapshot_ref_one(const char *refname UNUSED,
const char *referent UNUSED,
const struct object_id *oid,
int flag UNUSED, void *_data)
{
struct midx_snapshot_ref_data *data = _data;
struct object_id peeled;
if (!peel_iterated_oid(data->repo, oid, &peeled))
oid = &peeled;
if (oidset_insert(&data->seen, oid))
return 0; /* already seen */
if (odb_read_object_info(data->repo->objects, oid, NULL) != OBJ_COMMIT)
return 0;
fprintf(data->f->fp, "%s%s\n", data->preferred ? "+" : "",
oid_to_hex(oid));
return 0;
}
void midx_snapshot_refs(struct repository *repo, struct tempfile *f)
{
struct midx_snapshot_ref_data data;
const struct string_list *preferred = bitmap_preferred_tips(repo);
data.repo = repo;
data.f = f;
data.preferred = 0;
oidset_init(&data.seen, 0);
if (!fdopen_tempfile(f, "w"))
die(_("could not open tempfile %s for writing"),
get_tempfile_path(f));
if (preferred) {
struct string_list_item *item;
data.preferred = 1;
for_each_string_list_item(item, preferred)
refs_for_each_ref_in(get_main_ref_store(repo),
item->string,
midx_snapshot_ref_one, &data);
data.preferred = 0;
}
refs_for_each_ref(get_main_ref_store(repo),
midx_snapshot_ref_one, &data);
if (close_tempfile_gently(f)) {
int save_errno = errno;
delete_tempfile(&f);
errno = save_errno;
die_errno(_("could not close refs snapshot tempfile"));
}
oidset_clear(&data.seen);
}
static int midx_has_unknown_packs(struct string_list *include,
struct pack_geometry *geometry,
struct existing_packs *existing)
{
struct string_list_item *item;
string_list_sort(include);
for_each_string_list_item(item, &existing->midx_packs) {
const char *pack_name = item->string;
/*
* Determine whether or not each MIDX'd pack from the existing
* MIDX (if any) is represented in the new MIDX. For each pack
* in the MIDX, it must either be:
*
* - In the "include" list of packs to be included in the new
* MIDX. Note this function is called before the include
* list is populated with any cruft pack(s).
*
* - Below the geometric split line (if using pack geometry),
* indicating that the pack won't be included in the new
* MIDX, but its contents were rolled up as part of the
* geometric repack.
*
* - In the existing non-kept packs list (if not using pack
* geometry), and marked as non-deleted.
*/
if (string_list_has_string(include, pack_name)) {
continue;
} else if (geometry) {
struct strbuf buf = STRBUF_INIT;
uint32_t j;
for (j = 0; j < geometry->split; j++) {
strbuf_reset(&buf);
strbuf_addstr(&buf, pack_basename(geometry->pack[j]));
strbuf_strip_suffix(&buf, ".pack");
strbuf_addstr(&buf, ".idx");
if (!strcmp(pack_name, buf.buf)) {
strbuf_release(&buf);
break;
}
}
strbuf_release(&buf);
if (j < geometry->split)
continue;
} else {
struct string_list_item *item;
item = string_list_lookup(&existing->non_kept_packs,
pack_name);
if (item && !existing_pack_is_marked_for_deletion(item))
continue;
}
/*
* If we got to this point, the MIDX includes some pack that we
* don't know about.
*/
return 1;
}
return 0;
}
static void midx_included_packs(struct string_list *include,
struct repack_write_midx_opts *opts)
{
struct existing_packs *existing = opts->existing;
struct pack_geometry *geometry = opts->geometry;
struct string_list *names = opts->names;
struct string_list_item *item;
struct strbuf buf = STRBUF_INIT;
for_each_string_list_item(item, &existing->kept_packs) {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s.idx", item->string);
string_list_insert(include, buf.buf);
}
for_each_string_list_item(item, names) {
strbuf_reset(&buf);
strbuf_addf(&buf, "pack-%s.idx", item->string);
string_list_insert(include, buf.buf);
}
if (geometry->split_factor) {
uint32_t i;
for (i = geometry->split; i < geometry->pack_nr; i++) {
struct packed_git *p = geometry->pack[i];
/*
* The multi-pack index never refers to packfiles part
* of an alternate object database, so we skip these.
* While git-multi-pack-index(1) would silently ignore
* them anyway, this allows us to skip executing the
* command completely when we have only non-local
* packfiles.
*/
if (!p->pack_local)
continue;
strbuf_reset(&buf);
strbuf_addstr(&buf, pack_basename(p));
strbuf_strip_suffix(&buf, ".pack");
strbuf_addstr(&buf, ".idx");
string_list_insert(include, buf.buf);
}
} else {
for_each_string_list_item(item, &existing->non_kept_packs) {
if (existing_pack_is_marked_for_deletion(item))
continue;
strbuf_reset(&buf);
strbuf_addf(&buf, "%s.idx", item->string);
string_list_insert(include, buf.buf);
}
}
if (opts->midx_must_contain_cruft ||
midx_has_unknown_packs(include, geometry, existing)) {
/*
* If there are one or more unknown pack(s) present (see
* midx_has_unknown_packs() for what makes a pack
* "unknown") in the MIDX before the repack, keep them
* as they may be required to form a reachability
* closure if the MIDX is bitmapped.
*
* For example, a cruft pack can be required to form a
* reachability closure if the MIDX is bitmapped and one
* or more of the bitmap's selected commits reaches a
* once-cruft object that was later made reachable.
*/
for_each_string_list_item(item, &existing->cruft_packs) {
/*
* When doing a --geometric repack, there is no
* need to check for deleted packs, since we're
* by definition not doing an ALL_INTO_ONE
* repack (hence no packs will be deleted).
* Otherwise we must check for and exclude any
* packs which are enqueued for deletion.
*
* So we could omit the conditional below in the
* --geometric case, but doing so is unnecessary
* since no packs are marked as pending
* deletion (since we only call
* `existing_packs_mark_for_deletion()` when
* doing an all-into-one repack).
*/
if (existing_pack_is_marked_for_deletion(item))
continue;
strbuf_reset(&buf);
strbuf_addf(&buf, "%s.idx", item->string);
string_list_insert(include, buf.buf);
}
} else {
/*
* Modern versions of Git (with the appropriate
* configuration setting) will write new copies of
* once-cruft objects when doing a --geometric repack.
*
* If the MIDX has no cruft pack, new packs written
* during a --geometric repack will not rely on the
* cruft pack to form a reachability closure, so we can
* avoid including them in the MIDX in that case.
*/
;
}
strbuf_release(&buf);
}
static void remove_redundant_bitmaps(struct string_list *include,
const char *packdir)
{
struct strbuf path = STRBUF_INIT;
struct string_list_item *item;
size_t packdir_len;
strbuf_addstr(&path, packdir);
strbuf_addch(&path, '/');
packdir_len = path.len;
/*
* Remove any pack bitmaps corresponding to packs which are now
* included in the MIDX.
*/
for_each_string_list_item(item, include) {
strbuf_addstr(&path, item->string);
strbuf_strip_suffix(&path, ".idx");
strbuf_addstr(&path, ".bitmap");
if (unlink(path.buf) && errno != ENOENT)
warning_errno(_("could not remove stale bitmap: %s"),
path.buf);
strbuf_setlen(&path, packdir_len);
}
strbuf_release(&path);
}
int write_midx_included_packs(struct repack_write_midx_opts *opts)
{
struct child_process cmd = CHILD_PROCESS_INIT;
struct string_list include = STRING_LIST_INIT_DUP;
struct string_list_item *item;
struct packed_git *preferred = pack_geometry_preferred_pack(opts->geometry);
FILE *in;
int ret = 0;
midx_included_packs(&include, opts);
if (!include.nr)
goto done;
cmd.in = -1;
cmd.git_cmd = 1;
strvec_push(&cmd.args, "multi-pack-index");
strvec_pushl(&cmd.args, "write", "--stdin-packs", NULL);
if (opts->show_progress)
strvec_push(&cmd.args, "--progress");
else
strvec_push(&cmd.args, "--no-progress");
if (opts->write_bitmaps)
strvec_push(&cmd.args, "--bitmap");
if (preferred)
strvec_pushf(&cmd.args, "--preferred-pack=%s",
pack_basename(preferred));
else if (opts->names->nr) {
/* The largest pack was repacked, meaning that either
* one or two packs exist depending on whether the
* repository has a cruft pack or not.
*
* Select the non-cruft one as preferred to encourage
* pack-reuse among packs containing reachable objects
* over unreachable ones.
*
* (Note we could write multiple packs here if
* `--max-pack-size` was given, but any one of them
* will suffice, so pick the first one.)
*/
for_each_string_list_item(item, opts->names) {
struct generated_pack *pack = item->util;
if (generated_pack_has_ext(pack, ".mtimes"))
continue;
strvec_pushf(&cmd.args, "--preferred-pack=pack-%s.pack",
item->string);
break;
}
} else {
/*
* No packs were kept, and no packs were written. The
* only thing remaining are .keep packs (unless
* --pack-kept-objects was given).
*
* Set the `--preferred-pack` arbitrarily here.
*/
;
}
if (opts->refs_snapshot)
strvec_pushf(&cmd.args, "--refs-snapshot=%s",
opts->refs_snapshot);
ret = start_command(&cmd);
if (ret)
goto done;
in = xfdopen(cmd.in, "w");
for_each_string_list_item(item, &include)
fprintf(in, "%s\n", item->string);
fclose(in);
ret = finish_command(&cmd);
done:
if (!ret && opts->write_bitmaps)
remove_redundant_bitmaps(&include, opts->packdir);
string_list_clear(&include, 0);
return ret;
}

repack-promisor.c (new file)

@@ -0,0 +1,102 @@
#include "git-compat-util.h"
#include "repack.h"
#include "hex.h"
#include "pack.h"
#include "packfile.h"
#include "path.h"
#include "repository.h"
#include "run-command.h"
struct write_oid_context {
struct child_process *cmd;
const struct git_hash_algo *algop;
};
/*
* Write oid to the given struct child_process's stdin, starting it first if
* necessary.
*/
static int write_oid(const struct object_id *oid,
struct packed_git *pack UNUSED,
uint32_t pos UNUSED, void *data)
{
struct write_oid_context *ctx = data;
struct child_process *cmd = ctx->cmd;
if (cmd->in == -1) {
if (start_command(cmd))
die(_("could not start pack-objects to repack promisor objects"));
}
if (write_in_full(cmd->in, oid_to_hex(oid), ctx->algop->hexsz) < 0 ||
write_in_full(cmd->in, "\n", 1) < 0)
die(_("failed to feed promisor objects to pack-objects"));
return 0;
}
void repack_promisor_objects(struct repository *repo,
const struct pack_objects_args *args,
struct string_list *names, const char *packtmp)
{
struct write_oid_context ctx;
struct child_process cmd = CHILD_PROCESS_INIT;
FILE *out;
struct strbuf line = STRBUF_INIT;
prepare_pack_objects(&cmd, args, packtmp);
cmd.in = -1;
/*
* NEEDSWORK: Giving pack-objects only the OIDs without any ordering
* hints may result in suboptimal deltas in the resulting pack. See if
* the OIDs can be sent with fake paths such that pack-objects can use a
* {type -> existing pack order} ordering when computing deltas instead
* of a {type -> size} ordering, which may produce better deltas.
*/
ctx.cmd = &cmd;
ctx.algop = repo->hash_algo;
for_each_packed_object(repo, write_oid, &ctx,
FOR_EACH_OBJECT_PROMISOR_ONLY);
if (cmd.in == -1) {
/* No packed objects; cmd was never started */
child_process_clear(&cmd);
return;
}
close(cmd.in);
out = xfdopen(cmd.out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
struct string_list_item *item;
char *promisor_name;
if (line.len != repo->hash_algo->hexsz)
die(_("repack: Expecting full hex object ID lines only from pack-objects."));
item = string_list_append(names, line.buf);
/*
* pack-objects creates the .pack and .idx files, but not the
* .promisor file. Create the .promisor file, which is empty.
*
* NEEDSWORK: fetch-pack sometimes generates non-empty
* .promisor files containing the ref names and associated
* hashes at the point of generation of the corresponding
* packfile, but this would not preserve their contents. Maybe
* concatenate the contents of all .promisor files instead of
* just creating a new empty file.
*/
promisor_name = mkpathdup("%s-%s.promisor", packtmp,
line.buf);
write_promisor_file(promisor_name, NULL, 0);
item->util = generated_pack_populate(item->string, packtmp);
free(promisor_name);
}
fclose(out);
if (finish_command(&cmd))
die(_("could not finish pack-objects to repack promisor objects"));
strbuf_release(&line);
}

repack.c (new file)

@@ -0,0 +1,359 @@
#include "git-compat-util.h"
#include "dir.h"
#include "midx.h"
#include "odb.h"
#include "packfile.h"
#include "path.h"
#include "repack.h"
#include "repository.h"
#include "run-command.h"
#include "tempfile.h"
void prepare_pack_objects(struct child_process *cmd,
const struct pack_objects_args *args,
const char *out)
{
strvec_push(&cmd->args, "pack-objects");
if (args->window)
strvec_pushf(&cmd->args, "--window=%s", args->window);
if (args->window_memory)
strvec_pushf(&cmd->args, "--window-memory=%s", args->window_memory);
if (args->depth)
strvec_pushf(&cmd->args, "--depth=%s", args->depth);
if (args->threads)
strvec_pushf(&cmd->args, "--threads=%s", args->threads);
if (args->max_pack_size)
strvec_pushf(&cmd->args, "--max-pack-size=%lu", args->max_pack_size);
if (args->no_reuse_delta)
strvec_pushf(&cmd->args, "--no-reuse-delta");
if (args->no_reuse_object)
strvec_pushf(&cmd->args, "--no-reuse-object");
if (args->name_hash_version)
strvec_pushf(&cmd->args, "--name-hash-version=%d", args->name_hash_version);
if (args->path_walk)
strvec_pushf(&cmd->args, "--path-walk");
if (args->local)
strvec_push(&cmd->args, "--local");
if (args->quiet)
strvec_push(&cmd->args, "--quiet");
if (args->delta_base_offset)
strvec_push(&cmd->args, "--delta-base-offset");
if (!args->pack_kept_objects)
strvec_push(&cmd->args, "--honor-pack-keep");
strvec_push(&cmd->args, out);
cmd->git_cmd = 1;
cmd->out = -1;
}
void pack_objects_args_release(struct pack_objects_args *args)
{
free(args->window);
free(args->window_memory);
free(args->depth);
free(args->threads);
list_objects_filter_release(&args->filter_options);
}
void repack_remove_redundant_pack(struct repository *repo, const char *dir_name,
const char *base_name)
{
struct strbuf buf = STRBUF_INIT;
struct odb_source *source = repo->objects->sources;
struct multi_pack_index *m = get_multi_pack_index(source);
strbuf_addf(&buf, "%s.pack", base_name);
if (m && source->local && midx_contains_pack(m, buf.buf))
clear_midx_file(repo);
strbuf_insertf(&buf, 0, "%s/", dir_name);
unlink_pack_path(buf.buf, 1);
strbuf_release(&buf);
}
const char *write_pack_opts_pack_prefix(const struct write_pack_opts *opts)
{
const char *pack_prefix;
if (!skip_prefix(opts->packtmp, opts->packdir, &pack_prefix))
die(_("pack prefix %s does not begin with objdir %s"),
opts->packtmp, opts->packdir);
if (*pack_prefix == '/')
pack_prefix++;
return pack_prefix;
}
bool write_pack_opts_is_local(const struct write_pack_opts *opts)
{
return starts_with(opts->destination, opts->packdir);
}
int finish_pack_objects_cmd(const struct git_hash_algo *algop,
const struct write_pack_opts *opts,
struct child_process *cmd,
struct string_list *names)
{
FILE *out;
bool local = write_pack_opts_is_local(opts);
struct strbuf line = STRBUF_INIT;
out = xfdopen(cmd->out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
struct string_list_item *item;
if (line.len != algop->hexsz)
die(_("repack: Expecting full hex object ID lines only "
"from pack-objects."));
/*
* Avoid putting packs written outside of the repository in the
* list of names.
*/
if (local) {
item = string_list_append(names, line.buf);
item->util = generated_pack_populate(line.buf,
opts->packtmp);
}
}
fclose(out);
strbuf_release(&line);
return finish_command(cmd);
}
#define DELETE_PACK 1
#define RETAIN_PACK 2
void existing_packs_collect(struct existing_packs *existing,
const struct string_list *extra_keep)
{
struct packed_git *p;
struct strbuf buf = STRBUF_INIT;
repo_for_each_pack(existing->repo, p) {
size_t i;
const char *base;
if (p->multi_pack_index)
string_list_append(&existing->midx_packs,
pack_basename(p));
if (!p->pack_local)
continue;
base = pack_basename(p);
for (i = 0; i < extra_keep->nr; i++)
if (!fspathcmp(base, extra_keep->items[i].string))
break;
strbuf_reset(&buf);
strbuf_addstr(&buf, base);
strbuf_strip_suffix(&buf, ".pack");
if ((extra_keep->nr > 0 && i < extra_keep->nr) || p->pack_keep)
string_list_append(&existing->kept_packs, buf.buf);
else if (p->is_cruft)
string_list_append(&existing->cruft_packs, buf.buf);
else
string_list_append(&existing->non_kept_packs, buf.buf);
}
string_list_sort(&existing->kept_packs);
string_list_sort(&existing->non_kept_packs);
string_list_sort(&existing->cruft_packs);
string_list_sort(&existing->midx_packs);
strbuf_release(&buf);
}
int existing_packs_has_non_kept(const struct existing_packs *existing)
{
return existing->non_kept_packs.nr || existing->cruft_packs.nr;
}
static void existing_pack_mark_for_deletion(struct string_list_item *item)
{
item->util = (void*)((uintptr_t)item->util | DELETE_PACK);
}
static void existing_pack_unmark_for_deletion(struct string_list_item *item)
{
item->util = (void*)((uintptr_t)item->util & ~DELETE_PACK);
}
int existing_pack_is_marked_for_deletion(struct string_list_item *item)
{
return (uintptr_t)item->util & DELETE_PACK;
}
static void existing_packs_mark_retained(struct string_list_item *item)
{
item->util = (void*)((uintptr_t)item->util | RETAIN_PACK);
}
static int existing_pack_is_retained(struct string_list_item *item)
{
return (uintptr_t)item->util & RETAIN_PACK;
}
static void existing_packs_mark_for_deletion_1(const struct git_hash_algo *algop,
struct string_list *names,
struct string_list *list)
{
struct string_list_item *item;
const size_t hexsz = algop->hexsz;
for_each_string_list_item(item, list) {
char *sha1;
size_t len = strlen(item->string);
if (len < hexsz)
continue;
sha1 = item->string + len - hexsz;
if (existing_pack_is_retained(item)) {
existing_pack_unmark_for_deletion(item);
} else if (!string_list_has_string(names, sha1)) {
/*
* Mark this pack for deletion, which ensures
* that this pack won't be included in a MIDX
* (if `--write-midx` was given) and that we
* will actually delete this pack (if `-d` was
* given).
*/
existing_pack_mark_for_deletion(item);
}
}
}
void existing_packs_retain_cruft(struct existing_packs *existing,
struct packed_git *cruft)
{
struct strbuf buf = STRBUF_INIT;
struct string_list_item *item;
strbuf_addstr(&buf, pack_basename(cruft));
strbuf_strip_suffix(&buf, ".pack");
item = string_list_lookup(&existing->cruft_packs, buf.buf);
if (!item)
BUG("could not find cruft pack '%s'", pack_basename(cruft));
existing_packs_mark_retained(item);
strbuf_release(&buf);
}
void existing_packs_mark_for_deletion(struct existing_packs *existing,
struct string_list *names)
{
const struct git_hash_algo *algop = existing->repo->hash_algo;
existing_packs_mark_for_deletion_1(algop, names,
&existing->non_kept_packs);
existing_packs_mark_for_deletion_1(algop, names,
&existing->cruft_packs);
}
static void remove_redundant_packs_1(struct repository *repo,
struct string_list *packs,
const char *packdir)
{
struct string_list_item *item;
for_each_string_list_item(item, packs) {
if (!existing_pack_is_marked_for_deletion(item))
continue;
repack_remove_redundant_pack(repo, packdir, item->string);
}
}
void existing_packs_remove_redundant(struct existing_packs *existing,
const char *packdir)
{
remove_redundant_packs_1(existing->repo, &existing->non_kept_packs,
packdir);
remove_redundant_packs_1(existing->repo, &existing->cruft_packs,
packdir);
}
void existing_packs_release(struct existing_packs *existing)
{
string_list_clear(&existing->kept_packs, 0);
string_list_clear(&existing->non_kept_packs, 0);
string_list_clear(&existing->cruft_packs, 0);
string_list_clear(&existing->midx_packs, 0);
}
static struct {
const char *name;
unsigned optional:1;
} exts[] = {
{".pack"},
{".rev", 1},
{".mtimes", 1},
{".bitmap", 1},
{".promisor", 1},
{".idx"},
};
struct generated_pack {
struct tempfile *tempfiles[ARRAY_SIZE(exts)];
};
struct generated_pack *generated_pack_populate(const char *name,
const char *packtmp)
{
struct stat statbuf;
struct strbuf path = STRBUF_INIT;
struct generated_pack *pack = xcalloc(1, sizeof(*pack));
size_t i;
for (i = 0; i < ARRAY_SIZE(exts); i++) {
strbuf_reset(&path);
strbuf_addf(&path, "%s-%s%s", packtmp, name, exts[i].name);
if (stat(path.buf, &statbuf))
continue;
pack->tempfiles[i] = register_tempfile(path.buf);
}
strbuf_release(&path);
return pack;
}
int generated_pack_has_ext(const struct generated_pack *pack, const char *ext)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(exts); i++) {
if (strcmp(exts[i].name, ext))
continue;
return !!pack->tempfiles[i];
}
BUG("unknown pack extension: '%s'", ext);
}
void generated_pack_install(struct generated_pack *pack, const char *name,
const char *packdir, const char *packtmp)
{
size_t ext;
for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
char *fname;
fname = mkpathdup("%s/pack-%s%s", packdir, name,
exts[ext].name);
if (pack->tempfiles[ext]) {
const char *fname_old = get_tempfile_path(pack->tempfiles[ext]);
struct stat statbuffer;
if (!stat(fname_old, &statbuffer)) {
statbuffer.st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
chmod(fname_old, statbuffer.st_mode);
}
if (rename_tempfile(&pack->tempfiles[ext], fname))
die_errno(_("renaming pack to '%s' failed"),
fname);
} else if (!exts[ext].optional)
die(_("pack-objects did not write a '%s' file for pack %s-%s"),
exts[ext].name, packtmp, name);
else if (unlink(fname) < 0 && errno != ENOENT)
die_errno(_("could not unlink: %s"), fname);
free(fname);
}
}
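
Editorial aside (not part of this commit): the helpers above are lifted out of builtin/repack.c so that other callers can reuse them. The following is a minimal sketch of how a caller might chain them together; the function name `repack_everything`, the empty `extra_keep` list, and the error message are made up for illustration, the revision arguments fed to pack-objects and most cleanup are elided, and the header set is assumed from the current Git tree.

/* Editorial sketch only; not part of this commit. */
#include "git-compat-util.h"
#include "gettext.h"
#include "repository.h"
#include "run-command.h"
#include "string-list.h"
#include "repack.h"

static int repack_everything(struct repository *repo,
			     const char *packdir, const char *packtmp)
{
	struct pack_objects_args po_args = PACK_OBJECTS_ARGS_INIT;
	struct write_pack_opts opts = {
		.po_args = &po_args,
		.destination = packtmp,
		.packdir = packdir,
		.packtmp = packtmp,
	};
	struct existing_packs existing = EXISTING_PACKS_INIT;
	struct string_list extra_keep = STRING_LIST_INIT_DUP; /* hypothetical: no extra .keep packs */
	struct string_list names = STRING_LIST_INIT_DUP;
	struct string_list_item *item;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	existing.repo = repo;
	existing_packs_collect(&existing, &extra_keep);

	prepare_pack_objects(&cmd, &po_args, opts.destination);
	/* ... push revision arguments ("--all", etc.) onto cmd.args here ... */
	if (start_command(&cmd))
		die(_("could not start pack-objects"));
	ret = finish_pack_objects_cmd(repo->hash_algo, &opts, &cmd, &names);
	if (ret)
		goto out;

	/* move the new packs into place, then drop packs that became redundant */
	for_each_string_list_item(item, &names)
		generated_pack_install(item->util, item->string,
				       packdir, packtmp);
	existing_packs_mark_for_deletion(&existing, &names);
	existing_packs_remove_redundant(&existing, packdir);

out:
	existing_packs_release(&existing);
	string_list_clear(&extra_keep, 0);
	string_list_clear(&names, 0);
	pack_objects_args_release(&po_args);
	return ret;
}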

repack.h (new file, 146 lines added)
View File

@@ -0,0 +1,146 @@
#ifndef REPACK_H
#define REPACK_H
#include "list-objects-filter-options.h"
#include "string-list.h"
struct pack_objects_args {
char *window;
char *window_memory;
char *depth;
char *threads;
unsigned long max_pack_size;
int no_reuse_delta;
int no_reuse_object;
int quiet;
int local;
int name_hash_version;
int path_walk;
int delta_base_offset;
int pack_kept_objects;
struct list_objects_filter_options filter_options;
};
#define PACK_OBJECTS_ARGS_INIT { \
.delta_base_offset = 1, \
.pack_kept_objects = -1, \
}
struct child_process;
void prepare_pack_objects(struct child_process *cmd,
const struct pack_objects_args *args,
const char *out);
void pack_objects_args_release(struct pack_objects_args *args);
void repack_remove_redundant_pack(struct repository *repo, const char *dir_name,
const char *base_name);
struct write_pack_opts {
struct pack_objects_args *po_args;
const char *destination;
const char *packdir;
const char *packtmp;
};
const char *write_pack_opts_pack_prefix(const struct write_pack_opts *opts);
bool write_pack_opts_is_local(const struct write_pack_opts *opts);
int finish_pack_objects_cmd(const struct git_hash_algo *algop,
const struct write_pack_opts *opts,
struct child_process *cmd,
struct string_list *names);
struct repository;
struct packed_git;
struct existing_packs {
struct repository *repo;
struct string_list kept_packs;
struct string_list non_kept_packs;
struct string_list cruft_packs;
struct string_list midx_packs;
};
#define EXISTING_PACKS_INIT { \
.kept_packs = STRING_LIST_INIT_DUP, \
.non_kept_packs = STRING_LIST_INIT_DUP, \
.cruft_packs = STRING_LIST_INIT_DUP, \
}
/*
 * Collects the names (pack-$HASH) of all local packs into the lists of
 * `existing`: packs with a corresponding .keep file (or listed in
 * `extra_keep`) go into kept_packs, cruft packs into cruft_packs, and
 * all others into non_kept_packs.  Packs contained in the
 * multi-pack-index are additionally recorded in midx_packs.  Packs
 * outside of kept_packs are not to be kept if we are going to pack
 * everything into one file.
 */
void existing_packs_collect(struct existing_packs *existing,
const struct string_list *extra_keep);
int existing_packs_has_non_kept(const struct existing_packs *existing);
int existing_pack_is_marked_for_deletion(struct string_list_item *item);
void existing_packs_retain_cruft(struct existing_packs *existing,
struct packed_git *cruft);
void existing_packs_mark_for_deletion(struct existing_packs *existing,
struct string_list *names);
void existing_packs_remove_redundant(struct existing_packs *existing,
const char *packdir);
void existing_packs_release(struct existing_packs *existing);
struct generated_pack;
struct generated_pack *generated_pack_populate(const char *name,
const char *packtmp);
int generated_pack_has_ext(const struct generated_pack *pack, const char *ext);
void generated_pack_install(struct generated_pack *pack, const char *name,
const char *packdir, const char *packtmp);
void repack_promisor_objects(struct repository *repo,
const struct pack_objects_args *args,
struct string_list *names, const char *packtmp);
struct pack_geometry {
struct packed_git **pack;
uint32_t pack_nr, pack_alloc;
uint32_t split;
int split_factor;
};
void pack_geometry_init(struct pack_geometry *geometry,
struct existing_packs *existing,
const struct pack_objects_args *args);
void pack_geometry_split(struct pack_geometry *geometry);
struct packed_git *pack_geometry_preferred_pack(struct pack_geometry *geometry);
void pack_geometry_remove_redundant(struct pack_geometry *geometry,
struct string_list *names,
struct existing_packs *existing,
const char *packdir);
void pack_geometry_release(struct pack_geometry *geometry);
struct tempfile;
struct repack_write_midx_opts {
struct existing_packs *existing;
struct pack_geometry *geometry;
struct string_list *names;
const char *refs_snapshot;
const char *packdir;
int show_progress;
int write_bitmaps;
int midx_must_contain_cruft;
};
void midx_snapshot_refs(struct repository *repo, struct tempfile *f);
int write_midx_included_packs(struct repack_write_midx_opts *opts);
int write_filtered_pack(const struct write_pack_opts *opts,
struct existing_packs *existing,
struct string_list *names);
int write_cruft_pack(const struct write_pack_opts *opts,
const char *cruft_expiration,
unsigned long combine_cruft_below_size,
struct string_list *names,
struct existing_packs *existing);
#endif /* REPACK_H */
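
The remaining hunks convert open-coded walks over `packfile_store_get_all_packs()` to the `repo_for_each_pack()` iterator introduced earlier in this series. The macro's definition is not shown in this excerpt; judging from the converted call sites it behaves like a for-loop over every pack known to the repository's object store, roughly as in the illustrative snippet below. The function name `print_local_pack_names` is made up, and the exact headers are assumed from the current Git tree.

/* Illustrative only; print_local_pack_names() is not part of this commit. */
#include "git-compat-util.h"
#include "repository.h"
#include "packfile.h"

static void print_local_pack_names(struct repository *repo)
{
	struct packed_git *p;

	repo_for_each_pack(repo, p) {
		if (!p->pack_local)
			continue;	/* skip packs from alternate object sources */
		printf("%s\n", pack_basename(p));
	}
}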

View File

@@ -287,13 +287,12 @@ static int compare_info(const void *a_, const void *b_)
static void init_pack_info(struct repository *r, const char *infofile, int force)
{
struct packfile_store *packs = r->objects->packfiles;
struct packed_git *p;
int stale;
int i;
size_t alloc = 0;
for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
repo_for_each_pack(r, p) {
/* we ignore things on alternate path since they are
* not available to the pullers in general.
*/

View File

@@ -39,11 +39,12 @@ int cmd__find_pack(int argc, const char **argv)
if (repo_get_oid(the_repository, argv[0], &oid))
die("cannot parse %s as an object name", argv[0]);
for (p = packfile_store_get_all_packs(the_repository->objects->packfiles); p; p = p->next)
repo_for_each_pack(the_repository, p) {
if (find_pack_entry_one(&oid, p)) {
printf("%s\n", p->pack_name);
actual_count++;
}
}
if (count > -1 && count != actual_count)
die("bad packfile count %d instead of %d", actual_count, count);

View File

@@ -37,7 +37,7 @@ int cmd__pack_mtimes(int argc, const char **argv)
if (argc != 2)
usage(pack_mtimes_usage);
for (p = packfile_store_get_all_packs(the_repository->objects->packfiles); p; p = p->next) {
repo_for_each_pack(the_repository, p) {
strbuf_addstr(&buf, basename(p->pack_name));
strbuf_strip_suffix(&buf, ".pack");
strbuf_addstr(&buf, ".mtimes");