revert(3p/git): Revert merge of git upstream at v2.26.2

This causes cgit to serve error pages, which is undesirable.

This reverts commit 5229c9b232, reversing changes made to f2b211131f.
parent 6f8fbf4aa4
commit 93ba78d6f4

1006 changed files with 60537 additions and 148724 deletions
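A revert message of this shape ("This reverts commit X, reversing changes made to Y") is the template git itself emits when a merge is reverted against its first parent, i.e. something like `git revert -m 1 5229c9b232`, where 5229c9b232 is the upstream merge commit and f2b211131f its first parent. The exact invocation used to create this commit is an assumption; it is not recorded on this page.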
third_party/git/builtin/fetch.c (vendored): 344 lines changed
@@ -7,7 +7,6 @@
 #include "refs.h"
 #include "refspec.h"
 #include "object-store.h"
-#include "oidset.h"
 #include "commit.h"
 #include "builtin.h"
 #include "string-list.h"
@@ -24,9 +23,6 @@
 #include "packfile.h"
 #include "list-objects-filter-options.h"
 #include "commit-reach.h"
-#include "branch.h"
-#include "promisor-remote.h"
-#include "commit-graph.h"
 
 #define FORCED_UPDATES_DELAY_WARNING_IN_MS (10 * 1000)
 
@@ -54,13 +50,11 @@ static int fetch_prune_tags_config = -1; /* unspecified */
 static int prune_tags = -1; /* unspecified */
 #define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
 
-static int all, append, dry_run, force, keep, multiple, update_head_ok;
-static int verbosity, deepen_relative, set_upstream;
+static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
 static int progress = -1;
 static int enable_auto_gc = 1;
 static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
-static int max_jobs = -1, submodule_fetch_jobs_config = -1;
-static int fetch_parallel_config = 1;
+static int max_children = 1;
 static enum transport_family family;
 static const char *depth;
 static const char *deepen_since;
@@ -77,7 +71,6 @@ static struct refspec refmap = REFSPEC_INIT_FETCH;
 static struct list_objects_filter_options filter_options;
 static struct string_list server_options = STRING_LIST_INIT_DUP;
 static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
-static int fetch_write_commit_graph = -1;
 
 static int git_fetch_config(const char *k, const char *v, void *cb)
 {
@@ -103,20 +96,13 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
 	}
 
 	if (!strcmp(k, "submodule.fetchjobs")) {
-		submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
+		max_children = parse_submodule_fetchjobs(k, v);
 		return 0;
 	} else if (!strcmp(k, "fetch.recursesubmodules")) {
 		recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
 		return 0;
 	}
 
-	if (!strcmp(k, "fetch.parallel")) {
-		fetch_parallel_config = git_config_int(k, v);
-		if (fetch_parallel_config < 0)
-			die(_("fetch.parallel cannot be negative"));
-		return 0;
-	}
-
 	return git_default_config(k, v, cb);
 }
 
@@ -137,8 +123,6 @@ static struct option builtin_fetch_options[] = {
 	OPT__VERBOSITY(&verbosity),
 	OPT_BOOL(0, "all", &all,
 		 N_("fetch from all remotes")),
-	OPT_BOOL(0, "set-upstream", &set_upstream,
-		 N_("set upstream for git pull/fetch")),
 	OPT_BOOL('a', "append", &append,
 		 N_("append to .git/FETCH_HEAD instead of overwriting")),
 	OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
@@ -150,7 +134,7 @@ static struct option builtin_fetch_options[] = {
 		    N_("fetch all tags and associated objects"), TAGS_SET),
 	OPT_SET_INT('n', NULL, &tags,
 		    N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
-	OPT_INTEGER('j', "jobs", &max_jobs,
+	OPT_INTEGER('j', "jobs", &max_children,
 		    N_("number of submodules fetched in parallel")),
 	OPT_BOOL('p', "prune", &prune,
 		 N_("prune remote-tracking branches no longer on remote")),
@@ -199,8 +183,6 @@ static struct option builtin_fetch_options[] = {
 		 N_("run 'gc --auto' after fetching")),
 	OPT_BOOL(0, "show-forced-updates", &fetch_show_forced_updates,
 		 N_("check for forced-updates on all updated branches")),
-	OPT_BOOL(0, "write-commit-graph", &fetch_write_commit_graph,
-		 N_("write the commit-graph after fetching")),
 	OPT_END()
 };
 
@@ -257,31 +239,32 @@ static void add_merge_config(struct ref **head,
 	}
 }
 
-static void create_fetch_oidset(struct ref **head, struct oidset *out)
+static int will_fetch(struct ref **head, const unsigned char *sha1)
 {
 	struct ref *rm = *head;
 	while (rm) {
-		oidset_insert(out, &rm->old_oid);
+		if (hasheq(rm->old_oid.hash, sha1))
+			return 1;
 		rm = rm->next;
 	}
+	return 0;
 }
 
 struct refname_hash_entry {
-	struct hashmap_entry ent;
+	struct hashmap_entry ent; /* must be the first member */
 	struct object_id oid;
 	int ignore;
 	char refname[FLEX_ARRAY];
 };
 
 static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
-				  const struct hashmap_entry *eptr,
-				  const struct hashmap_entry *entry_or_key,
+				  const void *e1_,
+				  const void *e2_,
 				  const void *keydata)
 {
-	const struct refname_hash_entry *e1, *e2;
+	const struct refname_hash_entry *e1 = e1_;
+	const struct refname_hash_entry *e2 = e2_;
 
-	e1 = container_of(eptr, const struct refname_hash_entry, ent);
-	e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
 	return strcmp(e1->refname, keydata ? keydata : e2->refname);
 }
 
@@ -293,9 +276,9 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
 	size_t len = strlen(refname);
 
 	FLEX_ALLOC_MEM(ent, refname, refname, len);
-	hashmap_entry_init(&ent->ent, strhash(refname));
+	hashmap_entry_init(ent, strhash(refname));
 	oidcpy(&ent->oid, oid);
-	hashmap_add(map, &ent->ent);
+	hashmap_add(map, ent);
 	return ent;
 }
 
@@ -330,16 +313,13 @@ static void find_non_local_tags(const struct ref *refs,
 {
 	struct hashmap existing_refs;
 	struct hashmap remote_refs;
-	struct oidset fetch_oids = OIDSET_INIT;
 	struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
 	struct string_list_item *remote_ref_item;
 	const struct ref *ref;
 	struct refname_hash_entry *item = NULL;
-	const int quick_flags = OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT;
 
 	refname_hash_init(&existing_refs);
 	refname_hash_init(&remote_refs);
-	create_fetch_oidset(head, &fetch_oids);
 
 	for_each_ref(add_one_refname, &existing_refs);
 	for (ref = refs; ref; ref = ref->next) {
@@ -354,10 +334,11 @@ static void find_non_local_tags(const struct ref *refs,
 		 */
 		if (ends_with(ref->name, "^{}")) {
 			if (item &&
-			    !has_object_file_with_flags(&ref->old_oid, quick_flags) &&
-			    !oidset_contains(&fetch_oids, &ref->old_oid) &&
-			    !has_object_file_with_flags(&item->oid, quick_flags) &&
-			    !oidset_contains(&fetch_oids, &item->oid))
+			    !has_object_file_with_flags(&ref->old_oid,
+							OBJECT_INFO_QUICK) &&
+			    !will_fetch(head, ref->old_oid.hash) &&
+			    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
+			    !will_fetch(head, item->oid.hash))
 				clear_item(item);
 			item = NULL;
 			continue;
@@ -370,8 +351,8 @@ static void find_non_local_tags(const struct ref *refs,
 		 * fetch.
 		 */
 		if (item &&
-		    !has_object_file_with_flags(&item->oid, quick_flags) &&
-		    !oidset_contains(&fetch_oids, &item->oid))
+		    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
+		    !will_fetch(head, item->oid.hash))
 			clear_item(item);
 
 		item = NULL;
@@ -384,15 +365,15 @@ static void find_non_local_tags(const struct ref *refs,
 		item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
 		string_list_insert(&remote_refs_list, ref->name);
 	}
-	hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
+	hashmap_free(&existing_refs, 1);
 
 	/*
 	 * We may have a final lightweight tag that needs to be
 	 * checked to see if it needs fetching.
 	 */
 	if (item &&
-	    !has_object_file_with_flags(&item->oid, quick_flags) &&
-	    !oidset_contains(&fetch_oids, &item->oid))
+	    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
+	    !will_fetch(head, item->oid.hash))
 		clear_item(item);
 
 	/*
@@ -402,10 +383,8 @@ static void find_non_local_tags(const struct ref *refs,
 	for_each_string_list_item(remote_ref_item, &remote_refs_list) {
 		const char *refname = remote_ref_item->string;
 		struct ref *rm;
-		unsigned int hash = strhash(refname);
 
-		item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
-					struct refname_hash_entry, ent);
+		item = hashmap_get_from_hash(&remote_refs, strhash(refname), refname);
 		if (!item)
 			BUG("unseen remote ref?");
 
@@ -419,9 +398,8 @@ static void find_non_local_tags(const struct ref *refs,
 		**tail = rm;
 		*tail = &rm->next;
 	}
-	hashmap_free_entries(&remote_refs, struct refname_hash_entry, ent);
+	hashmap_free(&remote_refs, 1);
 	string_list_clear(&remote_refs_list, 0);
-	oidset_clear(&fetch_oids);
 }
 
 static struct ref *get_ref_map(struct remote *remote,
@@ -538,18 +516,17 @@ static struct ref *get_ref_map(struct remote *remote,
 		if (rm->peer_ref) {
 			const char *refname = rm->peer_ref->name;
 			struct refname_hash_entry *peer_item;
-			unsigned int hash = strhash(refname);
 
-			peer_item = hashmap_get_entry_from_hash(&existing_refs,
-						hash, refname,
-						struct refname_hash_entry, ent);
+			peer_item = hashmap_get_from_hash(&existing_refs,
+							  strhash(refname),
+							  refname);
 			if (peer_item) {
 				struct object_id *old_oid = &peer_item->oid;
 				oidcpy(&rm->peer_ref->old_oid, old_oid);
 			}
 		}
 	}
-	hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
+	hashmap_free(&existing_refs, 1);
 
 	return ref_map;
 }
@@ -906,17 +883,8 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
 		url = xstrdup("foreign");
 
 	if (!connectivity_checked) {
-		struct check_connected_options opt = CHECK_CONNECTED_INIT;
-
-		if (filter_options.choice)
-			/*
-			 * Since a filter is specified, objects indirectly
-			 * referenced by refs are allowed to be absent.
-			 */
-			opt.check_refs_are_promisor_objects_only = 1;
-
 		rm = ref_map;
-		if (check_connected(iterate_ref_map, &rm, &opt)) {
+		if (check_connected(iterate_ref_map, &rm, NULL)) {
 			rc = error(_("%s did not send all necessary objects\n"), url);
 			goto abort;
 		}
@@ -966,12 +934,18 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
 			kind = "";
 			what = "";
 		}
-		else if (skip_prefix(rm->name, "refs/heads/", &what))
+		else if (starts_with(rm->name, "refs/heads/")) {
 			kind = "branch";
-		else if (skip_prefix(rm->name, "refs/tags/", &what))
+			what = rm->name + 11;
+		}
+		else if (starts_with(rm->name, "refs/tags/")) {
 			kind = "tag";
-		else if (skip_prefix(rm->name, "refs/remotes/", &what))
+			what = rm->name + 10;
+		}
+		else if (starts_with(rm->name, "refs/remotes/")) {
 			kind = "remote-tracking branch";
+			what = rm->name + 13;
+		}
 		else {
 			kind = "";
 			what = rm->name;
@@ -1080,8 +1054,7 @@ static int check_exist_and_connected(struct ref *ref_map)
 	 * we need all direct targets to exist.
 	 */
 	for (r = rm; r; r = r->next) {
-		if (!has_object_file_with_flags(&r->old_oid,
-						OBJECT_INFO_SKIP_FETCH_OBJECT))
+		if (!has_object_file(&r->old_oid))
 			return -1;
 	}
 
@@ -1092,11 +1065,8 @@ static int check_exist_and_connected(struct ref *ref_map)
 static int fetch_refs(struct transport *transport, struct ref *ref_map)
 {
 	int ret = check_exist_and_connected(ref_map);
-	if (ret) {
-		trace2_region_enter("fetch", "fetch_refs", the_repository);
+	if (ret)
 		ret = transport_fetch_refs(transport, ref_map);
-		trace2_region_leave("fetch", "fetch_refs", the_repository);
-	}
 	if (!ret)
 		/*
 		 * Keep the new pack's ".keep" file around to allow the caller
@@ -1112,14 +1082,11 @@ static int consume_refs(struct transport *transport, struct ref *ref_map)
 {
 	int connectivity_checked = transport->smart_options
 		? transport->smart_options->connectivity_checked : 0;
-	int ret;
-	trace2_region_enter("fetch", "consume_refs", the_repository);
-	ret = store_updated_refs(transport->url,
-				 transport->remote->name,
-				 connectivity_checked,
-				 ref_map);
+	int ret = store_updated_refs(transport->url,
+				     transport->remote->name,
+				     connectivity_checked,
+				     ref_map);
 	transport_unlock_pack(transport);
-	trace2_region_leave("fetch", "consume_refs", the_repository);
 	return ret;
 }
 
@@ -1271,10 +1238,13 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
 	if (update_shallow)
 		set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
 	if (filter_options.choice) {
-		const char *spec =
-			expand_list_objects_filter_spec(&filter_options);
-		set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, spec);
+		struct strbuf expanded_filter_spec = STRBUF_INIT;
+		expand_list_objects_filter_spec(&filter_options,
+						&expanded_filter_spec);
+		set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+			   expanded_filter_spec.buf);
 		set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+		strbuf_release(&expanded_filter_spec);
 	}
 	if (negotiation_tip.nr) {
 		if (transport->smart_options)
@@ -1364,11 +1334,9 @@ static int do_fetch(struct transport *transport,
 		argv_array_push(&ref_prefixes, "refs/tags/");
 	}
 
-	if (must_list_refs) {
-		trace2_region_enter("fetch", "remote_refs", the_repository);
+	if (must_list_refs)
 		remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
-		trace2_region_leave("fetch", "remote_refs", the_repository);
-	} else
+	else
 		remote_refs = NULL;
 
 	argv_array_clear(&ref_prefixes);
@@ -1399,51 +1367,6 @@ static int do_fetch(struct transport *transport,
 		retcode = 1;
 		goto cleanup;
 	}
-
-	if (set_upstream) {
-		struct branch *branch = branch_get("HEAD");
-		struct ref *rm;
-		struct ref *source_ref = NULL;
-
-		/*
-		 * We're setting the upstream configuration for the
-		 * current branch. The relevant upstream is the
-		 * fetched branch that is meant to be merged with the
-		 * current one, i.e. the one fetched to FETCH_HEAD.
-		 *
-		 * When there are several such branches, consider the
-		 * request ambiguous and err on the safe side by doing
-		 * nothing and just emit a warning.
-		 */
-		for (rm = ref_map; rm; rm = rm->next) {
-			if (!rm->peer_ref) {
-				if (source_ref) {
-					warning(_("multiple branches detected, incompatible with --set-upstream"));
-					goto skip;
-				} else {
-					source_ref = rm;
-				}
-			}
-		}
-		if (source_ref) {
-			if (!strcmp(source_ref->name, "HEAD") ||
-			    starts_with(source_ref->name, "refs/heads/"))
-				install_branch_config(0,
-						      branch->name,
-						      transport->remote->name,
-						      source_ref->name);
-			else if (starts_with(source_ref->name, "refs/remotes/"))
-				warning(_("not setting upstream for a remote remote-tracking branch"));
-			else if (starts_with(source_ref->name, "refs/tags/"))
-				warning(_("not setting upstream for a remote tag"));
-			else
-				warning(_("unknown branch type"));
-		} else {
-			warning(_("no source branch found.\n"
-				"you need to specify exactly one branch with the --set-upstream option."));
-		}
-	}
-skip:
 	free_refs(ref_map);
 
 	/* if neither --no-tags nor --tags was specified, do automated tag
@@ -1540,62 +1463,7 @@ static void add_options_to_argv(struct argv_array *argv)
 
 }
 
-/* Fetch multiple remotes in parallel */
-
-struct parallel_fetch_state {
-	const char **argv;
-	struct string_list *remotes;
-	int next, result;
-};
-
-static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
-			     void *cb, void **task_cb)
-{
-	struct parallel_fetch_state *state = cb;
-	char *remote;
-
-	if (state->next < 0 || state->next >= state->remotes->nr)
-		return 0;
-
-	remote = state->remotes->items[state->next++].string;
-	*task_cb = remote;
-
-	argv_array_pushv(&cp->args, state->argv);
-	argv_array_push(&cp->args, remote);
-	cp->git_cmd = 1;
-
-	if (verbosity >= 0)
-		printf(_("Fetching %s\n"), remote);
-
-	return 1;
-}
-
-static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
-{
-	struct parallel_fetch_state *state = cb;
-	const char *remote = task_cb;
-
-	state->result = error(_("Could not fetch %s"), remote);
-
-	return 0;
-}
-
-static int fetch_finished(int result, struct strbuf *out,
-			  void *cb, void *task_cb)
-{
-	struct parallel_fetch_state *state = cb;
-	const char *remote = task_cb;
-
-	if (result) {
-		strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
-			    remote, result);
-		state->result = -1;
-	}
-
-	return 0;
-}
-
-static int fetch_multiple(struct string_list *list, int max_children)
+static int fetch_multiple(struct string_list *list)
 {
 	int i, result = 0;
 	struct argv_array argv = ARGV_ARRAY_INIT;
@@ -1606,38 +1474,23 @@ static int fetch_multiple(struct string_list *list, int max_children)
 		return errcode;
 	}
 
-	argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc",
-			"--no-write-commit-graph", NULL);
+	argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc", NULL);
 	add_options_to_argv(&argv);
 
-	if (max_children != 1 && list->nr != 1) {
-		struct parallel_fetch_state state = { argv.argv, list, 0, 0 };
-
-		argv_array_push(&argv, "--end-of-options");
-		result = run_processes_parallel_tr2(max_children,
-						    &fetch_next_remote,
-						    &fetch_failed_to_start,
-						    &fetch_finished,
-						    &state,
-						    "fetch", "parallel/fetch");
-
-		if (!result)
-			result = state.result;
-	} else
-		for (i = 0; i < list->nr; i++) {
-			const char *name = list->items[i].string;
-			argv_array_push(&argv, name);
-			if (verbosity >= 0)
-				printf(_("Fetching %s\n"), name);
-			if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
-				error(_("Could not fetch %s"), name);
-				result = 1;
-			}
-			argv_array_pop(&argv);
+	for (i = 0; i < list->nr; i++) {
+		const char *name = list->items[i].string;
+		argv_array_push(&argv, name);
+		if (verbosity >= 0)
+			printf(_("Fetching %s\n"), name);
+		if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
+			error(_("Could not fetch %s"), name);
+			result = 1;
+		}
+		argv_array_pop(&argv);
 	}
 
 	argv_array_clear(&argv);
-	return !!result;
+	return result;
 }
 
 /*
@@ -1657,27 +1510,37 @@ static inline void fetch_one_setup_partial(struct remote *remote)
 	 * If no prior partial clone/fetch and the current fetch DID NOT
 	 * request a partial-fetch, do a normal fetch.
 	 */
-	if (!has_promisor_remote() && !filter_options.choice)
+	if (!repository_format_partial_clone && !filter_options.choice)
 		return;
 
 	/*
-	 * If this is a partial-fetch request, we enable partial on
-	 * this repo if not already enabled and remember the given
-	 * filter-spec as the default for subsequent fetches to this
-	 * remote.
+	 * If this is the FIRST partial-fetch request, we enable partial
+	 * on this repo and remember the given filter-spec as the default
+	 * for subsequent fetches to this remote.
 	 */
-	if (filter_options.choice) {
+	if (!repository_format_partial_clone && filter_options.choice) {
 		partial_clone_register(remote->name, &filter_options);
 		return;
 	}
 
+	/*
+	 * We are currently limited to only ONE promisor remote and only
+	 * allow partial-fetches from the promisor remote.
+	 */
+	if (strcmp(remote->name, repository_format_partial_clone)) {
+		if (filter_options.choice)
+			die(_("--filter can only be used with the remote "
+			      "configured in extensions.partialClone"));
+		return;
+	}
+
 	/*
 	 * Do a partial-fetch from the promisor remote using either the
 	 * explicitly given filter-spec or inherit the filter-spec from
 	 * the config.
 	 */
 	if (!filter_options.choice)
-		partial_clone_get_default_filter_spec(&filter_options, remote->name);
+		partial_clone_get_default_filter_spec(&filter_options);
 	return;
 }
 
@@ -1763,13 +1626,14 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 
 	packet_trace_identity("fetch");
 
+	fetch_if_missing = 0;
+
 	/* Record the command line for the reflog */
 	strbuf_addstr(&default_rla, "fetch");
 	for (i = 1; i < argc; i++)
 		strbuf_addf(&default_rla, " %s", argv[i]);
 
-	fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
-				     &recurse_submodules);
+	fetch_config_from_gitmodules(&max_children, &recurse_submodules);
 	git_config(git_fetch_config, NULL);
 
 	argc = parse_options(argc, argv, prefix,
@@ -1797,7 +1661,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 	if (depth || deepen_since || deepen_not.nr)
 		deepen = 1;
 
-	if (filter_options.choice && !has_promisor_remote())
+	if (filter_options.choice && !repository_format_partial_clone)
 		die("--filter can only be used when extensions.partialClone is set");
 
 	if (all) {
@@ -1831,31 +1695,19 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 	}
 
 	if (remote) {
-		if (filter_options.choice || has_promisor_remote())
+		if (filter_options.choice || repository_format_partial_clone)
 			fetch_one_setup_partial(remote);
 		result = fetch_one(remote, argc, argv, prune_tags_ok);
 	} else {
-		int max_children = max_jobs;
-
 		if (filter_options.choice)
 			die(_("--filter can only be used with the remote "
 			      "configured in extensions.partialclone"));
-
-		if (max_children < 0)
-			max_children = fetch_parallel_config;
-
 		/* TODO should this also die if we have a previous partial-clone? */
-		result = fetch_multiple(&list, max_children);
+		result = fetch_multiple(&list);
 	}
 
 	if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
 		struct argv_array options = ARGV_ARRAY_INIT;
-		int max_children = max_jobs;
 
-		if (max_children < 0)
-			max_children = submodule_fetch_jobs_config;
-		if (max_children < 0)
-			max_children = fetch_parallel_config;
-
 		add_options_to_argv(&options);
 		result = fetch_populated_submodules(the_repository,
@@ -1870,20 +1722,6 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 
 	string_list_clear(&list, 0);
 
-	prepare_repo_settings(the_repository);
-	if (fetch_write_commit_graph > 0 ||
-	    (fetch_write_commit_graph < 0 &&
-	     the_repository->settings.fetch_write_commit_graph)) {
-		int commit_graph_flags = COMMIT_GRAPH_WRITE_SPLIT;
-
-		if (progress)
-			commit_graph_flags |= COMMIT_GRAPH_WRITE_PROGRESS;
-
-		write_commit_graph_reachable(the_repository->objects->odb,
-					     commit_graph_flags,
-					     NULL);
-	}
-
 	close_object_store(the_repository->objects);
 
 	if (enable_auto_gc) {
(Diffs for the remaining changed files in this commit are not reproduced on this page.)