merge(3p/git): Merge git upstream at v2.26.2

Vincent Ambo 2020-05-22 17:46:45 +01:00
commit 5229c9b232
1006 changed files with 149006 additions and 60819 deletions

builtin/fetch.c

@@ -7,6 +7,7 @@
#include "refs.h"
#include "refspec.h"
#include "object-store.h"
#include "oidset.h"
#include "commit.h"
#include "builtin.h"
#include "string-list.h"
@@ -23,6 +24,9 @@
#include "packfile.h"
#include "list-objects-filter-options.h"
#include "commit-reach.h"
#include "branch.h"
#include "promisor-remote.h"
#include "commit-graph.h"
#define FORCED_UPDATES_DELAY_WARNING_IN_MS (10 * 1000)
@@ -50,11 +54,13 @@ static int fetch_prune_tags_config = -1; /* unspecified */
static int prune_tags = -1; /* unspecified */
#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
static int all, append, dry_run, force, keep, multiple, update_head_ok;
static int verbosity, deepen_relative, set_upstream;
static int progress = -1;
static int enable_auto_gc = 1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
static int max_children = 1;
static int max_jobs = -1, submodule_fetch_jobs_config = -1;
static int fetch_parallel_config = 1;
static enum transport_family family;
static const char *depth;
static const char *deepen_since;
@@ -71,6 +77,7 @@ static struct refspec refmap = REFSPEC_INIT_FETCH;
static struct list_objects_filter_options filter_options;
static struct string_list server_options = STRING_LIST_INIT_DUP;
static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
static int fetch_write_commit_graph = -1;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
@@ -96,13 +103,20 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
}
if (!strcmp(k, "submodule.fetchjobs")) {
max_children = parse_submodule_fetchjobs(k, v);
submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
return 0;
} else if (!strcmp(k, "fetch.recursesubmodules")) {
recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
return 0;
}
if (!strcmp(k, "fetch.parallel")) {
fetch_parallel_config = git_config_int(k, v);
if (fetch_parallel_config < 0)
die(_("fetch.parallel cannot be negative"));
return 0;
}
return git_default_config(k, v, cb);
}
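(A hedged usage sketch, not part of the upstream diff: the fetch.parallel value parsed above is a plain integer; negative values die with the message shown, 0 lets git pick a default, and anything larger is the number of child fetches run at once for --all/--multiple and, as a fallback, for submodules further down. For example

	git config fetch.parallel 4
	git fetch --all

is roughly equivalent to passing --jobs=4 on that invocation.)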
@@ -123,6 +137,8 @@ static struct option builtin_fetch_options[] = {
OPT__VERBOSITY(&verbosity),
OPT_BOOL(0, "all", &all,
N_("fetch from all remotes")),
OPT_BOOL(0, "set-upstream", &set_upstream,
N_("set upstream for git pull/fetch")),
OPT_BOOL('a', "append", &append,
N_("append to .git/FETCH_HEAD instead of overwriting")),
OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
@@ -134,7 +150,7 @@ static struct option builtin_fetch_options[] = {
N_("fetch all tags and associated objects"), TAGS_SET),
OPT_SET_INT('n', NULL, &tags,
N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
OPT_INTEGER('j', "jobs", &max_children,
OPT_INTEGER('j', "jobs", &max_jobs,
N_("number of submodules fetched in parallel")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
@@ -183,6 +199,8 @@ static struct option builtin_fetch_options[] = {
N_("run 'gc --auto' after fetching")),
OPT_BOOL(0, "show-forced-updates", &fetch_show_forced_updates,
N_("check for forced-updates on all updated branches")),
OPT_BOOL(0, "write-commit-graph", &fetch_write_commit_graph,
N_("write the commit-graph after fetching")),
OPT_END()
};
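(Again a hedged sketch rather than part of the diff: the new entries in this option table surface on the command line as, for example,

	git fetch --set-upstream origin
	git fetch --jobs=8 --all
	git fetch --no-write-commit-graph origin

where --set-upstream records the branch fetched into FETCH_HEAD as the upstream of the current branch, --jobs now feeds max_jobs for both remote and submodule parallelism, and --[no-]write-commit-graph overrides the fetch.writeCommitGraph setting consulted near the end of cmd_fetch.)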
@@ -239,32 +257,31 @@ static void add_merge_config(struct ref **head,
}
}
static int will_fetch(struct ref **head, const unsigned char *sha1)
static void create_fetch_oidset(struct ref **head, struct oidset *out)
{
struct ref *rm = *head;
while (rm) {
if (hasheq(rm->old_oid.hash, sha1))
return 1;
oidset_insert(out, &rm->old_oid);
rm = rm->next;
}
return 0;
}
struct refname_hash_entry {
struct hashmap_entry ent; /* must be the first member */
struct hashmap_entry ent;
struct object_id oid;
int ignore;
char refname[FLEX_ARRAY];
};
static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
const void *e1_,
const void *e2_,
const struct hashmap_entry *eptr,
const struct hashmap_entry *entry_or_key,
const void *keydata)
{
const struct refname_hash_entry *e1 = e1_;
const struct refname_hash_entry *e2 = e2_;
const struct refname_hash_entry *e1, *e2;
e1 = container_of(eptr, const struct refname_hash_entry, ent);
e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
return strcmp(e1->refname, keydata ? keydata : e2->refname);
}
@@ -276,9 +293,9 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
size_t len = strlen(refname);
FLEX_ALLOC_MEM(ent, refname, refname, len);
hashmap_entry_init(ent, strhash(refname));
hashmap_entry_init(&ent->ent, strhash(refname));
oidcpy(&ent->oid, oid);
hashmap_add(map, ent);
hashmap_add(map, &ent->ent);
return ent;
}
@@ -313,13 +330,16 @@ static void find_non_local_tags(const struct ref *refs,
{
struct hashmap existing_refs;
struct hashmap remote_refs;
struct oidset fetch_oids = OIDSET_INIT;
struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
struct string_list_item *remote_ref_item;
const struct ref *ref;
struct refname_hash_entry *item = NULL;
const int quick_flags = OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT;
refname_hash_init(&existing_refs);
refname_hash_init(&remote_refs);
create_fetch_oidset(head, &fetch_oids);
for_each_ref(add_one_refname, &existing_refs);
for (ref = refs; ref; ref = ref->next) {
@@ -334,11 +354,10 @@ static void find_non_local_tags(const struct ref *refs,
*/
if (ends_with(ref->name, "^{}")) {
if (item &&
!has_object_file_with_flags(&ref->old_oid,
OBJECT_INFO_QUICK) &&
!will_fetch(head, ref->old_oid.hash) &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
!will_fetch(head, item->oid.hash))
!has_object_file_with_flags(&ref->old_oid, quick_flags) &&
!oidset_contains(&fetch_oids, &ref->old_oid) &&
!has_object_file_with_flags(&item->oid, quick_flags) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
continue;
@@ -351,8 +370,8 @@ static void find_non_local_tags(const struct ref *refs,
* fetch.
*/
if (item &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
!will_fetch(head, item->oid.hash))
!has_object_file_with_flags(&item->oid, quick_flags) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
@@ -365,15 +384,15 @@ static void find_non_local_tags(const struct ref *refs,
item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
string_list_insert(&remote_refs_list, ref->name);
}
hashmap_free(&existing_refs, 1);
hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
/*
* We may have a final lightweight tag that needs to be
* checked to see if it needs fetching.
*/
if (item &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
!will_fetch(head, item->oid.hash))
!has_object_file_with_flags(&item->oid, quick_flags) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
/*
@@ -383,8 +402,10 @@ static void find_non_local_tags(const struct ref *refs,
for_each_string_list_item(remote_ref_item, &remote_refs_list) {
const char *refname = remote_ref_item->string;
struct ref *rm;
unsigned int hash = strhash(refname);
item = hashmap_get_from_hash(&remote_refs, strhash(refname), refname);
item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
struct refname_hash_entry, ent);
if (!item)
BUG("unseen remote ref?");
@@ -398,8 +419,9 @@ static void find_non_local_tags(const struct ref *refs,
**tail = rm;
*tail = &rm->next;
}
hashmap_free(&remote_refs, 1);
hashmap_free_entries(&remote_refs, struct refname_hash_entry, ent);
string_list_clear(&remote_refs_list, 0);
oidset_clear(&fetch_oids);
}
static struct ref *get_ref_map(struct remote *remote,
@@ -516,17 +538,18 @@ static struct ref *get_ref_map(struct remote *remote,
if (rm->peer_ref) {
const char *refname = rm->peer_ref->name;
struct refname_hash_entry *peer_item;
unsigned int hash = strhash(refname);
peer_item = hashmap_get_from_hash(&existing_refs,
strhash(refname),
refname);
peer_item = hashmap_get_entry_from_hash(&existing_refs,
hash, refname,
struct refname_hash_entry, ent);
if (peer_item) {
struct object_id *old_oid = &peer_item->oid;
oidcpy(&rm->peer_ref->old_oid, old_oid);
}
}
}
hashmap_free(&existing_refs, 1);
hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
return ref_map;
}
@@ -883,8 +906,17 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
url = xstrdup("foreign");
if (!connectivity_checked) {
struct check_connected_options opt = CHECK_CONNECTED_INIT;
if (filter_options.choice)
/*
* Since a filter is specified, objects indirectly
* referenced by refs are allowed to be absent.
*/
opt.check_refs_are_promisor_objects_only = 1;
rm = ref_map;
if (check_connected(iterate_ref_map, &rm, NULL)) {
if (check_connected(iterate_ref_map, &rm, &opt)) {
rc = error(_("%s did not send all necessary objects\n"), url);
goto abort;
}
@@ -934,18 +966,12 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
kind = "";
what = "";
}
else if (starts_with(rm->name, "refs/heads/")) {
else if (skip_prefix(rm->name, "refs/heads/", &what))
kind = "branch";
what = rm->name + 11;
}
else if (starts_with(rm->name, "refs/tags/")) {
else if (skip_prefix(rm->name, "refs/tags/", &what))
kind = "tag";
what = rm->name + 10;
}
else if (starts_with(rm->name, "refs/remotes/")) {
else if (skip_prefix(rm->name, "refs/remotes/", &what))
kind = "remote-tracking branch";
what = rm->name + 13;
}
else {
kind = "";
what = rm->name;
@@ -1054,7 +1080,8 @@ static int check_exist_and_connected(struct ref *ref_map)
* we need all direct targets to exist.
*/
for (r = rm; r; r = r->next) {
if (!has_object_file(&r->old_oid))
if (!has_object_file_with_flags(&r->old_oid,
OBJECT_INFO_SKIP_FETCH_OBJECT))
return -1;
}
@@ -1065,8 +1092,11 @@ static int check_exist_and_connected(struct ref *ref_map)
static int fetch_refs(struct transport *transport, struct ref *ref_map)
{
int ret = check_exist_and_connected(ref_map);
if (ret)
if (ret) {
trace2_region_enter("fetch", "fetch_refs", the_repository);
ret = transport_fetch_refs(transport, ref_map);
trace2_region_leave("fetch", "fetch_refs", the_repository);
}
if (!ret)
/*
* Keep the new pack's ".keep" file around to allow the caller
@@ -1082,11 +1112,14 @@ static int consume_refs(struct transport *transport, struct ref *ref_map)
{
int connectivity_checked = transport->smart_options
? transport->smart_options->connectivity_checked : 0;
int ret = store_updated_refs(transport->url,
transport->remote->name,
connectivity_checked,
ref_map);
int ret;
trace2_region_enter("fetch", "consume_refs", the_repository);
ret = store_updated_refs(transport->url,
transport->remote->name,
connectivity_checked,
ref_map);
transport_unlock_pack(transport);
trace2_region_leave("fetch", "consume_refs", the_repository);
return ret;
}
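(One more hedged aside, not part of the diff: the trace2_region_enter/leave pairs added around fetch_refs and consume_refs show up as timed regions when tracing is enabled, e.g.

	GIT_TRACE2_PERF=1 git fetch origin

emits region_enter/region_leave rows for the "fetch" category, so the transfer and the ref-update phases can be timed separately.)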
@@ -1238,13 +1271,10 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
if (filter_options.choice) {
struct strbuf expanded_filter_spec = STRBUF_INIT;
expand_list_objects_filter_spec(&filter_options,
&expanded_filter_spec);
set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
expanded_filter_spec.buf);
const char *spec =
expand_list_objects_filter_spec(&filter_options);
set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, spec);
set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
strbuf_release(&expanded_filter_spec);
}
if (negotiation_tip.nr) {
if (transport->smart_options)
@@ -1334,9 +1364,11 @@ static int do_fetch(struct transport *transport,
argv_array_push(&ref_prefixes, "refs/tags/");
}
if (must_list_refs)
if (must_list_refs) {
trace2_region_enter("fetch", "remote_refs", the_repository);
remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
else
trace2_region_leave("fetch", "remote_refs", the_repository);
} else
remote_refs = NULL;
argv_array_clear(&ref_prefixes);
@@ -1367,6 +1399,51 @@ static int do_fetch(struct transport *transport,
retcode = 1;
goto cleanup;
}
if (set_upstream) {
struct branch *branch = branch_get("HEAD");
struct ref *rm;
struct ref *source_ref = NULL;
/*
* We're setting the upstream configuration for the
* current branch. The relevant upstream is the
* fetched branch that is meant to be merged with the
* current one, i.e. the one fetched to FETCH_HEAD.
*
* When there are several such branches, consider the
* request ambiguous and err on the safe side by doing
* nothing and just emit a warning.
*/
for (rm = ref_map; rm; rm = rm->next) {
if (!rm->peer_ref) {
if (source_ref) {
warning(_("multiple branches detected, incompatible with --set-upstream"));
goto skip;
} else {
source_ref = rm;
}
}
}
if (source_ref) {
if (!strcmp(source_ref->name, "HEAD") ||
starts_with(source_ref->name, "refs/heads/"))
install_branch_config(0,
branch->name,
transport->remote->name,
source_ref->name);
else if (starts_with(source_ref->name, "refs/remotes/"))
warning(_("not setting upstream for a remote remote-tracking branch"));
else if (starts_with(source_ref->name, "refs/tags/"))
warning(_("not setting upstream for a remote tag"));
else
warning(_("unknown branch type"));
} else {
warning(_("no source branch found.\n"
"you need to specify exactly one branch with the --set-upstream option."));
}
}
skip:
free_refs(ref_map);
/* if neither --no-tags nor --tags was specified, do automated tag
@@ -1463,7 +1540,62 @@ static void add_options_to_argv(struct argv_array *argv)
}
static int fetch_multiple(struct string_list *list)
/* Fetch multiple remotes in parallel */
struct parallel_fetch_state {
const char **argv;
struct string_list *remotes;
int next, result;
};
static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
void *cb, void **task_cb)
{
struct parallel_fetch_state *state = cb;
char *remote;
if (state->next < 0 || state->next >= state->remotes->nr)
return 0;
remote = state->remotes->items[state->next++].string;
*task_cb = remote;
argv_array_pushv(&cp->args, state->argv);
argv_array_push(&cp->args, remote);
cp->git_cmd = 1;
if (verbosity >= 0)
printf(_("Fetching %s\n"), remote);
return 1;
}
static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
{
struct parallel_fetch_state *state = cb;
const char *remote = task_cb;
state->result = error(_("Could not fetch %s"), remote);
return 0;
}
static int fetch_finished(int result, struct strbuf *out,
void *cb, void *task_cb)
{
struct parallel_fetch_state *state = cb;
const char *remote = task_cb;
if (result) {
strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
remote, result);
state->result = -1;
}
return 0;
}
static int fetch_multiple(struct string_list *list, int max_children)
{
int i, result = 0;
struct argv_array argv = ARGV_ARRAY_INIT;
@@ -1474,23 +1606,38 @@ static int fetch_multiple(struct string_list *list)
return errcode;
}
argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc", NULL);
argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc",
"--no-write-commit-graph", NULL);
add_options_to_argv(&argv);
for (i = 0; i < list->nr; i++) {
const char *name = list->items[i].string;
argv_array_push(&argv, name);
if (verbosity >= 0)
printf(_("Fetching %s\n"), name);
if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
error(_("Could not fetch %s"), name);
result = 1;
if (max_children != 1 && list->nr != 1) {
struct parallel_fetch_state state = { argv.argv, list, 0, 0 };
argv_array_push(&argv, "--end-of-options");
result = run_processes_parallel_tr2(max_children,
&fetch_next_remote,
&fetch_failed_to_start,
&fetch_finished,
&state,
"fetch", "parallel/fetch");
if (!result)
result = state.result;
} else
for (i = 0; i < list->nr; i++) {
const char *name = list->items[i].string;
argv_array_push(&argv, name);
if (verbosity >= 0)
printf(_("Fetching %s\n"), name);
if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
error(_("Could not fetch %s"), name);
result = 1;
}
argv_array_pop(&argv);
}
argv_array_pop(&argv);
}
argv_array_clear(&argv);
return result;
return !!result;
}
/*
@@ -1510,37 +1657,27 @@ static inline void fetch_one_setup_partial(struct remote *remote)
* If no prior partial clone/fetch and the current fetch DID NOT
* request a partial-fetch, do a normal fetch.
*/
if (!repository_format_partial_clone && !filter_options.choice)
if (!has_promisor_remote() && !filter_options.choice)
return;
/*
* If this is the FIRST partial-fetch request, we enable partial
* on this repo and remember the given filter-spec as the default
* for subsequent fetches to this remote.
* If this is a partial-fetch request, we enable partial on
* this repo if not already enabled and remember the given
* filter-spec as the default for subsequent fetches to this
* remote.
*/
if (!repository_format_partial_clone && filter_options.choice) {
if (filter_options.choice) {
partial_clone_register(remote->name, &filter_options);
return;
}
/*
* We are currently limited to only ONE promisor remote and only
* allow partial-fetches from the promisor remote.
*/
if (strcmp(remote->name, repository_format_partial_clone)) {
if (filter_options.choice)
die(_("--filter can only be used with the remote "
"configured in extensions.partialClone"));
return;
}
/*
* Do a partial-fetch from the promisor remote using either the
* explicitly given filter-spec or inherit the filter-spec from
* the config.
*/
if (!filter_options.choice)
partial_clone_get_default_filter_spec(&filter_options);
partial_clone_get_default_filter_spec(&filter_options, remote->name);
return;
}
@@ -1626,14 +1763,13 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
packet_trace_identity("fetch");
fetch_if_missing = 0;
/* Record the command line for the reflog */
strbuf_addstr(&default_rla, "fetch");
for (i = 1; i < argc; i++)
strbuf_addf(&default_rla, " %s", argv[i]);
fetch_config_from_gitmodules(&max_children, &recurse_submodules);
fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
&recurse_submodules);
git_config(git_fetch_config, NULL);
argc = parse_options(argc, argv, prefix,
@@ -1661,7 +1797,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
if (depth || deepen_since || deepen_not.nr)
deepen = 1;
if (filter_options.choice && !repository_format_partial_clone)
if (filter_options.choice && !has_promisor_remote())
die("--filter can only be used when extensions.partialClone is set");
if (all) {
@@ -1695,19 +1831,31 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
}
if (remote) {
if (filter_options.choice || repository_format_partial_clone)
if (filter_options.choice || has_promisor_remote())
fetch_one_setup_partial(remote);
result = fetch_one(remote, argc, argv, prune_tags_ok);
} else {
int max_children = max_jobs;
if (filter_options.choice)
die(_("--filter can only be used with the remote "
"configured in extensions.partialclone"));
if (max_children < 0)
max_children = fetch_parallel_config;
/* TODO should this also die if we have a previous partial-clone? */
result = fetch_multiple(&list);
result = fetch_multiple(&list, max_children);
}
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct argv_array options = ARGV_ARRAY_INIT;
int max_children = max_jobs;
if (max_children < 0)
max_children = submodule_fetch_jobs_config;
if (max_children < 0)
max_children = fetch_parallel_config;
add_options_to_argv(&options);
result = fetch_populated_submodules(the_repository,
@@ -1722,6 +1870,20 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
string_list_clear(&list, 0);
prepare_repo_settings(the_repository);
if (fetch_write_commit_graph > 0 ||
(fetch_write_commit_graph < 0 &&
the_repository->settings.fetch_write_commit_graph)) {
int commit_graph_flags = COMMIT_GRAPH_WRITE_SPLIT;
if (progress)
commit_graph_flags |= COMMIT_GRAPH_WRITE_PROGRESS;
write_commit_graph_reachable(the_repository->objects->odb,
commit_graph_flags,
NULL);
}
close_object_store(the_repository->objects);
if (enable_auto_gc) {