#include "cache.h"
#include "diff.h"
#include "diffcore.h"
#include "lockfile.h"
#include "commit.h"
#include "run-command.h"
#include "resolve-undo.h"
#include "tree-walk.h"
#include "unpack-trees.h"
#include "dir.h"
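
/*
 * Hex object name to put on the command line of a merge strategy:
 * the commit's object name, or the empty tree when there is no
 * commit to name.
 */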
static const char *merge_argument(struct commit *commit)
{
	return oid_to_hex(commit ? &commit->object.oid : the_hash_algo->empty_tree);
}
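
/*
 * Check whether the index differs from the HEAD tree; returns 1 if it
 * does, 0 otherwise.  If HEAD does not resolve to a tree (e.g. on an
 * unborn branch), any index entry counts as a change.  When "sb" is
 * non-NULL, the paths of the changed entries are appended to it,
 * separated by single spaces.
 */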
int index_has_changes(struct strbuf *sb)
{
	struct object_id head;
	int i;

	if (!get_oid_tree("HEAD", &head)) {
		struct diff_options opt;

		diff_setup(&opt);
		opt.flags.exit_with_status = 1;
		if (!sb)
			opt.flags.quick = 1;
		do_diff_cache(&head, &opt);
		diffcore_std(&opt);
		for (i = 0; sb && i < diff_queued_diff.nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
		}
		diff_flush(&opt);
		return opt.flags.has_changes != 0;
	} else {
		for (i = 0; sb && i < active_nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, active_cache[i]->name);
		}
		return !!active_nr;
	}
}
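
/*
 * Spawn the external merge strategy "git merge-<strategy>".  With
 * strategy "octopus", for example, the constructed command line is
 * roughly
 *
 *   git merge-octopus [--<xopt>...] <base>... -- <head_arg> <remote>...
 *
 * Afterwards the in-core index is discarded and re-read, since the
 * strategy may have rewritten the index file on disk.
 */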
int try_merge_command(const char *strategy, size_t xopts_nr,
		      const char **xopts, struct commit_list *common,
		      const char *head_arg, struct commit_list *remotes)
{
	struct argv_array args = ARGV_ARRAY_INIT;
	int i, ret;
	struct commit_list *j;

	argv_array_pushf(&args, "merge-%s", strategy);
	for (i = 0; i < xopts_nr; i++)
		argv_array_pushf(&args, "--%s", xopts[i]);
	for (j = common; j; j = j->next)
		argv_array_push(&args, merge_argument(j->item));
	argv_array_push(&args, "--");
	argv_array_push(&args, head_arg);
	for (j = remotes; j; j = j->next)
		argv_array_push(&args, merge_argument(j->item));

	ret = run_command_v_opt(args.argv, RUN_GIT_CMD);
	argv_array_clear(&args);

	discard_cache();
	if (read_cache() < 0)
		die(_("failed to read the cache"));
	resolve_undo_clear();

	return ret;
}
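
/*
 * Fast-forward the index and working tree from the tree at "head" to
 * the tree at "remote" with a two-way tree merge, refusing to lose
 * local modifications.  Returns 0 on success and -1 on error.
 */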
int checkout_fast_forward(const struct object_id *head,
			  const struct object_id *remote,
			  int overwrite_ignore)
{
	struct tree *trees[MAX_UNPACK_TREES];
	struct unpack_trees_options opts;
	struct tree_desc t[MAX_UNPACK_TREES];
	int i, nr_trees = 0;
	struct dir_struct dir;
	struct lock_file lock_file = LOCK_INIT;

	refresh_cache(REFRESH_QUIET);

	if (hold_locked_index(&lock_file, LOCK_REPORT_ON_ERROR) < 0)
		return -1;

	memset(&trees, 0, sizeof(trees));
	memset(&t, 0, sizeof(t));

	trees[nr_trees] = parse_tree_indirect(head);
	if (!trees[nr_trees++]) {
		rollback_lock_file(&lock_file);
		return -1;
	}
	trees[nr_trees] = parse_tree_indirect(remote);
	if (!trees[nr_trees++]) {
		rollback_lock_file(&lock_file);
		return -1;
	}
	for (i = 0; i < nr_trees; i++) {
		parse_tree(trees[i]);
		init_tree_desc(t+i, trees[i]->buffer, trees[i]->size);
	}
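
	/*
	 * Unpack the two trees with twoway_merge(): fast-forward the
	 * index from the "head" tree to the "remote" tree, updating
	 * the working tree files as needed.
	 */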
	memset(&opts, 0, sizeof(opts));
	if (overwrite_ignore) {
		memset(&dir, 0, sizeof(dir));
		dir.flags |= DIR_SHOW_IGNORED;
		setup_standard_excludes(&dir);
		opts.dir = &dir;
	}

	opts.head_idx = 1;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;
	opts.update = 1;
	opts.verbose_update = 1;
	opts.merge = 1;
	opts.fn = twoway_merge;
	setup_unpack_trees_porcelain(&opts, "merge");

	if (unpack_trees(nr_trees, t, &opts)) {
		rollback_lock_file(&lock_file);
		clear_unpack_trees_porcelain(&opts);
		return -1;
	}
	clear_unpack_trees_porcelain(&opts);
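
	/*
	 * write_locked_index() with COMMIT_LOCK rolls the lock back on
	 * failure, so there is no need for an explicit
	 * rollback_lock_file() before returning the error.
	 */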
	if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
		return error(_("unable to write new index file"));
	return 0;
}