#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "run-command.h"
#include "connect.h"
#include "sigchain.h"
#include "version.h"
#include "string-list.h"
static const char upload_pack_usage[] = "git upload-pack [--strict] [--timeout=<n>] <dir>";
/* Remember to update object flag allocation in object.h */
#define THEY_HAVE (1u << 11)
#define OUR_REF (1u << 12)
#define WANTED (1u << 13)
#define COMMON_KNOWN (1u << 14)
#define REACHABLE (1u << 15)
#define SHALLOW (1u << 16)
#define NOT_SHALLOW (1u << 17)
#define CLIENT_SHALLOW (1u << 18)
#define HIDDEN_REF (1u << 19)
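/* Commit date of the oldest "have" we saw; bounds the reachability walk. */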
static unsigned long oldest_have;
static int multi_ack;
static int no_done;
static int use_thin_pack, use_ofs_delta, use_include_tag;
static int no_progress, daemon_mode;
static int allow_tip_sha1_in_want;
static int shallow_nr;
static struct object_array have_obj;
static struct object_array want_obj;
static struct object_array extra_edge_obj;
static unsigned int timeout;
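/* Seconds between keepalive packets while pack-objects is silent; negative disables them. */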
static int keepalive = 5;
/*
 * 0 for no sideband, otherwise the maximum packet size (up to
 * 65520 bytes).
 */
static int use_sideband;
static int advertise_refs;
static int stateless_rpc;
static void reset_timeout(void)
{
alarm(timeout);
}
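/*
 * Send data to the client on the given channel: multiplexed onto
 * stdout when a sideband is in use, written directly otherwise.
 * Channel 3 is the emergency abort channel and falls back to stderr.
 */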
static ssize_t send_client_data(int fd, const char *data, ssize_t sz)
{
if (use_sideband)
return send_sideband(1, fd, data, sz, use_sideband);
if (fd == 3)
/* emergency quit */
fd = 2;
if (fd == 2) {
/* XXX: are we happy to lose stuff here? */
xwrite(fd, data, sz);
return sz;
}
write_or_die(fd, data, sz);
return sz;
}
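/*
 * for_each_commit_graft() callback: grafts with nr_parent == -1 mark
 * shallow boundaries, which we pass to pack-objects as "--shallow"
 * lines on its stdin.
 */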
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
FILE *fp = cb_data;
if (graft->nr_parent == -1)
fprintf(fp, "--shallow %s\n", oid_to_hex(&graft->oid));
return 0;
}
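/*
 * Spawn pack-objects, feed it the want/have/edge lists on its stdin,
 * then relay the resulting pack data (and progress messages on the
 * sideband) to the client.
 */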
static void create_pack_file(void)
{
struct child_process pack_objects = CHILD_PROCESS_INIT;
char data[8193], progress[128];
char abort_msg[] = "aborting due to possible repository "
"corruption on the remote side.";
int buffered = -1;
ssize_t sz;
const char *argv[13];
int i, arg = 0;
FILE *pipe_fd;
if (shallow_nr) {
argv[arg++] = "--shallow-file";
argv[arg++] = "";
}
argv[arg++] = "pack-objects";
argv[arg++] = "--revs";
if (use_thin_pack)
argv[arg++] = "--thin";
argv[arg++] = "--stdout";
if (shallow_nr)
argv[arg++] = "--shallow";
if (!no_progress)
argv[arg++] = "--progress";
if (use_ofs_delta)
argv[arg++] = "--delta-base-offset";
if (use_include_tag)
argv[arg++] = "--include-tag";
argv[arg++] = NULL;
pack_objects.in = -1;
pack_objects.out = -1;
pack_objects.err = -1;
pack_objects.git_cmd = 1;
pack_objects.argv = argv;
if (start_command(&pack_objects))
die("git upload-pack: unable to fork git-pack-objects");
pipe_fd = xfdopen(pack_objects.in, "w");
if (shallow_nr)
for_each_commit_graft(write_one_shallow, pipe_fd);
for (i = 0; i < want_obj.nr; i++)
fprintf(pipe_fd, "%s\n",
sha1_to_hex(want_obj.objects[i].item->sha1));
fprintf(pipe_fd, "--not\n");
for (i = 0; i < have_obj.nr; i++)
fprintf(pipe_fd, "%s\n",
sha1_to_hex(have_obj.objects[i].item->sha1));
for (i = 0; i < extra_edge_obj.nr; i++)
fprintf(pipe_fd, "%s\n",
sha1_to_hex(extra_edge_obj.objects[i].item->sha1));
fprintf(pipe_fd, "\n");
fflush(pipe_fd);
fclose(pipe_fd);
/*
 * We read from pack_objects.err to capture stderr output for the
 * progress bar, and pack_objects.out to capture the pack data.
 */
while (1) {
struct pollfd pfd[2];
int pe, pu, pollsize;
int ret;
reset_timeout();
pollsize = 0;
pe = pu = -1;
if (0 <= pack_objects.out) {
pfd[pollsize].fd = pack_objects.out;
pfd[pollsize].events = POLLIN;
pu = pollsize;
pollsize++;
}
if (0 <= pack_objects.err) {
pfd[pollsize].fd = pack_objects.err;
pfd[pollsize].events = POLLIN;
pe = pollsize;
pollsize++;
}
if (!pollsize)
break;
ret = poll(pfd, pollsize,
keepalive < 0 ? -1 : 1000 * keepalive);
if (ret < 0) {
if (errno != EINTR) {
error("poll failed, resuming: %s",
strerror(errno));
sleep(1);
}
continue;
}
if (0 <= pe && (pfd[pe].revents & (POLLIN|POLLHUP))) {
/*
 * Status ready; we ship that in the side-band or dump it
 * to the standard error.
 */
sz = xread(pack_objects.err, progress,
sizeof(progress));
if (0 < sz)
send_client_data(2, progress, sz);
else if (sz == 0) {
close(pack_objects.err);
pack_objects.err = -1;
}
else
goto fail;
/* give priority to status messages */
continue;
}
if (0 <= pu && (pfd[pu].revents & (POLLIN|POLLHUP))) {
/*
 * Data ready; we keep the last byte to ourselves
 * in case we detect broken rev-list, so that we
 * can leave the stream corrupted. This is
 * unfortunate -- unpack-objects would happily
 * accept valid pack data with trailing garbage,
 * so appending garbage after we pass all the
 * pack data is not good enough to signal
 * breakage to downstream.
 */
char *cp = data;
ssize_t outsz = 0;
if (0 <= buffered) {
*cp++ = buffered;
outsz++;
}
sz = xread(pack_objects.out, cp,
sizeof(data) - outsz);
if (0 < sz)
; /* got data; fall through and relay it below */
else if (sz == 0) {
close(pack_objects.out);
pack_objects.out = -1;
}
else
goto fail;
sz += outsz;
if (1 < sz) {
buffered = data[sz-1] & 0xFF;
sz--;
}
else
buffered = -1;
sz = send_client_data(1, data, sz);
if (sz < 0)
goto fail;
}
/*
* We hit the keepalive timeout without saying anything; send
* an empty message on the data sideband just to let the other
* side know we're still working on it, but don't have any data
* yet.
*
* If we don't have a sideband channel, there's no room in the
* protocol to say anything, so those clients are just out of
* luck.
*/
if (!ret && use_sideband) {
static const char buf[] = "0005\1";
write_or_die(1, buf, 5);
}
}
if (finish_command(&pack_objects)) {
error("git upload-pack: git-pack-objects died with error.");
goto fail;
}
/* flush the data */
if (0 <= buffered) {
data[0] = buffered;
sz = send_client_data(1, data, 1);
if (sz < 0)
goto fail;
fprintf(stderr, "flushed.\n");
}
if (use_sideband)
packet_flush(1);
return;
fail:
send_client_data(3, abort_msg, sizeof(abort_msg));
die("git upload-pack: %s", abort_msg);
}
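/*
 * Process one "have" line.  Returns -1 if we do not have the object,
 * 1 if it is a new common object, and 0 if we already knew it was
 * common.
 */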
static int got_sha1(char *hex, unsigned char *sha1)
{
struct object *o;
int we_knew_they_have = 0;
if (get_sha1_hex(hex, sha1))
die("git upload-pack: expected SHA1 object, got '%s'", hex);
if (!has_sha1_file(sha1))
return -1;
o = parse_object(sha1);
if (!o)
die("oops (%s)", sha1_to_hex(sha1));
if (o->type == OBJ_COMMIT) {
struct commit_list *parents;
struct commit *commit = (struct commit *)o;
if (o->flags & THEY_HAVE)
we_knew_they_have = 1;
else
o->flags |= THEY_HAVE;
if (!oldest_have || (commit->date < oldest_have))
oldest_have = commit->date;
for (parents = commit->parents;
parents;
parents = parents->next)
parents->item->object.flags |= THEY_HAVE;
}
if (!we_knew_they_have) {
add_object_array(o, NULL, &have_obj);
return 1;
}
return 0;
}
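/*
 * Walk the ancestors of "want" in date order, going no further back
 * than the oldest "have", and report whether it is reachable from
 * something the client already has (COMMON_KNOWN).
 */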
static int reachable(struct commit *want)
{
struct commit_list *work = NULL;
commit_list_insert_by_date(want, &work);
while (work) {
struct commit_list *list = work->next;
struct commit *commit = work->item;
free(work);
work = list;
if (commit->object.flags & THEY_HAVE) {
want->object.flags |= COMMON_KNOWN;
break;
}
if (!commit->object.parsed)
parse_object(commit->object.sha1);
if (commit->object.flags & REACHABLE)
continue;
commit->object.flags |= REACHABLE;
if (commit->date < oldest_have)
continue;
for (list = commit->parents; list; list = list->next) {
struct commit *parent = list->item;
if (!(parent->object.flags & REACHABLE))
commit_list_insert_by_date(parent, &work);
}
}
want->object.flags |= REACHABLE;
clear_commit_marks(want, REACHABLE);
free_commit_list(work);
return (want->object.flags & COMMON_KNOWN);
}
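/*
 * The negotiation can stop once every wanted commit is known to be
 * reachable from the objects the client told us it has.
 */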
static int ok_to_give_up(void)
{
int i;
if (!have_obj.nr)
return 0;
for (i = 0; i < want_obj.nr; i++) {
struct object *want = want_obj.objects[i].item;
if (want->flags & COMMON_KNOWN)
continue;
want = deref_tag(want, "a want line", 0);
if (!want || want->type != OBJ_COMMIT) {
/*
 * There is no way to tell if this is reachable by
 * looking at the ancestry chain alone, so leave a
 * note to ourselves not to worry about this object
 * anymore.
 */
want_obj.objects[i].item->flags |= COMMON_KNOWN;
continue;
}
if (!reachable((struct commit *)want))
return 0;
}
return 1;
}
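/*
 * Read "have" lines from the client and answer with ACK/NAK packets
 * according to the multi_ack mode in effect, until "done" arrives.
 */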
static int get_common_commits(void)
{
unsigned char sha1[20];
char last_hex[41];
int got_common = 0;
int got_other = 0;
int sent_ready = 0;
save_commit_buffer = 0;
for (;;) {
char *line = packet_read_line(0, NULL);
reset_timeout();
if (!line) {
if (multi_ack == 2 && got_common
&& !got_other && ok_to_give_up()) {
sent_ready = 1;
packet_write(1, "ACK %s ready\n", last_hex);
}
if (have_obj.nr == 0 || multi_ack)
packet_write(1, "NAK\n");
if (no_done && sent_ready) {
packet_write(1, "ACK %s\n", last_hex);
return 0;
}
if (stateless_rpc)
exit(0);
got_common = 0;
got_other = 0;
continue;
}
if (starts_with(line, "have ")) {
switch (got_sha1(line+5, sha1)) {
case -1: /* they have what we do not */
got_other = 1;
if (multi_ack && ok_to_give_up()) {
const char *hex = sha1_to_hex(sha1);
if (multi_ack == 2) {
sent_ready = 1;
packet_write(1, "ACK %s ready\n", hex);
} else
packet_write(1, "ACK %s continue\n", hex);
}
break;
default:
got_common = 1;
memcpy(last_hex, sha1_to_hex(sha1), 41);
if (multi_ack == 2)
packet_write(1, "ACK %s common\n", last_hex);
else if (multi_ack)
packet_write(1, "ACK %s continue\n", last_hex);
else if (have_obj.nr == 1)
packet_write(1, "ACK %s\n", last_hex);
break;
}
continue;
}
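		/*
		 * "done" ends the negotiation: ACK the best common object
		 * found so far, or NAK if there was none.
		 */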
if (!strcmp(line, "done")) {
if (have_obj.nr > 0) {
if (multi_ack)
packet_write(1, "ACK %s\n", last_hex);
return 0;
}
packet_write(1, "NAK\n");
return -1;
}
die("git upload-pack: expected SHA1 list, got '%s'", line);
}
}
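
/*
 * A "want" is acceptable if it is the advertised tip of one of our
 * refs (OUR_REF) or, with allow-tip-sha1-in-want enabled, the tip of
 * a hidden ref (HIDDEN_REF).
 */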
static int is_our_ref(struct object *o)
{
return o->flags &
((allow_tip_sha1_in_want ? HIDDEN_REF : 0) | OUR_REF);
}
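
/*
 * Check that every non-tip "want" is reachable from our advertised
 * refs by feeding "^<tip>" and "<want>" lines to "rev-list --stdin";
 * any output (or failure) means some want is not an ancestor of our
 * refs, and we reject the request.
 */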
static void check_non_tip(void)
{
static const char *argv[] = {
"rev-list", "--stdin", NULL,
};
static struct child_process cmd = CHILD_PROCESS_INIT;
struct object *o;
char namebuf[42]; /* ^ + SHA-1 + LF */
int i;
	/* In the normal in-process case a non-tip request can never happen */
if (!stateless_rpc)
goto error;
cmd.argv = argv;
cmd.git_cmd = 1;
cmd.no_stderr = 1;
cmd.in = -1;
cmd.out = -1;
if (start_command(&cmd))
goto error;
/*
* If rev-list --stdin encounters an unknown commit, it
* terminates, which will cause SIGPIPE in the write loop
* below.
*/
sigchain_push(SIGPIPE, SIG_IGN);
namebuf[0] = '^';
namebuf[41] = '\n';
for (i = get_max_object_index(); 0 < i; ) {
o = get_indexed_object(--i);
if (!o)
continue;
if (!is_our_ref(o))
continue;
memcpy(namebuf + 1, sha1_to_hex(o->sha1), 40);
if (write_in_full(cmd.in, namebuf, 42) < 0)
goto error;
}
namebuf[40] = '\n';
for (i = 0; i < want_obj.nr; i++) {
o = want_obj.objects[i].item;
if (is_our_ref(o))
continue;
memcpy(namebuf, sha1_to_hex(o->sha1), 40);
if (write_in_full(cmd.in, namebuf, 41) < 0)
goto error;
}
close(cmd.in);
sigchain_pop(SIGPIPE);
	/*
	 * If rev-list prints anything at all, some "want" was not an
	 * ancestor of our advertised refs; a single byte of output is
	 * enough to know the request must be rejected.
	 */
i = read_in_full(cmd.out, namebuf, 1);
if (i)
goto error;
close(cmd.out);
/*
* rev-list may have died by encountering a bad commit
* in the history, in which case we do want to bail out
* even when it showed no commit.
*/
if (finish_command(&cmd))
goto error;
/* All the non-tip ones are ancestors of what we advertised */
return;
error:
/* Pick one of them (we know there at least is one) */
for (i = 0; i < want_obj.nr; i++) {
o = want_obj.objects[i].item;
if (!is_our_ref(o))
die("git upload-pack: not our ref %s",
sha1_to_hex(o->sha1));
}
}
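
/*
 * Read "want", "shallow" and "deepen" lines from the client, collect
 * the wanted objects in want_obj and negotiate the shallow boundary.
 */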
static void receive_needs(void)
{
struct object_array shallows = OBJECT_ARRAY_INIT;
int depth = 0;
int has_non_tip = 0;
shallow_nr = 0;
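	/*
	 * packet_read_line() returns the next request line in a static
	 * LARGE_PACKET_MAX buffer, or NULL once the client sends the
	 * terminating pkt-line flush.
	 */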
for (;;) {
struct object *o;
const char *features;
unsigned char sha1_buf[20];
char *line = packet_read_line(0, NULL);
reset_timeout();
if (!line)
break;
if (starts_with(line, "shallow ")) {
unsigned char sha1[20];
struct object *object;
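			/*
			 * The shallow argument is a hex object id; use the
			 * strict get_sha1_hex() rather than get_sha1().
			 */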
if (get_sha1_hex(line + 8, sha1))
die("invalid shallow line: %s", line);
object = parse_object(sha1);
if (!object)
continue;
if (object->type != OBJ_COMMIT)
die("invalid shallow object %s", sha1_to_hex(sha1));
if (!(object->flags & CLIENT_SHALLOW)) {
object->flags |= CLIENT_SHALLOW;
add_object_array(object, NULL, &shallows);
}
continue;
}
if (starts_with(line, "deepen ")) {
char *end;
depth = strtol(line + 7, &end, 0);
if (end == line + 7 || depth <= 0)
die("Invalid deepen: %s", line);
continue;
}
if (!starts_with(line, "want ") ||
get_sha1_hex(line+5, sha1_buf))
die("git upload-pack: protocol error, "
"expected to get sha, not '%s'", line);
features = line + 45;
if (parse_feature_request(features, "multi_ack_detailed"))
multi_ack = 2;
else if (parse_feature_request(features, "multi_ack"))
multi_ack = 1;
if (parse_feature_request(features, "no-done"))
no_done = 1;
if (parse_feature_request(features, "thin-pack"))
use_thin_pack = 1;
if (parse_feature_request(features, "ofs-delta"))
use_ofs_delta = 1;
if (parse_feature_request(features, "side-band-64k"))
use_sideband = LARGE_PACKET_MAX;
else if (parse_feature_request(features, "side-band"))
use_sideband = DEFAULT_PACKET_MAX;
if (parse_feature_request(features, "no-progress"))
no_progress = 1;
if (parse_feature_request(features, "include-tag"))
use_include_tag = 1;
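
		/*
		 * Use parse_object(), not lookup_object(): it loads the
		 * object from disk, so a non-tip "want" received over
		 * stateless RPC can still be found; reachability is
		 * verified later by check_non_tip().
		 */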
o = parse_object(sha1_buf);
if (!o)
die("git upload-pack: not our ref %s",
sha1_to_hex(sha1_buf));
if (!(o->flags & WANTED)) {
o->flags |= WANTED;
if (!is_our_ref(o))
has_non_tip = 1;
add_object_array(o, NULL, &want_obj);
}
}
/*
* We have sent all our refs already, and the other end
* should have chosen out of them. When we are operating
* in the stateless RPC mode, however, their choice may
* have been based on the set of older refs advertised
* by another process that handled the initial request.
*/
if (has_non_tip)
check_non_tip();
if (!use_sideband && daemon_mode)
no_progress = 1;
if (depth == 0 && shallows.nr == 0)
return;
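
	/*
	 * For a "deepen" request, compute the new shallow boundary and
	 * tell the client which commits become shallow or unshallow;
	 * without a depth, just honor the shallow commits the client
	 * already has.
	 */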
if (depth > 0) {
struct commit_list *result = NULL, *backup = NULL;
int i;
if (depth == INFINITE_DEPTH && !is_repository_shallow())
for (i = 0; i < shallows.nr; i++) {
struct object *object = shallows.objects[i].item;
object->flags |= NOT_SHALLOW;
}
else
backup = result =
get_shallow_commits(&want_obj, depth,
SHALLOW, NOT_SHALLOW);
while (result) {
struct object *object = &result->item->object;
if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
packet_write(1, "shallow %s",
sha1_to_hex(object->sha1));
register_shallow(object->sha1);
shallow_nr++;
}
result = result->next;
}
free_commit_list(backup);
for (i = 0; i < shallows.nr; i++) {
struct object *object = shallows.objects[i].item;
if (object->flags & NOT_SHALLOW) {
struct commit_list *parents;
packet_write(1, "unshallow %s",
sha1_to_hex(object->sha1));
object->flags &= ~CLIENT_SHALLOW;
/* make sure the real parents are parsed */
unregister_shallow(object->sha1);
object->parsed = 0;
parse_commit_or_die((struct commit *)object);
parents = ((struct commit *)object)->parents;
while (parents) {
add_object_array(&parents->item->object,
NULL, &want_obj);
parents = parents->next;
}
add_object_array(object, NULL, &extra_edge_obj);
}
		/* make sure commit traversal conforms to the client's shallow view */
register_shallow(object->sha1);
}
packet_flush(1);
	} else if (shallows.nr > 0) {
		int i;
		for (i = 0; i < shallows.nr; i++)
			register_shallow(shallows.objects[i].item->sha1);
	}
shallow_nr += shallows.nr;
free(shallows.objects);
}
/* return non-zero if the ref is hidden, otherwise 0 */
static int mark_our_ref(const char *refname, const unsigned char *sha1)
{
struct object *o = lookup_unknown_object(sha1);
if (ref_is_hidden(refname)) {
o->flags |= HIDDEN_REF;
return 1;
}
o->flags |= OUR_REF;
return 0;
}
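
/*
 * Used instead of send_ref() when the advertisement was already made
 * by an earlier request (stateless RPC): mark the refs as ours
 * without printing anything.
 */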
static int check_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
mark_our_ref(refname, sha1);
return 0;
}
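
/* Append " symref=<name>:<target>" for each symref we recorded */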
static void format_symref_info(struct strbuf *buf, struct string_list *symref)
{
struct string_list_item *item;
if (!symref->nr)
return;
for_each_string_list_item(item, symref)
strbuf_addf(buf, " symref=%s:%s", item->string, (char *)item->util);
}
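
/*
 * Advertise one ref. The very first line also carries the capability
 * list, symref info and agent string after a NUL byte; annotated tags
 * additionally get a peeled "^{}" line.
 */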
static int send_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
static const char *capabilities = "multi_ack thin-pack side-band"
" side-band-64k ofs-delta shallow no-progress"
" include-tag multi_ack_detailed";
const char *refname_nons = strip_namespace(refname);
unsigned char peeled[20];
if (mark_our_ref(refname, sha1))
return 0;
if (capabilities) {
struct strbuf symref_info = STRBUF_INIT;
format_symref_info(&symref_info, cb_data);
packet_write(1, "%s %s%c%s%s%s%s agent=%s\n",
sha1_to_hex(sha1), refname_nons,
0, capabilities,
allow_tip_sha1_in_want ? " allow-tip-sha1-in-want" : "",
stateless_rpc ? " no-done" : "",
symref_info.buf,
git_user_agent_sanitized());
strbuf_release(&symref_info);
} else {
packet_write(1, "%s %s\n", sha1_to_hex(sha1), refname_nons);
}
capabilities = NULL;
if (!peel_ref(refname, peeled))
packet_write(1, "%s %s^{}\n", sha1_to_hex(peeled), refname_nons);
return 0;
}
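
/*
 * Record a symbolic ref and its target so send_ref() can advertise
 * it via the symref capability.
 */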
static int find_symref(const char *refname, const unsigned char *sha1, int flag,
void *cb_data)
{
const char *symref_target;
struct string_list_item *item;
unsigned char unused[20];
if ((flag & REF_ISSYMREF) == 0)
return 0;
symref_target = resolve_ref_unsafe(refname, 0, unused, &flag);
if (!symref_target || (flag & REF_ISSYMREF) == 0)
die("'%s' is a symref but it is not?", refname);
item = string_list_append(cb_data, refname);
item->util = xstrdup(symref_target);
return 0;
}
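
/*
 * Top-level driver: advertise refs (unless suppressed in stateless
 * RPC mode), then read the client's wants, negotiate the common
 * commits and send the pack.
 */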
static void upload_pack(void)
{
struct string_list symref = STRING_LIST_INIT_DUP;
head_ref_namespaced(find_symref, &symref);
if (advertise_refs || !stateless_rpc) {
reset_timeout();
head_ref_namespaced(send_ref, &symref);
for_each_namespaced_ref(send_ref, &symref);
advertise_shallow_grafts(1);
packet_flush(1);
} else {
head_ref_namespaced(check_ref, NULL);
for_each_namespaced_ref(check_ref, NULL);
}
string_list_clear(&symref, 1);
if (advertise_refs)
return;
receive_needs();
if (want_obj.nr) {
get_common_commits();
create_pack_file();
}
}
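
/* Parse the uploadpack.* configuration variables */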
static int upload_pack_config(const char *var, const char *value, void *unused)
{
if (!strcmp("uploadpack.allowtipsha1inwant", var))
allow_tip_sha1_in_want = git_config_bool(var, value);
else if (!strcmp("uploadpack.keepalive", var)) {
keepalive = git_config_int(var, value);
if (!keepalive)
keepalive = -1;
}
return parse_hide_refs_config(var, value, "uploadpack");
}
int main(int argc, char **argv)
{
char *dir;
int i;
int strict = 0;
git_setup_gettext();
packet_trace_identity("upload-pack");
git_extract_argv0_path(argv[0]);
check_replace_refs = 0;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (arg[0] != '-')
break;
if (!strcmp(arg, "--advertise-refs")) {
advertise_refs = 1;
continue;
}
if (!strcmp(arg, "--stateless-rpc")) {
stateless_rpc = 1;
continue;
}
if (!strcmp(arg, "--strict")) {
strict = 1;
continue;
}
if (starts_with(arg, "--timeout=")) {
timeout = atoi(arg+10);
daemon_mode = 1;
continue;
}
if (!strcmp(arg, "--")) {
i++;
break;
}
}
if (i != argc-1)
usage(upload_pack_usage);
setup_path();
dir = argv[i];
if (!enter_repo(dir, strict))
die("'%s' does not appear to be a git repository", dir);
git_config(upload_pack_config, NULL);
upload_pack();
return 0;
}