#!/bin/sh
#
# Copyright (c) 2005 Johannes Schindelin
#
test_description='Testing multi_ack pack fetching
'
. ./test-lib.sh
# Test fetch-pack/upload-pack pair.
# Some convenience functions
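# add <name> [<parent>...]
# Create a commit whose test.txt content (and commit message) is the full
# argument list, record its object name in the variable <name>, and advance
# the branch named after the first letter of <name> (also kept in ${branch}TIP).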
add () {
	name=$1
	text="$@"
	branch=`echo $name | sed -e 's/^\(.\).*$/\1/'`
	parents=""

	shift
	while test $1; do
		parents="$parents -p $1"
		shift
	done

	echo "$text" > test.txt
	git update-index --add test.txt
	tree=$(git write-tree)
	# make sure timestamps are in correct order
	sec=$(($sec+1))
	commit=$(echo "$text" | GIT_AUTHOR_DATE=$sec \
		git commit-tree $tree $parents 2>>log2.txt)
	export $name=$commit
	echo $commit > .git/refs/heads/$branch
	eval ${branch}TIP=$commit
}
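# Count the loose objects in the current repository.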
count_objects () {
	ls .git/objects/??/* 2>>log2.txt | wc -l | tr -d " "
}
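# Assert that the current loose object count matches the expected value.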
test_expect_object_count () {
	message=$1
	count=$2

	output="$(count_objects)"
	test_expect_success \
		"new object count $message" \
		"test $count = $output"
}
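# pull_to_client <label> <heads> <count> [<no_strict_count_check>]
# Run git-fetch-pack inside the client repository for the given heads,
# fsck the result, unpack the fetched pack, and check that it contained
# exactly <count> objects.  If the fourth argument is set, a mismatch
# only prints a warning instead of failing.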
pull_to_client () {
	number=$1
	heads=$2
	count=$3
	no_strict_count_check=$4

	cd client
	test_expect_success "$number pull" \
		"git-fetch-pack -k -v .. $heads"
	case "$heads" in *A*) echo $ATIP > .git/refs/heads/A;; esac
	case "$heads" in *B*) echo $BTIP > .git/refs/heads/B;; esac
	git symbolic-ref HEAD refs/heads/`echo $heads | sed -e 's/^\(.\).*$/\1/'`

	test_expect_success "fsck" 'git fsck --full > fsck.txt 2>&1'

	test_expect_success 'check downloaded results' \
	'mv .git/objects/pack/pack-* . &&
	 p=`ls -1 pack-*.pack` &&
	 git unpack-objects <$p &&
	 git fsck --full'

	test_expect_success "new object count after $number pull" \
	'idx=`echo pack-*.idx` &&
	 pack_count=`git show-index <$idx | wc -l` &&
	 test $pack_count = $count'
	test -z "$pack_count" && pack_count=0

	if [ -z "$no_strict_count_check" ]; then
		test_expect_success "minimal count" "test $count = $pack_count"
	else
		test $count != $pack_count && \
			echo "WARNING: $pack_count objects transmitted, only $count of which were needed"
	fi

	rm -f pack-*
	cd ..
}
# Here begins the actual testing
# A1 - ... - A10 - A11
#    \
#     B1 - B2 - .. - B69
# client pulls A10, B1. Then tracks only B. Then pulls A.
(
	mkdir client &&
	cd client &&
	git init 2>> log2.txt &&
	git config transfer.unpacklimit 0
)
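# transfer.unpacklimit 0 keeps every fetch as a pack, so pull_to_client can
# count the transferred objects from the pack index.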
add A1
prev=1; cur=2; while [ $cur -le 10 ]; do
	add A$cur $(eval echo \$A$prev)
	prev=$cur
	cur=$(($cur+1))
done
add B1 $A1
echo $ATIP > .git/refs/heads/A
echo $BTIP > .git/refs/heads/B
git symbolic-ref HEAD refs/heads/B
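# Each add() above created a blob, a tree and a commit, so the first pull of
# the 11 commits reachable from A and B must bring in 11*3 objects.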
pull_to_client 1st "B A" $((11*3))
add A11 $A10
prev=1; cur=2; while [ $cur -le 65 ]; do
	add B$cur $(eval echo \$B$prev)
	prev=$cur
	cur=$(($cur+1))
done
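# Only B is fetched this time; B2..B65 are the 64 commits the client does not
# have yet, hence 64*3 objects.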
pull_to_client 2nd "B" $((64*3))
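# A11 is the only commit missing on the A branch by now, i.e. 1*3 objects.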
pull_to_client 3rd "A" $((1*3)) # old fails
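# Shallow clone tests: clone over file:// with a limited depth, then extend
# the shallow history with pulls and fetches of increasing depth.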
test_expect_success "clone shallow" "git-clone --depth 2 file://`pwd`/. shallow"
(cd shallow; git count-objects -v) > count.shallow
test_expect_success "clone shallow object count" \
"test \"in-pack: 18\" = \"$(grep in-pack count.shallow)\""
count_output () {
	sed -e '/^in-pack:/d' -e '/^packs:/d' -e '/: 0$/d' "$1"
}
test_expect_success "clone shallow object count (part 2)" '
test -z "$(count_output count.shallow)"
'
test_expect_success "fsck in shallow repo" \
"(cd shallow; git fsck --full)"
#test_done; exit
add B66 $B65
add B67 $B66
test_expect_success "pull in shallow repo" \
"(cd shallow; git pull .. B)"
(cd shallow; git count-objects -v) > count.shallow
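# B66 and B67 were just pulled in: two commits' worth of loose objects,
# i.e. "count: 6".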
test_expect_success "clone shallow object count" \
"test \"count: 6\" = \"$(grep count count.shallow)\""
add B68 $B67
add B69 $B68
test_expect_success "deepening pull in shallow repo" \
"(cd shallow; git pull --depth 4 .. B)"
(cd shallow; git count-objects -v) > count.shallow
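# The deepening pull added B68 and B69, another 6 loose objects (12 in total).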
test_expect_success "clone shallow object count" \
"test \"count: 12\" = \"$(grep count count.shallow)\""
test_expect_success "deepening fetch in shallow repo" \
"(cd shallow; git fetch --depth 4 .. A:A)"
(cd shallow; git count-objects -v) > count.shallow
test_expect_success "clone shallow object count" \
"test \"count: 18\" = \"$(grep count count.shallow)\""
test_expect_success "pull in shallow repo with missing merge base" \
"(cd shallow && ! git pull --depth 4 .. A)"
test_done