git.kernel.org/pub/scm/git/git.git
-rw-r--r--  .github/CONTRIBUTING.md | 3
-rw-r--r--  Documentation/CodingGuidelines | 7
-rw-r--r--  Documentation/Makefile | 1
-rw-r--r--  Documentation/MyFirstContribution.txt | 4
-rw-r--r--  Documentation/RelNotes/2.28.0.txt | 85
-rw-r--r--  Documentation/SubmittingPatches | 15
-rw-r--r--  Documentation/git-diff.txt | 20
-rw-r--r--  Documentation/git-fetch.txt | 8
-rw-r--r--  Documentation/git-http-fetch.txt | 9
-rw-r--r--  Documentation/git-index-pack.txt | 8
-rw-r--r--  Documentation/git-ls-remote.txt | 4
-rw-r--r--  Documentation/git-show-index.txt | 11
-rw-r--r--  Documentation/git-sparse-checkout.txt | 30
-rw-r--r--  Documentation/git-worktree.txt | 4
-rw-r--r--  Documentation/git.txt | 9
-rw-r--r--  Documentation/giteveryday.txt | 10
-rw-r--r--  Documentation/githooks.txt | 29
-rw-r--r--  Documentation/gitremote-helpers.txt | 33
-rw-r--r--  Documentation/gitworkflows.txt | 16
-rw-r--r--  Documentation/howto/maintain-git.txt | 52
-rw-r--r--  Documentation/howto/rebase-from-internal-branch.txt | 32
-rw-r--r--  Documentation/howto/revert-branch-rebase.txt | 32
-rw-r--r--  Documentation/howto/update-hook-example.txt | 6
-rw-r--r--  Documentation/pretty-formats.txt | 4
-rw-r--r--  Documentation/technical/packfile-uri.txt | 78
-rw-r--r--  Documentation/technical/protocol-capabilities.txt | 15
-rw-r--r--  Documentation/technical/protocol-v2.txt | 57
-rw-r--r--  Documentation/technical/reftable.txt | 1083
-rw-r--r--  Documentation/user-manual.txt | 2
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  alloc.c | 18
-rw-r--r--  alloc.h | 2
-rw-r--r--  blame.c | 2
-rw-r--r--  blob.c | 2
-rw-r--r--  bloom.c | 7
-rw-r--r--  branch.c | 2
-rw-r--r--  bugreport.c | 4
-rw-r--r--  builtin/branch.c | 4
-rw-r--r--  builtin/clean.c | 49
-rw-r--r--  builtin/clone.c | 22
-rw-r--r--  builtin/commit-graph.c | 2
-rw-r--r--  builtin/config.c | 2
-rw-r--r--  builtin/diff-files.c | 7
-rw-r--r--  builtin/diff.c | 147
-rw-r--r--  builtin/fetch-pack.c | 17
-rw-r--r--  builtin/fetch.c | 12
-rw-r--r--  builtin/fsck.c | 4
-rw-r--r--  builtin/index-pack.c | 14
-rw-r--r--  builtin/ls-remote.c | 4
-rw-r--r--  builtin/pack-objects.c | 76
-rw-r--r--  builtin/pull.c | 4
-rw-r--r--  builtin/receive-pack.c | 10
-rw-r--r--  builtin/reflog.c | 2
-rw-r--r--  builtin/show-index.c | 29
-rw-r--r--  builtin/sparse-checkout.c | 6
-rw-r--r--  builtin/submodule--helper.c | 44
-rw-r--r--  builtin/worktree.c | 158
-rw-r--r--  bundle.c | 22
-rw-r--r--  bundle.h | 1
-rw-r--r--  cache.h | 1
-rw-r--r--  command-list.txt | 2
-rw-r--r--  commit-graph.c | 130
-rw-r--r--  commit-graph.h | 10
-rw-r--r--  commit-reach.c | 93
-rw-r--r--  commit-reach.h | 4
-rw-r--r--  commit.c | 12
-rw-r--r--  commit.h | 2
-rwxr-xr-x  compat/vcbuild/scripts/clink.pl | 4
-rw-r--r--  connect.c | 138
-rw-r--r--  connect.h | 3
-rw-r--r--  connected.c | 8
-rw-r--r--  contrib/coccinelle/commit.cocci | 18
-rw-r--r--  contrib/completion/git-completion.bash | 252
-rw-r--r--  contrib/completion/git-prompt.sh | 26
-rw-r--r--  diff.c | 5
-rw-r--r--  dir.c | 65
-rw-r--r--  fetch-pack.c | 151
-rw-r--r--  fetch-pack.h | 2
-rw-r--r--  fuzz-commit-graph.c | 2
-rw-r--r--  git-compat-util.h | 6
-rwxr-xr-x  git-cvsexportcommit.perl | 14
-rwxr-xr-x  git-cvsimport.perl | 8
-rwxr-xr-x  git-cvsserver.perl | 37
-rwxr-xr-x  git-submodule.sh | 32
-rwxr-xr-x  git-svn.perl | 25
-rw-r--r--  git.c | 2
-rw-r--r--  http-fetch.c | 126
-rw-r--r--  http-push.c | 16
-rw-r--r--  http-walker.c | 5
-rw-r--r--  http.c | 117
-rw-r--r--  http.h | 24
-rw-r--r--  list-objects-filter-options.c | 3
-rw-r--r--  object-store.h | 1
-rw-r--r--  object.c | 4
-rw-r--r--  object.h | 9
-rw-r--r--  packfile.c | 1
-rw-r--r--  perl/Git/IndexInfo.pm | 6
-rw-r--r--  perl/Git/SVN.pm | 83
-rw-r--r--  perl/Git/SVN/Editor.pm | 8
-rw-r--r--  perl/Git/SVN/Fetcher.pm | 6
-rw-r--r--  perl/Git/SVN/Log.pm | 2
-rw-r--r--  perl/Git/SVN/Ra.pm | 4
-rw-r--r--  pkt-line.c | 1
-rw-r--r--  pkt-line.h | 3
-rw-r--r--  ref-filter.c | 2
-rw-r--r--  refs.c | 78
-rw-r--r--  refs.h | 18
-rw-r--r--  refs/refs-internal.h | 18
-rw-r--r--  remote-curl.c | 46
-rw-r--r--  repository.h | 6
-rw-r--r--  revision.c | 24
-rw-r--r--  revision.h | 7
-rw-r--r--  send-pack.c | 6
-rw-r--r--  serve.c | 27
-rw-r--r--  setup.c | 42
-rw-r--r--  strbuf.c | 5
-rw-r--r--  strbuf.h | 1
-rw-r--r--  t/README | 14
-rw-r--r--  t/helper/test-oid-array.c | 3
-rw-r--r--  t/helper/test-reach.c | 4
-rw-r--r--  t/helper/test-ref-store.c | 2
-rw-r--r--  t/lib-git-svn.sh | 17
-rwxr-xr-x  t/perf/p1400-update-ref.sh | 32
-rwxr-xr-x  t/t0002-gitfile.sh | 2
-rwxr-xr-x  t/t0410-partial-clone.sh | 23
-rwxr-xr-x  t/t1050-large.sh | 6
-rwxr-xr-x  t/t1090-sparse-checkout-scope.sh | 1
-rwxr-xr-x  t/t1091-sparse-checkout-builtin.sh | 22
-rwxr-xr-x  t/t1302-repo-version.sh | 6
-rwxr-xr-x  t/t1400-update-ref.sh | 32
-rwxr-xr-x  t/t1416-ref-transaction-hooks.sh | 109
-rwxr-xr-x  t/t1506-rev-parse-diagnosis.sh | 2
-rwxr-xr-x  t/t2203-add-intent.sh | 53
-rwxr-xr-x  t/t2401-worktree-prune.sh | 24
-rwxr-xr-x  t/t2403-worktree-move.sh | 21
-rwxr-xr-x  t/t2404-worktree-config.sh | 4
-rwxr-xr-x  t/t3200-branch.sh | 69
-rwxr-xr-x  t/t3430-rebase-merges.sh | 2
-rwxr-xr-x  t/t4014-format-patch.sh | 8
-rwxr-xr-x  t/t4068-diff-symmetric.sh | 91
-rwxr-xr-x  t/t5300-pack-object.sh | 9
-rwxr-xr-x  t/t5302-pack-index.sh | 356
-rwxr-xr-x  t/t5318-commit-graph.sh | 29
-rwxr-xr-x  t/t5500-fetch-pack.sh | 6
-rwxr-xr-x  t/t5505-remote.sh | 8
-rwxr-xr-x  t/t5516-fetch-push.sh | 16
-rwxr-xr-x  t/t5540-http-push-webdav.sh | 16
-rwxr-xr-x  t/t5541-http-push-smart.sh | 15
-rwxr-xr-x  t/t5550-http-fetch-dumb.sh | 48
-rwxr-xr-x  t/t5551-http-fetch-smart.sh | 38
-rwxr-xr-x  t/t5562-http-backend-content-length.sh | 5
-rwxr-xr-x  t/t5701-git-serve.sh | 25
-rwxr-xr-x  t/t5702-protocol-v2.sh | 91
-rwxr-xr-x  t/t5703-upload-pack-ref-in-want.sh | 19
-rwxr-xr-x  t/t5704-protocol-violations.sh | 2
-rwxr-xr-x  t/t5801/git-remote-testgit | 6
-rwxr-xr-x  t/t6050-replace.sh | 2
-rwxr-xr-x  t/t6132-pathspec-exclude.sh | 33
-rwxr-xr-x  t/t9020-remote-svn.sh | 4
-rwxr-xr-x  t/t9100-git-svn-basic.sh | 19
-rwxr-xr-x  t/t9101-git-svn-props.sh | 12
-rwxr-xr-x  t/t9104-git-svn-follow-parent.sh | 3
-rwxr-xr-x  t/t9108-git-svn-glob.sh | 4
-rwxr-xr-x  t/t9109-git-svn-multi-glob.sh | 6
-rwxr-xr-x  t/t9168-git-svn-partially-globbed-names.sh | 8
-rwxr-xr-x  t/t9902-completion.sh | 459
-rw-r--r--  t/test-lib.sh | 1
-rw-r--r--  tag.c | 2
-rw-r--r--  transport-helper.c | 29
-rw-r--r--  transport.c | 30
-rw-r--r--  transport.h | 14
-rw-r--r--  tree.c | 2
-rw-r--r--  upload-pack.c | 639
-rw-r--r--  worktree.c | 20
-rw-r--r--  worktree.h | 11
-rw-r--r--  wrapper.c | 8
-rw-r--r--  wt-status.c | 41
-rw-r--r--  wt-status.h | 2
178 files changed, 5330 insertions, 1348 deletions
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index e7b4e2f3c2..c8755e38de 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -16,4 +16,7 @@ If you prefer video, then [this talk](https://www.youtube.com/watch?v=Q7i_qQW__q
might be useful to you as the presenter walks you through the contribution
process by example.
+Or, you can follow the ["My First Contribution"](https://git-scm.com/docs/MyFirstContribution)
+tutorial for another example of the contribution process.
+
Your friendly Git community!
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index 227f46ae40..45465bc0c9 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -489,16 +489,11 @@ For Python scripts:
- We follow PEP-8 (http://www.python.org/dev/peps/pep-0008/).
- - As a minimum, we aim to be compatible with Python 2.6 and 2.7.
+ - As a minimum, we aim to be compatible with Python 2.7.
- Where required libraries do not restrict us to Python 2, we try to
also be compatible with Python 3.1 and later.
- - When you must differentiate between Unicode literals and byte string
- literals, it is OK to use the 'b' prefix. Even though the Python
- documentation for version 2.6 does not mention this prefix, it has
- been supported since version 2.6.0.
-
Error Messages
- Do not end error messages with a full stop.
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 15d9d04f31..ecd0b340b1 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -93,6 +93,7 @@ TECH_DOCS += technical/protocol-capabilities
TECH_DOCS += technical/protocol-common
TECH_DOCS += technical/protocol-v2
TECH_DOCS += technical/racy-git
+TECH_DOCS += technical/reftable
TECH_DOCS += technical/send-pack-pipeline
TECH_DOCS += technical/shallow
TECH_DOCS += technical/signature-format
diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt
index 427274df4d..d85c9b5143 100644
--- a/Documentation/MyFirstContribution.txt
+++ b/Documentation/MyFirstContribution.txt
@@ -1179,8 +1179,8 @@ look at the section below this one for some context.)
[[after-approval]]
=== After Review Approval
-The Git project has four integration branches: `pu`, `next`, `master`, and
-`maint`. Your change will be placed into `pu` fairly early on by the maintainer
+The Git project has four integration branches: `seen`, `next`, `master`, and
+`maint`. Your change will be placed into `seen` fairly early on by the maintainer
while it is still in the review process; from there, when it is ready for wider
testing, it will be merged into `next`. Plenty of early testers use `next` and
may report issues. Eventually, changes in `next` will make it to `master`,
diff --git a/Documentation/RelNotes/2.28.0.txt b/Documentation/RelNotes/2.28.0.txt
index b1a23e94c9..02e150efcb 100644
--- a/Documentation/RelNotes/2.28.0.txt
+++ b/Documentation/RelNotes/2.28.0.txt
@@ -10,6 +10,15 @@ Backward compatibility notes
easily opt into a set of newer features, which use of the v2
transport protocol is now a part of.
+ * It used to be that setting extensions.* configuration variables
+ alone, while leaving core.repositoryFormatVersion=0, made these
+ settings effective, which was a wrong thing to do. In version 0,
+ there was no special meaning in extensions.* configuration
+ variables. This has been corrected. If you need these repository
+ extensions to be effective, the core.repositoryFormatVersion
+ variable needs to be updated to 1 after vetting these extensions.*
+ variables are set correctly.
+
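For illustration, a repository that wants an extension such as `extensions.objectFormat` to take effect is expected to carry both settings in its config; a minimal sketch (the extension shown is just one example):

------------------------------------------------
[core]
	repositoryFormatVersion = 1
[extensions]
	objectFormat = sha256
------------------------------------------------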
UI, Workflows & Features
@@ -19,6 +28,19 @@ UI, Workflows & Features
* The check in "git fsck" to ensure that the tree objects are sorted
still had corner cases it missed unsorted entries.
+ * The interface to redact sensitive information in the trace output
+ has been simplified.
+
+ * The command line completion (in contrib/) learned to complete
+ options that the "git switch" command takes.
+
+ * "git diff" used to take arguments in random and nonsense range
+ notation, e.g. "git diff A..B C", "git diff A..B C...D", etc.,
+ which has been cleaned up.
+
+ * "git diff-files" has been taught to say paths that are marked as
+ intent-to-add are new files, not modified from an empty blob.
+
Performance, Internal Implementation, Development Support etc.
@@ -44,6 +66,31 @@ Performance, Internal Implementation, Development Support etc.
* Support for GIT_CURL_VERBOSE has been rewritten in terms of
GIT_TRACE_CURL.
+ * Preliminary clean-ups around refs API, plus file format
+ specification documentation for the reftable backend.
+
+ * Workaround breakage in MSVC build, where "curl-config --cflags"
+ gives settings appropriate for GCC build.
+
+ * Code clean-up of "git clean" resulted in a fix of recent
+ performance regression.
+
+ * Code clean-up in the codepath that serves "git fetch" continues.
+
+ * "git merge-base --is-ancestor" is taught to take advantage of the
+ commit graph.
+
+ * Rewrite of parts of the scripted "git submodule" Porcelain command
+ continues; this time it is "git submodule set-branch" subcommand's
+ turn.
+
+ * The "fetch/clone" protocol has been updated to allow the server to
+ instruct the clients to grab pre-packaged packfile(s) in addition
+ to the packed object data coming over the wire.
+
+ * A misdesigned strbuf_write_fd() function has been retired.
+
+
Fixes since v2.27
-----------------
@@ -78,6 +125,44 @@ Fixes since v2.27
validating the arguments.
(merge 4d9005ff5d cb/bisect-helper-parser-fix later to maint).
+ * Reduce memory usage during "diff --quiet" in a worktree with too
+ many stat-unmatched paths.
+ (merge d2d7fbe129 jk/diff-memuse-optim-with-stat-unmatch later to maint).
+
+ * The reflog entries for "git clone" and "git fetch" did not
+ anonymize the URL they operated on.
+ (merge 46da295a77 js/reflog-anonymize-for-clone-and-fetch later to maint).
+
+ * The behaviour of "sparse-checkout" in the state "git clone
+ --no-checkout" left was changed accidentally in 2.27, which has
+ been corrected.
+
+ * Use of negative pathspec, while collecting paths including
+ untracked ones in the working tree, was broken.
+
+ * The same worktree directory must be registered only once, but
+ "git worktree move" allowed this invariant to be violated, which
+ has been corrected.
+ (merge 810382ed37 es/worktree-duplicate-paths later to maint).
+
+ * The effect of sparse checkout settings on submodules is documented.
+ (merge e7d7c73249 en/sparse-with-submodule-doc later to maint).
+
+ * Code clean-up around "git branch" with a minor bugfix.
+ (merge dc44639904 dl/branch-cleanup later to maint).
+
+ * A branch name used in a test has been clarified to match what is
+ going on.
+ (merge 08dc26061f pb/t4014-unslave later to maint).
+
+ * An in-code comment in "git diff" has been updated.
+ (merge c592fd4c83 dl/diff-usage-comment-update later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge 2c31a7aa44 jx/pkt-line-doc-count-fix later to maint).
(merge d63ae31962 cb/t5608-cleanup later to maint).
+ (merge 788db145c7 dl/t-readme-spell-git-correctly later to maint).
+ (merge 45a87a83bb dl/python-2.7-is-the-floor-version later to maint).
+ (merge b75a219904 es/advertise-contribution-doc later to maint).
+ (merge 0c9a4f638a rs/pull-leakfix later to maint).
+ (merge d546fe2874 rs/commit-reach-leakfix later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 4515cab519..291b61e262 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -3,8 +3,9 @@ Submitting Patches
== Guidelines
-Here are some guidelines for people who want to contribute their code
-to this software.
+Here are some guidelines for people who want to contribute their code to this
+software. There is also a link:MyFirstContribution.html[step-by-step tutorial]
+available which covers many of these same guidelines.
[[base-branch]]
=== Decide what to base your work on.
@@ -18,7 +19,7 @@ change is relevant to.
base your work on the tip of the topic.
* A new feature should be based on `master` in general. If the new
- feature depends on a topic that is in `pu`, but not in `master`,
+ feature depends on a topic that is in `seen`, but not in `master`,
base your work on the tip of that topic.
* Corrections and enhancements to a topic not yet in `master` should
@@ -27,7 +28,7 @@ change is relevant to.
into the series.
* In the exceptional case that a new feature depends on several topics
- not in `master`, start working on `next` or `pu` privately and send
+ not in `master`, start working on `next` or `seen` privately and send
out patches for discussion. Before the final merge, you may have to
wait until some of the dependent topics graduate to `master`, and
rebase your work.
@@ -37,7 +38,7 @@ change is relevant to.
these parts should be based on their trees.
To find the tip of a topic branch, run `git log --first-parent
-master..pu` and look for the merge commit. The second parent of this
+master..seen` and look for the merge commit. The second parent of this
commit is the tip of the topic branch.
[[separate-commits]]
@@ -423,7 +424,7 @@ help you find out who they are.
and cooked further and eventually graduates to `master`.
In any time between the (2)-(3) cycle, the maintainer may pick it up
-from the list and queue it to `pu`, in order to make it easier for
+from the list and queue it to `seen`, in order to make it easier for
people play with it without having to pick up and apply the patch to
their trees themselves.
@@ -434,7 +435,7 @@ their trees themselves.
master. `git pull --rebase` will automatically skip already-applied
patches, and will let you know. This works only if you rebase on top
of the branch in which your patch has been merged (i.e. it will not
- tell you if your patch is merged in pu if you rebase on top of
+ tell you if your patch is merged in `seen` if you rebase on top of
master).
* Read the Git mailing list, the maintainer regularly posts messages
diff --git a/Documentation/git-diff.txt b/Documentation/git-diff.txt
index 37781cf175..1018110ddc 100644
--- a/Documentation/git-diff.txt
+++ b/Documentation/git-diff.txt
@@ -11,15 +11,17 @@ SYNOPSIS
[verse]
'git diff' [<options>] [<commit>] [--] [<path>...]
'git diff' [<options>] --cached [<commit>] [--] [<path>...]
-'git diff' [<options>] <commit> <commit> [--] [<path>...]
+'git diff' [<options>] <commit> [<commit>...] <commit> [--] [<path>...]
+'git diff' [<options>] <commit>...<commit> [--] [<path>...]
'git diff' [<options>] <blob> <blob>
'git diff' [<options>] --no-index [--] <path> <path>
DESCRIPTION
-----------
Show changes between the working tree and the index or a tree, changes
-between the index and a tree, changes between two trees, changes between
-two blob objects, or changes between two files on disk.
+between the index and a tree, changes between two trees, changes resulting
+from a merge, changes between two blob objects, or changes between two
+files on disk.
'git diff' [<options>] [--] [<path>...]::
@@ -67,6 +69,15 @@ two blob objects, or changes between two files on disk.
one side is omitted, it will have the same effect as
using HEAD instead.
+'git diff' [<options>] <commit> [<commit>...] <commit> [--] [<path>...]::
+
+ This form is to view the results of a merge commit. The first
+ listed <commit> must be the merge itself; the remaining two or
+ more commits should be its parents. A convenient way to produce
+ the desired set of revisions is to use the {caret}@ suffix.
+ For instance, if `master` names a merge commit, `git diff master
+ master^@` gives the same combined diff as `git show master`.
+
'git diff' [<options>] <commit>\...<commit> [--] [<path>...]::
This form is to view the changes on the branch containing
@@ -196,7 +207,8 @@ linkgit:git-difftool[1],
linkgit:git-log[1],
linkgit:gitdiffcore[7],
linkgit:git-format-patch[1],
-linkgit:git-apply[1]
+linkgit:git-apply[1],
+linkgit:git-show[1]
GIT
---
diff --git a/Documentation/git-fetch.txt b/Documentation/git-fetch.txt
index 5b1909fdf4..45b6d8e633 100644
--- a/Documentation/git-fetch.txt
+++ b/Documentation/git-fetch.txt
@@ -255,14 +255,14 @@ refspec.
* Using refspecs explicitly:
+
------------------------------------------------
-$ git fetch origin +pu:pu maint:tmp
+$ git fetch origin +seen:seen maint:tmp
------------------------------------------------
+
-This updates (or creates, as necessary) branches `pu` and `tmp` in
+This updates (or creates, as necessary) branches `seen` and `tmp` in
the local repository by fetching from the branches (respectively)
-`pu` and `maint` from the remote repository.
+`seen` and `maint` from the remote repository.
+
-The `pu` branch will be updated even if it does not fast-forward,
+The `seen` branch will be updated even if it does not fast-forward,
because it is prefixed with a plus sign; `tmp` will not be.
* Peek at a remote's branch, without configuring the remote in your local
diff --git a/Documentation/git-http-fetch.txt b/Documentation/git-http-fetch.txt
index 666b042679..4deb4893f5 100644
--- a/Documentation/git-http-fetch.txt
+++ b/Documentation/git-http-fetch.txt
@@ -9,7 +9,7 @@ git-http-fetch - Download from a remote Git repository via HTTP
SYNOPSIS
--------
[verse]
-'git http-fetch' [-c] [-t] [-a] [-d] [-v] [-w filename] [--recover] [--stdin] <commit> <url>
+'git http-fetch' [-c] [-t] [-a] [-d] [-v] [-w filename] [--recover] [--stdin | --packfile=<hash> | <commit>] <url>
DESCRIPTION
-----------
@@ -40,6 +40,13 @@ commit-id::
<commit-id>['\t'<filename-as-in--w>]
+--packfile=<hash>::
+ Instead of a commit id on the command line (which is not expected in
+ this case), 'git http-fetch' fetches the packfile directly at the given
+ URL and uses index-pack to generate corresponding .idx and .keep files.
+ The hash is used to determine the name of the temporary file and is
+ arbitrary. The output of index-pack is printed to stdout.
+
--recover::
Verify that everything reachable from target is fetched. Used after
an earlier fetch is interrupted.
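As a rough illustration of the `--packfile` form described above (the hash and URL here are hypothetical; the downloaded pack is indexed via index-pack and its output printed to stdout):

------------------------------------------------
$ git http-fetch --packfile=0123456789abcdef0123456789abcdef01234567 \
	https://cdn.example.com/packs/blobs.pack
------------------------------------------------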
diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt
index d5b7560bfe..9316d9a80b 100644
--- a/Documentation/git-index-pack.txt
+++ b/Documentation/git-index-pack.txt
@@ -93,6 +93,14 @@ OPTIONS
--max-input-size=<size>::
Die, if the pack is larger than <size>.
+--object-format=<hash-algorithm>::
+ Specify the given object format (hash algorithm) for the pack. The valid
+ values are 'sha1' and (if enabled) 'sha256'. The default is the algorithm for
+ the current repository (set by `extensions.objectFormat`), or 'sha1' if no
+ value is set or outside a repository.
++
+This option cannot be used with --stdin.
+
NOTES
-----
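A hedged usage sketch of the new `--object-format` option (pack name hypothetical; note the option cannot be combined with `--stdin`):

------------------------------------------------
$ git index-pack --object-format=sha256 pack-1234abcd.pack
------------------------------------------------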
diff --git a/Documentation/git-ls-remote.txt b/Documentation/git-ls-remote.txt
index 0a5c8b7d49..492e573856 100644
--- a/Documentation/git-ls-remote.txt
+++ b/Documentation/git-ls-remote.txt
@@ -101,9 +101,9 @@ f25a265a342aed6041ab0cc484224d9ca54b6f41 refs/tags/v0.99.1
7ceca275d047c90c0c7d5afb13ab97efdf51bd6e refs/tags/v0.99.3
c5db5456ae3b0873fc659c19fafdde22313cc441 refs/tags/v0.99.2
0918385dbd9656cab0d1d81ba7453d49bbc16250 refs/tags/junio-gpg-pub
-$ git ls-remote http://www.kernel.org/pub/scm/git/git.git master pu rc
+$ git ls-remote http://www.kernel.org/pub/scm/git/git.git master seen rc
5fe978a5381f1fbad26a80e682ddd2a401966740 refs/heads/master
-c781a84b5204fb294c9ccc79f8b3baceeb32c061 refs/heads/pu
+c781a84b5204fb294c9ccc79f8b3baceeb32c061 refs/heads/seen
$ git remote add korg http://www.kernel.org/pub/scm/git/git.git
$ git ls-remote --tags korg v\*
d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v0.99
diff --git a/Documentation/git-show-index.txt b/Documentation/git-show-index.txt
index 424e4ba84c..39b1d8eaa1 100644
--- a/Documentation/git-show-index.txt
+++ b/Documentation/git-show-index.txt
@@ -9,7 +9,7 @@ git-show-index - Show packed archive index
SYNOPSIS
--------
[verse]
-'git show-index'
+'git show-index' [--object-format=<hash-algorithm>]
DESCRIPTION
@@ -36,6 +36,15 @@ Note that you can get more information on a packfile by calling
linkgit:git-verify-pack[1]. However, as this command considers only the
index file itself, it's both faster and more flexible.
+OPTIONS
+-------
+
+--object-format=<hash-algorithm>::
+ Specify the given object format (hash algorithm) for the index file. The
+ valid values are 'sha1' and (if enabled) 'sha256'. The default is the
+ algorithm for the current repository (set by `extensions.objectFormat`), or
+ 'sha1' if no value is set or outside a repository.
+
GIT
---
Part of the linkgit:git[1] suite
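For example, reading a SHA-256 pack index from standard input might look like this (index path hypothetical):

------------------------------------------------
$ git show-index --object-format=sha256 \
	<.git/objects/pack/pack-1234abcd.idx
------------------------------------------------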
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt
index 7c8943af7a..a0eeaeb02e 100644
--- a/Documentation/git-sparse-checkout.txt
+++ b/Documentation/git-sparse-checkout.txt
@@ -200,10 +200,32 @@ directory.
SUBMODULES
----------
-If your repository contains one or more submodules, then those submodules will
-appear based on which you initialized with the `git submodule` command. If
-your sparse-checkout patterns exclude an initialized submodule, then that
-submodule will still appear in your working directory.
+If your repository contains one or more submodules, then submodules
+are populated based on interactions with the `git submodule` command.
+Specifically, `git submodule init -- <path>` will ensure the submodule
+at `<path>` is present, while `git submodule deinit [-f] -- <path>`
+will remove the files for the submodule at `<path>` (including any
+untracked files, uncommitted changes, and unpushed history). Similar
+to how sparse-checkout removes files from the working tree but still
+leaves entries in the index, deinitialized submodules are removed from
+the working directory but still have an entry in the index.
+
+Since submodules may have unpushed changes or untracked files,
+removing them could result in data loss. Thus, changing sparse
+inclusion/exclusion rules will not cause an already checked out
+submodule to be removed from the working copy. Said another way, just
+as `checkout` will not cause submodules to be automatically removed or
+initialized even when switching between branches that remove or add
+submodules, using `sparse-checkout` to reduce or expand the scope of
+"interesting" files will not cause submodules to be automatically
+deinitialized or initialized either.
+
+Further, the above facts mean that there are multiple reasons that
+"tracked" files might not be present in the working copy: sparsity
+pattern application from sparse-checkout, and submodule initialization
+state. Thus, commands like `git grep` that work on tracked files in
+the working copy may return results that are limited by either or both
+of these restrictions.
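To make the interaction concrete, here is a small sketch (paths hypothetical): sparsity patterns and submodule population are controlled independently, each by its own command.

------------------------------------------------
$ git sparse-checkout init --cone
$ git sparse-checkout set src
$ git submodule init -- third_party/lib      ;# opt this submodule in
$ git submodule update -- third_party/lib    ;# populate it
$ git submodule deinit -f -- third_party/lib ;# remove its files again
------------------------------------------------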
SEE ALSO
diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt
index 85d92c9761..4796c3c05e 100644
--- a/Documentation/git-worktree.txt
+++ b/Documentation/git-worktree.txt
@@ -126,7 +126,9 @@ OPTIONS
locked working tree path, specify `--force` twice.
+
`move` refuses to move a locked working tree unless `--force` is specified
-twice.
+twice. If the destination is already assigned to some other working tree but is
+missing (for instance, if `<new-path>` was deleted manually), then `--force`
+allows the move to proceed; use --force twice if the destination is locked.
+
`remove` refuses to remove an unclean working tree unless `--force` is used.
To remove a locked working tree, specify `--force` twice.
diff --git a/Documentation/git.txt b/Documentation/git.txt
index 40bd32f590..3e50065198 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -775,11 +775,10 @@ for full details.
See `GIT_TRACE2` for available trace output options and
link:technical/api-trace2.html[Trace2 documentation] for full details.
-`GIT_REDACT_COOKIES`::
- This can be set to a comma-separated list of strings. When a curl trace
- is enabled (see `GIT_TRACE_CURL` above), whenever a "Cookies:" header
- sent by the client is dumped, values of cookies whose key is in that
- list (case-sensitive) are redacted.
+`GIT_TRACE_REDACT`::
+ By default, when tracing is activated, Git redacts the values of
+ cookies, the "Authorization:" header, and the "Proxy-Authorization:"
+ header. Set this variable to `0` to prevent this redaction.
`GIT_LITERAL_PATHSPECS`::
Setting this variable to `1` will cause Git to treat all
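For example, to see the un-redacted headers while debugging a transfer with curl tracing enabled (URL hypothetical):

------------------------------------------------
$ GIT_TRACE_CURL=1 GIT_TRACE_REDACT=0 \
	git ls-remote https://example.com/repo.git
------------------------------------------------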
diff --git a/Documentation/giteveryday.txt b/Documentation/giteveryday.txt
index 1bd919f92b..faba2ef088 100644
--- a/Documentation/giteveryday.txt
+++ b/Documentation/giteveryday.txt
@@ -278,13 +278,13 @@ $ git am -3 -i -s ./+to-apply <4>
$ compile/test
$ git switch -c hold/linus && git am -3 -i -s ./+hold-linus <5>
$ git switch topic/one && git rebase master <6>
-$ git switch -C pu next <7>
+$ git switch -C seen next <7>
$ git merge topic/one topic/two && git merge hold/linus <8>
$ git switch maint
$ git cherry-pick master~4 <9>
$ compile/test
$ git tag -s -m "GIT 0.99.9x" v0.99.9x <10>
-$ git fetch ko && for branch in master maint next pu <11>
+$ git fetch ko && for branch in master maint next seen <11>
do
git show-branch ko/$branch $branch <12>
done
@@ -294,14 +294,14 @@ $ git push --follow-tags ko <13>
<1> see what you were in the middle of doing, if anything.
<2> see which branches haven't been merged into `master` yet.
Likewise for any other integration branches e.g. `maint`, `next`
-and `pu` (potential updates).
+and `seen`.
<3> read mails, save ones that are applicable, and save others
that are not quite ready (other mail readers are available).
<4> apply them, interactively, with your sign-offs.
<5> create topic branch as needed and apply, again with sign-offs.
<6> rebase internal topic branch that has not been merged to the
master or exposed as a part of a stable branch.
-<7> restart `pu` every time from the next.
+<7> restart `seen` every time from the next.
<8> and bundle topic branches still cooking.
<9> backport a critical fix.
<10> create a signed tag.
@@ -323,7 +323,7 @@ repository at kernel.org, and looks like this:
fetch = refs/heads/*:refs/remotes/ko/*
push = refs/heads/master
push = refs/heads/next
- push = +refs/heads/pu
+ push = +refs/heads/seen
push = refs/heads/maint
------------
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index 81f2a87e88..642471109f 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -404,6 +404,35 @@ Both standard output and standard error output are forwarded to
`git send-pack` on the other end, so you can simply `echo` messages
for the user.
+ref-transaction
+~~~~~~~~~~~~~~~
+
+This hook is invoked by any Git command that performs reference
+updates. It executes whenever a reference transaction is prepared,
+committed or aborted and may thus get called multiple times.
+
+The hook takes exactly one argument, which is the current state the
+given reference transaction is in:
+
+ - "prepared": All reference updates have been queued to the
+ transaction and references were locked on disk.
+
+ - "committed": The reference transaction was committed and all
+ references now have their respective new value.
+
+ - "aborted": The reference transaction was aborted, no changes
+ were performed and the locks have been released.
+
+For each reference update that was added to the transaction, the hook
+receives on standard input a line of the format:
+
+ <old-value> SP <new-value> SP <ref-name> LF
+
+The exit status of the hook is ignored for any state except for the
+"prepared" state. In the "prepared" state, a non-zero exit status will
+cause the transaction to be aborted. The hook will not be called with
+"aborted" state in that case.
+
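A minimal sketch of such a hook that merely logs every queued update together with the transaction state (the log location and behaviour are illustrative only):

------------------------------------------------
#!/bin/sh
# .git/hooks/ref-transaction
state="$1"
log="$(git rev-parse --git-dir)/ref-transaction.log"

# Each queued update arrives on stdin as:
#   <old-value> SP <new-value> SP <ref-name> LF
while read -r old new ref
do
	printf '%s %s %s %s\n' "$state" "$old" "$new" "$ref" >>"$log"
done

# A non-zero exit matters only in the "prepared" state, where it
# aborts the transaction; always succeed here.
exit 0
------------------------------------------------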
push-to-checkout
~~~~~~~~~~~~~~~~
diff --git a/Documentation/gitremote-helpers.txt b/Documentation/gitremote-helpers.txt
index 93baeeb029..6f1e269ae4 100644
--- a/Documentation/gitremote-helpers.txt
+++ b/Documentation/gitremote-helpers.txt
@@ -238,6 +238,9 @@ the remote repository.
`--signed-tags=verbatim` to linkgit:git-fast-export[1]. In the
absence of this capability, Git will use `--signed-tags=warn-strip`.
+'object-format'::
+ This indicates that the helper is able to interact with the remote
+ side using an explicit hash algorithm extension.
COMMANDS
@@ -257,12 +260,14 @@ Support for this command is mandatory.
'list'::
Lists the refs, one per line, in the format "<value> <name>
[<attr> ...]". The value may be a hex sha1 hash, "@<dest>" for
- a symref, or "?" to indicate that the helper could not get the
- value of the ref. A space-separated list of attributes follows
- the name; unrecognized attributes are ignored. The list ends
- with a blank line.
+ a symref, ":<keyword> <value>" for a key-value pair, or
+ "?" to indicate that the helper could not get the value of the
+ ref. A space-separated list of attributes follows the name;
+ unrecognized attributes are ignored. The list ends with a
+ blank line.
+
See REF LIST ATTRIBUTES for a list of currently defined attributes.
+See REF LIST KEYWORDS for a list of currently defined keywords.
+
Supported if the helper has the "fetch" or "import" capability.
@@ -432,6 +437,18 @@ attributes are defined.
This ref is unchanged since the last import or fetch, although
the helper cannot necessarily determine what value that produced.
+REF LIST KEYWORDS
+-----------------
+
+The 'list' command may produce a list of key-value pairs.
+The following keys are defined.
+
+'object-format'::
+ The refs are using the given hash algorithm. This keyword is only
+ used if the server and client both support the object-format
+ extension.
+
+
OPTIONS
-------
@@ -516,6 +533,14 @@ set by Git if the remote helper has the 'option' capability.
transaction. If successful, all refs will be updated, or none will. If the
remote side does not support this capability, the push will fail.
+'option object-format' {'true'|algorithm}::
+ If 'true', indicate that the caller wants hash algorithm information
+ to be passed back from the remote. This mode is used when fetching
+ refs.
++
+If set to an algorithm, indicate that the caller wants to interact with
+the remote side using that algorithm.
+
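To illustrate, when fetching, Git could enable the capability and a SHA-256-aware helper might then report the algorithm through the new 'list' keyword; a hedged sketch of the exchange (object ID shortened and hypothetical):

------------------------------------------------
# git -> helper
option object-format true
# helper -> git
ok
# git -> helper
list
# helper -> git
:object-format sha256
8f7d3b2a90c1... refs/heads/master
@refs/heads/master HEAD

------------------------------------------------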
SEE ALSO
--------
linkgit:git-remote[1]
diff --git a/Documentation/gitworkflows.txt b/Documentation/gitworkflows.txt
index abc0dc6bc7..2db7ba7842 100644
--- a/Documentation/gitworkflows.txt
+++ b/Documentation/gitworkflows.txt
@@ -85,15 +85,15 @@ As a given feature goes from experimental to stable, it also
There is a fourth official branch that is used slightly differently:
-* 'pu' (proposed updates) is an integration branch for things that are
- not quite ready for inclusion yet (see "Integration Branches"
- below).
+* 'seen' (patches seen by the maintainer) is an integration branch for
+ things that are not quite ready for inclusion yet (see "Integration
+ Branches" below).
Each of the four branches is usually a direct descendant of the one
above it.
Conceptually, the feature enters at an unstable branch (usually 'next'
-or 'pu'), and "graduates" to 'master' for the next release once it is
+or 'seen'), and "graduates" to 'master' for the next release once it is
considered stable enough.
@@ -207,7 +207,7 @@ If you make it (very) clear that this branch is going to be deleted
right after the testing, you can even publish this branch, for example
to give the testers a chance to work with it, or other developers a
chance to see if their in-progress work will be compatible. `git.git`
-has such an official throw-away integration branch called 'pu'.
+has such an official throw-away integration branch called 'seen'.
Branch management for a release
@@ -291,7 +291,7 @@ This will not happen if the content of the branches was verified as
described in the previous section.
-Branch management for next and pu after a feature release
+Branch management for next and seen after a feature release
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After a feature release, the integration branch 'next' may optionally be
@@ -319,8 +319,8 @@ so.
If you do this, then you should make a public announcement indicating
that 'next' was rewound and rebuilt.
-The same rewind and rebuild process may be followed for 'pu'. A public
-announcement is not necessary since 'pu' is a throw-away branch, as
+The same rewind and rebuild process may be followed for 'seen'. A public
+announcement is not necessary since 'seen' is a throw-away branch, as
described above.
diff --git a/Documentation/howto/maintain-git.txt b/Documentation/howto/maintain-git.txt
index 73be8b49f8..a67130debb 100644
--- a/Documentation/howto/maintain-git.txt
+++ b/Documentation/howto/maintain-git.txt
@@ -66,7 +66,7 @@ this mailing list after each feature release is made.
demonstrated to be regression free. New changes are tested
in 'next' before merged to 'master'.
- - 'pu' branch is used to publish other proposed changes that do
+ - 'seen' branch is used to publish other proposed changes that do
not yet pass the criteria set for 'next'.
- The tips of 'master' and 'maint' branches will not be rewound to
@@ -76,7 +76,7 @@ this mailing list after each feature release is made.
of the cycle.
- Usually 'master' contains all of 'maint' and 'next' contains all
- of 'master'. 'pu' contains all the topics merged to 'next', but
+ of 'master'. 'seen' contains all the topics merged to 'next', but
is rebuilt directly on 'master'.
- The tip of 'master' is meant to be more stable than any
@@ -229,12 +229,12 @@ by doing the following:
series?)
- Prepare 'jch' branch, which is used to represent somewhere
- between 'master' and 'pu' and often is slightly ahead of 'next'.
+ between 'master' and 'seen' and often is slightly ahead of 'next'.
- $ Meta/Reintegrate master..pu >Meta/redo-jch.sh
+ $ Meta/Reintegrate master..seen >Meta/redo-jch.sh
The result is a script that lists topics to be merged in order to
- rebuild 'pu' as the input to Meta/Reintegrate script. Remove
+ rebuild 'seen' as the input to Meta/Reintegrate script. Remove
later topics that should not be in 'jch' yet. Add a line that
consists of '### match next' before the name of the first topic
in the output that should be in 'jch' but not in 'next' yet.
@@ -291,29 +291,29 @@ by doing the following:
merged to 'master'. This may lose '### match next' marker;
add it again to the appropriate place when it happens.
- - Rebuild 'pu'.
+ - Rebuild 'seen'.
- $ Meta/Reintegrate master..pu >Meta/redo-pu.sh
+ $ Meta/Reintegrate master..seen >Meta/redo-seen.sh
- Edit the result by adding new topics that are not still in 'pu'
+ Edit the result by adding new topics that are not still in 'seen'
in the script. Then
- $ git checkout -B pu jch
- $ sh Meta/redo-pu.sh
+ $ git checkout -B seen jch
+ $ sh Meta/redo-seen.sh
- When all is well, clean up the redo-pu.sh script with
+ When all is well, clean up the redo-seen.sh script with
- $ sh Meta/redo-pu.sh -u
+ $ sh Meta/redo-seen.sh -u
Double check by running
- $ git branch --no-merged pu
+ $ git branch --no-merged seen
to see there is no unexpected leftover topics.
At this point, build-test the result for semantic conflicts, and
if there are, prepare an appropriate merge-fix first (see
- appendix), and rebuild the 'pu' branch from scratch, starting at
+ appendix), and rebuild the 'seen' branch from scratch, starting at
the tip of 'jch'.
- Update "What's cooking" message to review the updates to
@@ -323,14 +323,14 @@ by doing the following:
$ Meta/cook
- This script inspects the history between master..pu, finds tips
+ This script inspects the history between master..seen, finds tips
of topic branches, compares what it found with the current
contents in Meta/whats-cooking.txt, and updates that file.
- Topics not listed in the file but are found in master..pu are
+ Topics not listed in the file but are found in master..seen are
added to the "New topics" section, topics listed in the file that
- are no longer found in master..pu are moved to the "Graduated to
+ are no longer found in master..seen are moved to the "Graduated to
master" section, and topics whose commits changed their states
- (e.g. used to be only in 'pu', now merged to 'next') are updated
+ (e.g. used to be only in 'seen', now merged to 'next') are updated
with change markers "<<" and ">>".
Look for lines enclosed in "<<" and ">>"; they hold contents from
@@ -360,7 +360,7 @@ Observations
Some observations to be made.
* Each topic is tested individually, and also together with other
- topics cooking first in 'pu', then in 'jch' and then in 'next'.
+ topics cooking first in 'seen', then in 'jch' and then in 'next'.
Until it matures, no part of it is merged to 'master'.
* A topic already in 'next' can get fixes while still in
@@ -411,7 +411,7 @@ new use of the variable under its old name. When these two topics
are merged together, the reference to the variable newly added by
the latter topic will still use the old name in the result.
-The Meta/Reintegrate script that is used by redo-jch and redo-pu
+The Meta/Reintegrate script that is used by redo-jch and redo-seen
scripts implements a crude but usable way to work this issue around.
When the script merges branch $X, it checks if "refs/merge-fix/$X"
exists, and if so, the effect of it is squashed into the result of
@@ -431,14 +431,14 @@ commit that can be squashed into a result of mechanical merge to
correct semantic conflicts.
After finding that the result of merging branch "ai/topic" to an
-integration branch had such a semantic conflict, say pu~4, check the
+integration branch had such a semantic conflict, say seen~4, check the
problematic merge out on a detached HEAD, edit the working tree to
fix the semantic conflict, and make a separate commit to record the
fix-up:
- $ git checkout pu~4
+ $ git checkout seen~4
$ git show -s --pretty=%s ;# double check
- Merge branch 'ai/topic' to pu
+ Merge branch 'ai/topic' to seen
$ edit
$ git commit -m 'merge-fix/ai/topic' -a
@@ -450,9 +450,9 @@ result:
Then double check the result by asking Meta/Reintegrate to redo the
merge:
- $ git checkout pu~5 ;# the parent of the problem merge
+ $ git checkout seen~5 ;# the parent of the problem merge
$ echo ai/topic | Meta/Reintegrate
- $ git diff pu~4
+ $ git diff seen~4
This time, because you prepared refs/merge-fix/ai/topic, the
resulting merge should have been tweaked to include the fix for the
@@ -464,7 +464,7 @@ branch needs this merge-fix is because another branch merged earlier
to the integration branch changed the underlying assumption ai/topic
branch made (e.g. ai/topic branch added a site to refer to a
variable, while the other branch renamed that variable and adjusted
-existing use sites), and if you changed redo-jch (or redo-pu) script
+existing use sites), and if you changed redo-jch (or redo-seen) script
to merge ai/topic branch before the other branch, then the above
merge-fix should not be applied while merging ai/topic, but should
instead be applied while merging the other branch. You would need
diff --git a/Documentation/howto/rebase-from-internal-branch.txt b/Documentation/howto/rebase-from-internal-branch.txt
index 02cb5f758d..f2e10a7ec8 100644
--- a/Documentation/howto/rebase-from-internal-branch.txt
+++ b/Documentation/howto/rebase-from-internal-branch.txt
@@ -4,7 +4,7 @@ Cc: Petr Baudis <pasky@suse.cz>, Linus Torvalds <torvalds@osdl.org>
Subject: Re: sending changesets from the middle of a git tree
Date: Sun, 14 Aug 2005 18:37:39 -0700
Abstract: In this article, JC talks about how he rebases the
- public "pu" branch using the core Git tools when he updates
+ public "seen" branch using the core Git tools when he updates
the "master" branch, and how "rebase" works. Also discussed
is how this applies to individual developers who sends patches
upstream.
@@ -20,8 +20,8 @@ Petr Baudis <pasky@suse.cz> writes:
> where Junio C Hamano <junkio@cox.net> told me that...
>> Linus Torvalds <torvalds@osdl.org> writes:
>>
->> > Junio, maybe you want to talk about how you move patches from your "pu"
->> > branch to the real branches.
+>> > Junio, maybe you want to talk about how you move patches from your
+>> > "seen" branch to the real branches.
>>
> Actually, wouldn't this be also precisely for what StGIT is intended to?
--------------------------------------
@@ -33,12 +33,12 @@ the kind of task StGIT is designed to do.
I just have done a simpler one, this time using only the core
Git tools.
-I had a handful of commits that were ahead of master in pu, and I
+I had a handful of commits that were ahead of master in 'seen', and I
wanted to add some documentation bypassing my usual habit of
-placing new things in pu first. At the beginning, the commit
+placing new things in 'seen' first. At the beginning, the commit
ancestry graph looked like this:
- *"pu" head
+ *"seen" head
master --> #1 --> #2 --> #3
So I started from master, made a bunch of edits, and committed:
@@ -50,7 +50,7 @@ So I started from master, made a bunch of edits, and committed:
After the commit, the ancestry graph would look like this:
- *"pu" head
+ *"seen" head
master^ --> #1 --> #2 --> #3
\
\---> master
@@ -58,31 +58,31 @@ After the commit, the ancestry graph would look like this:
The old master is now master^ (the first parent of the master).
The new master commit holds my documentation updates.
-Now I have to deal with "pu" branch.
+Now I have to deal with "seen" branch.
This is the kind of situation I used to have all the time when
Linus was the maintainer and I was a contributor, when you look
-at "master" branch being the "maintainer" branch, and "pu"
+at "master" branch being the "maintainer" branch, and "seen"
branch being the "contributor" branch. Your work started at the
tip of the "maintainer" branch some time ago, you made a lot of
progress in the meantime, and now the maintainer branch has some
other commits you do not have yet. And "git rebase" was written
with the explicit purpose of helping to maintain branches like
-"pu". You _could_ merge master to pu and keep going, but if you
+"seen". You _could_ merge master to 'seen' and keep going, but if you
eventually want to cherrypick and merge some but not necessarily
all changes back to the master branch, it often makes later
operations for _you_ easier if you rebase (i.e. carry forward
-your changes) "pu" rather than merge. So I ran "git rebase":
+your changes) "seen" rather than merge. So I ran "git rebase":
- $ git checkout pu
- $ git rebase master pu
+ $ git checkout seen
+ $ git rebase master seen
What this does is to pick all the commits since the current
-branch (note that I now am on "pu" branch) forked from the
+branch (note that I now am on "seen" branch) forked from the
master branch, and forward port these changes.
master^ --> #1 --> #2 --> #3
- \ *"pu" head
+ \ *"seen" head
\---> master --> #1' --> #2' --> #3'
The diff between master^ and #1 is applied to master and
@@ -92,7 +92,7 @@ commits are made similarly out of #2 and #3 commits.
Old #3 is not recorded in any of the .git/refs/heads/ file
anymore, so after doing this you will have dangling commit if
-you ran fsck-cache, which is normal. After testing "pu", you
+you ran fsck-cache, which is normal. After testing "seen", you
can run "git prune" to get rid of those original three commits.
While I am talking about "git rebase", I should talk about how
diff --git a/Documentation/howto/revert-branch-rebase.txt b/Documentation/howto/revert-branch-rebase.txt
index 149508e13b..a3e5595a56 100644
--- a/Documentation/howto/revert-branch-rebase.txt
+++ b/Documentation/howto/revert-branch-rebase.txt
@@ -15,7 +15,7 @@ One of the changes I pulled into the 'master' branch turns out to
break building Git with GCC 2.95. While they were well-intentioned
portability fixes, keeping things working with gcc-2.95 was also
important. Here is what I did to revert the change in the 'master'
-branch and to adjust the 'pu' branch, using core Git tools and
+branch and to adjust the 'seen' branch, using core Git tools and
barebone Porcelain.
First, prepare a throw-away branch in case I screw things up.
@@ -104,11 +104,11 @@ $ git diff master..revert-c99
says nothing.
-Then we rebase the 'pu' branch as usual.
+Then we rebase the 'seen' branch as usual.
------------------------------------------------
-$ git checkout pu
-$ git tag pu-anchor pu
+$ git checkout seen
+$ git tag seen-anchor seen
$ git rebase master
* Applying: Redo "revert" using three-way merge machinery.
First trying simple merge strategy to cherry-pick.
@@ -127,11 +127,11 @@ First trying simple merge strategy to cherry-pick.
First trying simple merge strategy to cherry-pick.
------------------------------------------------
-The temporary tag 'pu-anchor' is me just being careful, in case 'git
+The temporary tag 'seen-anchor' is me just being careful, in case 'git
rebase' screws up. After this, I can do these for sanity check:
------------------------------------------------
-$ git diff pu-anchor..pu ;# make sure we got the master fix.
+$ git diff seen-anchor..seen ;# make sure we got the master fix.
$ make CC=gcc-2.95 clean test ;# make sure it fixed the breakage.
$ make clean test ;# make sure it did not cause other breakage.
------------------------------------------------
@@ -140,7 +140,7 @@ Everything is in the good order. I do not need the temporary branch
or tag anymore, so remove them:
------------------------------------------------
-$ rm -f .git/refs/tags/pu-anchor
+$ rm -f .git/refs/tags/seen-anchor
$ git branch -d revert-c99
------------------------------------------------
@@ -168,18 +168,18 @@ Committed merge 7fb9b7262a1d1e0a47bbfdcbbcf50ce0635d3f8f
And the final repository status looks like this:
------------------------------------------------
-$ git show-branch --more=1 master pu rc
+$ git show-branch --more=1 master seen rc
! [master] Revert "Replace zero-length array decls with []."
- ! [pu] git-repack: Add option to repack all objects.
+ ! [seen] git-repack: Add option to repack all objects.
* [rc] Merge refs/heads/master from .
---
- + [pu] git-repack: Add option to repack all objects.
- + [pu~1] More documentation updates.
- + [pu~2] Show commits in topo order and name all commits.
- + [pu~3] mailinfo and applymbox updates
- + [pu~4] Document "git cherry-pick" and "git revert"
- + [pu~5] Remove git-apply-patch-script.
- + [pu~6] Redo "revert" using three-way merge machinery.
+ + [seen] git-repack: Add option to repack all objects.
+ + [seen~1] More documentation updates.
+ + [seen~2] Show commits in topo order and name all commits.
+ + [seen~3] mailinfo and applymbox updates
+ + [seen~4] Document "git cherry-pick" and "git revert"
+ + [seen~5] Remove git-apply-patch-script.
+ + [seen~6] Redo "revert" using three-way merge machinery.
- [rc] Merge refs/heads/master from .
++* [master] Revert "Replace zero-length array decls with []."
- [rc~1] Merge refs/heads/master from .
diff --git a/Documentation/howto/update-hook-example.txt b/Documentation/howto/update-hook-example.txt
index 89821ec74f..151ee84ceb 100644
--- a/Documentation/howto/update-hook-example.txt
+++ b/Documentation/howto/update-hook-example.txt
@@ -179,7 +179,7 @@ allowed-groups, to describe which heads can be pushed into by
whom. The format of each file would look like this:
refs/heads/master junio
- +refs/heads/pu junio
+ +refs/heads/seen junio
refs/heads/cogito$ pasky
refs/heads/bw/.* linus
refs/heads/tmp/.* .*
@@ -187,6 +187,6 @@ whom. The format of each file would look like this:
With this, Linus can push or create "bw/penguin" or "bw/zebra"
or "bw/panda" branches, Pasky can do only "cogito", and JC can
-do master and pu branches and make versioned tags. And anybody
-can do tmp/blah branches. The '+' sign at the pu record means
+do master and "seen" branches and make versioned tags. And anybody
+can do tmp/blah branches. The '+' sign at the "seen" record means
that JC can make non-fast-forward pushes on it.
diff --git a/Documentation/pretty-formats.txt b/Documentation/pretty-formats.txt
index 547a552463..84bbc7439a 100644
--- a/Documentation/pretty-formats.txt
+++ b/Documentation/pretty-formats.txt
@@ -196,8 +196,8 @@ The placeholders are:
'%ce':: committer email
'%cE':: committer email (respecting .mailmap, see
linkgit:git-shortlog[1] or linkgit:git-blame[1])
-'%cl':: author email local-part (the part before the '@' sign)
-'%cL':: author local-part (see '%cl') respecting .mailmap, see
+'%cl':: committer email local-part (the part before the '@' sign)
+'%cL':: committer local-part (see '%cl') respecting .mailmap, see
linkgit:git-shortlog[1] or linkgit:git-blame[1])
'%cd':: committer date (format respects --date= option)
'%cD':: committer date, RFC2822 style
diff --git a/Documentation/technical/packfile-uri.txt b/Documentation/technical/packfile-uri.txt
new file mode 100644
index 0000000000..318713abc3
--- /dev/null
+++ b/Documentation/technical/packfile-uri.txt
@@ -0,0 +1,78 @@
+Packfile URIs
+=============
+
+This feature allows servers to serve part of their packfile response as URIs.
+This allows server designs that improve scalability in bandwidth and CPU usage
+(for example, by serving some data through a CDN), and (in the future) provides
+some measure of resumability to clients.
+
+This feature is available only in protocol version 2.
+
+Protocol
+--------
+
+The server advertises the `packfile-uris` capability.
+
+If the client then communicates which protocols (HTTPS, etc.) it supports with
+a `packfile-uris` argument, the server MAY send a `packfile-uris` section
+directly before the `packfile` section (right after `wanted-refs` if it is
+sent) containing URIs of any of the given protocols. The URIs point to
+packfiles that use only features that the client has declared that it supports
+(e.g. ofs-delta and thin-pack). See protocol-v2.txt for the documentation of
+this section.
+
+Clients should then download and index all the given URIs (in addition to
+downloading and indexing the packfile given in the `packfile` section of the
+response) before performing the connectivity check.
+
+Server design
+-------------
+
+The server can be trivially made compatible with the proposed protocol by
+having it advertise `packfile-uris`, tolerating the client sending
+`packfile-uris`, and never sending any `packfile-uris` section. But we should
+include some sort of non-trivial implementation in the Minimum Viable Product,
+at least so that we can test the client.
+
+This is the implementation: a feature, marked experimental, that allows the
+server to be configured by one or more `uploadpack.blobPackfileUri=<sha1>
+<uri>` entries. Whenever the list of objects to be sent is assembled, all such
+blobs are excluded, replaced with URIs. The client will download those URIs,
+expecting them to each point to packfiles containing single blobs.
+
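A hedged sketch of that server-side configuration, with a single excluded blob served from a CDN (hash and URI invented for illustration):

------------------------------------------------
[uploadpack]
	blobPackfileUri = 0123456789abcdef0123456789abcdef01234567 https://cdn.example.com/packs/blob-0123.pack
------------------------------------------------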
+Client design
+-------------
+
+The client has a config variable `fetch.uriprotocols` that determines which
+protocols the end user is willing to use. By default, this is empty.
+
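Opting in on the client side could then look like this (remote URL hypothetical):

------------------------------------------------
$ git config fetch.uriProtocols https
$ git clone https://git.example.com/project.git
------------------------------------------------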
+When the client downloads the given URIs, it should store them with "keep"
+files, just like it does with the packfile in the `packfile` section. These
+additional "keep" files can only be removed after the refs have been updated -
+just like the "keep" file for the packfile in the `packfile` section.
+
+The division of work (initial fetch + additional URIs) introduces convenient
+points for resumption of an interrupted clone - such resumption can be done
+after the Minimum Viable Product (see "Future work").
+
+Future work
+-----------
+
+The protocol design allows some evolution of the server and client without any
+need for protocol changes, so only a small-scoped design is included here to
+form the MVP. For example, the following can be done:
+
+ * On the server, more sophisticated means of excluding objects (e.g. by
+ specifying a commit to represent that commit and all objects that it
+ references).
+ * On the client, resumption of clone. If a clone is interrupted, information
+ could be recorded in the repository's config and a "clone-resume" command
+ can resume the clone in progress. (Resumption of subsequent fetches is more
+ difficult because that must deal with the user wanting to use the repository
+ even after the fetch was interrupted.)
+
+There are some possible features that will require a change in protocol:
+
+ * Additional HTTP headers (e.g. authentication)
+ * Byte range support
+ * Different file formats referenced by URIs (e.g. raw object)
diff --git a/Documentation/technical/protocol-capabilities.txt b/Documentation/technical/protocol-capabilities.txt
index 2b267c0da6..36ccd14f97 100644
--- a/Documentation/technical/protocol-capabilities.txt
+++ b/Documentation/technical/protocol-capabilities.txt
@@ -176,6 +176,21 @@ agent strings are purely informative for statistics and debugging
purposes, and MUST NOT be used to programmatically assume the presence
or absence of particular features.
+object-format
+-------------
+
+This capability, which takes a hash algorithm as an argument, indicates
+that the server supports the given hash algorithms. It may be sent
+multiple times; if so, the first one given is the one used in the ref
+advertisement.
+
+When provided by the client, this indicates that it intends to use the
+given hash algorithm to communicate. The algorithm provided must be one
+that the server supports.
+
+If this capability is not provided, it is assumed that the only
+supported algorithm is SHA-1.
+
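For instance, the capability list attached to the first advertised ref might carry the algorithm alongside the other capabilities, roughly like this (object ID and capability set abbreviated):

------------------------------------------------
<oid> HEAD\0side-band-64k thin-pack ofs-delta object-format=sha256 agent=git/2.28.0
------------------------------------------------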
symref
------
diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt
index 3996d70891..e597b74da3 100644
--- a/Documentation/technical/protocol-v2.txt
+++ b/Documentation/technical/protocol-v2.txt
@@ -325,13 +325,26 @@ included in the client's request:
indicating its sideband (1, 2, or 3), and the server may send "0005\2"
(a PKT-LINE of sideband 2 with no payload) as a keepalive packet.
+If the 'packfile-uris' feature is advertised, the following argument
+can be included in the client's request as well as the potential
+addition of the 'packfile-uris' section in the server's response as
+explained below.
+
+ packfile-uris <comma-separated list of protocols>
+ Indicates to the server that the client is willing to receive
+ URIs of any of the given protocols in place of objects in the
+ sent packfile. Before performing the connectivity check, the
+ client should download from all given URIs. Currently, the
+ protocols supported are "http" and "https".
+
The response of `fetch` is broken into a number of sections separated by
delimiter packets (0001), with each section beginning with its section
-header.
+header. Most sections are sent only when the packfile is sent.
- output = *section
- section = (acknowledgments | shallow-info | wanted-refs | packfile)
- (flush-pkt | delim-pkt)
+ output = acknowledgments flush-pkt |
+ [acknowledgments delim-pkt] [shallow-info delim-pkt]
+ [wanted-refs delim-pkt] [packfile-uris delim-pkt]
+ packfile flush-pkt
acknowledgments = PKT-LINE("acknowledgments" LF)
(nak | *ack)
@@ -349,13 +362,17 @@ header.
*PKT-LINE(wanted-ref LF)
wanted-ref = obj-id SP refname
+ packfile-uris = PKT-LINE("packfile-uris" LF) *packfile-uri
+ packfile-uri = PKT-LINE(40*(HEXDIGIT) SP *%x20-ff LF)
+
packfile = PKT-LINE("packfile" LF)
*PKT-LINE(%x01-03 *%x00-ff)
acknowledgments section
- * If the client determines that it is finished with negotiations
- by sending a "done" line, the acknowledgments sections MUST be
- omitted from the server's response.
+ * If the client determines that it is finished with negotiations by
+ sending a "done" line (thus requiring the server to send a packfile),
+ the acknowledgments sections MUST be omitted from the server's
+ response.
* Always begins with the section header "acknowledgments"
@@ -406,9 +423,6 @@ header.
which the client has not indicated was shallow as a part of
its request.
- * This section is only included if a packfile section is also
- included in the response.
-
wanted-refs section
* This section is only included if the client has requested a
ref using a 'want-ref' line and if a packfile section is also
@@ -422,6 +436,20 @@ header.
* The server MUST NOT send any refs which were not requested
using 'want-ref' lines.
+ packfile-uris section
+ * This section is only included if the client sent
+ 'packfile-uris' and the server has at least one such URI to
+ send.
+
+ * Always begins with the section header "packfile-uris".
+
+ * For each URI the server sends, it sends a hash of the pack's
+ contents (as output by git index-pack) followed by the URI.
+
+ * The hashes are 40 hex characters long. When Git upgrades to a new
+ hash algorithm, this might need to be updated. (It should match
+ whatever index-pack outputs after "pack\t" or "keep\t".)
+
packfile section
* This section is only included if the client has sent 'want'
lines in its request and either requested that no more
@@ -455,3 +483,12 @@ included in a request. This is done by sending each option as a
a request.
The provided options must not contain a NUL or LF character.
+
+ object-format
+~~~~~~~~~~~~~~~
+
+The server can advertise the `object-format` capability with a value `X` (in the
+form `object-format=X`) to notify the client that the server is able to deal
+with objects using hash algorithm X. If not specified, the server is assumed to
+only handle SHA-1. If the client would like to use a hash algorithm other than
+SHA-1, it should specify its object-format string.
diff --git a/Documentation/technical/reftable.txt b/Documentation/technical/reftable.txt
new file mode 100644
index 0000000000..2951840e9c
--- /dev/null
+++ b/Documentation/technical/reftable.txt
@@ -0,0 +1,1083 @@
+reftable
+--------
+
+Overview
+~~~~~~~~
+
+Problem statement
+^^^^^^^^^^^^^^^^^
+
+Some repositories contain a lot of references (e.g. android at 866k,
+rails at 31k). The existing packed-refs format takes up a lot of space
+(e.g. 62M), and does not scale with additional references. Lookup of a
+single reference requires linearly scanning the file.
+
+Atomic pushes modifying multiple references require copying the entire
+packed-refs file, which can be a considerable amount of data moved
+(e.g. 62M in, 62M out) for even small transactions (2 refs modified).
+
+Repositories with many loose references occupy a large number of disk
+blocks from the local file system, as each reference is its own file
+storing 41 bytes (and another file for the corresponding reflog). This
+negatively affects the number of inodes available when a large number of
+repositories are stored on the same filesystem. Readers can be penalized
+due to the larger number of syscalls required to traverse and read the
+`$GIT_DIR/refs` directory.
+
+
+Objectives
+^^^^^^^^^^
+
+* Near constant time lookup for any single reference, even when the
+repository is cold and not in process or kernel cache.
+* Near constant time verification of whether an object name is referred to
+by at least one reference (for allow-tip-sha1-in-want).
+* Efficient enumeration of an entire namespace, such as `refs/tags/`.
+* Support atomic push with `O(size_of_update)` operations.
+* Combine reflog storage with ref storage for small transactions.
+* Separate reflog storage for base refs and historical logs.
+
+Description
+^^^^^^^^^^^
+
+A reftable file is a portable binary file format customized for
+reference storage. References are sorted, enabling linear scans, binary
+search lookup, and range scans.
+
+Storage in the file is organized into variable sized blocks. Prefix
+compression is used within a single block to reduce disk space. Block
+size and alignment is tunable by the writer.
+
+Performance
+^^^^^^^^^^^
+
+Space used, packed-refs vs. reftable:
+
+[cols=",>,>,>,>,>",options="header",]
+|===============================================================
+|repository |packed-refs |reftable |% original |avg ref |avg obj
+|android |62.2 M |36.1 M |58.0% |33 bytes |5 bytes
+|rails |1.8 M |1.1 M |57.7% |29 bytes |4 bytes
+|git |78.7 K |48.1 K |61.0% |50 bytes |4 bytes
+|git (heads) |332 b |269 b |81.0% |33 bytes |0 bytes
+|===============================================================
+
+Scan (read 866k refs), by reference name lookup (single ref from 866k
+refs), and by SHA-1 lookup (refs with that SHA-1, from 866k refs):
+
+[cols=",>,>,>,>",options="header",]
+|=========================================================
+|format |cache |scan |by name |by SHA-1
+|packed-refs |cold |402 ms |409,660.1 usec |412,535.8 usec
+|packed-refs |hot | |6,844.6 usec |20,110.1 usec
+|reftable |cold |112 ms |33.9 usec |323.2 usec
+|reftable |hot | |20.2 usec |320.8 usec
+|=========================================================
+
+Space used for 149,932 log entries for 43,061 refs, reflog vs. reftable:
+
+[cols=",>,>",options="header",]
+|================================
+|format |size |avg entry
+|$GIT_DIR/logs |173 M |1209 bytes
+|reftable |5 M |37 bytes
+|================================
+
+Details
+~~~~~~~
+
+Peeling
+^^^^^^^
+
+References stored in a reftable are peeled: a record for an annotated
+(or signed) tag records both the tag object and the object it refers
+to. This is analogous to storage in the packed-refs format.
+
+Reference name encoding
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Reference names are an uninterpreted sequence of bytes that must pass
+linkgit:git-check-ref-format[1] as a valid reference name.
+
+Key unicity
+^^^^^^^^^^^
+
+Each entry must have a unique key; repeated keys are disallowed.
+
+Network byte order
+^^^^^^^^^^^^^^^^^^
+
+All multi-byte, fixed width fields are in network byte order.
+
+Varint encoding
+^^^^^^^^^^^^^^^
+
+Varint encoding is identical to the ofs-delta encoding method used
+within pack files.
+
+The decoder works as follows:
+
+....
+val = buf[ptr] & 0x7f
+while (buf[ptr] & 0x80) {
+ ptr++
+ val = ((val + 1) << 7) | (buf[ptr] & 0x7f)
+}
+....
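+
+For illustration, here is a minimal C sketch of this decoder together
+with the matching encoder used by pack files. Function names are
+illustrative and bounds checking is omitted:
+
+....
+#include <stdint.h>
+#include <stddef.h>
+
+/* Decode a varint starting at buf[*ptr]; advances *ptr past it. */
+uint64_t varint_decode(const unsigned char *buf, size_t *ptr)
+{
+    uint64_t val = buf[*ptr] & 0x7f;
+
+    while (buf[*ptr] & 0x80) {
+        (*ptr)++;
+        val = ((val + 1) << 7) | (buf[*ptr] & 0x7f);
+    }
+    (*ptr)++;
+    return val;
+}
+
+/* Encode val into out (at least 10 bytes); returns the encoded length. */
+size_t varint_encode(uint64_t val, unsigned char *out)
+{
+    unsigned char tmp[10];
+    size_t n = 0, i;
+
+    tmp[n++] = val & 0x7f;
+    while (val >>= 7)
+        tmp[n++] = 0x80 | (--val & 0x7f);
+    for (i = 0; i < n; i++)
+        out[i] = tmp[n - 1 - i];        /* emit most significant byte first */
+    return n;
+}
+....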
+
+Ordering
+^^^^^^^^
+
+Blocks are lexicographically ordered by their first reference.
+
+Directory/file conflicts
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The reftable format accepts both `refs/heads/foo` and
+`refs/heads/foo/bar` as distinct references.
+
+This property is useful for retaining log records in reftable, but may
+confuse versions of Git using the `$GIT_DIR/refs` directory tree to maintain
+references. Users of reftable may choose to continue to reject `foo` and
+`foo/bar` type conflicts to prevent problems for peers.
+
+File format
+~~~~~~~~~~~
+
+Structure
+^^^^^^^^^
+
+A reftable file has the following high-level structure:
+
+....
+first_block {
+ header
+ first_ref_block
+}
+ref_block*
+ref_index*
+obj_block*
+obj_index*
+log_block*
+log_index*
+footer
+....
+
+A log-only file omits the `ref_block`, `ref_index`, `obj_block` and
+`obj_index` sections, containing only the file header and log block:
+
+....
+first_block {
+ header
+}
+log_block*
+log_index*
+footer
+....
+
+In a log-only file the first log block immediately follows the file
+header, without padding to block alignment.
+
+Block size
+^^^^^^^^^^
+
+The file's block size is arbitrarily determined by the writer, and does
+not have to be a power of 2. The block size must be larger than the
+longest reference name or log entry used in the repository, as
+references cannot span blocks.
+
+Powers of two that are friendly to the virtual memory system or
+filesystem (such as 4k or 8k) are recommended. Larger sizes (64k) can
+yield better compression, with a possible increased cost incurred by
+readers during access.
+
+The largest block size is `16777215` bytes (15.99 MiB).
+
+Block alignment
+^^^^^^^^^^^^^^^
+
+Writers may choose to align blocks at multiples of the block size by
+including `padding` filled with NUL bytes at the end of a block to round
+out to the chosen alignment. When alignment is used, writers must
+specify the alignment with the file header's `block_size` field.
+
+Block alignment is not required by the file format. Unaligned files must
+set `block_size = 0` in the file header, and omit `padding`. Unaligned
+files with more than one ref block must include the link:#Ref-index[ref
+index] to support fast lookup. Readers must be able to read both aligned
+and non-aligned files.
+
+Very small files (e.g. a single ref block) may omit `padding` and the ref
+index to reduce total file size.
+
+Header (version 1)
+^^^^^^^^^^^^^^^^^^
+
+A 24-byte header appears at the beginning of the file:
+
+....
+'REFT'
+uint8( version_number = 1 )
+uint24( block_size )
+uint64( min_update_index )
+uint64( max_update_index )
+....
+
+Aligned files must specify `block_size` to configure readers with the
+expected block alignment. Unaligned files must set `block_size = 0`.
+
+The `min_update_index` and `max_update_index` describe bounds for the
+`update_index` field of all log records in this file. When reftables are
+used in a stack for link:#Update-transactions[transactions], these
+fields can order the files such that the prior file's
+`max_update_index + 1` is the next file's `min_update_index`.
+
+Header (version 2)
+^^^^^^^^^^^^^^^^^^
+
+A 28-byte header appears at the beginning of the file:
+
+....
+'REFT'
+uint8( version_number = 2 )
+uint24( block_size )
+uint64( min_update_index )
+uint64( max_update_index )
+uint32( hash_id )
+....
+
+The header is identical to `version_number=1`, with the 4-byte hash ID
+("sha1" for SHA-1 and "s256" for SHA-256) appended to the header.
+
+For maximum backward compatibility, it is recommended to use version 1 when
+writing SHA-1 reftables.
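+
+As an illustration, a C sketch that parses either header version
+(helper and struct names are hypothetical):
+
+....
+#include <stdint.h>
+#include <string.h>
+
+struct reftable_header {
+    uint8_t version;
+    uint32_t block_size;        /* 24-bit on disk */
+    uint64_t min_update_index;
+    uint64_t max_update_index;
+    uint32_t hash_id;           /* version 2 only */
+};
+
+/* Read an n-byte big-endian (network byte order) integer. */
+uint64_t read_be(const unsigned char *p, int n)
+{
+    uint64_t v = 0;
+    while (n--)
+        v = (v << 8) | *p++;
+    return v;
+}
+
+/* Returns the header length (24 or 28), or -1 if malformed. */
+int parse_header(const unsigned char *buf, struct reftable_header *h)
+{
+    if (memcmp(buf, "REFT", 4))
+        return -1;
+    h->version = buf[4];
+    h->block_size = read_be(buf + 5, 3);
+    h->min_update_index = read_be(buf + 8, 8);
+    h->max_update_index = read_be(buf + 16, 8);
+    if (h->version == 1)
+        return 24;
+    if (h->version == 2) {
+        h->hash_id = read_be(buf + 24, 4);
+        return 28;
+    }
+    return -1;
+}
+....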
+
+First ref block
+^^^^^^^^^^^^^^^
+
+The first ref block shares the same block as the file header, and is 24
+bytes smaller than all other blocks in the file. The first block
+immediately begins after the file header, at position 24.
+
+If the first block is a log block (a log-only file), its block header
+begins immediately at position 24.
+
+Ref block format
+^^^^^^^^^^^^^^^^
+
+A ref block is written as:
+
+....
+'r'
+uint24( block_len )
+ref_record+
+uint24( restart_offset )+
+uint16( restart_count )
+
+padding?
+....
+
+Blocks begin with `block_type = 'r'` and a 3-byte `block_len` which
+encodes the number of bytes in the block up to, but not including the
+optional `padding`. This is always less than or equal to the file's
+block size. In the first ref block, `block_len` includes 24 bytes for
+the file header.
+
+The 2-byte `restart_count` stores the number of entries in the
+`restart_offset` list, which must not be empty. Readers can use
+`restart_count` to binary search between restarts before starting a
+linear scan.
+
+Exactly `restart_count` 3-byte `restart_offset` values precede the
+`restart_count`. Offsets are relative to the start of the block and
+refer to the first byte of any `ref_record` whose name has not been
+prefix compressed. Entries in the `restart_offset` list must be sorted,
+ascending. Readers can start linear scans from any of these records.
+
+A variable number of `ref_record` fill the middle of the block,
+describing reference names and values. The format is described below.
+
+As the first ref block shares the first file block with the file header,
+all `restart_offset` in the first block are relative to the start of the
+file (position 0), and include the file header. This forces the first
+`restart_offset` to be `28`.
+
+ref record
+++++++++++
+
+A `ref_record` describes a single reference, storing both the name and
+its value(s). Records are formatted as:
+
+....
+varint( prefix_length )
+varint( (suffix_length << 3) | value_type )
+suffix
+varint( update_index_delta )
+value?
+....
+
+The `prefix_length` field specifies how many leading bytes of the prior
+reference record's name should be copied to obtain this reference's
+name. This must be 0 for the first reference in any block, and also must
+be 0 for any `ref_record` whose offset is listed in the `restart_offset`
+table at the end of the block.
+
+Recovering a reference name from any `ref_record` is a simple concatenation:
+
+....
+this_name = prior_name[0..prefix_length] + suffix
+....
+
+The `suffix_length` value provides the number of bytes in `suffix` that
+are appended to the copied prefix to complete the reference name.
+
+The `update_index` that last modified the reference can be obtained by
+adding `update_index_delta` to the `min_update_index` from the file
+header: `min_update_index + update_index_delta`.
+
+The `value` follows. Its format is determined by `value_type`, one of
+the following:
+
+* `0x0`: deletion; no value data (see transactions, below)
+* `0x1`: one object name; value of the ref
+* `0x2`: two object names; value of the ref, peeled target
+* `0x3`: symbolic reference: `varint( target_len ) target`
+
+Symbolic references use `0x3`, followed by the complete name of the
+reference target. No compression is applied to the target name.
+
+Types `0x4..0x7` are reserved for future use.
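+
+Building on the varint decoder sketched earlier, a simplified C reader
+for one `ref_record` could look as follows. The struct, fixed-size
+buffers and 20-byte hash are illustrative simplifications:
+
+....
+#include <stdint.h>
+#include <string.h>
+
+#define HASH_SZ 20                      /* SHA-1; 32 for SHA-256 */
+
+struct ref_rec {
+    char name[4096];
+    uint64_t update_index_delta;
+    int value_type;
+    unsigned char id[HASH_SZ], peeled[HASH_SZ];
+    char target[4096];                  /* symref target */
+};
+
+/* r->name carries the prior record's name on entry (prefix source). */
+size_t read_ref_record(const unsigned char *buf, size_t ptr,
+                       struct ref_rec *r)
+{
+    uint64_t prefix = varint_decode(buf, &ptr);
+    uint64_t sfx_and_type = varint_decode(buf, &ptr);
+    uint64_t suffix = sfx_and_type >> 3;
+
+    r->value_type = sfx_and_type & 0x7;
+    /* this_name = prior_name[0..prefix_length] + suffix */
+    memcpy(r->name + prefix, buf + ptr, suffix);
+    r->name[prefix + suffix] = '\0';
+    ptr += suffix;
+    r->update_index_delta = varint_decode(buf, &ptr);
+
+    switch (r->value_type) {
+    case 0x0:                           /* deletion: no value */
+        break;
+    case 0x1:                           /* one object name */
+        memcpy(r->id, buf + ptr, HASH_SZ);
+        ptr += HASH_SZ;
+        break;
+    case 0x2:                           /* value and peeled target */
+        memcpy(r->id, buf + ptr, HASH_SZ);
+        memcpy(r->peeled, buf + ptr + HASH_SZ, HASH_SZ);
+        ptr += 2 * HASH_SZ;
+        break;
+    case 0x3: {                         /* symref: varint(len) target */
+        uint64_t len = varint_decode(buf, &ptr);
+        memcpy(r->target, buf + ptr, len);
+        r->target[len] = '\0';
+        ptr += len;
+        break;
+    }
+    }
+    return ptr;                         /* offset of the next record */
+}
+....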
+
+Ref index
+^^^^^^^^^
+
+The ref index stores the name of the last reference from every ref block
+in the file, enabling reduced disk seeks for lookups. Any reference can
+be found by searching the index, identifying the containing block, and
+searching within that block.
+
+The index may be organized into a multi-level index, where the 1st level
+index block points to additional ref index blocks (2nd level), which may
+in turn point to either additional index blocks (e.g. 3rd level) or ref
+blocks (leaf level). Disk reads required to access a ref go up with
+higher index levels. Multi-level indexes may be required to ensure no
+single index block exceeds the file format's max block size of
+`16777215` bytes (15.99 MiB). To achieve constant O(1) disk seeks for
+lookups the index must be a single level, which is permitted to exceed
+the file's configured block size, but not the format's max block size of
+15.99 MiB.
+
+If present, the ref index block(s) appears after the last ref block.
+
+If there are at least 4 ref blocks, a ref index block should be written
+to improve lookup times. Cold reads using the index require 2 disk reads
+(read index, read block), and binary searching < 4 blocks also requires
+<= 2 reads. Omitting the index block from smaller files saves space.
+
+If the file is unaligned and contains more than one ref block, the ref
+index must be written.
+
+Index block format:
+
+....
+'i'
+uint24( block_len )
+index_record+
+uint24( restart_offset )+
+uint16( restart_count )
+
+padding?
+....
+
+The index blocks begin with `block_type = 'i'` and a 3-byte `block_len`
+which encodes the number of bytes in the block, up to but not including
+the optional `padding`.
+
+The `restart_offset` and `restart_count` fields are identical in format,
+meaning and usage as in ref blocks.
+
+To reduce the number of reads required for random access in very large
+files the index block may be larger than other blocks. However, readers
+must hold the entire index in memory to benefit from this, so it's a
+time-space tradeoff in both file size and reader memory.
+
+Increasing the file's block size decreases the index size. Alternatively
+a multi-level index may be used, keeping index blocks within the file's
+block size, but increasing the number of blocks that need to be
+accessed.
+
+index record
+++++++++++++
+
+An index record describes the last entry in another block. Index records
+are written as:
+
+....
+varint( prefix_length )
+varint( (suffix_length << 3) | 0 )
+suffix
+varint( block_position )
+....
+
+Index records use prefix compression exactly like `ref_record`.
+
+Index records store `block_position` after the suffix, specifying the
+absolute position in bytes (from the start of the file) of the block
+that ends with this reference. Readers can seek to `block_position` to
+begin reading the block header.
+
+Readers must examine the block header at `block_position` to determine
+if the next block is another level index block, or the leaf-level ref
+block.
+
+Reading the index
++++++++++++++++++
+
+Readers loading the ref index must first read the footer (below) to
+obtain `ref_index_position`. If not present, the position will be 0. The
+`ref_index_position` is for the 1st level root of the ref index.
+
+Obj block format
+^^^^^^^^^^^^^^^^
+
+Object blocks are optional. Writers may choose to omit object blocks,
+especially if readers will not use the object name to ref mapping.
+
+Object blocks use unique, abbreviated 2-32 byte object name keys, mapping to
+ref blocks containing references pointing to that object directly, or as
+the peeled value of an annotated tag. Like ref blocks, object blocks use
+the file's standard block size. The abbreviation length is available in
+the footer as `obj_id_len`.
+
+To save space in small files, object blocks may be omitted if the ref
+index is not present, as brute force search will only need to read a few
+ref blocks. When missing, readers should fall back to a brute-force
+linear search of all references to look up by object name.
+
+An object block is written as:
+
+....
+'o'
+uint24( block_len )
+obj_record+
+uint24( restart_offset )+
+uint16( restart_count )
+
+padding?
+....
+
+Fields are identical to ref block. Binary search using the restart table
+works the same as in reference blocks.
+
+Because object names are abbreviated by writers to the shortest unique
+abbreviation within the reftable, obj keys have a variable length that
+must be at least 2 bytes. Readers must compare only for common prefix
+match within an obj block or obj index.
+
+obj record
+++++++++++
+
+An `obj_record` describes a single object abbreviation, and the blocks
+containing references using that unique abbreviation:
+
+....
+varint( prefix_length )
+varint( (suffix_length << 3) | cnt_3 )
+suffix
+varint( cnt_large )?
+varint( position_delta )*
+....
+
+Like in reference blocks, abbreviations are prefix compressed within an
+obj block. On large reftables with many unique objects, higher block
+sizes (64k), and higher restart interval (128), a `prefix_length` of 2
+or 3 and `suffix_length` of 3 may be common in obj records (unique
+abbreviation of 5-6 raw bytes, 10-12 hex digits).
+
+Each record contains `position_count` number of positions for matching
+ref blocks. For 1-7 positions the count is stored in `cnt_3`. When
+`cnt_3 = 0` the actual count follows in a varint, `cnt_large`.
+
+The use of `cnt_3` assumes that most objects are pointed to by only a
+single reference, some may be pointed to by a couple of references, and
+very few (if any) are pointed to by more than 7 references.
+
+A special case exists when `cnt_3 = 0` and `cnt_large = 0`: there are no
+`position_delta`, but at least one reference starts with this
+abbreviation. A reader that needs exact reference names must scan all
+references to find which specific references have the desired object.
+Writers should use this format when the `position_delta` list would have
+overflowed the file's block size due to a high number of references
+pointing to the same object.
+
+The first `position_delta` is the position from the start of the file.
+Additional `position_delta` entries are sorted ascending and relative to
+the prior entry, e.g. a reader would perform:
+
+....
+pos = position_delta[0]
+prior = pos
+for (j = 1; j < position_count; j++) {
+ pos = prior + position_delta[j]
+ prior = pos
+}
+....
+
+With a position in hand, a reader must linearly scan the ref block,
+starting from the first `ref_record`, testing each reference's object names
+(for `value_type = 0x1` or `0x2`) for full equality. Faster searching by
+object name within a single ref block is not supported by the reftable format.
+Smaller block sizes reduce the number of candidates this step must
+consider.
+
+Obj index
+^^^^^^^^^
+
+The obj index stores the abbreviation from the last entry for every obj
+block in the file, enabling reduced disk seeks for all lookups. It is
+formatted exactly the same as the ref index, but refers to obj blocks.
+
+The obj index should be present if obj blocks are present, as obj blocks
+should only be written in larger files.
+
+Readers loading the obj index must first read the footer (below) to
+obtain `obj_index_position`. If not present, the position will be 0.
+
+Log block format
+^^^^^^^^^^^^^^^^
+
+Unlike ref and obj blocks, log blocks are always unaligned.
+
+Log blocks are variable in size, and do not match the `block_size`
+specified in the file header or footer. Writers should choose an
+appropriate buffer size to prepare a log block for deflation, such as
+`2 * block_size`.
+
+A log block is written as:
+
+....
+'g'
+uint24( block_len )
+zlib_deflate {
+ log_record+
+ uint24( restart_offset )+
+ uint16( restart_count )
+}
+....
+
+Log blocks look similar to ref blocks, except `block_type = 'g'`.
+
+The 4-byte block header is followed by the deflated block contents using
+zlib deflate. The `block_len` in the header is the inflated size
+(including 4-byte block header), and should be used by readers to
+preallocate the inflation output buffer. A log block's `block_len` may
+exceed the file's block size.
+
+Offsets within the log block (e.g. `restart_offset`) still include the
+4-byte header. Readers may prefer prefixing the inflation output buffer
+with the 4-byte header.
+
+Within the deflate container, a variable number of `log_record` describe
+reference changes. The log record format is described below. See ref
+block format (above) for a description of `restart_offset` and
+`restart_count`.
+
+Because log blocks have no alignment or padding between blocks, readers
+must keep track of the bytes consumed by the inflater to know where the
+next log block begins.
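+
+A rough C sketch of inflating one log block with zlib, showing both the
+prefixed 4-byte header and the bookkeeping of consumed compressed bytes
+(buffer sizing and error paths are simplified):
+
+....
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <zlib.h>
+
+/*
+ * src points at the 'g' byte of a log block; src_avail is the number
+ * of bytes left in the file. On success *out holds the inflated block
+ * (4-byte header included) and the return value is the offset of the
+ * next log block relative to src. Returns -1 on error.
+ */
+long inflate_log_block(const unsigned char *src, size_t src_avail,
+                       unsigned char **out, uint32_t *block_len)
+{
+    z_stream strm;
+    uLong consumed;
+    int ret;
+
+    if (src_avail < 4 || src[0] != 'g')
+        return -1;
+    *block_len = (src[1] << 16) | (src[2] << 8) | src[3];
+
+    *out = malloc(*block_len);
+    memcpy(*out, src, 4);               /* keep offsets header-relative */
+
+    memset(&strm, 0, sizeof(strm));
+    if (inflateInit(&strm) != Z_OK)
+        return -1;
+    strm.next_in = (unsigned char *)src + 4;
+    strm.avail_in = src_avail - 4;
+    strm.next_out = *out + 4;
+    strm.avail_out = *block_len - 4;
+
+    ret = inflate(&strm, Z_FINISH);
+    consumed = strm.total_in;
+    inflateEnd(&strm);
+    if (ret != Z_STREAM_END) {
+        free(*out);
+        return -1;
+    }
+
+    /* the next log block starts right after the deflate stream */
+    return 4 + consumed;
+}
+....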
+
+log record
+++++++++++
+
+Log record keys are structured as:
+
+....
+ref_name '\0' reverse_int64( update_index )
+....
+
+where `update_index` is the unique transaction identifier. The
+`update_index` field must be unique within the scope of a `ref_name`.
+See the update transactions section below for further details.
+
+The `reverse_int64` function inverts the value so that lexicographic
+ordering of the network byte order encoding sorts the more recent
+records (those with higher `update_index` values) first:
+
+....
+reverse_int64(int64 t) {
+ return 0xffffffffffffffff - t;
+}
+....
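+
+A small C sketch of building a log record key this way (names are
+illustrative):
+
+....
+#include <stdint.h>
+#include <string.h>
+
+/* key = ref_name '\0' reverse_int64(update_index), big-endian */
+size_t log_key(unsigned char *out, const char *refname,
+               uint64_t update_index)
+{
+    size_t n = strlen(refname);
+    uint64_t rev = 0xffffffffffffffffULL - update_index;
+    int i;
+
+    memcpy(out, refname, n);
+    out[n++] = '\0';
+    for (i = 7; i >= 0; i--)
+        out[n++] = (rev >> (8 * i)) & 0xff;
+    return n;
+}
+....
+
+Because `rev` decreases as `update_index` grows, a plain byte-wise
+(memcmp) comparison of two keys for the same `ref_name` orders the
+newer entry first.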
+
+Log records have a similar starting structure to ref and index records,
+utilizing the same prefix compression scheme applied to the log record
+key described above.
+
+....
+ varint( prefix_length )
+ varint( (suffix_length << 3) | log_type )
+ suffix
+ log_data {
+ old_id
+ new_id
+ varint( name_length ) name
+ varint( email_length ) email
+ varint( time_seconds )
+ sint16( tz_offset )
+ varint( message_length ) message
+ }?
+....
+
+Log record entries use `log_type` to indicate what follows:
+
+* `0x0`: deletion; no log data.
+* `0x1`: standard git reflog data using `log_data` above.
+
+The `log_type = 0x0` is mostly useful for `git stash drop`, removing an
+entry from the reflog of `refs/stash` in a transaction file (below),
+without needing to rewrite larger files. Readers reading a stack of
+reflogs must treat this as a deletion.
+
+For `log_type = 0x1`, the `log_data` section follows
+linkgit:git-update-ref[1] logging and includes:
+
+* two object names (old id, new id)
+* varint string of committer's name
+* varint string of committer's email
+* varint time in seconds since epoch (Jan 1, 1970)
+* 2-byte timezone offset in minutes (signed)
+* varint string of message
+
+`tz_offset` is the absolute number of minutes from GMT the committer was
+at the time of the update. For example `GMT-0800` is encoded in reftable
+as `sint16(-480)` and `GMT+0230` is `sint16(150)`.
+
+The committer email does not contain `<` or `>`; it is the value
+normally found between the `<>` in a git commit object header.
+
+The `message_length` may be 0, in which case there was no message
+supplied for the update.
+
+Unlike the traditional file-based reflog, renames are encoded as a
+combination of ref deletion and ref creation. A deletion is a log
+record with a zero new_id, and a creation is a log record with a zero old_id.
+
+Reading the log
++++++++++++++++
+
+Readers accessing the log must first read the footer (below) to
+determine the `log_position`. The first block of the log begins at
+`log_position` bytes since the start of the file. The `log_position` is
+not block aligned.
+
+Importing logs
+++++++++++++++
+
+When importing from `$GIT_DIR/logs` writers should globally order all
+log records roughly by timestamp while preserving file order, and assign
+unique, increasing `update_index` values for each log line. Newer log
+records get higher `update_index` values.
+
+Although an import may write only a single reftable file, the reftable
+file must span many unique `update_index`, as each log line requires its
+own `update_index` to preserve semantics.
+
+Log index
+^^^^^^^^^
+
+The log index stores the log key
+(`refname \0 reverse_int64(update_index)`) for the last log record of
+every log block in the file, supporting bounded-time lookup.
+
+A log index block must be written if 2 or more log blocks are written to
+the file. If present, the log index appears after the last log block.
+There is no padding used to align the log index to block alignment.
+
+Log index format is identical to ref index, except the keys are 9 bytes
+longer to include `'\0'` and the 8-byte `reverse_int64(update_index)`.
+Records use `block_position` to refer to the start of a log block.
+
+Reading the index
++++++++++++++++++
+
+Readers loading the log index must first read the footer (below) to
+obtain `log_index_position`. If not present, the position will be 0.
+
+Footer
+^^^^^^
+
+After the last block of the file, a file footer is written. It begins
+like the file header, but is extended with additional data.
+
+....
+ HEADER
+
+ uint64( ref_index_position )
+ uint64( (obj_position << 5) | obj_id_len )
+ uint64( obj_index_position )
+
+ uint64( log_position )
+ uint64( log_index_position )
+
+ uint32( CRC-32 of above )
+....
+
+If a section is missing (e.g. ref index) the corresponding position
+field (e.g. `ref_index_position`) will be 0.
+
+* `obj_position`: byte position for the first obj block.
+* `obj_id_len`: number of bytes used to abbreviate object names in
+obj blocks.
+* `log_position`: byte position for the first log block.
+* `ref_index_position`: byte position for the start of the ref index.
+* `obj_index_position`: byte position for the start of the obj index.
+* `log_index_position`: byte position for the start of the log index.
+
+The size of the footer is 68 bytes for version 1, and 72 bytes for
+version 2.
+
+Reading the footer
+++++++++++++++++++
+
+Readers must first read the file start to determine the version
+number. Then they seek to `file_length - FOOTER_LENGTH` to access the
+footer. A trusted external source (such as `stat(2)`) is necessary to
+obtain `file_length`. When reading the footer, readers must verify:
+
+* 4-byte magic is correct
+* 1-byte version number is recognized
+* 4-byte CRC-32 matches the rest of the footer (the preceding 64 bytes
+for version 1, 68 bytes for version 2, including magic and version)
+
+Once verified, the other fields of the footer can be accessed.
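+
+Assuming the `read_be` and `parse_header` helpers sketched earlier, a
+C sketch of this verification and unpacking (CRC-32 computed here with
+zlib's `crc32()`; names are illustrative):
+
+....
+#include <zlib.h>                       /* crc32() */
+
+struct reftable_footer {
+    struct reftable_header hdr;
+    uint64_t ref_index_position;
+    uint64_t obj_position;
+    uint32_t obj_id_len;
+    uint64_t obj_index_position;
+    uint64_t log_position;
+    uint64_t log_index_position;
+};
+
+/* buf points at the footer; len is 68 (version 1) or 72 (version 2). */
+int parse_footer(const unsigned char *buf, size_t len,
+                 struct reftable_footer *f)
+{
+    int hlen = parse_header(buf, &f->hdr);
+    const unsigned char *p;
+    uint64_t packed;
+
+    if (hlen < 0 || (size_t)hlen + 44 != len)
+        return -1;
+    if (read_be(buf + len - 4, 4) !=
+        crc32(crc32(0L, Z_NULL, 0), buf, len - 4))
+        return -1;                      /* checksum mismatch */
+
+    p = buf + hlen;
+    f->ref_index_position = read_be(p, 8); p += 8;
+    packed = read_be(p, 8);                p += 8;
+    f->obj_position = packed >> 5;
+    f->obj_id_len = packed & 0x1f;
+    f->obj_index_position = read_be(p, 8); p += 8;
+    f->log_position = read_be(p, 8);       p += 8;
+    f->log_index_position = read_be(p, 8);
+    return 0;
+}
+....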
+
+Empty tables
+++++++++++++
+
+A reftable may be empty. In this case, the file starts with a header
+and is immediately followed by a footer.
+
+Binary search
+^^^^^^^^^^^^^
+
+Binary search within a block is supported by the `restart_offset` fields
+at the end of the block. Readers can binary search through the restart
+table to locate between which two restart points the sought reference or
+key should appear.
+
+Each record identified by a `restart_offset` stores the complete key in
+the `suffix` field of the record, making the compare operation during
+binary search straightforward.
+
+Once a restart point lexicographically before the sought reference has
+been identified, readers can linearly scan through the following record
+entries to locate the sought record, terminating if the current record
+sorts after (and therefore the sought key is not present).
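+
+A C sketch of this lookup within a single ref block already loaded in
+memory, reusing `read_be`, `read_ref_record` and `struct ref_rec` from
+the earlier sketches (the first-block special case, where offsets are
+file-relative, is ignored here):
+
+....
+#include <stdint.h>
+#include <string.h>
+
+/* Returns 1 and fills *out if `want` is found in this block. */
+int block_lookup(const unsigned char *block, const char *want,
+                 struct ref_rec *out)
+{
+    uint32_t block_len = read_be(block + 1, 3);
+    uint16_t restart_count = read_be(block + block_len - 2, 2);
+    const unsigned char *restarts =
+        block + block_len - 2 - 3 * restart_count;
+    size_t scan_end = restarts - block;
+    size_t ptr, start = read_be(restarts, 3); /* first restart = first record */
+    int lo = 0, hi = restart_count - 1;
+
+    /* binary search for the last restart whose name is <= want */
+    while (lo <= hi) {
+        int mid = (lo + hi) / 2;
+        size_t off = read_be(restarts + 3 * mid, 3);
+        struct ref_rec r;
+
+        r.name[0] = '\0';
+        read_ref_record(block, off, &r);    /* restart: full name */
+        if (strcmp(r.name, want) <= 0) {
+            start = off;
+            lo = mid + 1;
+        } else {
+            hi = mid - 1;
+        }
+    }
+
+    /* linear scan forward from the chosen restart point */
+    out->name[0] = '\0';
+    ptr = start;
+    while (ptr < scan_end) {
+        ptr = read_ref_record(block, ptr, out);
+        if (!strcmp(out->name, want))
+            return 1;
+        if (strcmp(out->name, want) > 0)
+            return 0;                       /* sorted past it */
+    }
+    return 0;
+}
+....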
+
+Restart point selection
++++++++++++++++++++++++
+
+Writers determine the restart points at file creation. The process is
+arbitrary, but every 16 or 64 records is recommended. Every 16 may be
+more suitable for smaller block sizes (4k or 8k), every 64 for larger
+block sizes (64k).
+
+More frequent restart points reduce prefix compression and increase the
+space consumed by the restart table, both of which increase file size.
+
+Less frequent restart points make prefix compression more effective,
+decreasing overall file size, with increased penalties for readers
+walking through more records after the binary search step.
+
+A maximum of `65535` restart points per block is supported.
+
+Considerations
+~~~~~~~~~~~~~~
+
+Lightweight refs dominate
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The reftable format assumes the vast majority of references are valued
+with a single object name and share common prefixes, such as Gerrit Code
+Review's `refs/changes/` namespace, GitHub's `refs/pulls/` namespace, or
+many lightweight tags in the `refs/tags/` namespace.
+
+Annotated tags storing the peeled object cost an additional object name per
+reference.
+
+Low overhead
+^^^^^^^^^^^^
+
+A reftable with very few references (e.g. git.git with 5 heads) is 269
+bytes, vs. 332 bytes for packed-refs. This supports
+reftable scaling down for transaction logs (below).
+
+Block size
+^^^^^^^^^^
+
+For a Gerrit Code Review type repository with many change refs, larger
+block sizes (64 KiB) and less frequent restart points (every 64) yield
+better compression due to more references within the block compressing
+against the prior reference.
+
+Larger block sizes reduce the index size, as the reftable will require
+fewer blocks to store the same number of references.
+
+Minimal disk seeks
+^^^^^^^^^^^^^^^^^^
+
+Assuming the index block has been loaded into memory, binary searching
+for any single reference requires exactly 1 disk seek to load the
+containing block.
+
+Scans and lookups dominate
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Scanning all references and lookup by name (or namespace such as
+`refs/heads/`) are the most common activities performed on repositories.
+Object names are stored directly with references to optimize this use case.
+
+Logs are infrequently read
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Logs are infrequently accessed, but can be large. Deflating log blocks
+saves disk space, with some increased penalty at read time.
+
+Logs are stored in an isolated section from refs, reducing the burden on
+reference readers that want to ignore logs. Further, historical logs can
+be isolated into log-only files.
+
+Logs are read backwards
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Logs are frequently accessed backwards (most recent N records for master
+to answer `master@{4}`), so log records are grouped by reference, and
+sorted descending by update index.
+
+Repository format
+~~~~~~~~~~~~~~~~~
+
+Version 1
+^^^^^^^^^
+
+A repository must set its `$GIT_DIR/config` to configure reftable:
+
+....
+[core]
+ repositoryformatversion = 1
+[extensions]
+ refStorage = reftable
+....
+
+Layout
+^^^^^^
+
+A collection of reftable files are stored in the `$GIT_DIR/reftable/`
+directory:
+
+....
+00000001-00000001.log
+00000002-00000002.ref
+00000003-00000003.ref
+....
+
+where reftable files are given unique names of the form
+`${min_update_index}-${max_update_index}.ref`.
+
+Log-only files use the `.log` extension, while ref-only and mixed ref
+and log files use the `.ref` extension.
+
+The stack ordering file is `$GIT_DIR/reftable/tables.list` and lists the
+current files, one per line, in order, from oldest (base) to newest
+(most recent):
+
+....
+$ cat .git/reftable/tables.list
+00000001-00000001.log
+00000002-00000002.ref
+00000003-00000003.ref
+....
+
+Readers must read `$GIT_DIR/reftable/tables.list` to determine which
+files are relevant right now, and search through the stack in reverse
+order (last reftable is examined first).
+
+Reftable files not listed in `tables.list` may be new (and about to be
+added to the stack by the active writer), or ancient and ready to be
+pruned.
+
+Backward compatibility
+^^^^^^^^^^^^^^^^^^^^^^
+
+Older clients should continue to recognize the directory as a git
+repository so they don't look for an enclosing repository in parent
+directories. To this end, a reftable-enabled repository must contain the
+following dummy files:
+
+* `.git/HEAD`, a regular file containing `ref: refs/heads/.invalid`.
+* `.git/refs/`, a directory
+* `.git/refs/heads`, a regular file
+
+Readers
+^^^^^^^
+
+Readers can obtain a consistent snapshot of the reference space by
+following these steps:
+
+1. Open and read the `tables.list` file.
+2. Open each of the reftable files that it mentions.
+3. If any of the files is missing, goto 1.
+4. Read from the now-open files as long as necessary.
+
+Update transactions
+^^^^^^^^^^^^^^^^^^^
+
+Although reftables are immutable, mutations are supported by writing a
+new reftable and atomically appending it to the stack:
+
+1. Acquire `tables.list.lock`.
+2. Read `tables.list` to determine current reftables.
+3. Select `update_index` to be most recent file's
+`max_update_index + 1`.
+4. Prepare temp reftable `tmp_XXXXXX`, including log entries.
+5. Rename `tmp_XXXXXX` to `${update_index}-${update_index}.ref`.
+6. Copy `tables.list` to `tables.list.lock`, appending file from (5).
+7. Rename `tables.list.lock` to `tables.list`.
+
+During step 4 the new file's `min_update_index` and `max_update_index`
+are both set to the `update_index` selected by step 3. All log records
+for the transaction use the same `update_index` in their keys. This
+enables later correlation of which references were updated by the same
+transaction.
+
+Because a single `tables.list.lock` file is used to manage locking, the
+repository is single-threaded for writers. Writers may have to busy-spin
+(with backoff) around creating `tables.list.lock`, for up to an
+acceptable wait period, aborting if the repository is too busy to
+mutate. Application servers wrapped around repositories (e.g. Gerrit
+Code Review) can layer their own lock/wait queue to improve fairness to
+writers.
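+
+A rough C sketch of the locking dance, using `O_CREAT|O_EXCL` for the
+lock file and `rename(2)` for the atomic publish (retries, error
+handling and the actual reftable writing are omitted; names are
+illustrative):
+
+....
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+/*
+ * Append one new reftable to the stack. new_table is the
+ * "${update_index}-${update_index}.ref" file already renamed into
+ * place (steps 4-5); this sketch covers steps 1-2 and 6-7.
+ */
+int append_to_stack(const char *dir, const char *new_table)
+{
+    char list[4096], lock[4096], line[4096];
+    FILE *in, *out;
+    int fd;
+
+    snprintf(list, sizeof(list), "%s/tables.list", dir);
+    snprintf(lock, sizeof(lock), "%s/tables.list.lock", dir);
+
+    fd = open(lock, O_WRONLY | O_CREAT | O_EXCL, 0666);
+    if (fd < 0)
+        return -1;                      /* another writer holds the lock */
+    out = fdopen(fd, "w");
+
+    in = fopen(list, "r");              /* copy the current stack ... */
+    if (in) {
+        while (fgets(line, sizeof(line), in))
+            fputs(line, out);
+        fclose(in);
+    }
+    fprintf(out, "%s\n", new_table);    /* ... and append the new file */
+    fclose(out);
+
+    if (rename(lock, list)) {           /* atomic publish */
+        unlink(lock);
+        return -1;
+    }
+    return 0;
+}
+....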
+
+Reference deletions
+^^^^^^^^^^^^^^^^^^^
+
+Deletion of any reference can be explicitly stored by setting the
+`value_type` to `0x0` and omitting the `value` field of the `ref_record`.
+as a tombstone, overriding any assertions about the existence of the
+reference from earlier files in the stack.
+
+Compaction
+^^^^^^^^^^
+
+A partial stack of reftables can be compacted by merging references
+using a straightforward merge join across reftables, selecting the most
+recent value for output, and omitting deleted references that do not
+appear in remaining, lower reftables.
+
+A compacted reftable should set its `min_update_index` to the smallest
+of the input files' `min_update_index`, and its `max_update_index`
+likewise to the largest input `max_update_index`.
+
+For the sake of illustration, assume the stack currently consists of
+reftable files (from oldest to newest): A, B, C, and D. The compactor is
+going to compact B and C, leaving A and D alone.
+
+1. Obtain lock `tables.list.lock` and read the `tables.list` file.
+2. Obtain locks `B.lock` and `C.lock`. Ownership of these locks
+prevents other processes from trying to compact these files.
+3. Release `tables.list.lock`.
+4. Compact `B` and `C` into a temp file
+`${min_update_index}-${max_update_index}_XXXXXX`.
+5. Reacquire lock `tables.list.lock`.
+6. Verify that `B` and `C` are still in the stack, in that order. This
+should always be the case, assuming that other processes are adhering to
+the locking protocol.
+7. Rename `${min_update_index}-${max_update_index}_XXXXXX` to
+`${min_update_index}-${max_update_index}.ref`.
+8. Write the new stack to `tables.list.lock`, replacing `B` and `C`
+with the file from (4).
+9. Rename `tables.list.lock` to `tables.list`.
+10. Delete `B` and `C`, perhaps after a short sleep to avoid forcing
+readers to backtrack.
+
+This strategy permits compactions to proceed independently of updates.
+
+Each reftable (compacted or not) is uniquely identified by its name, so
+open reftables can be cached by their name.
+
+Alternatives considered
+~~~~~~~~~~~~~~~~~~~~~~~
+
+bzip packed-refs
+^^^^^^^^^^^^^^^^
+
+`bzip2` can significantly shrink a large packed-refs file (e.g. 62 MiB
+compresses to 23 MiB, 37%). However, the bzip2 format does not support
+random access to a single reference. Readers must inflate and discard
+while performing a linear scan.
+
+Breaking packed-refs into chunks (individually compressing each chunk)
+would reduce the amount of data a reader must inflate, but still leaves
+the problem of indexing chunks to support readers efficiently locating
+the correct chunk.
+
+Given the compression achieved by reftable's encoding, it does not seem
+necessary to add the complexity of bzip/gzip/zlib.
+
+Michael Haggerty's alternate format
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Michael Haggerty proposed
+link:https://lore.kernel.org/git/CAMy9T_HCnyc1g8XWOOWhe7nN0aEFyyBskV2aOMb_fe%2BwGvEJ7A%40mail.gmail.com/[an
+alternate] format to reftable on the Git mailing list. This format uses
+smaller chunks, without the restart table, and avoids block alignment
+with padding. Reflog entries immediately follow each ref, and are thus
+interleaved between refs.
+
+Performance testing indicates reftable is faster for lookups (51%
+faster, 11.2 usec vs. 5.4 usec), although reftable produces a slightly
+larger file (+ ~3.2%, 28.3M vs 29.2M):
+
+[cols=">,>,>,>",options="header",]
+|=====================================
+|format |size |seek cold |seek hot
+|mh-alt |28.3 M |23.4 usec |11.2 usec
+|reftable |29.2 M |19.9 usec |5.4 usec
+|=====================================
+
+JGit Ketch RefTree
+^^^^^^^^^^^^^^^^^^
+
+https://dev.eclipse.org/mhonarc/lists/jgit-dev/msg03073.html[JGit Ketch]
+proposed
+link:https://lore.kernel.org/git/CAJo%3DhJvnAPNAdDcAAwAvU9C4RVeQdoS3Ev9WTguHx4fD0V_nOg%40mail.gmail.com/[RefTree],
+an encoding of references inside Git tree objects stored as part of the
+repository's object database.
+
+The RefTree format adds additional load on the object database storage
+layer (more loose objects, more objects in packs), and relies heavily on
+the packer's delta compression to save space. Namespaces which are flat
+(e.g. thousands of tags in refs/tags) initially create very large loose
+objects, and so RefTree does not address the problem of copying many
+references to modify a handful.
+
+Flat namespaces are not efficiently searchable in RefTree, as tree
+objects in canonical formatting cannot be binary searched. This fails
+the need to handle a large number of references in a single namespace,
+such as GitHub's `refs/pulls`, or a project with many tags.
+
+LMDB
+^^^^
+
+David Turner proposed
+https://lore.kernel.org/git/1455772670-21142-26-git-send-email-dturner@twopensource.com/[using
+LMDB], as LMDB is lightweight (64k of runtime code) and has a
+GPL-compatible license.
+
+A downside of LMDB is its reliance on a single C implementation. This
+makes embedding inside JGit (a popular reimplementation of Git)
+difficult, and hoisting onto virtual storage (for JGit DFS) virtually
+impossible.
+
+A common format that can be supported by all major Git implementations
+(git-core, JGit, libgit2) is strongly preferred.
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt
index 833652983f..fd480b8645 100644
--- a/Documentation/user-manual.txt
+++ b/Documentation/user-manual.txt
@@ -347,7 +347,7 @@ $ git branch -r
origin/man
origin/master
origin/next
- origin/pu
+ origin/seen
origin/todo
------------------------------------------------
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 06a5333ee6..7b0cfeb92e 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.27.0
+DEF_VER=v2.27.GIT
LF='
'
diff --git a/alloc.c b/alloc.c
index 1c64c4dd16..957a0af362 100644
--- a/alloc.c
+++ b/alloc.c
@@ -99,23 +99,27 @@ void *alloc_object_node(struct repository *r)
return obj;
}
-static unsigned int alloc_commit_index(struct repository *r)
+/*
+ * The returned count is to be used as an index into commit slabs,
+ * that are *NOT* maintained per repository, and that is why a single
+ * global counter is used.
+ */
+static unsigned int alloc_commit_index(void)
{
- return r->parsed_objects->commit_count++;
+ static unsigned int parsed_commits_count;
+ return parsed_commits_count++;
}
-void init_commit_node(struct repository *r, struct commit *c)
+void init_commit_node(struct commit *c)
{
c->object.type = OBJ_COMMIT;
- c->index = alloc_commit_index(r);
- c->graph_pos = COMMIT_NOT_FROM_GRAPH;
- c->generation = GENERATION_NUMBER_INFINITY;
+ c->index = alloc_commit_index();
}
void *alloc_commit_node(struct repository *r)
{
struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
- init_commit_node(r, c);
+ init_commit_node(c);
return c;
}
diff --git a/alloc.h b/alloc.h
index ed1071c11e..371d388b55 100644
--- a/alloc.h
+++ b/alloc.h
@@ -9,7 +9,7 @@ struct repository;
void *alloc_blob_node(struct repository *r);
void *alloc_tree_node(struct repository *r);
-void init_commit_node(struct repository *r, struct commit *c);
+void init_commit_node(struct commit *c);
void *alloc_commit_node(struct repository *r);
void *alloc_tag_node(struct repository *r);
void *alloc_object_node(struct repository *r);
diff --git a/blame.c b/blame.c
index da7e28800e..82fa16d658 100644
--- a/blame.c
+++ b/blame.c
@@ -1272,7 +1272,7 @@ static int maybe_changed_path(struct repository *r,
if (!bd)
return 1;
- if (origin->commit->generation == GENERATION_NUMBER_INFINITY)
+ if (commit_graph_generation(origin->commit) == GENERATION_NUMBER_INFINITY)
return 1;
filter = get_bloom_filter(r, origin->commit, 0);
diff --git a/blob.c b/blob.c
index 36f9abda19..182718aba9 100644
--- a/blob.c
+++ b/blob.c
@@ -10,7 +10,7 @@ struct blob *lookup_blob(struct repository *r, const struct object_id *oid)
struct object *obj = lookup_object(r, oid);
if (!obj)
return create_object(r, oid, alloc_blob_node(r));
- return object_as_type(r, obj, OBJ_BLOB, 0);
+ return object_as_type(obj, OBJ_BLOB, 0);
}
int parse_blob_buffer(struct blob *item, void *buffer, unsigned long size)
diff --git a/bloom.c b/bloom.c
index 6c7611847a..6a7f2f2bdc 100644
--- a/bloom.c
+++ b/bloom.c
@@ -33,15 +33,16 @@ static int load_bloom_filter_from_graph(struct commit_graph *g,
struct commit *c)
{
uint32_t lex_pos, start_index, end_index;
+ uint32_t graph_pos = commit_graph_position(c);
- while (c->graph_pos < g->num_commits_in_base)
+ while (graph_pos < g->num_commits_in_base)
g = g->base_graph;
/* The commit graph commit 'c' lives in doesn't carry bloom filters. */
if (!g->chunk_bloom_indexes)
return 0;
- lex_pos = c->graph_pos - g->num_commits_in_base;
+ lex_pos = graph_pos - g->num_commits_in_base;
end_index = get_be32(g->chunk_bloom_indexes + 4 * lex_pos);
@@ -193,7 +194,7 @@ struct bloom_filter *get_bloom_filter(struct repository *r,
if (!filter->data) {
load_commit_graph_info(r, c);
- if (c->graph_pos != COMMIT_NOT_FROM_GRAPH &&
+ if (commit_graph_position(c) != COMMIT_NOT_FROM_GRAPH &&
r->objects->commit_graph->chunk_bloom_indexes) {
if (load_bloom_filter_from_graph(r->objects->commit_graph, filter, c))
return filter;
diff --git a/branch.c b/branch.c
index 2d9e7675a6..7095f78058 100644
--- a/branch.c
+++ b/branch.c
@@ -370,7 +370,7 @@ int replace_each_worktree_head_symref(const char *oldref, const char *newref,
const char *logmsg)
{
int ret = 0;
- struct worktree **worktrees = get_worktrees(0);
+ struct worktree **worktrees = get_worktrees();
int i;
for (i = 0; worktrees[i]; i++) {
diff --git a/bugreport.c b/bugreport.c
index 28f4568b01..09579e268d 100644
--- a/bugreport.c
+++ b/bugreport.c
@@ -180,7 +180,9 @@ int cmd_main(int argc, const char **argv)
die(_("couldn't create a new file at '%s'"), report_path.buf);
}
- strbuf_write_fd(&buffer, report);
+ if (write_in_full(report, buffer.buf, buffer.len) < 0)
+ die_errno(_("unable to write to %s"), report_path.buf);
+
close(report);
/*
diff --git a/builtin/branch.c b/builtin/branch.c
index accb61b1aa..e82301fb1b 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -468,7 +468,7 @@ static void print_current_branch_name(void)
static void reject_rebase_or_bisect_branch(const char *target)
{
- struct worktree **worktrees = get_worktrees(0);
+ struct worktree **worktrees = get_worktrees();
int i;
for (i = 0; worktrees[i]; i++) {
@@ -693,7 +693,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
list = 1;
if (!!delete + !!rename + !!copy + !!new_upstream + !!show_current +
- list + unset_upstream > 1)
+ list + edit_description + unset_upstream > 1)
usage_with_options(builtin_branch_usage, options);
if (filter.abbrev == -1)
diff --git a/builtin/clean.c b/builtin/clean.c
index 4ca12bc0c0..5a9c29a558 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -924,12 +924,6 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
0);
memset(&dir, 0, sizeof(dir));
- if (ignored_only)
- dir.flags |= DIR_SHOW_IGNORED;
-
- if (ignored && ignored_only)
- die(_("-x and -X cannot be used together"));
-
if (!interactive && !dry_run && !force) {
if (config_set)
die(_("clean.requireForce set to true and neither -i, -n, nor -f given; "
@@ -946,6 +940,13 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
dir.flags |= DIR_SHOW_OTHER_DIRECTORIES;
+ if (ignored && ignored_only)
+ die(_("-x and -X cannot be used together"));
+ if (!ignored)
+ setup_standard_excludes(&dir);
+ if (ignored_only)
+ dir.flags |= DIR_SHOW_IGNORED;
+
if (argc) {
/*
* Remaining args implies pathspecs specified, and we should
@@ -954,15 +955,41 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
remove_directories = 1;
}
- if (remove_directories)
- dir.flags |= DIR_SHOW_IGNORED_TOO | DIR_KEEP_UNTRACKED_CONTENTS;
+ if (remove_directories && !ignored_only) {
+ /*
+ * We need to know about ignored files too:
+ *
+ * If (ignored), then we will delete ignored files as well.
+ *
+ * If (!ignored), then even though we are not doing
+ * anything with ignored files, we need to know about them
+ * so that we can avoid deleting a directory of untracked
+ * files that also contains an ignored file within it.
+ *
+ * For the (!ignored) case, since we only need to avoid
+ * deleting ignored files, we can set
+ * DIR_SHOW_IGNORED_TOO_MODE_MATCHING in order to avoid
+ * recursing into a directory which is itself ignored.
+ */
+ dir.flags |= DIR_SHOW_IGNORED_TOO;
+ if (!ignored)
+ dir.flags |= DIR_SHOW_IGNORED_TOO_MODE_MATCHING;
+
+ /*
+ * Let the fill_directory() machinery know that we aren't
+ * just recursing to collect the ignored files; we want all
+ * the untracked ones so that we can delete them. (Note:
+ * we could also set DIR_KEEP_UNTRACKED_CONTENTS when
+ * ignored_only is true, since DIR_KEEP_UNTRACKED_CONTENTS
+ * only has effect in combination with DIR_SHOW_IGNORED_TOO. It makes
+ * the code clearer to exclude it, though.)
+ */
+ dir.flags |= DIR_KEEP_UNTRACKED_CONTENTS;
+ }
if (read_cache() < 0)
die(_("index file corrupt"));
- if (!ignored)
- setup_standard_excludes(&dir);
-
pl = add_pattern_list(&dir, EXC_CMDL, "--exclude option");
for (i = 0; i < exclude_list.nr; i++)
add_pattern(exclude_list.items[i].string, "", 0, pl, -(i+1));
diff --git a/builtin/clone.c b/builtin/clone.c
index a924e3d780..bef70745c0 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -945,7 +945,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
{
int is_bundle = 0, is_local;
const char *repo_name, *repo, *work_tree, *git_dir;
- char *path, *dir;
+ char *path, *dir, *display_repo = NULL;
int dest_exists;
const struct ref *refs, *remote_head;
const struct ref *remote_head_points_at;
@@ -1000,10 +1000,11 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
path = get_repo_path(repo_name, &is_bundle);
if (path)
repo = absolute_pathdup(repo_name);
- else if (!strchr(repo_name, ':'))
- die(_("repository '%s' does not exist"), repo_name);
- else
+ else if (strchr(repo_name, ':')) {
repo = repo_name;
+ display_repo = transport_anonymize_url(repo);
+ } else
+ die(_("repository '%s' does not exist"), repo_name);
/* no need to be strict, transport_set_option() will validate it again */
if (option_depth && atoi(option_depth) < 1)
@@ -1020,7 +1021,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
die(_("destination path '%s' already exists and is not "
"an empty directory."), dir);
- strbuf_addf(&reflog_msg, "clone: from %s", repo);
+ strbuf_addf(&reflog_msg, "clone: from %s",
+ display_repo ? display_repo : repo);
+ free(display_repo);
if (option_bare)
work_tree = NULL;
@@ -1218,6 +1221,15 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
refs = transport_get_remote_refs(transport, &ref_prefixes);
if (refs) {
+ int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
+
+ /*
+ * Now that we know what algorithm the remote side is using,
+ * let's set ours to the same thing.
+ */
+ initialize_repository_version(hash_algo);
+ repo_set_hash_algo(the_repository, hash_algo);
+
mapped_refs = wanted_peer_refs(refs, &remote->fetch);
/*
* transport_get_remote_refs() may return refs with null sha-1
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 75455da138..f6797e2a9f 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -154,7 +154,7 @@ static int read_one_commit(struct oidset *commits, struct progress *progress,
NULL, 0);
if (!result)
return error(_("invalid object: %s"), hash);
- else if (object_as_type(the_repository, result, OBJ_COMMIT, 1))
+ else if (object_as_type(result, OBJ_COMMIT, 1))
oidset_insert(commits, &result->oid);
display_progress(progress, oidset_size(commits));
diff --git a/builtin/config.c b/builtin/config.c
index ee4aef6a35..5e39f61885 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -672,7 +672,7 @@ int cmd_config(int argc, const char **argv, const char *prefix)
given_config_source.file = git_pathdup("config");
given_config_source.scope = CONFIG_SCOPE_LOCAL;
} else if (use_worktree_config) {
- struct worktree **worktrees = get_worktrees(0);
+ struct worktree **worktrees = get_worktrees();
if (repository_format_worktree_config)
given_config_source.file = git_pathdup("config.worktree");
else if (worktrees[0] && worktrees[1])
diff --git a/builtin/diff-files.c b/builtin/diff-files.c
index 86ae474fbf..1e352dd8f7 100644
--- a/builtin/diff-files.c
+++ b/builtin/diff-files.c
@@ -28,6 +28,13 @@ int cmd_diff_files(int argc, const char **argv, const char *prefix)
git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
repo_init_revisions(the_repository, &rev, prefix);
rev.abbrev = 0;
+
+ /*
+ * Consider "intent-to-add" files as new by default, unless
+ * explicitly specified in the command line or anywhere else.
+ */
+ rev.diffopt.ita_invisible_in_index = 1;
+
precompose_argv(argc, argv);
argc = setup_revisions(argc, argv, &rev, NULL);
diff --git a/builtin/diff.c b/builtin/diff.c
index 8537b17bd5..8c36da09b6 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -6,6 +6,7 @@
#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
+#include "ewah/ewok.h"
#include "lockfile.h"
#include "color.h"
#include "commit.h"
@@ -23,7 +24,13 @@
#define DIFF_NO_INDEX_IMPLICIT 2
static const char builtin_diff_usage[] =
-"git diff [<options>] [<commit> [<commit>]] [--] [<path>...]";
+"git diff [<options>] [<commit>] [--] [<path>...]\n"
+" or: git diff [<options>] --cached [<commit>] [--] [<path>...]\n"
+" or: git diff [<options>] <commit> [<commit>...] <commit> [--] [<path>...]\n"
+" or: git diff [<options>] <commit>...<commit>] [--] [<path>...]\n"
+" or: git diff [<options>] <blob> <blob>]\n"
+" or: git diff [<options>] --no-index [--] <path> <path>]\n"
+COMMON_DIFF_OPTIONS_HELP;
static const char *blob_path(struct object_array_entry *entry)
{
@@ -254,6 +261,108 @@ static int builtin_diff_files(struct rev_info *revs, int argc, const char **argv
return run_diff_files(revs, options);
}
+struct symdiff {
+ struct bitmap *skip;
+ int warn;
+ const char *base, *left, *right;
+};
+
+/*
+ * Check for symmetric-difference arguments, and if present, arrange
+ * everything we need to know to handle them correctly. As a bonus,
+ * weed out all bogus range-based revision specifications, e.g.,
+ * "git diff A..B C..D" or "git diff A..B C" get rejected.
+ *
+ * For an actual symmetric diff, *symdiff is set this way:
+ *
+ * - its skip is non-NULL and marks *all* rev->pending.objects[i]
+ * indices that the caller should ignore (extra merge bases, of
+ * which there might be many, and A in A...B). Note that the
+ * chosen merge base and right side are NOT marked.
+ * - warn is set if there are multiple merge bases.
+ * - base, left, and right point to the names to use in a
+ * warning about multiple merge bases.
+ *
+ * If there is no symmetric diff argument, sym->skip is NULL and
+ * sym->warn is cleared. The remaining fields are not set.
+ */
+static void symdiff_prepare(struct rev_info *rev, struct symdiff *sym)
+{
+ int i, is_symdiff = 0, basecount = 0, othercount = 0;
+ int lpos = -1, rpos = -1, basepos = -1;
+ struct bitmap *map = NULL;
+
+ /*
+ * Use the whence fields to find merge bases and left and
+ * right parts of symmetric difference, so that we do not
+ * depend on the order that revisions are parsed. If there
+ * are any revs that aren't from these sources, we have a
+ * "git diff C A...B" or "git diff A...B C" case. Or we
+ * could even get "git diff A...B C...E", for instance.
+ *
+ * If we don't have just one merge base, we pick one
+ * at random.
+ *
+ * NB: REV_CMD_LEFT, REV_CMD_RIGHT are also used for A..B,
+ * so we must check for SYMMETRIC_LEFT too. The two arrays
+ * rev->pending.objects and rev->cmdline.rev are parallel.
+ */
+ for (i = 0; i < rev->cmdline.nr; i++) {
+ struct object *obj = rev->pending.objects[i].item;
+ switch (rev->cmdline.rev[i].whence) {
+ case REV_CMD_MERGE_BASE:
+ if (basepos < 0)
+ basepos = i;
+ basecount++;
+ break; /* do mark all bases */
+ case REV_CMD_LEFT:
+ if (lpos >= 0)
+ usage(builtin_diff_usage);
+ lpos = i;
+ if (obj->flags & SYMMETRIC_LEFT) {
+ is_symdiff = 1;
+ break; /* do mark A */
+ }
+ continue;
+ case REV_CMD_RIGHT:
+ if (rpos >= 0)
+ usage(builtin_diff_usage);
+ rpos = i;
+ continue; /* don't mark B */
+ case REV_CMD_PARENTS_ONLY:
+ case REV_CMD_REF:
+ case REV_CMD_REV:
+ othercount++;
+ continue;
+ }
+ if (map == NULL)
+ map = bitmap_new();
+ bitmap_set(map, i);
+ }
+
+ /*
+ * Forbid any additional revs for both A...B and A..B.
+ */
+ if (lpos >= 0 && othercount > 0)
+ usage(builtin_diff_usage);
+
+ if (!is_symdiff) {
+ bitmap_free(map);
+ sym->warn = 0;
+ sym->skip = NULL;
+ return;
+ }
+
+ sym->left = rev->pending.objects[lpos].name;
+ sym->right = rev->pending.objects[rpos].name;
+ sym->base = rev->pending.objects[basepos].name;
+ if (basecount == 0)
+ die(_("%s...%s: no merge base"), sym->left, sym->right);
+ bitmap_unset(map, basepos); /* unmark the base we want */
+ sym->warn = basecount > 1;
+ sym->skip = map;
+}
+
int cmd_diff(int argc, const char **argv, const char *prefix)
{
int i;
@@ -263,19 +372,29 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
struct object_array_entry *blob[2];
int nongit = 0, no_index = 0;
int result = 0;
+ struct symdiff sdiff;
/*
* We could get N tree-ish in the rev.pending_objects list.
- * Also there could be M blobs there, and P pathspecs.
+ * Also there could be M blobs there, and P pathspecs. --cached may
+ * also be present.
*
* N=0, M=0:
- * cache vs files (diff-files)
+ * cache vs files (diff-files)
+ *
+ * N=0, M=0, --cached:
+ * HEAD vs cache (diff-index --cached)
+ *
* N=0, M=2:
* compare two random blobs. P must be zero.
+ *
* N=0, M=1, P=1:
- * compare a blob with a working tree file.
+ * compare a blob with a working tree file.
*
* N=1, M=0:
+ * tree vs files (diff-index)
+ *
+ * N=1, M=0, --cached:
* tree vs cache (diff-index --cached)
*
* N=2, M=0:
@@ -382,6 +501,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
}
}
+ symdiff_prepare(&rev, &sdiff);
for (i = 0; i < rev.pending.nr; i++) {
struct object_array_entry *entry = &rev.pending.objects[i];
struct object *obj = entry->item;
@@ -396,6 +516,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
obj = &get_commit_tree(((struct commit *)obj))->object;
if (obj->type == OBJ_TREE) {
+ if (sdiff.skip && bitmap_get(sdiff.skip, i))
+ continue;
obj->flags |= flags;
add_object_array(obj, name, &ent);
} else if (obj->type == OBJ_BLOB) {
@@ -437,21 +559,12 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
usage(builtin_diff_usage);
else if (ent.nr == 1)
result = builtin_diff_index(&rev, argc, argv);
- else if (ent.nr == 2)
+ else if (ent.nr == 2) {
+ if (sdiff.warn)
+ warning(_("%s...%s: multiple merge bases, using %s"),
+ sdiff.left, sdiff.right, sdiff.base);
result = builtin_diff_tree(&rev, argc, argv,
&ent.objects[0], &ent.objects[1]);
- else if (ent.objects[0].item->flags & UNINTERESTING) {
- /*
- * diff A...B where there is at least one merge base
- * between A and B. We have ent.objects[0] ==
- * merge-base, ent.objects[ents-2] == A, and
- * ent.objects[ents-1] == B. Show diff between the
- * base and B. Note that we pick one merge base at
- * random if there are more than one.
- */
- result = builtin_diff_tree(&rev, argc, argv,
- &ent.objects[0],
- &ent.objects[ent.nr-1]);
} else
result = builtin_diff_combined(&rev, argc, argv,
ent.objects, ent.nr);
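
The symdiff_prepare() hunk above tags the extra pending objects (every merge base except the chosen one, plus the left side A of A...B) in a bitmap so the loop in cmd_diff() can skip them. Below is a minimal standalone sketch of that mark-and-skip pattern over parallel arrays; it uses a plain uint64_t as a toy bitmap and invented names rather than git's bitmap or rev_info types.

#include <stdio.h>
#include <stdint.h>

enum cmd_whence { MERGE_BASE, LEFT, RIGHT, OTHER };

int main(void)
{
	/* parallel arrays standing in for rev->pending.objects and rev->cmdline.rev */
	const char *name[] = { "base1", "base2", "A", "B" };
	enum cmd_whence whence[] = { MERGE_BASE, MERGE_BASE, LEFT, RIGHT };
	size_t n = sizeof(name) / sizeof(name[0]);
	uint64_t skip = 0;	/* toy bitmap: one bit per pending object */
	int basepos = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		switch (whence[i]) {
		case MERGE_BASE:
			if (basepos < 0)
				basepos = (int)i;	/* remember the chosen base */
			skip |= UINT64_C(1) << i;	/* mark every base ... */
			break;
		case LEFT:
			skip |= UINT64_C(1) << i;	/* ... and A in A...B */
			break;
		default:
			break;				/* leave B (and others) unmarked */
		}
	}
	if (basepos >= 0)
		skip &= ~(UINT64_C(1) << basepos);	/* unmark the base we keep */

	/* the consumer walks the pending list and ignores marked entries */
	for (i = 0; i < n; i++)
		if (!(skip & (UINT64_C(1) << i)))
			printf("diff endpoint: %s\n", name[i]);
	return 0;
}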
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index 94b0c89b82..bbb5c96167 100644
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -48,8 +48,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
struct ref **sought = NULL;
int nr_sought = 0, alloc_sought = 0;
int fd[2];
- char *pack_lockfile = NULL;
- char **pack_lockfile_ptr = NULL;
+ struct string_list pack_lockfiles = STRING_LIST_INIT_DUP;
+ struct string_list *pack_lockfiles_ptr = NULL;
struct child_process *conn;
struct fetch_pack_args args;
struct oid_array shallow = OID_ARRAY_INIT;
@@ -134,7 +134,7 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
}
if (!strcmp("--lock-pack", arg)) {
args.lock_pack = 1;
- pack_lockfile_ptr = &pack_lockfile;
+ pack_lockfiles_ptr = &pack_lockfiles;
continue;
}
if (!strcmp("--check-self-contained-and-connected", arg)) {
@@ -235,10 +235,15 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
}
ref = fetch_pack(&args, fd, ref, sought, nr_sought,
- &shallow, pack_lockfile_ptr, version);
- if (pack_lockfile) {
- printf("lock %s\n", pack_lockfile);
+ &shallow, pack_lockfiles_ptr, version);
+ if (pack_lockfiles.nr) {
+ int i;
+
+ printf("lock %s\n", pack_lockfiles.items[0].string);
fflush(stdout);
+ for (i = 1; i < pack_lockfiles.nr; i++)
+ warning(_("Lockfile created but not reported: %s"),
+ pack_lockfiles.items[i].string);
}
if (args.check_self_contained_and_connected &&
args.self_contained_and_connected) {
diff --git a/builtin/fetch.c b/builtin/fetch.c
index b5788c16bf..82ac4be8a5 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -1758,8 +1758,13 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
/* Record the command line for the reflog */
strbuf_addstr(&default_rla, "fetch");
- for (i = 1; i < argc; i++)
- strbuf_addf(&default_rla, " %s", argv[i]);
+ for (i = 1; i < argc; i++) {
+ /* This handles non-URLs gracefully */
+ char *anon = transport_anonymize_url(argv[i]);
+
+ strbuf_addf(&default_rla, " %s", anon);
+ free(anon);
+ }
fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
&recurse_submodules);
@@ -1790,9 +1795,6 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
if (depth || deepen_since || deepen_not.nr)
deepen = 1;
- if (filter_options.choice && !has_promisor_remote())
- die("--filter can only be used when extensions.partialClone is set");
-
if (all) {
if (argc == 1)
die(_("fetch --all does not take a repository argument"));
diff --git a/builtin/fsck.c b/builtin/fsck.c
index f02cbdb439..37aa07da78 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -241,7 +241,7 @@ static void mark_unreachable_referents(const struct object_id *oid)
enum object_type type = oid_object_info(the_repository,
&obj->oid, NULL);
if (type > 0)
- object_as_type(the_repository, obj, type, 0);
+ object_as_type(obj, type, 0);
}
options.walk = mark_used;
@@ -577,7 +577,7 @@ static void get_default_heads(void)
for_each_rawref(fsck_handle_ref, NULL);
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
struct strbuf ref = STRBUF_INIT;
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index f176dd28c8..f865666db9 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1555,13 +1555,9 @@ static void read_v2_anomalous_offsets(struct packed_git *p,
{
const uint32_t *idx1, *idx2;
uint32_t i;
- const uint32_t hashwords = the_hash_algo->rawsz / sizeof(uint32_t);
/* The address of the 4-byte offset table */
- idx1 = (((const uint32_t *)p->index_data)
- + 2 /* 8-byte header */
- + 256 /* fan out */
- + hashwords * p->num_objects /* object ID table */
+ idx1 = (((const uint32_t *)((const uint8_t *)p->index_data + p->crc_offset))
+ p->num_objects /* CRC32 table */
);
@@ -1671,6 +1667,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
unsigned char pack_hash[GIT_MAX_RAWSZ];
unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
int report_end_of_input = 0;
+ int hash_algo = 0;
/*
* index-pack never needs to fetch missing objects except when
@@ -1764,6 +1761,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
die(_("bad %s"), arg);
} else if (skip_prefix(arg, "--max-input-size=", &arg)) {
max_input_size = strtoumax(arg, NULL, 10);
+ } else if (skip_prefix(arg, "--object-format=", &arg)) {
+ hash_algo = hash_algo_by_name(arg);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("unknown hash algorithm '%s'"), arg);
+ repo_set_hash_algo(the_repository, hash_algo);
} else
usage(index_pack_usage);
continue;
@@ -1780,6 +1782,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
die(_("--fix-thin cannot be used without --stdin"));
if (from_stdin && !startup_info->have_repository)
die(_("--stdin requires a git repository"));
+ if (from_stdin && hash_algo)
+ die(_("--object-format cannot be used with --stdin"));
if (!index_name && pack_name)
index_name = derive_filename(pack_name, "idx", &index_name_buf);
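
The read_v2_anomalous_offsets() hunk above locates the 4-byte offset table inside a version-2 pack index, now starting from the stored crc_offset instead of re-deriving the size of the object-ID table (which is why the hashwords computation can go away). As a worked illustration of that layout arithmetic (8-byte header, 256-entry fan-out, N object IDs, N CRC32 words, then N offsets), here is a small self-contained program; the helper names are invented for the example.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Byte offset of the CRC32 table in a v2 .idx file. */
static size_t crc_offset(uint32_t nr_objects, size_t hash_rawsz)
{
	return 8				/* \377tOc magic + version */
	     + 256 * 4				/* fan-out table */
	     + (size_t)nr_objects * hash_rawsz;	/* object ID table */
}

/* Byte offset of the 4-byte offset table: CRC table plus one uint32 per object. */
static size_t ofs_table_offset(uint32_t nr_objects, size_t hash_rawsz)
{
	return crc_offset(nr_objects, hash_rawsz) + (size_t)nr_objects * 4;
}

int main(void)
{
	uint32_t nr = 1000;

	printf("SHA-1 idx:   crc at %zu, offsets at %zu\n",
	       crc_offset(nr, 20), ofs_table_offset(nr, 20));
	printf("SHA-256 idx: crc at %zu, offsets at %zu\n",
	       crc_offset(nr, 32), ofs_table_offset(nr, 32));
	return 0;
}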
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index 6ef519514b..3a4dd12903 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -118,6 +118,10 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
transport->server_options = &server_options;
ref = transport_get_remote_refs(transport, &ref_prefixes);
+ if (ref) {
+ int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
+ repo_set_hash_algo(the_repository, hash_algo);
+ }
if (transport_disconnect(transport)) {
UNLEAK(sorting);
return 1;
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index c5b433a23f..7016b28485 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -117,6 +117,8 @@ static unsigned long window_memory_limit = 0;
static struct list_objects_filter_options filter_options;
+static struct string_list uri_protocols = STRING_LIST_INIT_NODUP;
+
enum missing_action {
MA_ERROR = 0, /* fail if any missing objects are encountered */
MA_ALLOW_ANY, /* silently allow ALL missing objects */
@@ -125,6 +127,15 @@ enum missing_action {
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;
+struct configured_exclusion {
+ struct oidmap_entry e;
+ char *pack_hash_hex;
+ char *uri;
+};
+static struct oidmap configured_exclusions;
+
+static struct oidset excluded_by_config;
+
/*
* stats
*/
@@ -969,6 +980,25 @@ static void write_reused_pack(struct hashfile *f)
unuse_pack(&w_curs);
}
+static void write_excluded_by_configs(void)
+{
+ struct oidset_iter iter;
+ const struct object_id *oid;
+
+ oidset_iter_init(&excluded_by_config, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
+ struct configured_exclusion *ex =
+ oidmap_get(&configured_exclusions, oid);
+
+ if (!ex)
+ BUG("configured exclusion wasn't configured");
+ write_in_full(1, ex->pack_hash_hex, strlen(ex->pack_hash_hex));
+ write_in_full(1, " ", 1);
+ write_in_full(1, ex->uri, strlen(ex->uri));
+ write_in_full(1, "\n", 1);
+ }
+}
+
static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);
@@ -1266,6 +1296,25 @@ static int want_object_in_pack(const struct object_id *oid,
}
}
+ if (uri_protocols.nr) {
+ struct configured_exclusion *ex =
+ oidmap_get(&configured_exclusions, oid);
+ int i;
+ const char *p;
+
+ if (ex) {
+ for (i = 0; i < uri_protocols.nr; i++) {
+ if (skip_prefix(ex->uri,
+ uri_protocols.items[i].string,
+ &p) &&
+ *p == ':') {
+ oidset_insert(&excluded_by_config, oid);
+ return 0;
+ }
+ }
+ }
+ }
+
return 1;
}
@@ -2864,6 +2913,29 @@ static int git_pack_config(const char *k, const char *v, void *cb)
pack_idx_opts.version);
return 0;
}
+ if (!strcmp(k, "uploadpack.blobpackfileuri")) {
+ struct configured_exclusion *ex = xmalloc(sizeof(*ex));
+ const char *oid_end, *pack_end;
+ /*
+ * Stores the pack hash. This is not a true object ID, but is
+ * of the same form.
+ */
+ struct object_id pack_hash;
+
+ if (parse_oid_hex(v, &ex->e.oid, &oid_end) ||
+ *oid_end != ' ' ||
+ parse_oid_hex(oid_end + 1, &pack_hash, &pack_end) ||
+ *pack_end != ' ')
+ die(_("value of uploadpack.blobpackfileuri must be "
+ "of the form '<object-hash> <pack-hash> <uri>' (got '%s')"), v);
+ if (oidmap_get(&configured_exclusions, &ex->e.oid))
+ die(_("object already configured in another "
+ "uploadpack.blobpackfileuri (got '%s')"), v);
+ ex->pack_hash_hex = xcalloc(1, pack_end - oid_end);
+ memcpy(ex->pack_hash_hex, oid_end + 1, pack_end - oid_end - 1);
+ ex->uri = xstrdup(pack_end + 1);
+ oidmap_put(&configured_exclusions, ex);
+ }
return git_default_config(k, v, cb);
}
@@ -3462,6 +3534,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
N_("do not pack objects in promisor packfiles")),
OPT_BOOL(0, "delta-islands", &use_delta_islands,
N_("respect islands during delta compression")),
+ OPT_STRING_LIST(0, "uri-protocol", &uri_protocols,
+ N_("protocol"),
+ N_("exclude any configured uploadpack.blobpackfileuri with this protocol")),
OPT_END(),
};
@@ -3650,6 +3725,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
}
trace2_region_enter("pack-objects", "write-pack-file", the_repository);
+ write_excluded_by_configs();
write_pack_file();
trace2_region_leave("pack-objects", "write-pack-file", the_repository);
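
git_pack_config() above insists that uploadpack.blobpackfileuri values look like '<object-hash> <pack-hash> <uri>'. The sketch below parses that shape with plain C string routines instead of parse_oid_hex(); the 40-digit length check assumes SHA-1 purely to keep the example small, and all input values are made up.

#include <stdio.h>
#include <string.h>

/* Split "<object-hash> <pack-hash> <uri>"; returns 0 on success. */
static int parse_exclusion(const char *v, char *oid, char *pack, const char **uri)
{
	size_t len = strspn(v, "0123456789abcdef");

	if (len != 40 || v[len] != ' ')
		return -1;
	memcpy(oid, v, len);
	oid[len] = '\0';

	v += len + 1;
	len = strspn(v, "0123456789abcdef");
	if (len != 40 || v[len] != ' ')
		return -1;
	memcpy(pack, v, len);
	pack[len] = '\0';

	*uri = v + len + 1;
	return (*uri)[0] ? 0 : -1;
}

int main(void)
{
	const char *v =
	    "1111111111111111111111111111111111111111 "
	    "2222222222222222222222222222222222222222 "
	    "https://cdn.example.com/big-blobs.pack";
	char oid[41], pack[41];
	const char *uri;

	if (!parse_exclusion(v, oid, pack, &uri))
		printf("object %s lives in pack %s at %s\n", oid, pack, uri);
	else
		printf("malformed value\n");
	return 0;
}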
diff --git a/builtin/pull.c b/builtin/pull.c
index 00e5857a8d..8159c5d7c9 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -1025,12 +1025,14 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
commit_list_insert(head, &list);
merge_head = lookup_commit_reference(the_repository,
&merge_heads.oid[0]);
- if (is_descendant_of(merge_head, list)) {
+ if (repo_is_descendant_of(the_repository,
+ merge_head, list)) {
/* we can fast-forward this without invoking rebase */
opt_ff = "--ff-only";
ran_ff = 1;
ret = run_merge();
}
+ free_commit_list(list);
}
if (!ran_ff)
ret = run_rebase(&curr_head, merge_heads.oid, &rebase_fork_point);
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index ea3d0f01af..d43663bb0a 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -249,6 +249,7 @@ static void show_ref(const char *path, const struct object_id *oid)
strbuf_addf(&cap, " push-cert=%s", push_cert_nonce);
if (advertise_push_options)
strbuf_addstr(&cap, " push-options");
+ strbuf_addf(&cap, " object-format=%s", the_hash_algo->name);
strbuf_addf(&cap, " agent=%s", git_user_agent_sanitized());
packet_write_fmt(1, "%s %s%c%s\n",
oid_to_hex(oid), path, 0, cap.buf);
@@ -1624,6 +1625,8 @@ static struct command *read_head_info(struct packet_reader *reader,
linelen = strlen(reader->line);
if (linelen < reader->pktlen) {
const char *feature_list = reader->line + linelen + 1;
+ const char *hash = NULL;
+ int len = 0;
if (parse_feature_request(feature_list, "report-status"))
report_status = 1;
if (parse_feature_request(feature_list, "side-band-64k"))
@@ -1636,6 +1639,13 @@ static struct command *read_head_info(struct packet_reader *reader,
if (advertise_push_options
&& parse_feature_request(feature_list, "push-options"))
use_push_options = 1;
+ hash = parse_feature_value(feature_list, "object-format", &len, NULL);
+ if (!hash) {
+ hash = hash_algos[GIT_HASH_SHA1].name;
+ len = strlen(hash);
+ }
+ if (xstrncmpz(the_hash_algo->name, hash, len))
+ die("error: unsupported object format '%s'", hash);
}
if (!strcmp(reader->line, "push-cert")) {
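
read_head_info() above now reads an optional object-format capability from the client and defaults to SHA-1 when it is missing. Here is a freestanding toy parser for pulling a 'key=value' token out of a space-separated capability list; it is a simplified stand-in for git's parse_feature_value(), with made-up input.

#include <stdio.h>
#include <string.h>

/* Find "key=value" in a space-separated list; returns value, length via *len. */
static const char *feature_value(const char *list, const char *key, size_t *len)
{
	size_t keylen = strlen(key);

	while (*list) {
		size_t toklen = strcspn(list, " ");

		if (toklen > keylen + 1 &&
		    !strncmp(list, key, keylen) && list[keylen] == '=') {
			*len = toklen - keylen - 1;
			return list + keylen + 1;
		}
		list += toklen;
		while (*list == ' ')
			list++;
	}
	return NULL;
}

int main(void)
{
	const char *caps = "report-status side-band-64k object-format=sha256 agent=git/2.28.0";
	size_t len;
	const char *val = feature_value(caps, "object-format", &len);

	if (val)
		printf("peer object format: %.*s\n", (int)len, val);
	else
		printf("peer object format: sha1 (default)\n");
	return 0;
}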
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 52ecf6d43c..ca1d8079f3 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -615,7 +615,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
int i;
memset(&collected, 0, sizeof(collected));
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
if (!all_worktrees && !(*p)->is_current)
continue;
diff --git a/builtin/show-index.c b/builtin/show-index.c
index 0826f6a5a2..8106b03a6b 100644
--- a/builtin/show-index.c
+++ b/builtin/show-index.c
@@ -1,9 +1,12 @@
#include "builtin.h"
#include "cache.h"
#include "pack.h"
+#include "parse-options.h"
-static const char show_index_usage[] =
-"git show-index";
+static const char *const show_index_usage[] = {
+ "git show-index [--object-format=<hash-algorithm>]",
+ NULL
+};
int cmd_show_index(int argc, const char **argv, const char *prefix)
{
@@ -11,10 +14,26 @@ int cmd_show_index(int argc, const char **argv, const char *prefix)
unsigned nr;
unsigned int version;
static unsigned int top_index[256];
- const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned hashsz;
+ const char *hash_name = NULL;
+ int hash_algo;
+ const struct option show_index_options[] = {
+ OPT_STRING(0, "object-format", &hash_name, N_("hash-algorithm"),
+ N_("specify the hash algorithm to use")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, show_index_options, show_index_usage, 0);
+
+ if (hash_name) {
+ hash_algo = hash_algo_by_name(hash_name);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("Unknown hash algorithm"));
+ repo_set_hash_algo(the_repository, hash_algo);
+ }
+
+ hashsz = the_hash_algo->rawsz;
- if (argc != 1)
- usage(show_index_usage);
if (fread(top_index, 2 * 4, 1, stdin) != 1)
die("unable to read header");
if (top_index[0] == htonl(PACK_IDX_SIGNATURE)) {
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 95d0882417..4003f4d13a 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -99,6 +99,10 @@ static int update_working_directory(struct pattern_list *pl)
struct lock_file lock_file = LOCK_INIT;
struct repository *r = the_repository;
+ /* If no branch has been checked out, there are no updates to make. */
+ if (is_index_unborn(r->index))
+ return UPDATE_SPARSITY_SUCCESS;
+
memset(&o, 0, sizeof(o));
o.verbose_update = isatty(2);
o.update = 1;
@@ -249,6 +253,8 @@ static int set_config(enum sparse_checkout_mode mode)
{
const char *config_path;
+ if (upgrade_repository_format(1) < 0)
+ die(_("unable to upgrade repository format to enable worktreeConfig"));
if (git_config_set_gently("extensions.worktreeConfig", "true")) {
error(_("failed to set extensions.worktreeConfig setting"));
return 1;
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index f55f7b7704..a1c75607c7 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -2277,6 +2277,49 @@ static int module_set_url(int argc, const char **argv, const char *prefix)
return 0;
}
+static int module_set_branch(int argc, const char **argv, const char *prefix)
+{
+ int opt_default = 0, ret;
+ const char *opt_branch = NULL;
+ const char *path;
+ char *config_name;
+
+ /*
+ * We accept the `quiet` option for uniformity across subcommands,
+ * though there is nothing to make less verbose in this subcommand.
+ */
+ struct option options[] = {
+ OPT_NOOP_NOARG('q', "quiet"),
+ OPT_BOOL('d', "default", &opt_default,
+ N_("set the default tracking branch to master")),
+ OPT_STRING('b', "branch", &opt_branch, N_("branch"),
+ N_("set the default tracking branch")),
+ OPT_END()
+ };
+ const char *const usage[] = {
+ N_("git submodule--helper set-branch [-q|--quiet] (-d|--default) <path>"),
+ N_("git submodule--helper set-branch [-q|--quiet] (-b|--branch) <branch> <path>"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (!opt_branch && !opt_default)
+ die(_("--branch or --default required"));
+
+ if (opt_branch && opt_default)
+ die(_("--branch and --default are mutually exclusive"));
+
+ if (argc != 1 || !(path = argv[0]))
+ usage_with_options(usage, options);
+
+ config_name = xstrfmt("submodule.%s.branch", path);
+ ret = config_set_in_gitmodules_file_gently(config_name, opt_branch);
+
+ free(config_name);
+ return !!ret;
+}
+
#define SUPPORT_SUPER_PREFIX (1<<0)
struct cmd_struct {
@@ -2308,6 +2351,7 @@ static struct cmd_struct commands[] = {
{"check-name", check_name, 0},
{"config", module_config, 0},
{"set-url", module_set_url, 0},
+ {"set-branch", module_set_branch, 0},
};
int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
diff --git a/builtin/worktree.c b/builtin/worktree.c
index d99db35668..f0cbdef718 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -67,7 +67,12 @@ static void delete_worktrees_dir_if_empty(void)
rmdir(git_path("worktrees")); /* ignore failed removal */
}
-static int prune_worktree(const char *id, struct strbuf *reason)
+/*
+ * Return true if worktree entry should be pruned, along with the reason for
+ * pruning. Otherwise, return false and the worktree's path, or NULL if it
+ * cannot be determined. Caller is responsible for freeing returned path.
+ */
+static int should_prune_worktree(const char *id, struct strbuf *reason, char **wtpath)
{
struct stat st;
char *path;
@@ -75,20 +80,21 @@ static int prune_worktree(const char *id, struct strbuf *reason)
size_t len;
ssize_t read_result;
+ *wtpath = NULL;
if (!is_directory(git_path("worktrees/%s", id))) {
- strbuf_addf(reason, _("Removing worktrees/%s: not a valid directory"), id);
+ strbuf_addstr(reason, _("not a valid directory"));
return 1;
}
if (file_exists(git_path("worktrees/%s/locked", id)))
return 0;
if (stat(git_path("worktrees/%s/gitdir", id), &st)) {
- strbuf_addf(reason, _("Removing worktrees/%s: gitdir file does not exist"), id);
+ strbuf_addstr(reason, _("gitdir file does not exist"));
return 1;
}
fd = open(git_path("worktrees/%s/gitdir", id), O_RDONLY);
if (fd < 0) {
- strbuf_addf(reason, _("Removing worktrees/%s: unable to read gitdir file (%s)"),
- id, strerror(errno));
+ strbuf_addf(reason, _("unable to read gitdir file (%s)"),
+ strerror(errno));
return 1;
}
len = xsize_t(st.st_size);
@@ -96,8 +102,8 @@ static int prune_worktree(const char *id, struct strbuf *reason)
read_result = read_in_full(fd, path, len);
if (read_result < 0) {
- strbuf_addf(reason, _("Removing worktrees/%s: unable to read gitdir file (%s)"),
- id, strerror(errno));
+ strbuf_addf(reason, _("unable to read gitdir file (%s)"),
+ strerror(errno));
close(fd);
free(path);
return 1;
@@ -106,53 +112,103 @@ static int prune_worktree(const char *id, struct strbuf *reason)
if (read_result != len) {
strbuf_addf(reason,
- _("Removing worktrees/%s: short read (expected %"PRIuMAX" bytes, read %"PRIuMAX")"),
- id, (uintmax_t)len, (uintmax_t)read_result);
+ _("short read (expected %"PRIuMAX" bytes, read %"PRIuMAX")"),
+ (uintmax_t)len, (uintmax_t)read_result);
free(path);
return 1;
}
while (len && (path[len - 1] == '\n' || path[len - 1] == '\r'))
len--;
if (!len) {
- strbuf_addf(reason, _("Removing worktrees/%s: invalid gitdir file"), id);
+ strbuf_addstr(reason, _("invalid gitdir file"));
free(path);
return 1;
}
path[len] = '\0';
if (!file_exists(path)) {
- free(path);
if (stat(git_path("worktrees/%s/index", id), &st) ||
st.st_mtime <= expire) {
- strbuf_addf(reason, _("Removing worktrees/%s: gitdir file points to non-existent location"), id);
+ strbuf_addstr(reason, _("gitdir file points to non-existent location"));
+ free(path);
return 1;
} else {
+ *wtpath = path;
return 0;
}
}
- free(path);
+ *wtpath = path;
return 0;
}
+static void prune_worktree(const char *id, const char *reason)
+{
+ if (show_only || verbose)
+ printf_ln(_("Removing %s/%s: %s"), "worktrees", id, reason);
+ if (!show_only)
+ delete_git_dir(id);
+}
+
+static int prune_cmp(const void *a, const void *b)
+{
+ const struct string_list_item *x = a;
+ const struct string_list_item *y = b;
+ int c;
+
+ if ((c = fspathcmp(x->string, y->string)))
+ return c;
+ /*
+ * paths same; prune_dups() removes all but the first worktree entry
+ * having the same path, so sort main worktree ('util' is NULL) above
+ * linked worktrees ('util' not NULL) since main worktree can't be
+ * removed
+ */
+ if (!x->util)
+ return -1;
+ if (!y->util)
+ return 1;
+ /* paths same; sort by .git/worktrees/<id> */
+ return strcmp(x->util, y->util);
+}
+
+static void prune_dups(struct string_list *l)
+{
+ int i;
+
+ QSORT(l->items, l->nr, prune_cmp);
+ for (i = 1; i < l->nr; i++) {
+ if (!fspathcmp(l->items[i].string, l->items[i - 1].string))
+ prune_worktree(l->items[i].util, "duplicate entry");
+ }
+}
+
static void prune_worktrees(void)
{
struct strbuf reason = STRBUF_INIT;
+ struct strbuf main_path = STRBUF_INIT;
+ struct string_list kept = STRING_LIST_INIT_NODUP;
DIR *dir = opendir(git_path("worktrees"));
struct dirent *d;
if (!dir)
return;
while ((d = readdir(dir)) != NULL) {
+ char *path;
if (is_dot_or_dotdot(d->d_name))
continue;
strbuf_reset(&reason);
- if (!prune_worktree(d->d_name, &reason))
- continue;
- if (show_only || verbose)
- printf("%s\n", reason.buf);
- if (show_only)
- continue;
- delete_git_dir(d->d_name);
+ if (should_prune_worktree(d->d_name, &reason, &path))
+ prune_worktree(d->d_name, reason.buf);
+ else if (path)
+ string_list_append(&kept, path)->util = xstrdup(d->d_name);
}
closedir(dir);
+
+ strbuf_add_absolute_path(&main_path, get_git_common_dir());
+ /* massage main worktree absolute path to match 'gitdir' content */
+ strbuf_strip_suffix(&main_path, "/.");
+ string_list_append(&kept, strbuf_detach(&main_path, NULL));
+ prune_dups(&kept);
+ string_list_clear(&kept, 1);
+
if (!show_only)
delete_worktrees_dir_if_empty();
strbuf_release(&reason);
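
prune_dups() in the hunk above depends on a sort that groups identical worktree paths and puts the main worktree first, so a single pass over adjacent entries can prune every duplicate after the first. The standalone sketch below shows that sort-then-adjacent-compare idiom with qsort(); the entries are invented and plain strcmp() stands in for fspathcmp().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	const char *path;	/* worktree path recorded in .git/worktrees/<id>/gitdir */
	const char *id;		/* NULL marks the main worktree, which is never pruned */
};

static int entry_cmp(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;
	int c = strcmp(x->path, y->path);

	if (c)
		return c;
	if (!x->id)		/* same path: keep the main worktree first */
		return -1;
	if (!y->id)
		return 1;
	return strcmp(x->id, y->id);
}

int main(void)
{
	struct entry e[] = {
		{ "/srv/repo/wt1", "wt1" },
		{ "/srv/repo",     NULL },		/* main worktree */
		{ "/srv/repo/wt1", "wt1-copy" },	/* stale duplicate of wt1 */
	};
	size_t n = sizeof(e) / sizeof(e[0]), i;

	qsort(e, n, sizeof(e[0]), entry_cmp);
	for (i = 1; i < n; i++)
		if (!strcmp(e[i].path, e[i - 1].path))
			printf("prune %s: duplicate entry\n", e[i].id);
	return 0;
}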
@@ -224,34 +280,33 @@ static const char *worktree_basename(const char *path, int *olen)
return name;
}
-static void validate_worktree_add(const char *path, const struct add_opts *opts)
+/* check that path is viable location for worktree */
+static void check_candidate_path(const char *path,
+ int force,
+ struct worktree **worktrees,
+ const char *cmd)
{
- struct worktree **worktrees;
struct worktree *wt;
int locked;
if (file_exists(path) && !is_empty_dir(path))
die(_("'%s' already exists"), path);
- worktrees = get_worktrees(0);
wt = find_worktree_by_path(worktrees, path);
if (!wt)
- goto done;
+ return;
locked = !!worktree_lock_reason(wt);
- if ((!locked && opts->force) || (locked && opts->force > 1)) {
+ if ((!locked && force) || (locked && force > 1)) {
if (delete_git_dir(wt->id))
- die(_("unable to re-add worktree '%s'"), path);
- goto done;
+ die(_("unusable worktree destination '%s'"), path);
+ return;
}
if (locked)
- die(_("'%s' is a missing but locked worktree;\nuse 'add -f -f' to override, or 'unlock' and 'prune' or 'remove' to clear"), path);
+ die(_("'%s' is a missing but locked worktree;\nuse '%s -f -f' to override, or 'unlock' and 'prune' or 'remove' to clear"), cmd, path);
else
- die(_("'%s' is a missing but already registered worktree;\nuse 'add -f' to override, or 'prune' or 'remove' to clear"), path);
-
-done:
- free_worktrees(worktrees);
+ die(_("'%s' is a missing but already registered worktree;\nuse '%s -f' to override, or 'prune' or 'remove' to clear"), cmd, path);
}
static int add_worktree(const char *path, const char *refname,
@@ -268,8 +323,12 @@ static int add_worktree(const char *path, const char *refname,
struct commit *commit = NULL;
int is_branch = 0;
struct strbuf sb_name = STRBUF_INIT;
+ struct worktree **worktrees;
- validate_worktree_add(path, opts);
+ worktrees = get_worktrees();
+ check_candidate_path(path, opts->force, worktrees, "add");
+ free_worktrees(worktrees);
+ worktrees = NULL;
/* is 'refname' a branch or commit? */
if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
@@ -638,6 +697,23 @@ static void measure_widths(struct worktree **wt, int *abbrev, int *maxlen)
}
}
+static int pathcmp(const void *a_, const void *b_)
+{
+ const struct worktree *const *a = a_;
+ const struct worktree *const *b = b_;
+ return fspathcmp((*a)->path, (*b)->path);
+}
+
+static void pathsort(struct worktree **wt)
+{
+ int n = 0;
+ struct worktree **p = wt;
+
+ while (*p++)
+ n++;
+ QSORT(wt, n, pathcmp);
+}
+
static int list(int ac, const char **av, const char *prefix)
{
int porcelain = 0;
@@ -651,9 +727,12 @@ static int list(int ac, const char **av, const char *prefix)
if (ac)
usage_with_options(worktree_usage, options);
else {
- struct worktree **worktrees = get_worktrees(GWT_SORT_LINKED);
+ struct worktree **worktrees = get_worktrees();
int path_maxlen = 0, abbrev = DEFAULT_ABBREV, i;
+ /* sort worktrees by path but keep main worktree at top */
+ pathsort(worktrees + 1);
+
if (!porcelain)
measure_widths(worktrees, &abbrev, &path_maxlen);
@@ -682,7 +761,7 @@ static int lock_worktree(int ac, const char **av, const char *prefix)
if (ac != 1)
usage_with_options(worktree_usage, options);
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
if (!wt)
die(_("'%s' is not a working tree"), av[0]);
@@ -715,7 +794,7 @@ static int unlock_worktree(int ac, const char **av, const char *prefix)
if (ac != 1)
usage_with_options(worktree_usage, options);
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
if (!wt)
die(_("'%s' is not a working tree"), av[0]);
@@ -789,7 +868,7 @@ static int move_worktree(int ac, const char **av, const char *prefix)
strbuf_addstr(&dst, path);
free(path);
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
if (!wt)
die(_("'%s' is not a working tree"), av[0]);
@@ -804,8 +883,7 @@ static int move_worktree(int ac, const char **av, const char *prefix)
strbuf_trim_trailing_dir_sep(&dst);
strbuf_addstr(&dst, sep);
}
- if (file_exists(dst.buf))
- die(_("target '%s' already exists"), dst.buf);
+ check_candidate_path(dst.buf, force, worktrees, "move");
validate_no_submodules(wt);
@@ -916,7 +994,7 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
if (ac != 1)
usage_with_options(worktree_usage, options);
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
if (!wt)
die(_("'%s' is not a working tree"), av[0]);
diff --git a/bundle.c b/bundle.c
index 99439e07a1..2a0d744d3f 100644
--- a/bundle.c
+++ b/bundle.c
@@ -23,6 +23,17 @@ static void add_to_ref_list(const struct object_id *oid, const char *name,
list->nr++;
}
+static const struct git_hash_algo *detect_hash_algo(struct strbuf *buf)
+{
+ size_t len = strcspn(buf->buf, " \n");
+ int algo;
+
+ algo = hash_algo_by_length(len / 2);
+ if (algo == GIT_HASH_UNKNOWN)
+ return NULL;
+ return &hash_algos[algo];
+}
+
static int parse_bundle_header(int fd, struct bundle_header *header,
const char *report_path)
{
@@ -52,12 +63,21 @@ static int parse_bundle_header(int fd, struct bundle_header *header,
}
strbuf_rtrim(&buf);
+ if (!header->hash_algo) {
+ header->hash_algo = detect_hash_algo(&buf);
+ if (!header->hash_algo) {
+ error(_("unknown hash algorithm length"));
+ status = -1;
+ break;
+ }
+ }
+
/*
* Tip lines have object name, SP, and refname.
* Prerequisites have object name that is optionally
* followed by SP and subject line.
*/
- if (parse_oid_hex(buf.buf, &oid, &p) ||
+ if (parse_oid_hex_algop(buf.buf, &oid, &p, header->hash_algo) ||
(*p && !isspace(*p)) ||
(!is_prereq && !*p)) {
if (report_path)
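
detect_hash_algo() above infers the bundle's hash algorithm from the length of the first object name on a line: 40 hex digits for SHA-1, 64 for SHA-256. A self-contained sketch of that length-based detection follows; the mapping is hard-coded here instead of coming from git's hash_algos[] table, and the sample lines are illustrative only.

#include <stdio.h>
#include <string.h>

/* Map a hex object-name length to an algorithm name, or NULL if unknown. */
static const char *detect_hash_algo(const char *line)
{
	size_t len = strcspn(line, " \n");

	switch (len / 2) {		/* raw hash size in bytes */
	case 20: return "sha1";
	case 32: return "sha256";
	default: return NULL;
	}
}

int main(void)
{
	const char *lines[] = {
		"7227a9d59ac2f53c22e1d2971d115d402dbc06e2 refs/heads/master\n",
		"2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
		" refs/heads/master\n",
		"deadbeef refs/heads/short\n",
	};
	size_t i;

	for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
		const char *algo = detect_hash_algo(lines[i]);
		printf("line %zu: %s\n", i, algo ? algo : "unknown hash algorithm length");
	}
	return 0;
}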
diff --git a/bundle.h b/bundle.h
index ceab0c7475..2dc9442024 100644
--- a/bundle.h
+++ b/bundle.h
@@ -15,6 +15,7 @@ struct ref_list {
struct bundle_header {
struct ref_list prerequisites;
struct ref_list references;
+ const struct git_hash_algo *hash_algo;
};
int is_bundle(const char *path, int quiet);
diff --git a/cache.h b/cache.h
index 654426460c..126ec56c7f 100644
--- a/cache.h
+++ b/cache.h
@@ -1042,6 +1042,7 @@ struct repository_format {
int worktree_config;
int is_bare;
int hash_algo;
+ int has_extensions;
char *work_tree;
struct string_list unknown_extensions;
};
diff --git a/command-list.txt b/command-list.txt
index cbb960c843..89aa60cde7 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -136,7 +136,7 @@ git-pack-redundant plumbinginterrogators
git-pack-refs ancillarymanipulators
git-parse-remote synchelpers
git-patch-id purehelpers
-git-prune ancillarymanipulators
+git-prune ancillarymanipulators complete
git-prune-packed plumbingmanipulators
git-pull mainporcelain remote
git-push mainporcelain remote
diff --git a/commit-graph.c b/commit-graph.c
index 2ff042fbf4..fdd1c4fa7c 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -87,15 +87,69 @@ static int commit_pos_cmp(const void *va, const void *vb)
commit_pos_at(&commit_pos, b);
}
+define_commit_slab(commit_graph_data_slab, struct commit_graph_data);
+static struct commit_graph_data_slab commit_graph_data_slab =
+ COMMIT_SLAB_INIT(1, commit_graph_data_slab);
+
+uint32_t commit_graph_position(const struct commit *c)
+{
+ struct commit_graph_data *data =
+ commit_graph_data_slab_peek(&commit_graph_data_slab, c);
+
+ return data ? data->graph_pos : COMMIT_NOT_FROM_GRAPH;
+}
+
+uint32_t commit_graph_generation(const struct commit *c)
+{
+ struct commit_graph_data *data =
+ commit_graph_data_slab_peek(&commit_graph_data_slab, c);
+
+ if (!data)
+ return GENERATION_NUMBER_INFINITY;
+ else if (data->graph_pos == COMMIT_NOT_FROM_GRAPH)
+ return GENERATION_NUMBER_INFINITY;
+
+ return data->generation;
+}
+
+static struct commit_graph_data *commit_graph_data_at(const struct commit *c)
+{
+ unsigned int i, nth_slab;
+ struct commit_graph_data *data =
+ commit_graph_data_slab_peek(&commit_graph_data_slab, c);
+
+ if (data)
+ return data;
+
+ nth_slab = c->index / commit_graph_data_slab.slab_size;
+ data = commit_graph_data_slab_at(&commit_graph_data_slab, c);
+
+ /*
+ * commit-slab initializes elements to zero, so overwrite graph_pos
+ * with COMMIT_NOT_FROM_GRAPH here.
+ *
+ * We skip initializing generation; callers check that graph_pos is
+ * not COMMIT_NOT_FROM_GRAPH before trusting the generation value.
+ */
+ for (i = 0; i < commit_graph_data_slab.slab_size; i++) {
+ commit_graph_data_slab.slab[nth_slab][i].graph_pos =
+ COMMIT_NOT_FROM_GRAPH;
+ }
+
+ return data;
+}
+
static int commit_gen_cmp(const void *va, const void *vb)
{
const struct commit *a = *(const struct commit **)va;
const struct commit *b = *(const struct commit **)vb;
+ uint32_t generation_a = commit_graph_generation(a);
+ uint32_t generation_b = commit_graph_generation(b);
/* lower generation commits first */
- if (a->generation < b->generation)
+ if (generation_a < generation_b)
return -1;
- else if (a->generation > b->generation)
+ else if (generation_a > generation_b)
return 1;
/* use date as a heuristic when generations are equal */
@@ -670,13 +724,14 @@ static struct commit_list **insert_parent_or_die(struct repository *r,
c = lookup_commit(r, &oid);
if (!c)
die(_("could not find commit %s"), oid_to_hex(&oid));
- c->graph_pos = pos;
+ commit_graph_data_at(c)->graph_pos = pos;
return &commit_list_insert(c, pptr)->next;
}
static void fill_commit_graph_info(struct commit *item, struct commit_graph *g, uint32_t pos)
{
const unsigned char *commit_data;
+ struct commit_graph_data *graph_data;
uint32_t lex_index;
while (pos < g->num_commits_in_base)
@@ -684,8 +739,10 @@ static void fill_commit_graph_info(struct commit *item, struct commit_graph *g,
lex_index = pos - g->num_commits_in_base;
commit_data = g->chunk_commit_data + GRAPH_DATA_WIDTH * lex_index;
- item->graph_pos = pos;
- item->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
+
+ graph_data = commit_graph_data_at(item);
+ graph_data->graph_pos = pos;
+ graph_data->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
}
static inline void set_commit_tree(struct commit *c, struct tree *t)
@@ -701,6 +758,7 @@ static int fill_commit_in_graph(struct repository *r,
uint32_t *parent_data_ptr;
uint64_t date_low, date_high;
struct commit_list **pptr;
+ struct commit_graph_data *graph_data;
const unsigned char *commit_data;
uint32_t lex_index;
@@ -714,7 +772,8 @@ static int fill_commit_in_graph(struct repository *r,
* Store the "full" position, but then use the
* "local" position for the rest of the calculation.
*/
- item->graph_pos = pos;
+ graph_data = commit_graph_data_at(item);
+ graph_data->graph_pos = pos;
lex_index = pos - g->num_commits_in_base;
commit_data = g->chunk_commit_data + (g->hash_len + 16) * lex_index;
@@ -727,7 +786,7 @@ static int fill_commit_in_graph(struct repository *r,
date_low = get_be32(commit_data + g->hash_len + 12);
item->date = (timestamp_t)((date_high << 32) | date_low);
- item->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
+ graph_data->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
pptr = &item->parents;
@@ -759,8 +818,9 @@ static int fill_commit_in_graph(struct repository *r,
static int find_commit_in_graph(struct commit *item, struct commit_graph *g, uint32_t *pos)
{
- if (item->graph_pos != COMMIT_NOT_FROM_GRAPH) {
- *pos = item->graph_pos;
+ uint32_t graph_pos = commit_graph_position(item);
+ if (graph_pos != COMMIT_NOT_FROM_GRAPH) {
+ *pos = graph_pos;
return 1;
} else {
struct commit_graph *cur_g = g;
@@ -815,12 +875,13 @@ static struct tree *load_tree_for_commit(struct repository *r,
{
struct object_id oid;
const unsigned char *commit_data;
+ uint32_t graph_pos = commit_graph_position(c);
- while (c->graph_pos < g->num_commits_in_base)
+ while (graph_pos < g->num_commits_in_base)
g = g->base_graph;
commit_data = g->chunk_commit_data +
- GRAPH_DATA_WIDTH * (c->graph_pos - g->num_commits_in_base);
+ GRAPH_DATA_WIDTH * (graph_pos - g->num_commits_in_base);
hashcpy(oid.hash, commit_data);
set_commit_tree(c, lookup_tree(r, &oid));
@@ -834,7 +895,7 @@ static struct tree *get_commit_tree_in_graph_one(struct repository *r,
{
if (c->maybe_tree)
return c->maybe_tree;
- if (c->graph_pos == COMMIT_NOT_FROM_GRAPH)
+ if (commit_graph_position(c) == COMMIT_NOT_FROM_GRAPH)
BUG("get_commit_tree_in_graph_one called from non-commit-graph commit");
return load_tree_for_commit(r, g, (struct commit *)c);
@@ -1020,7 +1081,7 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len,
else
packedDate[0] = 0;
- packedDate[0] |= htonl((*list)->generation << 2);
+ packedDate[0] |= htonl(commit_graph_data_at(*list)->generation << 2);
packedDate[1] = htonl((*list)->date);
hashwrite(f, packedDate, 8);
@@ -1219,7 +1280,7 @@ static void close_reachable(struct write_commit_graph_context *ctx)
continue;
if (ctx->split) {
if ((!parse_commit(commit) &&
- commit->graph_pos == COMMIT_NOT_FROM_GRAPH) ||
+ commit_graph_position(commit) == COMMIT_NOT_FROM_GRAPH) ||
flags == COMMIT_GRAPH_SPLIT_REPLACE)
add_missing_parents(ctx, commit);
} else if (!parse_commit_no_graph(commit))
@@ -1251,9 +1312,11 @@ static void compute_generation_numbers(struct write_commit_graph_context *ctx)
_("Computing commit graph generation numbers"),
ctx->commits.nr);
for (i = 0; i < ctx->commits.nr; i++) {
+ uint32_t generation = commit_graph_data_at(ctx->commits.list[i])->generation;
+
display_progress(ctx->progress, i + 1);
- if (ctx->commits.list[i]->generation != GENERATION_NUMBER_INFINITY &&
- ctx->commits.list[i]->generation != GENERATION_NUMBER_ZERO)
+ if (generation != GENERATION_NUMBER_INFINITY &&
+ generation != GENERATION_NUMBER_ZERO)
continue;
commit_list_insert(ctx->commits.list[i], &list);
@@ -1264,22 +1327,26 @@ static void compute_generation_numbers(struct write_commit_graph_context *ctx)
uint32_t max_generation = 0;
for (parent = current->parents; parent; parent = parent->next) {
- if (parent->item->generation == GENERATION_NUMBER_INFINITY ||
- parent->item->generation == GENERATION_NUMBER_ZERO) {
+ generation = commit_graph_data_at(parent->item)->generation;
+
+ if (generation == GENERATION_NUMBER_INFINITY ||
+ generation == GENERATION_NUMBER_ZERO) {
all_parents_computed = 0;
commit_list_insert(parent->item, &list);
break;
- } else if (parent->item->generation > max_generation) {
- max_generation = parent->item->generation;
+ } else if (generation > max_generation) {
+ max_generation = generation;
}
}
if (all_parents_computed) {
- current->generation = max_generation + 1;
+ struct commit_graph_data *data = commit_graph_data_at(current);
+
+ data->generation = max_generation + 1;
pop_commit(&list);
- if (current->generation > GENERATION_NUMBER_MAX)
- current->generation = GENERATION_NUMBER_MAX;
+ if (data->generation > GENERATION_NUMBER_MAX)
+ data->generation = GENERATION_NUMBER_MAX;
}
}
}
@@ -1458,7 +1525,7 @@ static uint32_t count_distinct_commits(struct write_commit_graph_context *ctx)
if (ctx->split) {
struct commit *c = lookup_commit(ctx->r, &ctx->oids.list[i]);
- if (!c || c->graph_pos != COMMIT_NOT_FROM_GRAPH)
+ if (!c || commit_graph_position(c) != COMMIT_NOT_FROM_GRAPH)
continue;
}
@@ -1492,7 +1559,7 @@ static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
ctx->commits.list[ctx->commits.nr] = lookup_commit(ctx->r, &ctx->oids.list[i]);
if (ctx->split && flags != COMMIT_GRAPH_SPLIT_REPLACE &&
- ctx->commits.list[ctx->commits.nr]->graph_pos != COMMIT_NOT_FROM_GRAPH)
+ commit_graph_position(ctx->commits.list[ctx->commits.nr]) != COMMIT_NOT_FROM_GRAPH)
continue;
if (ctx->split && flags == COMMIT_GRAPH_SPLIT_REPLACE)
@@ -2241,6 +2308,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
struct commit *graph_commit, *odb_commit;
struct commit_list *graph_parents, *odb_parents;
uint32_t max_generation = 0;
+ uint32_t generation;
display_progress(progress, i + 1);
hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
@@ -2279,8 +2347,9 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
oid_to_hex(&graph_parents->item->object.oid),
oid_to_hex(&odb_parents->item->object.oid));
- if (graph_parents->item->generation > max_generation)
- max_generation = graph_parents->item->generation;
+ generation = commit_graph_generation(graph_parents->item);
+ if (generation > max_generation)
+ max_generation = generation;
graph_parents = graph_parents->next;
odb_parents = odb_parents->next;
@@ -2290,7 +2359,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
graph_report(_("commit-graph parent list for commit %s terminates early"),
oid_to_hex(&cur_oid));
- if (!graph_commit->generation) {
+ if (!commit_graph_generation(graph_commit)) {
if (generation_zero == GENERATION_NUMBER_EXISTS)
graph_report(_("commit-graph has generation number zero for commit %s, but non-zero elsewhere"),
oid_to_hex(&cur_oid));
@@ -2310,10 +2379,11 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
if (max_generation == GENERATION_NUMBER_MAX)
max_generation--;
- if (graph_commit->generation != max_generation + 1)
+ generation = commit_graph_generation(graph_commit);
+ if (generation != max_generation + 1)
graph_report(_("commit-graph generation for commit %s is %u != %u"),
oid_to_hex(&cur_oid),
- graph_commit->generation,
+ generation,
max_generation + 1);
if (graph_commit->date != odb_commit->date)
diff --git a/commit-graph.h b/commit-graph.h
index 3ba0da1e5f..28f89cdf3e 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -135,4 +135,14 @@ void free_commit_graph(struct commit_graph *);
*/
void disable_commit_graph(struct repository *r);
+struct commit_graph_data {
+ uint32_t graph_pos;
+ uint32_t generation;
+};
+
+/*
+ * Commits should be parsed before accessing their generation or graph position.
+ */
+uint32_t commit_graph_generation(const struct commit *);
+uint32_t commit_graph_position(const struct commit *);
#endif
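
The commit-graph changes above move graph_pos and generation out of struct commit into a commit slab keyed by the commit's index and allocated in chunks only when first touched. Below is a compact standalone sketch of that slab idea (chunked, lazily allocated, zero-filled storage addressed by an integer index); the chunk size and type names are invented for the example and this is not git's commit-slab API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLAB_SIZE 4	/* tiny chunk size so the example allocates more than one */

struct graph_data { unsigned graph_pos; unsigned generation; };

struct slab {
	struct graph_data **chunks;
	size_t nr_chunks;
};

/* Return the element for index i, allocating its chunk on first use. */
static struct graph_data *slab_at(struct slab *s, size_t i)
{
	size_t chunk = i / SLAB_SIZE;

	if (chunk >= s->nr_chunks) {
		struct graph_data **bigger =
			realloc(s->chunks, (chunk + 1) * sizeof(*bigger));
		if (!bigger)
			exit(1);
		memset(bigger + s->nr_chunks, 0,
		       (chunk + 1 - s->nr_chunks) * sizeof(*bigger));
		s->chunks = bigger;
		s->nr_chunks = chunk + 1;
	}
	if (!s->chunks[chunk]) {
		s->chunks[chunk] = calloc(SLAB_SIZE, sizeof(struct graph_data));
		if (!s->chunks[chunk])
			exit(1);
	}
	return &s->chunks[chunk][i % SLAB_SIZE];
}

int main(void)
{
	struct slab s = { NULL, 0 };

	slab_at(&s, 9)->generation = 42;	/* only the chunk holding 8..11 is allocated */
	printf("generation(9) = %u\n", slab_at(&s, 9)->generation);
	printf("generation(2) = %u\n", slab_at(&s, 2)->generation);	/* untouched: 0 */
	return 0;
}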
diff --git a/commit-reach.c b/commit-reach.c
index 4ca7e706a1..efd5925cbb 100644
--- a/commit-reach.c
+++ b/commit-reach.c
@@ -58,14 +58,15 @@ static struct commit_list *paint_down_to_common(struct repository *r,
struct commit *commit = prio_queue_get(&queue);
struct commit_list *parents;
int flags;
+ uint32_t generation = commit_graph_generation(commit);
- if (min_generation && commit->generation > last_gen)
+ if (min_generation && generation > last_gen)
BUG("bad generation skip %8x > %8x at %s",
- commit->generation, last_gen,
+ generation, last_gen,
oid_to_hex(&commit->object.oid));
- last_gen = commit->generation;
+ last_gen = generation;
- if (commit->generation < min_generation)
+ if (generation < min_generation)
break;
flags = commit->object.flags & (PARENT1 | PARENT2 | STALE);
@@ -176,18 +177,20 @@ static int remove_redundant(struct repository *r, struct commit **array, int cnt
repo_parse_commit(r, array[i]);
for (i = 0; i < cnt; i++) {
struct commit_list *common;
- uint32_t min_generation = array[i]->generation;
+ uint32_t min_generation = commit_graph_generation(array[i]);
if (redundant[i])
continue;
for (j = filled = 0; j < cnt; j++) {
+ uint32_t curr_generation;
if (i == j || redundant[j])
continue;
filled_index[filled] = j;
work[filled++] = array[j];
- if (array[j]->generation < min_generation)
- min_generation = array[j]->generation;
+ curr_generation = commit_graph_generation(array[j]);
+ if (curr_generation < min_generation)
+ min_generation = curr_generation;
}
common = paint_down_to_common(r, array[i], filled,
work, min_generation);
@@ -283,7 +286,9 @@ struct commit_list *repo_get_merge_bases(struct repository *r,
/*
* Is "commit" a descendant of one of the elements on the "with_commit" list?
*/
-int is_descendant_of(struct commit *commit, struct commit_list *with_commit)
+int repo_is_descendant_of(struct repository *r,
+ struct commit *commit,
+ struct commit_list *with_commit)
{
if (!with_commit)
return 1;
@@ -301,7 +306,7 @@ int is_descendant_of(struct commit *commit, struct commit_list *with_commit)
other = with_commit->item;
with_commit = with_commit->next;
- if (in_merge_bases(other, commit))
+ if (repo_in_merge_bases_many(r, other, 1, &commit))
return 1;
}
return 0;
@@ -316,23 +321,26 @@ int repo_in_merge_bases_many(struct repository *r, struct commit *commit,
{
struct commit_list *bases;
int ret = 0, i;
- uint32_t min_generation = GENERATION_NUMBER_INFINITY;
+ uint32_t generation, min_generation = GENERATION_NUMBER_INFINITY;
if (repo_parse_commit(r, commit))
return ret;
for (i = 0; i < nr_reference; i++) {
if (repo_parse_commit(r, reference[i]))
return ret;
- if (reference[i]->generation < min_generation)
- min_generation = reference[i]->generation;
+
+ generation = commit_graph_generation(reference[i]);
+ if (generation < min_generation)
+ min_generation = generation;
}
- if (commit->generation > min_generation)
+ generation = commit_graph_generation(commit);
+ if (generation > min_generation)
return ret;
bases = paint_down_to_common(r, commit,
nr_reference, reference,
- commit->generation);
+ generation);
if (commit->object.flags & PARENT2)
ret = 1;
clear_commit_marks(commit, all_flags);
@@ -348,7 +356,15 @@ int repo_in_merge_bases(struct repository *r,
struct commit *commit,
struct commit *reference)
{
- return repo_in_merge_bases_many(r, commit, 1, &reference);
+ int res;
+ struct commit_list *list = NULL;
+ struct commit_list **next = &list;
+
+ next = commit_list_append(commit, next);
+ res = repo_is_descendant_of(r, reference, list);
+ free_commit_list(list);
+
+ return res;
}
struct commit_list *reduce_heads(struct commit_list *heads)
@@ -396,6 +412,7 @@ int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
struct object *o;
struct commit *old_commit, *new_commit;
struct commit_list *old_commit_list = NULL;
+ int ret;
/*
* Both new_commit and old_commit must be commit-ish and new_commit is descendant of
@@ -417,7 +434,10 @@ int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
return 0;
commit_list_insert(old_commit, &old_commit_list);
- return is_descendant_of(new_commit, old_commit_list);
+ ret = repo_is_descendant_of(the_repository,
+ new_commit, old_commit_list);
+ free_commit_list(old_commit_list);
+ return ret;
}
/*
@@ -467,7 +487,7 @@ static enum contains_result contains_test(struct commit *candidate,
/* Otherwise, we don't know; prepare to recurse */
parse_commit_or_die(candidate);
- if (candidate->generation < cutoff)
+ if (commit_graph_generation(candidate) < cutoff)
return CONTAINS_NO;
return CONTAINS_UNKNOWN;
@@ -490,10 +510,12 @@ static enum contains_result contains_tag_algo(struct commit *candidate,
const struct commit_list *p;
for (p = want; p; p = p->next) {
+ uint32_t generation;
struct commit *c = p->item;
load_commit_graph_info(the_repository, c);
- if (c->generation < cutoff)
- cutoff = c->generation;
+ generation = commit_graph_generation(c);
+ if (generation < cutoff)
+ cutoff = generation;
}
result = contains_test(candidate, want, cache, cutoff);
@@ -536,7 +558,7 @@ int commit_contains(struct ref_filter *filter, struct commit *commit,
{
if (filter->with_commit_tag_algo)
return contains_tag_algo(commit, list, cache) == CONTAINS_YES;
- return is_descendant_of(commit, list);
+ return repo_is_descendant_of(the_repository, commit, list);
}
static int compare_commits_by_gen(const void *_a, const void *_b)
@@ -544,9 +566,12 @@ static int compare_commits_by_gen(const void *_a, const void *_b)
const struct commit *a = *(const struct commit * const *)_a;
const struct commit *b = *(const struct commit * const *)_b;
- if (a->generation < b->generation)
+ uint32_t generation_a = commit_graph_generation(a);
+ uint32_t generation_b = commit_graph_generation(b);
+
+ if (generation_a < generation_b)
return -1;
- if (a->generation > b->generation)
+ if (generation_a > generation_b)
return 1;
return 0;
}
@@ -585,7 +610,7 @@ int can_all_from_reach_with_flag(struct object_array *from,
list[nr_commits] = (struct commit *)from_one;
if (parse_commit(list[nr_commits]) ||
- list[nr_commits]->generation < min_generation) {
+ commit_graph_generation(list[nr_commits]) < min_generation) {
result = 0;
goto cleanup;
}
@@ -621,7 +646,7 @@ int can_all_from_reach_with_flag(struct object_array *from,
if (parse_commit(parent->item) ||
parent->item->date < min_commit_date ||
- parent->item->generation < min_generation)
+ commit_graph_generation(parent->item) < min_generation)
continue;
commit_list_insert(parent->item, &stack);
@@ -662,11 +687,13 @@ int can_all_from_reach(struct commit_list *from, struct commit_list *to,
add_object_array(&from_iter->item->object, NULL, &from_objs);
if (!parse_commit(from_iter->item)) {
+ uint32_t generation;
if (from_iter->item->date < min_commit_date)
min_commit_date = from_iter->item->date;
- if (from_iter->item->generation < min_generation)
- min_generation = from_iter->item->generation;
+ generation = commit_graph_generation(from_iter->item);
+ if (generation < min_generation)
+ min_generation = generation;
}
from_iter = from_iter->next;
@@ -674,11 +701,13 @@ int can_all_from_reach(struct commit_list *from, struct commit_list *to,
while (to_iter) {
if (!parse_commit(to_iter->item)) {
+ uint32_t generation;
if (to_iter->item->date < min_commit_date)
min_commit_date = to_iter->item->date;
- if (to_iter->item->generation < min_generation)
- min_generation = to_iter->item->generation;
+ generation = commit_graph_generation(to_iter->item);
+ if (generation < min_generation)
+ min_generation = generation;
}
to_iter->item->object.flags |= PARENT2;
@@ -718,11 +747,13 @@ struct commit_list *get_reachable_subset(struct commit **from, int nr_from,
struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
for (item = to; item < to_last; item++) {
+ uint32_t generation;
struct commit *c = *item;
parse_commit(c);
- if (c->generation < min_generation)
- min_generation = c->generation;
+ generation = commit_graph_generation(c);
+ if (generation < min_generation)
+ min_generation = generation;
if (!(c->object.flags & PARENT1)) {
c->object.flags |= PARENT1;
@@ -755,7 +786,7 @@ struct commit_list *get_reachable_subset(struct commit **from, int nr_from,
parse_commit(p);
- if (p->generation < min_generation)
+ if (commit_graph_generation(p) < min_generation)
continue;
if (p->object.flags & PARENT2)
diff --git a/commit-reach.h b/commit-reach.h
index 99a43e8b64..b49ad71a31 100644
--- a/commit-reach.h
+++ b/commit-reach.h
@@ -27,7 +27,9 @@ struct commit_list *repo_get_merge_bases_many_dirty(struct repository *r,
struct commit_list *get_octopus_merge_bases(struct commit_list *in);
-int is_descendant_of(struct commit *commit, struct commit_list *with_commit);
+int repo_is_descendant_of(struct repository *r,
+ struct commit *commit,
+ struct commit_list *with_commit);
int repo_in_merge_bases(struct repository *r,
struct commit *commit,
struct commit *reference);
diff --git a/commit.c b/commit.c
index 87686a7055..43d29a800d 100644
--- a/commit.c
+++ b/commit.c
@@ -37,7 +37,7 @@ struct commit *lookup_commit_reference_gently(struct repository *r,
if (!obj)
return NULL;
- return object_as_type(r, obj, OBJ_COMMIT, quiet);
+ return object_as_type(obj, OBJ_COMMIT, quiet);
}
struct commit *lookup_commit_reference(struct repository *r, const struct object_id *oid)
@@ -62,7 +62,7 @@ struct commit *lookup_commit(struct repository *r, const struct object_id *oid)
struct object *obj = lookup_object(r, oid);
if (!obj)
return create_object(r, oid, alloc_commit_node(r));
- return object_as_type(r, obj, OBJ_COMMIT, 0);
+ return object_as_type(obj, OBJ_COMMIT, 0);
}
struct commit *lookup_commit_reference_by_name(const char *name)
@@ -339,7 +339,7 @@ struct tree *repo_get_commit_tree(struct repository *r,
if (commit->maybe_tree || !commit->object.parsed)
return commit->maybe_tree;
- if (commit->graph_pos != COMMIT_NOT_FROM_GRAPH)
+ if (commit_graph_position(commit) != COMMIT_NOT_FROM_GRAPH)
return get_commit_tree_in_graph(r, commit);
return NULL;
@@ -729,11 +729,13 @@ int compare_commits_by_author_date(const void *a_, const void *b_,
int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused)
{
const struct commit *a = a_, *b = b_;
+ const uint32_t generation_a = commit_graph_generation(a),
+ generation_b = commit_graph_generation(b);
/* newer commits first */
- if (a->generation < b->generation)
+ if (generation_a < generation_b)
return 1;
- else if (a->generation > b->generation)
+ else if (generation_a > generation_b)
return -1;
/* use date as a heuristic when generations are equal */
diff --git a/commit.h b/commit.h
index 1b2dea5d85..e901538909 100644
--- a/commit.h
+++ b/commit.h
@@ -36,8 +36,6 @@ struct commit {
* or get_commit_tree_oid().
*/
struct tree *maybe_tree;
- uint32_t graph_pos;
- uint32_t generation;
unsigned int index;
};
diff --git a/compat/vcbuild/scripts/clink.pl b/compat/vcbuild/scripts/clink.pl
index d9f71b7cbb..61ad084a7b 100755
--- a/compat/vcbuild/scripts/clink.pl
+++ b/compat/vcbuild/scripts/clink.pl
@@ -23,7 +23,9 @@ while (@ARGV) {
# before any "-l*" flags.
$is_debug = 1;
}
- if ("$arg" =~ /^-[DIMGOZ]/) {
+ if ("$arg" =~ /^-I\/mingw(32|64)/) {
+ # eat
+ } elsif ("$arg" =~ /^-[DIMGOZ]/) {
push(@cflags, $arg);
} elsif ("$arg" eq "-o") {
my $file_out = shift @ARGV;
diff --git a/connect.c b/connect.c
index 0df45a1108..e0d5b9fee0 100644
--- a/connect.c
+++ b/connect.c
@@ -18,7 +18,7 @@
static char *server_capabilities_v1;
static struct argv_array server_capabilities_v2 = ARGV_ARRAY_INIT;
-static const char *parse_feature_value(const char *, const char *, int *);
+static const char *next_server_feature_value(const char *feature, int *len, int *offset);
static int check_ref(const char *name, unsigned int flags)
{
@@ -83,6 +83,21 @@ int server_supports_v2(const char *c, int die_on_error)
return 0;
}
+int server_feature_v2(const char *c, const char **v)
+{
+ int i;
+
+ for (i = 0; i < server_capabilities_v2.argc; i++) {
+ const char *out;
+ if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+ (*out == '=')) {
+ *v = out + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
int server_supports_feature(const char *c, const char *feature,
int die_on_error)
{
@@ -181,17 +196,16 @@ reject:
static void annotate_refs_with_symref_info(struct ref *ref)
{
struct string_list symref = STRING_LIST_INIT_DUP;
- const char *feature_list = server_capabilities_v1;
+ int offset = 0;
- while (feature_list) {
+ while (1) {
int len;
const char *val;
- val = parse_feature_value(feature_list, "symref", &len);
+ val = next_server_feature_value("symref", &len, &offset);
if (!val)
break;
parse_one_symref_info(&symref, val, len);
- feature_list = val + 1;
}
string_list_sort(&symref);
@@ -205,21 +219,36 @@ static void annotate_refs_with_symref_info(struct ref *ref)
string_list_clear(&symref, 0);
}
-static void process_capabilities(const char *line, int *len)
+static void process_capabilities(struct packet_reader *reader, int *linelen)
{
+ const char *feat_val;
+ int feat_len;
+ const char *line = reader->line;
int nul_location = strlen(line);
- if (nul_location == *len)
+ if (nul_location == *linelen)
return;
server_capabilities_v1 = xstrdup(line + nul_location + 1);
- *len = nul_location;
+ *linelen = nul_location;
+
+ feat_val = server_feature_value("object-format", &feat_len);
+ if (feat_val) {
+ char *hash_name = xstrndup(feat_val, feat_len);
+ int hash_algo = hash_algo_by_name(hash_name);
+ if (hash_algo != GIT_HASH_UNKNOWN)
+ reader->hash_algo = &hash_algos[hash_algo];
+ free(hash_name);
+ } else {
+ reader->hash_algo = &hash_algos[GIT_HASH_SHA1];
+ }
}
-static int process_dummy_ref(const char *line)
+static int process_dummy_ref(const struct packet_reader *reader)
{
+ const char *line = reader->line;
struct object_id oid;
const char *name;
- if (parse_oid_hex(line, &oid, &name))
+ if (parse_oid_hex_algop(line, &oid, &name, reader->hash_algo))
return 0;
if (*name != ' ')
return 0;
@@ -235,13 +264,15 @@ static void check_no_capabilities(const char *line, int len)
line + strlen(line));
}
-static int process_ref(const char *line, int len, struct ref ***list,
- unsigned int flags, struct oid_array *extra_have)
+static int process_ref(const struct packet_reader *reader, int len,
+ struct ref ***list, unsigned int flags,
+ struct oid_array *extra_have)
{
+ const char *line = reader->line;
struct object_id old_oid;
const char *name;
- if (parse_oid_hex(line, &old_oid, &name))
+ if (parse_oid_hex_algop(line, &old_oid, &name, reader->hash_algo))
return 0;
if (*name != ' ')
return 0;
@@ -261,16 +292,17 @@ static int process_ref(const char *line, int len, struct ref ***list,
return 1;
}
-static int process_shallow(const char *line, int len,
+static int process_shallow(const struct packet_reader *reader, int len,
struct oid_array *shallow_points)
{
+ const char *line = reader->line;
const char *arg;
struct object_id old_oid;
if (!skip_prefix(line, "shallow ", &arg))
return 0;
- if (get_oid_hex(arg, &old_oid))
+ if (get_oid_hex_algop(arg, &old_oid, reader->hash_algo))
die(_("protocol error: expected shallow sha-1, got '%s'"), arg);
if (!shallow_points)
die(_("repository on the other end cannot be shallow"));
@@ -317,20 +349,20 @@ struct ref **get_remote_heads(struct packet_reader *reader,
switch (state) {
case EXPECTING_FIRST_REF:
- process_capabilities(reader->line, &len);
- if (process_dummy_ref(reader->line)) {
+ process_capabilities(reader, &len);
+ if (process_dummy_ref(reader)) {
state = EXPECTING_SHALLOW;
break;
}
state = EXPECTING_REF;
/* fallthrough */
case EXPECTING_REF:
- if (process_ref(reader->line, len, &list, flags, extra_have))
+ if (process_ref(reader, len, &list, flags, extra_have))
break;
state = EXPECTING_SHALLOW;
/* fallthrough */
case EXPECTING_SHALLOW:
- if (process_shallow(reader->line, len, shallow_points))
+ if (process_shallow(reader, len, shallow_points))
break;
die(_("protocol error: unexpected '%s'"), reader->line);
case EXPECTING_DONE:
@@ -344,7 +376,7 @@ struct ref **get_remote_heads(struct packet_reader *reader,
}
/* Returns 1 when a valid ref has been added to `list`, 0 otherwise */
-static int process_ref_v2(const char *line, struct ref ***list)
+static int process_ref_v2(struct packet_reader *reader, struct ref ***list)
{
int ret = 1;
int i = 0;
@@ -352,6 +384,7 @@ static int process_ref_v2(const char *line, struct ref ***list)
struct ref *ref;
struct string_list line_sections = STRING_LIST_INIT_DUP;
const char *end;
+ const char *line = reader->line;
/*
 * Ref lines have a number of fields which are space delimited. The
@@ -364,7 +397,7 @@ static int process_ref_v2(const char *line, struct ref ***list)
goto out;
}
- if (parse_oid_hex(line_sections.items[i++].string, &old_oid, &end) ||
+ if (parse_oid_hex_algop(line_sections.items[i++].string, &old_oid, &end, reader->hash_algo) ||
*end) {
ret = 0;
goto out;
@@ -372,7 +405,7 @@ static int process_ref_v2(const char *line, struct ref ***list)
ref = alloc_ref(line_sections.items[i++].string);
- oidcpy(&ref->old_oid, &old_oid);
+ memcpy(ref->old_oid.hash, old_oid.hash, reader->hash_algo->rawsz);
**list = ref;
*list = &ref->next;
@@ -385,7 +418,8 @@ static int process_ref_v2(const char *line, struct ref ***list)
struct object_id peeled_oid;
char *peeled_name;
struct ref *peeled;
- if (parse_oid_hex(arg, &peeled_oid, &end) || *end) {
+ if (parse_oid_hex_algop(arg, &peeled_oid, &end,
+ reader->hash_algo) || *end) {
ret = 0;
goto out;
}
@@ -393,7 +427,8 @@ static int process_ref_v2(const char *line, struct ref ***list)
peeled_name = xstrfmt("%s^{}", ref->name);
peeled = alloc_ref(peeled_name);
- oidcpy(&peeled->old_oid, &peeled_oid);
+ memcpy(peeled->old_oid.hash, peeled_oid.hash,
+ reader->hash_algo->rawsz);
**list = peeled;
*list = &peeled->next;
@@ -423,6 +458,7 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
int stateless_rpc)
{
int i;
+ const char *hash_name;
*list = NULL;
if (server_supports_v2("ls-refs", 1))
@@ -431,6 +467,16 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
if (server_supports_v2("agent", 0))
packet_write_fmt(fd_out, "agent=%s", git_user_agent_sanitized());
+ if (server_feature_v2("object-format", &hash_name)) {
+ int hash_algo = hash_algo_by_name(hash_name);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("unknown object format '%s' specified by server"), hash_name);
+ reader->hash_algo = &hash_algos[hash_algo];
+ packet_write_fmt(fd_out, "object-format=%s", reader->hash_algo->name);
+ } else {
+ reader->hash_algo = &hash_algos[GIT_HASH_SHA1];
+ }
+
if (server_options && server_options->nr &&
server_supports_v2("server-option", 1))
for (i = 0; i < server_options->nr; i++)
@@ -450,7 +496,7 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
/* Process response from server */
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
- if (!process_ref_v2(reader->line, &list))
+ if (!process_ref_v2(reader, &list))
die(_("invalid ls-refs response: %s"), reader->line);
}
@@ -463,7 +509,7 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
return list;
}
-static const char *parse_feature_value(const char *feature_list, const char *feature, int *lenp)
+const char *parse_feature_value(const char *feature_list, const char *feature, int *lenp, int *offset)
{
int len;
@@ -471,6 +517,8 @@ static const char *parse_feature_value(const char *feature_list, const char *fea
return NULL;
len = strlen(feature);
+ if (offset)
+ feature_list += *offset;
while (*feature_list) {
const char *found = strstr(feature_list, feature);
if (!found)
@@ -485,9 +533,14 @@ static const char *parse_feature_value(const char *feature_list, const char *fea
}
/* feature with a value (e.g., "agent=git/1.2.3") */
else if (*value == '=') {
+ int end;
+
value++;
+ end = strcspn(value, " \t\n");
if (lenp)
- *lenp = strcspn(value, " \t\n");
+ *lenp = end;
+ if (offset)
+ *offset = value + end - feature_list;
return value;
}
/*
@@ -500,14 +553,41 @@ static const char *parse_feature_value(const char *feature_list, const char *fea
return NULL;
}
+int server_supports_hash(const char *desired, int *feature_supported)
+{
+ int offset = 0;
+ int len;
+ const char *hash;
+
+ hash = next_server_feature_value("object-format", &len, &offset);
+ if (feature_supported)
+ *feature_supported = !!hash;
+ if (!hash) {
+ hash = hash_algos[GIT_HASH_SHA1].name;
+ len = strlen(hash);
+ }
+ while (hash) {
+ if (!xstrncmpz(desired, hash, len))
+ return 1;
+
+ hash = next_server_feature_value("object-format", &len, &offset);
+ }
+ return 0;
+}
+
int parse_feature_request(const char *feature_list, const char *feature)
{
- return !!parse_feature_value(feature_list, feature, NULL);
+ return !!parse_feature_value(feature_list, feature, NULL, NULL);
+}
+
+static const char *next_server_feature_value(const char *feature, int *len, int *offset)
+{
+ return parse_feature_value(server_capabilities_v1, feature, len, offset);
}
const char *server_feature_value(const char *feature, int *len)
{
- return parse_feature_value(server_capabilities_v1, feature, len);
+ return parse_feature_value(server_capabilities_v1, feature, len, NULL);
}
int server_supports(const char *feature)
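
The connect.c hunks above let a v1 capability advertisement carry more than one "object-format" value: parse_feature_value() gains an offset parameter so server_supports_hash() can resume the scan after each hit. The standalone C sketch below shows only the shape of that resumable scan; next_value() and its caller are invented for illustration, are not Git's API, and handle only "name=value" capabilities.

#include <stdio.h>
#include <string.h>

/* Toy resumable scan over a space-separated capability string. */
static const char *next_value(const char *caps, const char *feature,
			      size_t *len, size_t *offset)
{
	size_t flen = strlen(feature);
	const char *p = caps + *offset;

	while ((p = strstr(p, feature)) != NULL) {
		/* must start the list or follow a space, and carry a value */
		if ((p == caps || p[-1] == ' ') && p[flen] == '=') {
			const char *value = p + flen + 1;

			*len = strcspn(value, " ");
			*offset = (value + *len) - caps;
			return value;
		}
		p += flen;
	}
	return NULL;
}

int main(void)
{
	const char *caps =
		"agent=git/2.28.0 object-format=sha256 object-format=sha1";
	size_t len, offset = 0;
	const char *v;

	while ((v = next_value(caps, "object-format", &len, &offset)))
		printf("server offers: %.*s\n", (int)len, v);
	return 0;
}

Each call picks up where the previous one stopped, which is the same trick the patch uses to let the client check its own hash against every advertised value.
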
diff --git a/connect.h b/connect.h
index 235bc66254..c53586e929 100644
--- a/connect.h
+++ b/connect.h
@@ -18,7 +18,10 @@ int url_is_local_not_ssh(const char *url);
struct packet_reader;
enum protocol_version discover_version(struct packet_reader *reader);
+int server_supports_hash(const char *desired, int *feature_supported);
+const char *parse_feature_value(const char *feature_list, const char *feature, int *lenp, int *offset);
int server_supports_v2(const char *c, int die_on_error);
+int server_feature_v2(const char *c, const char **v);
int server_supports_feature(const char *c, const char *feature,
int die_on_error);
diff --git a/connected.c b/connected.c
index 3135b71e19..937b4bae38 100644
--- a/connected.c
+++ b/connected.c
@@ -43,10 +43,12 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
if (transport && transport->smart_options &&
transport->smart_options->self_contained_and_connected &&
- transport->pack_lockfile &&
- strip_suffix(transport->pack_lockfile, ".keep", &base_len)) {
+ transport->pack_lockfiles.nr == 1 &&
+ strip_suffix(transport->pack_lockfiles.items[0].string,
+ ".keep", &base_len)) {
struct strbuf idx_file = STRBUF_INIT;
- strbuf_add(&idx_file, transport->pack_lockfile, base_len);
+ strbuf_add(&idx_file, transport->pack_lockfiles.items[0].string,
+ base_len);
strbuf_addstr(&idx_file, ".idx");
new_pack = add_packed_git(idx_file.buf, idx_file.len, 1);
strbuf_release(&idx_file);
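
The connected.c change above only trusts the "self contained and connected" shortcut when the transport produced exactly one pack lockfile, and it derives the pack's index path by swapping the ".keep" suffix for ".idx". A minimal sketch of that suffix swap in plain C (keep_to_idx() is an invented name; Git itself uses strip_suffix() and strbufs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated "<base>.idx" if path ends in ".keep", else NULL. */
static char *keep_to_idx(const char *path)
{
	const char *suffix = ".keep";
	size_t len = strlen(path), slen = strlen(suffix);
	char *idx;

	if (len < slen || strcmp(path + len - slen, suffix))
		return NULL;
	idx = malloc(len - slen + strlen(".idx") + 1);
	if (!idx)
		return NULL;
	memcpy(idx, path, len - slen);
	strcpy(idx + len - slen, ".idx");
	return idx;
}

int main(void)
{
	char *idx = keep_to_idx(".git/objects/pack/pack-1234abcd.keep");

	if (idx) {
		printf("index file: %s\n", idx);	/* ...pack-1234abcd.idx */
		free(idx);
	}
	return 0;
}
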
diff --git a/contrib/coccinelle/commit.cocci b/contrib/coccinelle/commit.cocci
index 778e4704f6..af6dd4c20c 100644
--- a/contrib/coccinelle/commit.cocci
+++ b/contrib/coccinelle/commit.cocci
@@ -32,3 +32,21 @@ expression c;
- c->maybe_tree
+ repo_get_commit_tree(specify_the_right_repo_here, c)
...>}
+
+@@
+struct commit *c;
+expression E;
+@@
+(
+- c->generation = E;
++ commit_graph_data_at(c)->generation = E;
+|
+- c->graph_pos = E;
++ commit_graph_data_at(c)->graph_pos = E;
+|
+- c->generation
++ commit_graph_generation(c)
+|
+- c->graph_pos
++ commit_graph_position(c)
+)
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 4b59004847..de5d0fbbd1 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -301,6 +301,19 @@ __gitcomp_direct ()
COMPREPLY=($1)
}
+# Similar to __gitcomp_direct, but appends to COMPREPLY instead.
+# Callers must take care of providing only words that match the current word
+# to be completed and adding any prefix and/or suffix (trailing space!), if
+# necessary.
+# 1: List of newline-separated matching completion words, complete with
+# prefix and suffix.
+__gitcomp_direct_append ()
+{
+ local IFS=$'\n'
+
+ COMPREPLY+=($1)
+}
+
__gitcompappend ()
{
local x i=${#COMPREPLY[@]}
@@ -611,6 +624,19 @@ __git_heads ()
"refs/heads/$cur_*" "refs/heads/$cur_*/**"
}
+# Lists branches from remote repositories.
+# 1: A prefix to be added to each listed branch (optional).
+# 2: List only branches matching this word (optional; list all branches if
+# unset or empty).
+# 3: A suffix to be appended to each listed branch (optional).
+__git_remote_heads ()
+{
+ local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+
+ __git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ "refs/remotes/$cur_*" "refs/remotes/$cur_*/**"
+}
+
# Lists tags from the local repository.
# Accepts the same positional parameters as __git_heads() above.
__git_tags ()
@@ -621,6 +647,26 @@ __git_tags ()
"refs/tags/$cur_*" "refs/tags/$cur_*/**"
}
+# List unique branches from refs/remotes used for 'git checkout' and 'git
+# switch' tracking DWIMery.
+# 1: A prefix to be added to each listed branch (optional)
+# 2: List only branches matching this word (optional; list all branches if
+# unset or empty).
+# 3: A suffix to be appended to each listed branch (optional).
+__git_dwim_remote_heads ()
+{
+ local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local fer_pfx="${pfx//\%/%%}" # "escape" for-each-ref format specifiers
+
+ # employ the heuristic used by git checkout and git switch
+	# Try to find a remote branch that matches the completion word
+ # but only output if the branch name is unique
+ __git for-each-ref --format="$fer_pfx%(refname:strip=3)$sfx" \
+ --sort="refname:strip=3" \
+ "refs/remotes/*/$cur_*" "refs/remotes/*/$cur_*/**" | \
+ uniq -u
+}
+
# Lists refs from the local (by default) or from a remote repository.
# It accepts 0, 1 or 2 arguments:
# 1: The remote to list refs from (optional; ignored, if set but empty).
@@ -696,13 +742,7 @@ __git_refs ()
__git_dir="$dir" __git for-each-ref --format="$fer_pfx%($format)$sfx" \
"${refs[@]}"
if [ -n "$track" ]; then
- # employ the heuristic used by git checkout
- # Try to find a remote branch that matches the completion word
- # but only output if the branch name is unique
- __git for-each-ref --format="$fer_pfx%(refname:strip=3)$sfx" \
- --sort="refname:strip=3" \
- "refs/remotes/*/$match*" "refs/remotes/*/$match*/**" | \
- uniq -u
+ __git_dwim_remote_heads "$pfx" "$match" "$sfx"
fi
return
fi
@@ -749,29 +789,51 @@ __git_refs ()
# Usage: __git_complete_refs [<option>]...
# --remote=<remote>: The remote to list refs from, can be the name of a
# configured remote, a path, or a URL.
-# --track: List unique remote branches for 'git checkout's tracking DWIMery.
+# --dwim: List unique remote branches for 'git switch's tracking DWIMery.
# --pfx=<prefix>: A prefix to be added to each ref.
# --cur=<word>: The current ref to be completed. Defaults to the current
# word to be completed.
# --sfx=<suffix>: A suffix to be appended to each ref instead of the default
# space.
+# --mode=<mode>: What set of refs to complete, one of 'refs' (the default) to
+# complete all refs, 'heads' to complete only branches, or
+# 'remote-heads' to complete only remote branches. Note that
+# --remote is only compatible with --mode=refs.
__git_complete_refs ()
{
- local remote track pfx cur_="$cur" sfx=" "
+ local remote dwim pfx cur_="$cur" sfx=" " mode="refs"
while test $# != 0; do
case "$1" in
--remote=*) remote="${1##--remote=}" ;;
- --track) track="yes" ;;
+ --dwim) dwim="yes" ;;
+ # --track is an old spelling of --dwim
+ --track) dwim="yes" ;;
--pfx=*) pfx="${1##--pfx=}" ;;
--cur=*) cur_="${1##--cur=}" ;;
--sfx=*) sfx="${1##--sfx=}" ;;
+ --mode=*) mode="${1##--mode=}" ;;
*) return 1 ;;
esac
shift
done
- __gitcomp_direct "$(__git_refs "$remote" "$track" "$pfx" "$cur_" "$sfx")"
+ # complete references based on the specified mode
+ case "$mode" in
+ refs)
+ __gitcomp_direct "$(__git_refs "$remote" "" "$pfx" "$cur_" "$sfx")" ;;
+ heads)
+ __gitcomp_direct "$(__git_heads "$pfx" "$cur_" "$sfx")" ;;
+ remote-heads)
+ __gitcomp_direct "$(__git_remote_heads "$pfx" "$cur_" "$sfx")" ;;
+ *)
+ return 1 ;;
+ esac
+
+ # Append DWIM remote branch names if requested
+ if [ "$dwim" = "yes" ]; then
+ __gitcomp_direct_append "$(__git_dwim_remote_heads "$pfx" "$cur_" "$sfx")"
+ fi
}
# __git_refs2 requires 1 argument (to pass to __git_refs)
@@ -1102,6 +1164,40 @@ __git_find_on_cmdline ()
done
}
+# Similar to __git_find_on_cmdline, except that it loops backwards and thus
+# prints the *last* word found. Useful for finding which of two options that
+# supersede each other came last, such as "--guess" and "--no-guess".
+#
+# Usage: __git_find_last_on_cmdline [<option>]... "<wordlist>"
+# --show-idx: Optionally show the index of the found word in the $words array.
+__git_find_last_on_cmdline ()
+{
+ local word c=$cword show_idx
+
+ while test $# -gt 1; do
+ case "$1" in
+ --show-idx) show_idx=y ;;
+ *) return 1 ;;
+ esac
+ shift
+ done
+ local wordlist="$1"
+
+ while [ $c -gt 1 ]; do
+ ((c--))
+ for word in $wordlist; do
+ if [ "$word" = "${words[c]}" ]; then
+ if [ -n "$show_idx" ]; then
+ echo "$c $word"
+ else
+ echo "$word"
+ fi
+ return
+ fi
+ done
+ done
+}
+
# Echo the value of an option set on the command line or config
#
# $1: short option name
@@ -1356,6 +1452,46 @@ _git_bundle ()
esac
}
+# Helper function to decide whether or not we should enable DWIM logic for
+# git-switch and git-checkout.
+#
+# To decide between the following rules in priority order
+# 1) the last provided of "--guess" or "--no-guess" explicitly enables or
+# disables completion of DWIM logic respectively.
+# 2) If the --no-track option is provided, take this as a hint to disable the
+# DWIM completion logic
+# 3) If GIT_COMPLETION_CHECKOUT_NO_GUESS is set, disable the DWIM completion
+# logic, as requested by the user.
+# 4) Enable DWIM logic otherwise.
+#
+__git_checkout_default_dwim_mode ()
+{
+ local last_option dwim_opt="--dwim"
+
+ if [ "$GIT_COMPLETION_CHECKOUT_NO_GUESS" = "1" ]; then
+ dwim_opt=""
+ fi
+
+ # --no-track disables DWIM, but with lower priority than
+ # --guess/--no-guess
+ if [ -n "$(__git_find_on_cmdline "--no-track")" ]; then
+ dwim_opt=""
+ fi
+
+ # Find the last provided --guess or --no-guess
+ last_option="$(__git_find_last_on_cmdline "--guess --no-guess")"
+ case "$last_option" in
+ --guess)
+ dwim_opt="--dwim"
+ ;;
+ --no-guess)
+ dwim_opt=""
+ ;;
+ esac
+
+ echo "$dwim_opt"
+}
+
_git_checkout ()
{
__git_has_doubledash && return
@@ -1368,14 +1504,38 @@ _git_checkout ()
__gitcomp_builtin checkout
;;
*)
- # check if --track, --no-track, or --no-guess was specified
- # if so, disable DWIM mode
- local flags="--track --no-track --no-guess" track_opt="--track"
- if [ "$GIT_COMPLETION_CHECKOUT_NO_GUESS" = "1" ] ||
- [ -n "$(__git_find_on_cmdline "$flags")" ]; then
- track_opt=''
+ local dwim_opt="$(__git_checkout_default_dwim_mode)"
+ local prevword prevword="${words[cword-1]}"
+
+ case "$prevword" in
+ -b|-B|--orphan)
+ # Complete local branches (and DWIM branch
+ # remote branch names) for an option argument
+ # specifying a new branch name. This is for
+ # convenience, assuming new branches are
+ # possibly based on pre-existing branch names.
+ __git_complete_refs $dwim_opt --mode="heads"
+ return
+ ;;
+ *)
+ ;;
+ esac
+
+ # At this point, we've already handled special completion for
+ # the arguments to -b/-B, and --orphan. There are 3 main
+ # things left we can possibly complete:
+ # 1) a start-point for -b/-B, -d/--detach, or --orphan
+ # 2) a remote head, for --track
+ # 3) an arbitrary reference, possibly including DWIM names
+ #
+
+ if [ -n "$(__git_find_on_cmdline "-b -B -d --detach --orphan")" ]; then
+ __git_complete_refs --mode="refs"
+ elif [ -n "$(__git_find_on_cmdline "--track")" ]; then
+ __git_complete_refs --mode="remote-heads"
+ else
+ __git_complete_refs $dwim_opt --mode="refs"
fi
- __git_complete_refs $track_opt
;;
esac
}
@@ -2224,29 +2384,43 @@ _git_switch ()
__gitcomp_builtin switch
;;
*)
- # check if --track, --no-track, or --no-guess was specified
- # if so, disable DWIM mode
- local track_opt="--track" only_local_ref=n
- if [ "$GIT_COMPLETION_CHECKOUT_NO_GUESS" = "1" ] ||
- [ -n "$(__git_find_on_cmdline "--track --no-track --no-guess")" ]; then
- track_opt=''
- fi
- # explicit --guess enables DWIM mode regardless of
- # $GIT_COMPLETION_CHECKOUT_NO_GUESS
- if [ -n "$(__git_find_on_cmdline "--guess")" ]; then
- track_opt='--track'
- fi
- if [ -z "$(__git_find_on_cmdline "-d --detach")" ]; then
- only_local_ref=y
- else
- # --guess --detach is invalid combination, no
- # dwim will be done when --detach is specified
- track_opt=
+ local dwim_opt="$(__git_checkout_default_dwim_mode)"
+ local prevword prevword="${words[cword-1]}"
+
+ case "$prevword" in
+ -c|-C|--orphan)
+ # Complete local branches (and DWIM branch
+ # remote branch names) for an option argument
+ # specifying a new branch name. This is for
+ # convenience, assuming new branches are
+ # possibly based on pre-existing branch names.
+ __git_complete_refs $dwim_opt --mode="heads"
+ return
+ ;;
+ *)
+ ;;
+ esac
+
+ # Unlike in git checkout, git switch --orphan does not take
+ # a start point. Thus we really have nothing to complete after
+ # the branch name.
+ if [ -n "$(__git_find_on_cmdline "--orphan")" ]; then
+ return
fi
- if [ $only_local_ref = y -a -z "$track_opt" ]; then
- __gitcomp_direct "$(__git_heads "" "$cur" " ")"
+
+ # At this point, we've already handled special completion for
+ # -c/-C, and --orphan. There are 3 main things left to
+ # complete:
+ # 1) a start-point for -c/-C or -d/--detach
+ # 2) a remote head, for --track
+ # 3) a branch name, possibly including DWIM remote branches
+
+ if [ -n "$(__git_find_on_cmdline "-c -C -d --detach")" ]; then
+ __git_complete_refs --mode="refs"
+ elif [ -n "$(__git_find_on_cmdline "--track")" ]; then
+ __git_complete_refs --mode="remote-heads"
else
- __git_complete_refs $track_opt
+ __git_complete_refs $dwim_opt --mode="heads"
fi
;;
esac
diff --git a/contrib/completion/git-prompt.sh b/contrib/completion/git-prompt.sh
index 014cd7c3cf..e6cd5464e5 100644
--- a/contrib/completion/git-prompt.sh
+++ b/contrib/completion/git-prompt.sh
@@ -70,6 +70,15 @@
# state symbols by setting GIT_PS1_STATESEPARATOR. The default separator
# is SP.
#
+# When there is an in-progress operation such as a merge, rebase,
+# revert, cherry-pick, or bisect, the prompt will include information
+# related to the operation, often in the form "|<OPERATION-NAME>".
+#
+# When the repository has a sparse-checkout, a notification of the form
+# "|SPARSE" will be included in the prompt. This can be shortened to a
+# single '?' character by setting GIT_PS1_COMPRESSSPARSESTATE, or omitted
+# by setting GIT_PS1_OMITSPARSESTATE.
+#
# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
# find one, or @{upstream} otherwise. Once you have set
# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
@@ -421,6 +430,13 @@ __git_ps1 ()
return $exit
fi
+ local sparse=""
+ if [ -z "${GIT_PS1_COMPRESSSPARSESTATE}" ] &&
+ [ -z "${GIT_PS1_OMITSPARSESTATE}" ] &&
+ [ "$(git config --bool core.sparseCheckout)" == "true" ]; then
+ sparse="|SPARSE"
+ fi
+
local r=""
local b=""
local step=""
@@ -492,6 +508,7 @@ __git_ps1 ()
local i=""
local s=""
local u=""
+ local h=""
local c=""
local p=""
@@ -524,6 +541,11 @@ __git_ps1 ()
u="%${ZSH_VERSION+%}"
fi
+ if [ -n "${GIT_PS1_COMPRESSSPARSESTATE}" ] &&
+ [ "$(git config --bool core.sparseCheckout)" == "true" ]; then
+ h="?"
+ fi
+
if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
__git_ps1_show_upstream
fi
@@ -542,8 +564,8 @@ __git_ps1 ()
b="\${__git_ps1_branch_name}"
fi
- local f="$w$i$s$u"
- local gitstring="$c$b${f:+$z$f}$r$p"
+ local f="$h$w$i$s$u"
+ local gitstring="$c$b${f:+$z$f}${sparse}$r$p"
if [ $pcmode = yes ]; then
if [ "${__git_printf_supports_v-}" != yes ]; then
diff --git a/diff.c b/diff.c
index 863da896c0..d24aaa3047 100644
--- a/diff.c
+++ b/diff.c
@@ -6763,8 +6763,11 @@ void diff_change(struct diff_options *options,
return;
if (options->flags.quick && options->skip_stat_unmatch &&
- !diff_filespec_check_stat_unmatch(options->repo, p))
+ !diff_filespec_check_stat_unmatch(options->repo, p)) {
+ diff_free_filespec_data(p->one);
+ diff_free_filespec_data(p->two);
return;
+ }
options->flags.has_changes = 1;
}
diff --git a/dir.c b/dir.c
index d97e955848..1045cc9c6f 100644
--- a/dir.c
+++ b/dir.c
@@ -193,6 +193,10 @@ int fill_directory(struct dir_struct *dir,
const char *prefix;
size_t prefix_len;
+ unsigned exclusive_flags = DIR_SHOW_IGNORED | DIR_SHOW_IGNORED_TOO;
+ if ((dir->flags & exclusive_flags) == exclusive_flags)
+ BUG("DIR_SHOW_IGNORED and DIR_SHOW_IGNORED_TOO are exclusive");
+
/*
* Calculate common prefix for the pathspec, and
* use that to optimize the directory walk
@@ -364,7 +368,8 @@ static int match_pathspec_item(const struct index_state *istate,
return MATCHED_FNMATCH;
/* Perform checks to see if "name" is a leading string of the pathspec */
- if (flags & DO_MATCH_LEADING_PATHSPEC) {
+ if ( (flags & DO_MATCH_LEADING_PATHSPEC) &&
+ !(flags & DO_MATCH_EXCLUDE)) {
/* name is a literal prefix of the pathspec */
int offset = name[namelen-1] == '/' ? 1 : 0;
if ((namelen < matchlen) &&
@@ -401,6 +406,10 @@ static int match_pathspec_item(const struct index_state *istate,
}
/*
+ * do_match_pathspec() is meant to ONLY be called by
+ * match_pathspec_with_flags(); calling it directly risks pathspecs
+ * like ':!unwanted_path' being ignored.
+ *
* Given a name and a list of pathspecs, returns the nature of the
* closest (i.e. most specific) match of the name to any of the
* pathspecs.
@@ -486,13 +495,12 @@ static int do_match_pathspec(const struct index_state *istate,
return retval;
}
-int match_pathspec(const struct index_state *istate,
- const struct pathspec *ps,
- const char *name, int namelen,
- int prefix, char *seen, int is_dir)
+static int match_pathspec_with_flags(const struct index_state *istate,
+ const struct pathspec *ps,
+ const char *name, int namelen,
+ int prefix, char *seen, unsigned flags)
{
int positive, negative;
- unsigned flags = is_dir ? DO_MATCH_DIRECTORY : 0;
positive = do_match_pathspec(istate, ps, name, namelen,
prefix, seen, flags);
if (!(ps->magic & PATHSPEC_EXCLUDE) || !positive)
@@ -503,6 +511,16 @@ int match_pathspec(const struct index_state *istate,
return negative ? 0 : positive;
}
+int match_pathspec(const struct index_state *istate,
+ const struct pathspec *ps,
+ const char *name, int namelen,
+ int prefix, char *seen, int is_dir)
+{
+ unsigned flags = is_dir ? DO_MATCH_DIRECTORY : 0;
+ return match_pathspec_with_flags(istate, ps, name, namelen,
+ prefix, seen, flags);
+}
+
/**
* Check if a submodule is a superset of the pathspec
*/
@@ -511,11 +529,11 @@ int submodule_path_match(const struct index_state *istate,
const char *submodule_name,
char *seen)
{
- int matched = do_match_pathspec(istate, ps, submodule_name,
- strlen(submodule_name),
- 0, seen,
- DO_MATCH_DIRECTORY |
- DO_MATCH_LEADING_PATHSPEC);
+ int matched = match_pathspec_with_flags(istate, ps, submodule_name,
+ strlen(submodule_name),
+ 0, seen,
+ DO_MATCH_DIRECTORY |
+ DO_MATCH_LEADING_PATHSPEC);
return matched;
}
@@ -1757,9 +1775,11 @@ static enum path_treatment treat_directory(struct dir_struct *dir,
* for matching patterns.
*/
if (pathspec && !excluded) {
- matches_how = do_match_pathspec(istate, pathspec, dirname, len,
- 0 /* prefix */, NULL /* seen */,
- DO_MATCH_LEADING_PATHSPEC);
+ matches_how = match_pathspec_with_flags(istate, pathspec,
+ dirname, len,
+ 0 /* prefix */,
+ NULL /* seen */,
+ DO_MATCH_LEADING_PATHSPEC);
if (!matches_how)
return path_none;
}
@@ -1820,7 +1840,7 @@ static enum path_treatment treat_directory(struct dir_struct *dir,
* to recurse into untracked/ignored directories if either of the
* following bits is set:
* - DIR_SHOW_IGNORED_TOO (because then we need to determine if
- * there are ignored directories below)
+ * there are ignored entries below)
* - DIR_HIDE_EMPTY_DIRECTORIES (because we have to determine if
* the directory is empty)
*/
@@ -1838,10 +1858,11 @@ static enum path_treatment treat_directory(struct dir_struct *dir,
return path_excluded;
/*
- * If we have we don't want to know the all the paths under an
- * untracked or ignored directory, we still need to go into the
- * directory to determine if it is empty (because an empty directory
- * should be path_none instead of path_excluded or path_untracked).
+ * Even if we don't want to know all the paths under an untracked or
+ * ignored directory, we may still need to go into the directory to
+ * determine if it is empty (because with DIR_HIDE_EMPTY_DIRECTORIES,
+ * an empty directory should be path_none instead of path_excluded or
+ * path_untracked).
*/
check_only = ((dir->flags & DIR_HIDE_EMPTY_DIRECTORIES) &&
!(dir->flags & DIR_SHOW_IGNORED_TOO));
@@ -2191,9 +2212,9 @@ static enum path_treatment treat_path(struct dir_struct *dir,
if (excluded)
return path_excluded;
if (pathspec &&
- !do_match_pathspec(istate, pathspec, path->buf, path->len,
- 0 /* prefix */, NULL /* seen */,
- 0 /* flags */))
+ !match_pathspec(istate, pathspec, path->buf, path->len,
+ 0 /* prefix */, NULL /* seen */,
+ 0 /* is_dir */))
return path_none;
return path_untracked;
}
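
The dir.c hunks route callers through match_pathspec_with_flags() so that negative pathspecs such as ':!unwanted_path' get their second, exclude-flagged pass instead of being silently ignored. The toy model below captures just that two-pass rule using plain prefix strings; it is not Git's pathspec machinery.

#include <stdio.h>
#include <string.h>

/*
 * A path is selected when it matches at least one positive pattern
 * and no ":!" (exclude) pattern.
 */
static int matches_prefix(const char *path, const char *pattern)
{
	return !strncmp(path, pattern, strlen(pattern));
}

static int path_selected(const char *path, const char **specs, int nr)
{
	int positive = 0, negative = 0, i;

	for (i = 0; i < nr; i++) {
		const char *s = specs[i];

		if (!strncmp(s, ":!", 2))
			negative |= matches_prefix(path, s + 2);
		else
			positive |= matches_prefix(path, s);
	}
	return positive && !negative;
}

int main(void)
{
	const char *specs[] = { "src/", ":!src/vendor/" };

	printf("%d\n", path_selected("src/main.c", specs, 2));     /* 1 */
	printf("%d\n", path_selected("src/vendor/x.c", specs, 2)); /* 0 */
	return 0;
}

Running only the positive pass, which is what a stray do_match_pathspec() caller amounts to, would report the second path as a match as well.
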
diff --git a/fetch-pack.c b/fetch-pack.c
index d8bbf45ee2..80fb3bd899 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -38,6 +38,7 @@ static int server_supports_filtering;
static struct shallow_lock shallow_lock;
static const char *alternate_shallow_file;
static struct strbuf fsck_msg_types = STRBUF_INIT;
+static struct string_list uri_protocols = STRING_LIST_INIT_DUP;
/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
@@ -794,7 +795,8 @@ static void write_promisor_file(const char *keep_name,
}
static int get_pack(struct fetch_pack_args *args,
- int xd[2], char **pack_lockfile,
+ int xd[2], struct string_list *pack_lockfiles,
+ int only_packfile,
struct ref **sought, int nr_sought)
{
struct async demux;
@@ -838,7 +840,7 @@ static int get_pack(struct fetch_pack_args *args,
}
if (do_keep || args->from_promisor) {
- if (pack_lockfile)
+ if (pack_lockfiles)
cmd.out = -1;
cmd_name = "index-pack";
argv_array_push(&cmd.args, cmd_name);
@@ -855,15 +857,22 @@ static int get_pack(struct fetch_pack_args *args,
"--keep=fetch-pack %"PRIuMAX " on %s",
(uintmax_t)getpid(), hostname);
}
- if (args->check_self_contained_and_connected)
+ if (only_packfile && args->check_self_contained_and_connected)
argv_array_push(&cmd.args, "--check-self-contained-and-connected");
+ else
+ /*
+ * We cannot perform any connectivity checks because
+ * not all packs have been downloaded; let the caller
+ * have this responsibility.
+ */
+ args->check_self_contained_and_connected = 0;
/*
* If we're obtaining the filename of a lockfile, we'll use
* that filename to write a .promisor file with more
* information below. If not, we need index-pack to do it for
* us.
*/
- if (!(do_keep && pack_lockfile) && args->from_promisor)
+ if (!(do_keep && pack_lockfiles) && args->from_promisor)
argv_array_push(&cmd.args, "--promisor");
}
else {
@@ -899,8 +908,9 @@ static int get_pack(struct fetch_pack_args *args,
cmd.git_cmd = 1;
if (start_command(&cmd))
die(_("fetch-pack: unable to fork off %s"), cmd_name);
- if (do_keep && pack_lockfile) {
- *pack_lockfile = index_pack_lockfile(cmd.out);
+ if (do_keep && pack_lockfiles) {
+ string_list_append_nodup(pack_lockfiles,
+ index_pack_lockfile(cmd.out));
close(cmd.out);
}
@@ -922,8 +932,8 @@ static int get_pack(struct fetch_pack_args *args,
* Now that index-pack has succeeded, write the promisor file using the
* obtained .keep filename if necessary
*/
- if (do_keep && pack_lockfile && args->from_promisor)
- write_promisor_file(*pack_lockfile, sought, nr_sought);
+ if (do_keep && pack_lockfiles && pack_lockfiles->nr && args->from_promisor)
+ write_promisor_file(pack_lockfiles->items[0].string, sought, nr_sought);
return 0;
}
@@ -940,7 +950,7 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
const struct ref *orig_ref,
struct ref **sought, int nr_sought,
struct shallow_info *si,
- char **pack_lockfile)
+ struct string_list *pack_lockfiles)
{
struct repository *r = the_repository;
struct ref *ref = copy_ref_list(orig_ref);
@@ -1040,6 +1050,8 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
print_verbose(args, _("Server supports %s"), "deepen-relative");
else if (args->deepen_relative)
die(_("Server does not support --deepen"));
+ if (!server_supports_hash(the_hash_algo->name, NULL))
+ die(_("Server does not support this repository's object format"));
if (!args->no_dependents) {
mark_complete_and_common_ref(negotiator, args, &ref);
@@ -1067,7 +1079,7 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
alternate_shallow_file = setup_temporary_shallow(si->shallow);
else
alternate_shallow_file = NULL;
- if (get_pack(args, fd, pack_lockfile, sought, nr_sought))
+ if (get_pack(args, fd, pack_lockfiles, 1, sought, nr_sought))
die(_("git fetch-pack: fetch failed."));
all_done:
@@ -1178,6 +1190,7 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
int sideband_all, int seen_ack)
{
int ret = 0;
+ const char *hash_name;
struct strbuf req_buf = STRBUF_INIT;
if (server_supports_v2("fetch", 1))
@@ -1192,6 +1205,17 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
args->server_options->items[i].string);
}
+ if (server_feature_v2("object-format", &hash_name)) {
+ int hash_algo = hash_algo_by_name(hash_name);
+ if (hash_algo_by_ptr(the_hash_algo) != hash_algo)
+ die(_("mismatched algorithms: client %s; server %s"),
+ the_hash_algo->name, hash_name);
+ packet_write_fmt(fd_out, "object-format=%s", the_hash_algo->name);
+ } else if (hash_algo_by_ptr(the_hash_algo) != GIT_HASH_SHA1) {
+ die(_("the server does not support algorithm '%s'"),
+ the_hash_algo->name);
+ }
+
packet_buf_delim(&req_buf);
if (args->use_thin_pack)
packet_buf_write(&req_buf, "thin-pack");
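
The object-format exchange added to send_fetch_request() boils down to one rule: an advertised format must equal the client's, and a server that advertises nothing is assumed to speak SHA-1 only. A compact sketch of that decision (names are illustrative, not Git's):

#include <stdio.h>
#include <string.h>

/* ours: client algorithm name; servers: advertised name, or NULL if absent. */
static int formats_compatible(const char *ours, const char *servers)
{
	if (servers)
		return !strcmp(ours, servers);
	return !strcmp(ours, "sha1");	/* no advertisement implies SHA-1 */
}

int main(void)
{
	printf("%d\n", formats_compatible("sha256", "sha256")); /* 1 */
	printf("%d\n", formats_compatible("sha256", NULL));     /* 0: would die() */
	printf("%d\n", formats_compatible("sha1", NULL));       /* 1 */
	return 0;
}
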
@@ -1221,6 +1245,26 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
warning("filtering not recognized by server, ignoring");
}
+ if (server_supports_feature("fetch", "packfile-uris", 0)) {
+ int i;
+ struct strbuf to_send = STRBUF_INIT;
+
+ for (i = 0; i < uri_protocols.nr; i++) {
+ const char *s = uri_protocols.items[i].string;
+
+ if (!strcmp(s, "https") || !strcmp(s, "http")) {
+ if (to_send.len)
+ strbuf_addch(&to_send, ',');
+ strbuf_addstr(&to_send, s);
+ }
+ }
+ if (to_send.len) {
+ packet_buf_write(&req_buf, "packfile-uris %s",
+ to_send.buf);
+ strbuf_release(&to_send);
+ }
+ }
+
/* add wants */
add_wants(args->no_dependents, wants, &req_buf);
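
When the server offers "packfile-uris", the client advertises which URI schemes it will follow by filtering its configured protocol list down to http/https and joining the survivors with commas. A self-contained sketch of that assembly (fixed buffer instead of Git's strbuf):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *configured[] = { "https", "ftp", "http" };
	char to_send[64] = "";
	size_t i;

	for (i = 0; i < sizeof(configured) / sizeof(*configured); i++) {
		const char *s = configured[i];

		if (strcmp(s, "https") && strcmp(s, "http"))
			continue;	/* unsupported scheme, drop it */
		if (*to_send)
			strncat(to_send, ",", sizeof(to_send) - strlen(to_send) - 1);
		strncat(to_send, s, sizeof(to_send) - strlen(to_send) - 1);
	}
	if (*to_send)
		printf("packfile-uris %s\n", to_send);	/* packfile-uris https,http */
	return 0;
}
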
@@ -1443,6 +1487,21 @@ static void receive_wanted_refs(struct packet_reader *reader,
die(_("error processing wanted refs: %d"), reader->status);
}
+static void receive_packfile_uris(struct packet_reader *reader,
+ struct string_list *uris)
+{
+ process_section_header(reader, "packfile-uris", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ if (reader->pktlen < the_hash_algo->hexsz ||
+ reader->line[the_hash_algo->hexsz] != ' ')
+ die("expected '<hash> <uri>', got: %s\n", reader->line);
+
+ string_list_append(uris, reader->line);
+ }
+ if (reader->status != PACKET_READ_DELIM)
+ die("expected DELIM");
+}
+
enum fetch_state {
FETCH_CHECK_LOCAL = 0,
FETCH_SEND_REQUEST,
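
Each line in the packfile-uris section pairs a pack hash with the URI it can be fetched from. The sketch below checks that shape; it is slightly stricter than the hunk above, which only verifies the length and the separating space, and it assumes a 40-character SHA-1 hexsz.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Return the URI part of "<hexsz hex digits> <uri>", or NULL if malformed. */
static const char *parse_packfile_uri_line(const char *line, size_t hexsz)
{
	size_t i;

	if (strlen(line) < hexsz + 2)
		return NULL;
	for (i = 0; i < hexsz; i++)
		if (!isxdigit((unsigned char)line[i]))
			return NULL;
	if (line[hexsz] != ' ')
		return NULL;
	return line + hexsz + 1;
}

int main(void)
{
	const char *line =
		"0123456789abcdef0123456789abcdef01234567 https://cdn.example.com/p.pack";
	const char *uri = parse_packfile_uri_line(line, 40);

	if (uri)
		printf("download from: %s\n", uri);
	return 0;
}
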
@@ -1464,7 +1523,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct ref **sought, int nr_sought,
struct oid_array *shallows,
struct shallow_info *si,
- char **pack_lockfile)
+ struct string_list *pack_lockfiles)
{
struct repository *r = the_repository;
struct ref *ref = copy_ref_list(orig_ref);
@@ -1476,6 +1535,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct fetch_negotiator negotiator_alloc;
struct fetch_negotiator *negotiator;
int seen_ack = 0;
+ struct string_list packfile_uris = STRING_LIST_INIT_DUP;
+ int i;
if (args->no_dependents) {
negotiator = NULL;
@@ -1569,9 +1630,12 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
if (process_section_header(&reader, "wanted-refs", 1))
receive_wanted_refs(&reader, sought, nr_sought);
- /* get the pack */
+ /* get the pack(s) */
+ if (process_section_header(&reader, "packfile-uris", 1))
+ receive_packfile_uris(&reader, &packfile_uris);
process_section_header(&reader, "packfile", 0);
- if (get_pack(args, fd, pack_lockfile, sought, nr_sought))
+ if (get_pack(args, fd, pack_lockfiles,
+ !packfile_uris.nr, sought, nr_sought))
die(_("git fetch-pack: fetch failed."));
do_check_stateless_delimiter(args, &reader);
@@ -1582,8 +1646,55 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
}
}
+ for (i = 0; i < packfile_uris.nr; i++) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ char packname[GIT_MAX_HEXSZ + 1];
+ const char *uri = packfile_uris.items[i].string +
+ the_hash_algo->hexsz + 1;
+
+ argv_array_push(&cmd.args, "http-fetch");
+ argv_array_pushf(&cmd.args, "--packfile=%.*s",
+ (int) the_hash_algo->hexsz,
+ packfile_uris.items[i].string);
+ argv_array_push(&cmd.args, uri);
+ cmd.git_cmd = 1;
+ cmd.no_stdin = 1;
+ cmd.out = -1;
+ if (start_command(&cmd))
+ die("fetch-pack: unable to spawn http-fetch");
+
+ if (read_in_full(cmd.out, packname, 5) < 0 ||
+ memcmp(packname, "keep\t", 5))
+ die("fetch-pack: expected keep then TAB at start of http-fetch output");
+
+ if (read_in_full(cmd.out, packname,
+ the_hash_algo->hexsz + 1) < 0 ||
+ packname[the_hash_algo->hexsz] != '\n')
+ die("fetch-pack: expected hash then LF at end of http-fetch output");
+
+ packname[the_hash_algo->hexsz] = '\0';
+
+ close(cmd.out);
+
+ if (finish_command(&cmd))
+ die("fetch-pack: unable to finish http-fetch");
+
+ if (memcmp(packfile_uris.items[i].string, packname,
+ the_hash_algo->hexsz))
+ die("fetch-pack: pack downloaded from %s does not match expected hash %.*s",
+ uri, (int) the_hash_algo->hexsz,
+ packfile_uris.items[i].string);
+
+ string_list_append_nodup(pack_lockfiles,
+ xstrfmt("%s/pack/pack-%s.keep",
+ get_object_directory(),
+ packname));
+ }
+ string_list_clear(&packfile_uris, 0);
+
if (negotiator)
negotiator->release(negotiator);
+
oidset_clear(&common);
return ref;
}
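
The loop above shells out to `git http-fetch --packfile=<hash>` for every advertised URI and trusts a tiny handshake on its stdout: the literal "keep", a TAB, the hex name of the freshly indexed pack, and a newline. A sketch of parsing that handshake (SHA-1 width assumed, helper name invented):

#include <stdio.h>
#include <string.h>

static int parse_keep_line(const char *buf, size_t hexsz,
			   char *packname, size_t packname_sz)
{
	if (strncmp(buf, "keep\t", 5))
		return -1;	/* missing "keep<TAB>" prefix */
	if (strlen(buf + 5) < hexsz + 1 || buf[5 + hexsz] != '\n')
		return -1;	/* hash not followed by LF */
	if (packname_sz <= hexsz)
		return -1;
	memcpy(packname, buf + 5, hexsz);
	packname[hexsz] = '\0';
	return 0;
}

int main(void)
{
	char packname[41];
	const char *output =
		"keep\t0123456789abcdef0123456789abcdef01234567\n";

	if (!parse_keep_line(output, 40, packname, sizeof(packname)))
		printf("downloaded pack-%s\n", packname);
	return 0;
}

fetch-pack then compares that name against the hash from the packfile-uris line and records the pack's .keep file as one more lockfile.
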
@@ -1620,6 +1731,14 @@ static void fetch_pack_config(void)
git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
+ if (!uri_protocols.nr) {
+ char *str;
+
+ if (!git_config_get_string("fetch.uriprotocols", &str) && str) {
+ string_list_split(&uri_protocols, str, ',', -1);
+ free(str);
+ }
+ }
git_config(fetch_pack_config_cb, NULL);
}
@@ -1772,7 +1891,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
const struct ref *ref,
struct ref **sought, int nr_sought,
struct oid_array *shallow,
- char **pack_lockfile,
+ struct string_list *pack_lockfiles,
enum protocol_version version)
{
struct ref *ref_cpy;
@@ -1807,11 +1926,11 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
memset(&si, 0, sizeof(si));
ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
&shallows_scratch, &si,
- pack_lockfile);
+ pack_lockfiles);
} else {
prepare_shallow_info(&si, shallow);
ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
- &si, pack_lockfile);
+ &si, pack_lockfiles);
}
reprepare_packed_git(the_repository);
diff --git a/fetch-pack.h b/fetch-pack.h
index 67f684229a..85d1e39fe7 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -83,7 +83,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
struct ref **sought,
int nr_sought,
struct oid_array *shallow,
- char **pack_lockfile,
+ struct string_list *pack_lockfiles,
enum protocol_version version);
/*
diff --git a/fuzz-commit-graph.c b/fuzz-commit-graph.c
index 9fd1c04edd..430817214d 100644
--- a/fuzz-commit-graph.c
+++ b/fuzz-commit-graph.c
@@ -12,7 +12,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
initialize_the_repository();
g = parse_commit_graph((void *)data, size);
repo_clear(the_repository);
- free(g);
+ free_commit_graph(g);
return 0;
}
diff --git a/git-compat-util.h b/git-compat-util.h
index a73632e8e4..5637114b8d 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -869,6 +869,12 @@ FILE *fopen_for_writing(const char *path);
FILE *fopen_or_warn(const char *path, const char *mode);
/*
+ * Like strncmp, but only return zero if s is NUL-terminated and exactly len
+ * characters long. If it is not, consider it greater than t.
+ */
+int xstrncmpz(const char *s, const char *t, size_t len);
+
+/*
* FREE_AND_NULL(ptr) is like free(ptr) followed by ptr = NULL. Note
* that ptr is used twice, so don't pass e.g. ptr++.
*/
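
The xstrncmpz() declaration above only documents the contract; a body satisfying it can be as small as the sketch below (written from the comment, not copied from git-compat-util).

#include <stdio.h>
#include <string.h>

/* Like strncmp, but equal only if s is exactly len characters long. */
static int xstrncmpz_sketch(const char *s, const char *t, size_t len)
{
	int res = strncmp(s, t, len);

	if (res)
		return res;
	return s[len] == '\0' ? 0 : 1;	/* a longer s compares greater */
}

int main(void)
{
	printf("%d\n", xstrncmpz_sketch("sha256", "sha256 more-caps", 6));   /* 0 */
	printf("%d\n", xstrncmpz_sketch("sha256xx", "sha256 more-caps", 6)); /* >0 */
	return 0;
}

This is what lets server_supports_hash() compare a NUL-terminated algorithm name against an unterminated span inside the capability string.
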
diff --git a/git-cvsexportcommit.perl b/git-cvsexportcommit.perl
index fc00d5946a..6483d792d3 100755
--- a/git-cvsexportcommit.perl
+++ b/git-cvsexportcommit.perl
@@ -22,6 +22,10 @@ die "Need at least one commit identifier!" unless @ARGV;
my $repo = Git->repository();
$opt_w = $repo->config('cvsexportcommit.cvsdir') unless defined $opt_w;
+my $tmpdir = File::Temp->newdir;
+my $hash_algo = $repo->config('extensions.objectformat') || 'sha1';
+my $hexsz = $hash_algo eq 'sha256' ? 64 : 40;
+
if ($opt_w || $opt_W) {
# Remember where GIT_DIR is before changing to CVS checkout
unless ($ENV{GIT_DIR}) {
@@ -96,7 +100,7 @@ foreach my $line (@commit) {
}
if ($stage eq 'headers') {
- if ($line =~ m/^parent (\w{40})$/) { # found a parent
+ if ($line =~ m/^parent ([0-9a-f]{$hexsz})$/) { # found a parent
push @parents, $1;
} elsif ($line =~ m/^author (.+) \d+ [-+]\d+$/) {
$author = $1;
@@ -111,7 +115,7 @@ foreach my $line (@commit) {
}
}
-my $noparent = "0000000000000000000000000000000000000000";
+my $noparent = "0" x $hexsz;
if ($parent) {
my $found;
# double check that it's a valid parent
@@ -174,7 +178,7 @@ my $context = $opt_p ? '' : '-C1';
print "Checking if patch will apply\n";
my @stat;
-open APPLY, "GIT_DIR= git-apply $context --summary --numstat<.cvsexportcommit.diff|" || die "cannot patch";
+open APPLY, "GIT_INDEX_FILE=$tmpdir/index git-apply $context --summary --numstat<.cvsexportcommit.diff|" || die "cannot patch";
@stat=<APPLY>;
close APPLY || die "Cannot patch";
my (@bfiles,@files,@afiles,@dfiles);
@@ -329,7 +333,7 @@ print "Applying\n";
if ($opt_W) {
system("git checkout -q $commit^0") && die "cannot patch";
} else {
- `GIT_DIR= git-apply $context --summary --numstat --apply <.cvsexportcommit.diff` || die "cannot patch";
+ `GIT_INDEX_FILE=$tmpdir/index git-apply $context --summary --numstat --apply <.cvsexportcommit.diff` || die "cannot patch";
}
print "Patch applied successfully. Adding new files and directories to CVS\n";
@@ -407,7 +411,7 @@ unlink(".cvsexportcommit.diff");
if ($opt_W) {
system("git checkout $go_back_to") && die "cannot move back to $go_back_to";
- if (!($go_back_to =~ /^[0-9a-fA-F]{40}$/)) {
+ if (!($go_back_to =~ /^[0-9a-fA-F]{$hexsz}$/)) {
system("git symbolic-ref HEAD $go_back_to") &&
die "cannot move back to $go_back_to";
}
diff --git a/git-cvsimport.perl b/git-cvsimport.perl
index 1057f389d3..7bf3c12d67 100755
--- a/git-cvsimport.perl
+++ b/git-cvsimport.perl
@@ -637,9 +637,9 @@ sub getwd() {
return $pwd;
}
-sub is_sha1 {
+sub is_oid {
my $s = shift;
- return $s =~ /^[a-f0-9]{40}$/;
+ return $s =~ /^[a-f0-9]{40}(?:[a-f0-9]{24})?$/;
}
sub get_headref ($) {
@@ -810,7 +810,7 @@ sub write_tree () {
open(my $fh, '-|', qw(git write-tree))
or die "unable to open git write-tree: $!";
chomp(my $tree = <$fh>);
- is_sha1($tree)
+ is_oid($tree)
or die "Cannot get tree id ($tree): $!";
close($fh)
or die "Error running git write-tree: $?\n";
@@ -896,7 +896,7 @@ sub commit {
print "Committed patch $patchset ($branch $commit_date)\n" if $opt_v;
chomp(my $cid = <$commit_read>);
- is_sha1($cid) or die "Cannot get commit id ($cid): $!\n";
+ is_oid($cid) or die "Cannot get commit id ($cid): $!\n";
print "Commit ID $cid\n" if $opt_v;
close($commit_read);
diff --git a/git-cvsserver.perl b/git-cvsserver.perl
index ae1044273d..f6f3fc192c 100755
--- a/git-cvsserver.perl
+++ b/git-cvsserver.perl
@@ -365,7 +365,7 @@ sub req_Root
}
foreach my $line ( @gitvars )
{
- next unless ( $line =~ /^(gitcvs)\.(?:(ext|pserver)\.)?([\w-]+)=(.*)$/ );
+ next unless ( $line =~ /^(gitcvs|extensions)\.(?:(ext|pserver)\.)?([\w-]+)=(.*)$/ );
unless ($2) {
$cfg->{$1}{$3} = $4;
} else {
@@ -392,6 +392,9 @@ sub req_Root
$log->nofile();
}
+ $state->{rawsz} = ($cfg->{'extensions'}{'objectformat'} || 'sha1') eq 'sha256' ? 32 : 20;
+ $state->{hexsz} = $state->{rawsz} * 2;
+
return 1;
}
@@ -1581,7 +1584,7 @@ sub req_ci
$parenthash = safe_pipe_capture('git', 'show-ref', '-s', $branchRef);
chomp $parenthash;
- if ($parenthash !~ /^[0-9a-f]{40}$/)
+ if ($parenthash !~ /^[0-9a-f]{$state->{hexsz}}$/)
{
if ( defined($stickyInfo) && defined($stickyInfo->{tag}) )
{
@@ -1708,7 +1711,7 @@ sub req_ci
chomp($commithash);
$log->info("Commit hash : $commithash");
- unless ( $commithash =~ /[a-zA-Z0-9]{40}/ )
+ unless ( $commithash =~ /[a-zA-Z0-9]{$state->{hexsz}}/ )
{
$log->warn("Commit failed (Invalid commit hash)");
print "error 1 Commit failed (unknown reason)\n";
@@ -2375,7 +2378,7 @@ sub req_annotate
print "E ***************\n";
while ( <ANNOTATE> )
{
- if (m/^([a-zA-Z0-9]{40})\t\([^\)]*\)(.*)$/i)
+ if (m/^([a-zA-Z0-9]{$state->{hexsz}})\t\([^\)]*\)(.*)$/i)
{
my $commithash = $1;
my $data = $2;
@@ -2852,7 +2855,7 @@ sub transmitfile
return;
}
- die "Need filehash" unless ( defined ( $filehash ) and $filehash =~ /^[a-zA-Z0-9]{40}$/ );
+ die "Need filehash" unless ( defined ( $filehash ) and $filehash =~ /^[a-zA-Z0-9]{$state->{hexsz}}$/ );
my $type = safe_pipe_capture('git', 'cat-file', '-t', $filehash);
chomp $type;
@@ -3042,7 +3045,7 @@ sub ensureWorkTree
my $ver = safe_pipe_capture('git', 'show-ref', '-s', "refs/heads/$state->{module}");
chomp $ver;
- if ($ver !~ /^[0-9a-f]{40}$/)
+ if ($ver !~ /^[0-9a-f]{$state->{hexsz}}$/)
{
$log->warn("Error from git show-ref -s refs/head$state->{module}");
print "error 1 cannot find the current HEAD of module";
@@ -3281,7 +3284,7 @@ sub open_blob_or_die
}
elsif( $srcType eq "sha1" )
{
- unless ( defined ( $name ) and $name =~ /^[a-zA-Z0-9]{40}$/ )
+ unless ( defined ( $name ) and $name =~ /^[a-zA-Z0-9]{$state->{hexsz}}$/ )
{
$log->warn("Need filehash");
die "Need filehash\n";
@@ -3817,7 +3820,7 @@ sub update
chomp $commitsha1;
my $commitinfo = ::safe_pipe_capture('git', 'cat-file', 'commit', $self->{module});
- unless ( $commitinfo =~ /tree\s+[a-zA-Z0-9]{40}/ )
+ unless ( $commitinfo =~ /tree\s+[a-zA-Z0-9]{$state->{hexsz}}/ )
{
die("Invalid module '$self->{module}'");
}
@@ -3957,7 +3960,7 @@ sub update
while ( <FILELIST> )
{
chomp;
- unless ( /^:\d{6}\s+([0-7]{6})\s+[a-f0-9]{40}\s+([a-f0-9]{40})\s+(\w)$/o )
+ unless ( /^:\d{6}\s+([0-7]{6})\s+[a-f0-9]{$state->{hexsz}}\s+([a-f0-9]{$state->{hexsz}})\s+(\w)$/o )
{
die("Couldn't process git-diff-tree line : $_");
}
@@ -4625,11 +4628,11 @@ sub getmeta
$db_query->execute($filename, $intRev);
$meta = $db_query->fetchrow_hashref;
}
- elsif ( $revision =~ /^2\.1\.1\.2000(\.[1-3][0-9][0-9]){20}$/ )
+ elsif ( $revision =~ /^2\.1\.1\.2000(\.[1-3][0-9][0-9]){$state->{rawsz}}$/ )
{
my ($commitHash)=($revision=~/^2\.1\.1\.2000(.*)$/);
$commitHash=~s/\.([0-9]+)/sprintf("%02x",$1-100)/eg;
- if($commitHash=~/^[0-9a-f]{40}$/)
+ if($commitHash=~/^[0-9a-f]{$state->{hexsz}}$/)
{
return $self->getMetaFromCommithash($filename,$commitHash);
}
@@ -4639,7 +4642,7 @@ sub getmeta
$log->warning("failed get $revision with commithash=$commitHash");
undef $revision;
}
- elsif ( $revision =~ /^[0-9a-f]{40}$/ )
+ elsif ( $revision =~ /^[0-9a-f]{$state->{hexsz}}$/ )
{
# Try DB first. This is mostly only useful for req_annotate(),
# which only calls this for stuff that should already be in
@@ -4658,7 +4661,7 @@ sub getmeta
if(! $meta)
{
my($revCommit)=$self->lookupCommitRef($revision);
- if($revCommit=~/^[0-9a-f]{40}$/)
+ if($revCommit=~/^[0-9a-f]{$state->{hexsz}}$/)
{
return $self->getMetaFromCommithash($filename,$revCommit);
}
@@ -4672,7 +4675,7 @@ sub getmeta
else
{
my($revCommit)=$self->lookupCommitRef($revision);
- if($revCommit=~/^[0-9a-f]{40}$/)
+ if($revCommit=~/^[0-9a-f]{$state->{hexsz}}$/)
{
return $self->getMetaFromCommithash($filename,$revCommit);
}
@@ -4767,7 +4770,7 @@ sub getMetaFromCommithash
my($fileHash) = ::safe_pipe_capture("git","rev-parse","$revCommit:$filename");
chomp $fileHash;
- if(!($fileHash=~/^[0-9a-f]{40}$/))
+ if(!($fileHash=~/^[0-9a-f]{$state->{hexsz}}$/))
{
die "Invalid fileHash '$fileHash' looking up"
." '$revCommit:$filename'\n";
@@ -4863,7 +4866,7 @@ sub lookupCommitRef
$commitHash = ::safe_pipe_capture("git","rev-parse","--verify","--quiet",
$self->unescapeRefName($ref));
$commitHash=~s/\s*$//;
- if(!($commitHash=~/^[0-9a-f]{40}$/))
+ if(!($commitHash=~/^[0-9a-f]{$state->{hexsz}}$/))
{
$commitHash=undef;
}
@@ -4909,7 +4912,7 @@ sub commitmessage
my $commithash = shift;
my $tablename = $self->tablename("commitmsgs");
- die("Need commithash") unless ( defined($commithash) and $commithash =~ /^[a-zA-Z0-9]{40}$/ );
+ die("Need commithash") unless ( defined($commithash) and $commithash =~ /^[a-zA-Z0-9]{$state->{hexsz}}$/ );
my $db_query;
$db_query = $self->{dbh}->prepare_cached("SELECT value FROM $tablename WHERE key=?",{},1);
diff --git a/git-submodule.sh b/git-submodule.sh
index 39ebdf25b5..43eb6051d2 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -719,7 +719,7 @@ cmd_update()
# $@ = requested path
#
cmd_set_branch() {
- unset_branch=false
+ default=
branch=
while test $# -ne 0
@@ -729,7 +729,7 @@ cmd_set_branch() {
# we don't do anything with this but we need to accept it
;;
-d|--default)
- unset_branch=true
+ default=1
;;
-b|--branch)
case "$2" in '') usage ;; esac
@@ -750,33 +750,7 @@ cmd_set_branch() {
shift
done
- if test $# -ne 1
- then
- usage
- fi
-
- # we can't use `git submodule--helper name` here because internally, it
- # hashes the path so a trailing slash could lead to an unintentional no match
- name="$(git submodule--helper list "$1" | cut -f2)"
- if test -z "$name"
- then
- exit 1
- fi
-
- test -n "$branch"; has_branch=$?
- test "$unset_branch" = true; has_unset_branch=$?
-
- if test $((!$has_branch != !$has_unset_branch)) -eq 0
- then
- usage
- fi
-
- if test $has_branch -eq 0
- then
- git submodule--helper config submodule."$name".branch "$branch"
- else
- git submodule--helper config --unset submodule."$name".branch
- fi
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper set-branch ${GIT_QUIET:+--quiet} ${branch:+--branch "$branch"} ${default:+--default} -- "$@"
}
#
diff --git a/git-svn.perl b/git-svn.perl
index 4aa208ff5f..58f5a7ac8f 100755
--- a/git-svn.perl
+++ b/git-svn.perl
@@ -5,7 +5,8 @@ use 5.008;
use warnings;
use strict;
use vars qw/ $AUTHOR $VERSION
- $sha1 $sha1_short $_revision $_repository
+ $oid $oid_short $oid_length
+ $_revision $_repository
$_q $_authors $_authors_prog %users/;
$AUTHOR = 'Eric Wong <normalperson@yhbt.net>';
$VERSION = '@@GIT_VERSION@@';
@@ -103,8 +104,9 @@ sub _req_svn {
}
}
-$sha1 = qr/[a-f\d]{40}/;
-$sha1_short = qr/[a-f\d]{4,40}/;
+$oid = qr/(?:[a-f\d]{40}(?:[a-f\d]{24})?)/;
+$oid_short = qr/[a-f\d]{4,64}/;
+$oid_length = 40;
my ($_stdin, $_help, $_edit,
$_message, $_file, $_branch_dest,
$_template, $_shared,
@@ -498,6 +500,7 @@ sub do_git_init_db {
command_noisy('config', "$pfx.preserve-empty-dirs", 'true');
command_noisy('config', "$pfx.placeholder-filename", $$fname);
}
+ load_object_format();
}
sub init_subdir {
@@ -582,7 +585,7 @@ sub cmd_set_tree {
print "Reading from stdin...\n";
@commits = ();
while (<STDIN>) {
- if (/\b($sha1_short)\b/o) {
+ if (/\b($oid_short)\b/o) {
unshift @commits, $1;
}
}
@@ -1831,7 +1834,7 @@ sub get_tree_from_treeish {
if ($type eq 'commit') {
$expected = (grep /^tree /, command(qw/cat-file commit/,
$treeish))[0];
- ($expected) = ($expected =~ /^tree ($sha1)$/o);
+ ($expected) = ($expected =~ /^tree ($oid)$/o);
die "Unable to get tree from $treeish\n" unless $expected;
} elsif ($type eq 'tree') {
$expected = $treeish;
@@ -1975,9 +1978,15 @@ sub read_git_config {
}
}
}
+ load_object_format();
delete @$opts{@config_only} if @config_only;
}
+sub load_object_format {
+ chomp(my $hash = `git config --get extensions.objectformat`);
+ $::oid_length = 64 if $hash eq 'sha256';
+}
+
sub extract_metadata {
my $id = shift or return (undef, undef, undef);
my ($url, $rev, $uuid) = ($id =~ /^\s*git-svn-id:\s+(.*)\@(\d+)
@@ -2006,10 +2015,10 @@ sub cmt_sha2rev_batch {
print $out $sha, "\n";
while (my $line = <$in>) {
- if ($first && $line =~ /^[[:xdigit:]]{40}\smissing$/) {
+ if ($first && $line =~ /^$::oid\smissing$/) {
last;
} elsif ($first &&
- $line =~ /^[[:xdigit:]]{40}\scommit\s(\d+)$/) {
+ $line =~ /^$::oid\scommit\s(\d+)$/) {
$first = 0;
$size = $1;
next;
@@ -2036,7 +2045,7 @@ sub working_head_info {
my $hash;
my %max;
while (<$fh>) {
- if ( m{^commit ($::sha1)$} ) {
+ if ( m{^commit ($::oid)$} ) {
unshift @$refs, $hash if $hash and $refs;
$hash = $1;
next;
diff --git a/git.c b/git.c
index a2d337eed7..2f021b97f3 100644
--- a/git.c
+++ b/git.c
@@ -574,7 +574,7 @@ static struct cmd_struct commands[] = {
{ "shortlog", cmd_shortlog, RUN_SETUP_GENTLY | USE_PAGER },
{ "show", cmd_show, RUN_SETUP },
{ "show-branch", cmd_show_branch, RUN_SETUP },
- { "show-index", cmd_show_index },
+ { "show-index", cmd_show_index, RUN_SETUP_GENTLY },
{ "show-ref", cmd_show_ref, RUN_SETUP },
{ "sparse-checkout", cmd_sparse_checkout, RUN_SETUP | NEED_WORK_TREE },
{ "stage", cmd_add, RUN_SETUP | NEED_WORK_TREE },
diff --git a/http-fetch.c b/http-fetch.c
index a32ac118d9..1df376e745 100644
--- a/http-fetch.c
+++ b/http-fetch.c
@@ -5,22 +5,90 @@
#include "walker.h"
static const char http_fetch_usage[] = "git http-fetch "
-"[-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url";
+"[-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin | --packfile=hash | commit-id] url";
-int cmd_main(int argc, const char **argv)
+static int fetch_using_walker(const char *raw_url, int get_verbosely,
+ int get_recover, int commits, char **commit_id,
+ const char **write_ref, int commits_on_stdin)
{
+ char *url = NULL;
struct walker *walker;
+ int rc;
+
+ str_end_url_with_slash(raw_url, &url);
+
+ http_init(NULL, url, 0);
+
+ walker = get_http_walker(url);
+ walker->get_verbosely = get_verbosely;
+ walker->get_recover = get_recover;
+ walker->get_progress = 0;
+
+ rc = walker_fetch(walker, commits, commit_id, write_ref, url);
+
+ if (commits_on_stdin)
+ walker_targets_free(commits, commit_id, write_ref);
+
+ if (walker->corrupt_object_found) {
+ fprintf(stderr,
+"Some loose object were found to be corrupt, but they might be just\n"
+"a false '404 Not Found' error message sent with incorrect HTTP\n"
+"status code. Suggest running 'git fsck'.\n");
+ }
+
+ walker_free(walker);
+ http_cleanup();
+ free(url);
+
+ return rc;
+}
+
+static void fetch_single_packfile(struct object_id *packfile_hash,
+ const char *url) {
+ struct http_pack_request *preq;
+ struct slot_results results;
+ int ret;
+
+ http_init(NULL, url, 0);
+
+ preq = new_direct_http_pack_request(packfile_hash->hash, xstrdup(url));
+ if (preq == NULL)
+ die("couldn't create http pack request");
+ preq->slot->results = &results;
+ preq->generate_keep = 1;
+
+ if (start_active_slot(preq->slot)) {
+ run_active_slot(preq->slot);
+ if (results.curl_result != CURLE_OK) {
+ die("Unable to get pack file %s\n%s", preq->url,
+ curl_errorstr);
+ }
+ } else {
+ die("Unable to start request");
+ }
+
+ if ((ret = finish_http_pack_request(preq)))
+ die("finish_http_pack_request gave result %d", ret);
+
+ release_http_pack_request(preq);
+ http_cleanup();
+}
+
+int cmd_main(int argc, const char **argv)
+{
int commits_on_stdin = 0;
int commits;
const char **write_ref = NULL;
char **commit_id;
- char *url = NULL;
int arg = 1;
- int rc = 0;
int get_verbosely = 0;
int get_recover = 0;
+ int packfile = 0;
+ struct object_id packfile_hash;
while (arg < argc && argv[arg][0] == '-') {
+ const char *p;
+
if (argv[arg][1] == 't') {
} else if (argv[arg][1] == 'c') {
} else if (argv[arg][1] == 'a') {
@@ -35,46 +103,34 @@ int cmd_main(int argc, const char **argv)
get_recover = 1;
} else if (!strcmp(argv[arg], "--stdin")) {
commits_on_stdin = 1;
+ } else if (skip_prefix(argv[arg], "--packfile=", &p)) {
+ const char *end;
+
+ packfile = 1;
+ if (parse_oid_hex(p, &packfile_hash, &end) || *end)
+ die(_("argument to --packfile must be a valid hash (got '%s')"), p);
}
arg++;
}
- if (argc != arg + 2 - commits_on_stdin)
+ if (argc != arg + 2 - (commits_on_stdin || packfile))
usage(http_fetch_usage);
- if (commits_on_stdin) {
- commits = walker_targets_stdin(&commit_id, &write_ref);
- } else {
- commit_id = (char **) &argv[arg++];
- commits = 1;
- }
-
- if (argv[arg])
- str_end_url_with_slash(argv[arg], &url);
setup_git_directory();
git_config(git_default_config, NULL);
- http_init(NULL, url, 0);
- walker = get_http_walker(url);
- walker->get_verbosely = get_verbosely;
- walker->get_recover = get_recover;
-
- rc = walker_fetch(walker, commits, commit_id, write_ref, url);
-
- if (commits_on_stdin)
- walker_targets_free(commits, commit_id, write_ref);
-
- if (walker->corrupt_object_found) {
- fprintf(stderr,
-"Some loose object were found to be corrupt, but they might be just\n"
-"a false '404 Not Found' error message sent with incorrect HTTP\n"
-"status code. Suggest running 'git fsck'.\n");
+ if (packfile) {
+ fetch_single_packfile(&packfile_hash, argv[arg]);
+ return 0;
}
- walker_free(walker);
- http_cleanup();
-
- free(url);
-
- return rc;
+ if (commits_on_stdin) {
+ commits = walker_targets_stdin(&commit_id, &write_ref);
+ } else {
+ commit_id = (char **) &argv[arg++];
+ commits = 1;
+ }
+ return fetch_using_walker(argv[arg], get_verbosely, get_recover,
+ commits, commit_id, write_ref,
+ commits_on_stdin);
}
diff --git a/http-push.c b/http-push.c
index 822f326599..1ff1883cdd 100644
--- a/http-push.c
+++ b/http-push.c
@@ -70,10 +70,10 @@ enum XML_Status {
#define LOCK_REFRESH 30
/* Remember to update object flag allocation in object.h */
-#define LOCAL (1u<<16)
-#define REMOTE (1u<<17)
-#define FETCHING (1u<<18)
-#define PUSHING (1u<<19)
+#define LOCAL (1u<<11)
+#define REMOTE (1u<<12)
+#define FETCHING (1u<<13)
+#define PUSHING (1u<<14)
/* We allow "recursive" symbolic refs. Only within reason, though */
#define MAXDEPTH 5
@@ -117,6 +117,7 @@ enum transfer_state {
struct transfer_request {
struct object *obj;
+ struct packed_git *target;
char *url;
char *dest;
struct remote_lock *lock;
@@ -314,17 +315,18 @@ static void start_fetch_packed(struct transfer_request *request)
release_request(request);
return;
}
+ close_pack_index(target);
+ request->target = target;
fprintf(stderr, "Fetching pack %s\n",
hash_to_hex(target->hash));
fprintf(stderr, " which contains %s\n", oid_to_hex(&request->obj->oid));
- preq = new_http_pack_request(target, repo->url);
+ preq = new_http_pack_request(target->hash, repo->url);
if (preq == NULL) {
repo->can_update_info_refs = 0;
return;
}
- preq->lst = &repo->packs;
/* Make sure there isn't another open request for this pack */
while (check_request) {
@@ -597,6 +599,8 @@ static void finish_request(struct transfer_request *request)
}
if (fail)
repo->can_update_info_refs = 0;
+ else
+ http_install_packfile(request->target, &repo->packs);
release_request(request);
}
}
diff --git a/http-walker.c b/http-walker.c
index fe15e325fa..4fb1235cd4 100644
--- a/http-walker.c
+++ b/http-walker.c
@@ -439,6 +439,7 @@ static int http_fetch_pack(struct walker *walker, struct alt_base *repo, unsigne
target = find_sha1_pack(sha1, repo->packs);
if (!target)
return -1;
+ close_pack_index(target);
if (walker->get_verbosely) {
fprintf(stderr, "Getting pack %s\n",
@@ -447,10 +448,9 @@ static int http_fetch_pack(struct walker *walker, struct alt_base *repo, unsigne
hash_to_hex(sha1));
}
- preq = new_http_pack_request(target, repo->base);
+ preq = new_http_pack_request(target->hash, repo->base);
if (preq == NULL)
goto abort;
- preq->lst = &repo->packs;
preq->slot->results = &results;
if (start_active_slot(preq->slot)) {
@@ -469,6 +469,7 @@ static int http_fetch_pack(struct walker *walker, struct alt_base *repo, unsigne
release_http_pack_request(preq);
if (ret)
return ret;
+ http_install_packfile(target, &repo->packs);
return 0;
diff --git a/http.c b/http.c
index 4882c9f5b2..3b12843a5b 100644
--- a/http.c
+++ b/http.c
@@ -18,7 +18,7 @@
static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
static int trace_curl_data = 1;
-static struct string_list cookies_to_redact = STRING_LIST_INIT_DUP;
+static int trace_curl_redact = 1;
#if LIBCURL_VERSION_NUM >= 0x070a08
long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
#else
@@ -642,8 +642,9 @@ static void redact_sensitive_header(struct strbuf *header)
{
const char *sensitive_header;
- if (skip_prefix(header->buf, "Authorization:", &sensitive_header) ||
- skip_prefix(header->buf, "Proxy-Authorization:", &sensitive_header)) {
+ if (trace_curl_redact &&
+ (skip_prefix(header->buf, "Authorization:", &sensitive_header) ||
+ skip_prefix(header->buf, "Proxy-Authorization:", &sensitive_header))) {
/* The first token is the type, which is OK to log */
while (isspace(*sensitive_header))
sensitive_header++;
@@ -652,20 +653,15 @@ static void redact_sensitive_header(struct strbuf *header)
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
- } else if (cookies_to_redact.nr &&
+ } else if (trace_curl_redact &&
skip_prefix(header->buf, "Cookie:", &sensitive_header)) {
struct strbuf redacted_header = STRBUF_INIT;
- char *cookie;
+ const char *cookie;
while (isspace(*sensitive_header))
sensitive_header++;
- /*
- * The contents of header starting from sensitive_header will
- * subsequently be overridden, so it is fine to mutate this
- * string (hence the assignment to "char *").
- */
- cookie = (char *) sensitive_header;
+ cookie = sensitive_header;
while (cookie) {
char *equals;
@@ -678,14 +674,8 @@ static void redact_sensitive_header(struct strbuf *header)
strbuf_addstr(&redacted_header, cookie);
continue;
}
- *equals = 0; /* temporarily set to NUL for lookup */
- if (string_list_lookup(&cookies_to_redact, cookie)) {
- strbuf_addstr(&redacted_header, cookie);
- strbuf_addstr(&redacted_header, "=<redacted>");
- } else {
- *equals = '=';
- strbuf_addstr(&redacted_header, cookie);
- }
+ strbuf_add(&redacted_header, cookie, equals - cookie);
+ strbuf_addstr(&redacted_header, "=<redacted>");
if (semicolon) {
/*
* There are more cookies. (Or, for some
@@ -1003,11 +993,8 @@ static CURL *get_curl_handle(void)
setup_curl_trace(result);
if (getenv("GIT_TRACE_CURL_NO_DATA"))
trace_curl_data = 0;
- if (getenv("GIT_REDACT_COOKIES")) {
- string_list_split(&cookies_to_redact,
- getenv("GIT_REDACT_COOKIES"), ',', -1);
- string_list_sort(&cookies_to_redact);
- }
+ if (!git_env_bool("GIT_TRACE_REDACT", 1))
+ trace_curl_redact = 0;
curl_easy_setopt(result, CURLOPT_USERAGENT,
user_agent ? user_agent : git_user_agent());
@@ -2274,70 +2261,74 @@ void release_http_pack_request(struct http_pack_request *preq)
int finish_http_pack_request(struct http_pack_request *preq)
{
- struct packed_git **lst;
- struct packed_git *p = preq->target;
- char *tmp_idx;
- size_t len;
struct child_process ip = CHILD_PROCESS_INIT;
-
- close_pack_index(p);
+ int tmpfile_fd;
+ int ret = 0;
fclose(preq->packfile);
preq->packfile = NULL;
- lst = preq->lst;
- while (*lst != p)
- lst = &((*lst)->next);
- *lst = (*lst)->next;
-
- if (!strip_suffix(preq->tmpfile.buf, ".pack.temp", &len))
- BUG("pack tmpfile does not end in .pack.temp?");
- tmp_idx = xstrfmt("%.*s.idx.temp", (int)len, preq->tmpfile.buf);
+ tmpfile_fd = xopen(preq->tmpfile.buf, O_RDONLY);
argv_array_push(&ip.args, "index-pack");
- argv_array_pushl(&ip.args, "-o", tmp_idx, NULL);
- argv_array_push(&ip.args, preq->tmpfile.buf);
+ argv_array_push(&ip.args, "--stdin");
ip.git_cmd = 1;
- ip.no_stdin = 1;
- ip.no_stdout = 1;
+ ip.in = tmpfile_fd;
+ if (preq->generate_keep) {
+ argv_array_pushf(&ip.args, "--keep=git %"PRIuMAX,
+ (uintmax_t)getpid());
+ ip.out = 0;
+ } else {
+ ip.no_stdout = 1;
+ }
if (run_command(&ip)) {
- unlink(preq->tmpfile.buf);
- unlink(tmp_idx);
- free(tmp_idx);
- return -1;
+ ret = -1;
+ goto cleanup;
}
- unlink(sha1_pack_index_name(p->hash));
+cleanup:
+ close(tmpfile_fd);
+ unlink(preq->tmpfile.buf);
+ return ret;
+}
+
+void http_install_packfile(struct packed_git *p,
+ struct packed_git **list_to_remove_from)
+{
+ struct packed_git **lst = list_to_remove_from;
- if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->hash))
- || finalize_object_file(tmp_idx, sha1_pack_index_name(p->hash))) {
- free(tmp_idx);
- return -1;
- }
+ while (*lst != p)
+ lst = &((*lst)->next);
+ *lst = (*lst)->next;
install_packed_git(the_repository, p);
- free(tmp_idx);
- return 0;
}
struct http_pack_request *new_http_pack_request(
- struct packed_git *target, const char *base_url)
+ const unsigned char *packed_git_hash, const char *base_url) {
+
+ struct strbuf buf = STRBUF_INIT;
+
+ end_url_with_slash(&buf, base_url);
+ strbuf_addf(&buf, "objects/pack/pack-%s.pack",
+ hash_to_hex(packed_git_hash));
+ return new_direct_http_pack_request(packed_git_hash,
+ strbuf_detach(&buf, NULL));
+}
+
+struct http_pack_request *new_direct_http_pack_request(
+ const unsigned char *packed_git_hash, char *url)
{
off_t prev_posn = 0;
- struct strbuf buf = STRBUF_INIT;
struct http_pack_request *preq;
preq = xcalloc(1, sizeof(*preq));
strbuf_init(&preq->tmpfile, 0);
- preq->target = target;
- end_url_with_slash(&buf, base_url);
- strbuf_addf(&buf, "objects/pack/pack-%s.pack",
- hash_to_hex(target->hash));
- preq->url = strbuf_detach(&buf, NULL);
+ preq->url = url;
- strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->hash));
+ strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(packed_git_hash));
preq->packfile = fopen(preq->tmpfile.buf, "a");
if (!preq->packfile) {
error("Unable to open local file %s for pack",
@@ -2361,7 +2352,7 @@ struct http_pack_request *new_http_pack_request(
if (http_is_verbose)
fprintf(stderr,
"Resuming fetch of pack %s at byte %"PRIuMAX"\n",
- hash_to_hex(target->hash),
+ hash_to_hex(packed_git_hash),
(uintmax_t)prev_posn);
http_opt_request_remainder(preq->slot->curl, prev_posn);
}
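
Note on the tracing change above: GIT_REDACT_COOKIES used to name individual cookies to hide, while GIT_TRACE_REDACT (on by default) now drops every cookie value after the '=' wholesale, so only the cookie names survive in the curl trace. A standalone sketch of that technique, illustrative only and not git code (the real implementation works on a strbuf):

    #include <stdio.h>
    #include <string.h>

    /* Print a "Cookie:" header with every value replaced by <redacted>. */
    static void print_redacted_cookies(const char *values)
    {
        const char *p = values;
        int first = 1;

        printf("Cookie: ");
        while (*p) {
            const char *equals = strchr(p, '=');
            const char *semi = strchr(p, ';');
            const char *end = semi ? semi : p + strlen(p);

            if (!first)
                printf("; ");
            first = 0;
            if (!equals || equals > end)
                /* no '=' in this cookie: keep it verbatim */
                printf("%.*s", (int)(end - p), p);
            else
                /* keep the name, drop the value */
                printf("%.*s=<redacted>", (int)(equals - p), p);
            p = *end ? end + 1 : end;
            while (*p == ' ')
                p++;
        }
        printf("\n");
    }

    int main(void)
    {
        /* prints: Cookie: session=<redacted>; theme=<redacted> */
        print_redacted_cookies("session=1234abcd; theme=dark");
        return 0;
    }
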
diff --git a/http.h b/http.h
index faf8cbb0d1..5de792ef3f 100644
--- a/http.h
+++ b/http.h
@@ -216,18 +216,36 @@ int http_get_info_packs(const char *base_url,
struct http_pack_request {
char *url;
- struct packed_git *target;
- struct packed_git **lst;
+
+ /*
+ * If this is true, finish_http_pack_request() will pass "--keep" to
+ * index-pack, resulting in the creation of a keep file, and will not
+ * suppress its stdout (that is, the "keep\t<hash>\n" line will be
+ * printed to stdout).
+ */
+ unsigned generate_keep : 1;
+
FILE *packfile;
struct strbuf tmpfile;
struct active_request_slot *slot;
};
struct http_pack_request *new_http_pack_request(
- struct packed_git *target, const char *base_url);
+ const unsigned char *packed_git_hash, const char *base_url);
+struct http_pack_request *new_direct_http_pack_request(
+ const unsigned char *packed_git_hash, char *url);
int finish_http_pack_request(struct http_pack_request *preq);
void release_http_pack_request(struct http_pack_request *preq);
+/*
+ * Remove p from the given list, and invoke install_packed_git() on it.
+ *
+ * This is a convenience function for users that have obtained a list of packs
+ * from http_get_info_packs() and have chosen a specific pack to fetch.
+ */
+void http_install_packfile(struct packed_git *p,
+ struct packed_git **list_to_remove_from);
+
/* Helpers for fetching object */
struct http_object_request {
char *url;
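
Taken together, the http.c/http.h changes above split the old finish_http_pack_request() into three steps: build the request from a pack hash, run "index-pack --stdin" over the downloaded file, and only then move the pack out of the caller's list and into the object store via http_install_packfile(). A sketch of the resulting caller sequence, condensed from http-walker.c above (assumes git's internal headers such as cache.h and http.h; error reporting trimmed):

    /*
     * Sketch only: fetch one pack by hash over dumb HTTP and install it.
     * "packs" is the list obtained from http_get_info_packs(), as in
     * http-walker.c above.
     */
    static int fetch_one_pack(struct packed_git **packs, const char *base_url,
                              const unsigned char *sha1)
    {
        struct packed_git *target;
        struct http_pack_request *preq;
        struct slot_results results;
        int ret = -1;

        target = find_sha1_pack(sha1, *packs);
        if (!target)
            return -1;
        close_pack_index(target);   /* index-pack will regenerate the .idx */

        preq = new_http_pack_request(target->hash, base_url);
        if (!preq)
            return -1;
        preq->slot->results = &results;

        if (start_active_slot(preq->slot)) {
            run_active_slot(preq->slot);
            if (results.curl_result == CURLE_OK)
                ret = finish_http_pack_request(preq);
        }
        release_http_pack_request(preq);
        if (ret)
            return ret;

        /* success: splice the pack out of the list and install it */
        http_install_packfile(target, packs);
        return 0;
    }
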
diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c
index 256bcfbdfe..3553ad7b0a 100644
--- a/list-objects-filter-options.c
+++ b/list-objects-filter-options.c
@@ -326,7 +326,8 @@ void partial_clone_register(
/* Check if it is already registered */
if (!promisor_remote_find(remote)) {
- git_config_set("core.repositoryformatversion", "1");
+ if (upgrade_repository_format(1) < 0)
+ die(_("unable to upgrade repository format to support partial clone"));
/* Add promisor config for the remote */
cfg_name = xstrfmt("remote.%s.promisor", remote);
diff --git a/object-store.h b/object-store.h
index d1e490f203..f439d47af8 100644
--- a/object-store.h
+++ b/object-store.h
@@ -70,6 +70,7 @@ struct packed_git {
size_t index_size;
uint32_t num_objects;
uint32_t num_bad_objects;
+ uint32_t crc_offset;
unsigned char *bad_object_sha1;
int index_version;
time_t mtime;
diff --git a/object.c b/object.c
index 794c86650e..3257518656 100644
--- a/object.c
+++ b/object.c
@@ -157,13 +157,13 @@ void *create_object(struct repository *r, const struct object_id *oid, void *o)
return obj;
}
-void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet)
+void *object_as_type(struct object *obj, enum object_type type, int quiet)
{
if (obj->type == type)
return obj;
else if (obj->type == OBJ_NONE) {
if (type == OBJ_COMMIT)
- init_commit_node(r, (struct commit *) obj);
+ init_commit_node((struct commit *) obj);
else
obj->type = type;
return obj;
diff --git a/object.h b/object.h
index b22328b838..38dc2d5a6c 100644
--- a/object.h
+++ b/object.h
@@ -15,7 +15,6 @@ struct parsed_object_pool {
struct alloc_state *commit_state;
struct alloc_state *tag_state;
struct alloc_state *object_state;
- unsigned commit_count;
/* parent substitutions from .git/info/grafts and .git/shallow */
struct commit_graft **grafts;
@@ -59,7 +58,7 @@ struct object_array {
/*
* object flag allocation:
- * revision.h: 0---------10 15 25----28
+ * revision.h: 0---------10 15 23------26
* fetch-pack.c: 01
* negotiator/default.c: 2--5
* walker.c: 0-2
@@ -67,7 +66,7 @@ struct object_array {
* builtin/blame.c: 12-13
* bisect.c: 16
* bundle.c: 16
- * http-push.c: 16-----19
+ * http-push.c: 11-----14
* commit-graph.c: 15
* commit-reach.c: 16-----19
* sha1-name.c: 20
@@ -79,7 +78,7 @@ struct object_array {
* builtin/show-branch.c: 0-------------------------------------------26
* builtin/unpack-objects.c: 2021
*/
-#define FLAG_BITS 29
+#define FLAG_BITS 28
/*
* The object type is stored in 3 bits.
@@ -121,7 +120,7 @@ struct object *lookup_object(struct repository *r, const struct object_id *oid);
void *create_object(struct repository *r, const struct object_id *oid, void *obj);
-void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet);
+void *object_as_type(struct object *obj, enum object_type type, int quiet);
/*
* Returns the object, having parsed it to find out what it is.
diff --git a/packfile.c b/packfile.c
index f4e752996d..6ab5233613 100644
--- a/packfile.c
+++ b/packfile.c
@@ -178,6 +178,7 @@ int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
*/
(sizeof(off_t) <= 4))
return error("pack too large for current definition of off_t in %s", path);
+ p->crc_offset = 8 + 4 * 256 + nr * hashsz;
}
p->index_version = version;
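
The new crc_offset field records where the CRC32 table begins inside a version-2 pack index: after the 8-byte header (the "\377tOc" magic plus the version word), the 256-entry fan-out of 4-byte counts, and one hash per object, which is exactly the "8 + 4 * 256 + nr * hashsz" above. A worked example (illustrative, not git code):

    #include <stdint.h>
    #include <stdio.h>

    /* Byte offset of the CRC32 table in a v2 .idx, mirroring load_idx() above. */
    static uint64_t idx_v2_crc_offset(uint32_t nr_objects, uint32_t hashsz)
    {
        return 8                                /* magic + version     */
             + 4 * 256                          /* fan-out table       */
             + (uint64_t)nr_objects * hashsz;   /* sorted object names */
    }

    int main(void)
    {
        /* 1000 SHA-1 objects: 8 + 1024 + 20000 = 21032 */
        printf("%llu\n", (unsigned long long)idx_v2_crc_offset(1000, 20));
        return 0;
    }
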
diff --git a/perl/Git/IndexInfo.pm b/perl/Git/IndexInfo.pm
index a43108c985..2a7b4908f3 100644
--- a/perl/Git/IndexInfo.pm
+++ b/perl/Git/IndexInfo.pm
@@ -5,13 +5,15 @@ use Git qw/command_input_pipe command_close_pipe/;
sub new {
my ($class) = @_;
+ my $hash_algo = Git::config('extensions.objectformat') || 'sha1';
my ($gui, $ctx) = command_input_pipe(qw/update-index -z --index-info/);
- bless { gui => $gui, ctx => $ctx, nr => 0}, $class;
+ bless { gui => $gui, ctx => $ctx, nr => 0, hash_algo => $hash_algo}, $class;
}
sub remove {
my ($self, $path) = @_;
- if (print { $self->{gui} } '0 ', 0 x 40, "\t", $path, "\0") {
+ my $length = $self->{hash_algo} eq 'sha256' ? 64 : 40;
+ if (print { $self->{gui} } '0 ', 0 x $length, "\t", $path, "\0") {
return ++$self->{nr};
}
undef;
diff --git a/perl/Git/SVN.pm b/perl/Git/SVN.pm
index 4b28b87784..d1c352f92b 100644
--- a/perl/Git/SVN.pm
+++ b/perl/Git/SVN.pm
@@ -2,7 +2,7 @@ package Git::SVN;
use strict;
use warnings;
use Fcntl qw/:DEFAULT :seek/;
-use constant rev_map_fmt => 'NH40';
+use constant rev_map_fmt => 'NH*';
use vars qw/$_no_metadata
$_repack $_repack_flags $_use_svm_props $_head
$_use_svnsync_props $no_reuse_existing
@@ -874,7 +874,7 @@ sub assert_index_clean {
command_noisy('read-tree', $treeish) unless -e $self->{index};
my $x = command_oneline('write-tree');
my ($y) = (command(qw/cat-file commit/, $treeish) =~
- /^tree ($::sha1)/mo);
+ /^tree ($::oid)/mo);
return if $y eq $x;
warn "Index mismatch: $y != $x\nrereading $treeish\n";
@@ -1020,7 +1020,7 @@ sub do_git_commit {
$tree = $self->tmp_index_do(sub {
command_oneline('write-tree') });
}
- die "Tree is not a valid sha1: $tree\n" if $tree !~ /^$::sha1$/o;
+ die "Tree is not a valid oid $tree\n" if $tree !~ /^$::oid$/o;
my @exec = ('git', 'commit-tree', $tree);
foreach ($self->get_commit_parents($log_entry)) {
@@ -1048,8 +1048,8 @@ sub do_git_commit {
close $out_fh or croak $!;
waitpid $pid, 0;
croak $? if $?;
- if ($commit !~ /^$::sha1$/o) {
- die "Failed to commit, invalid sha1: $commit\n";
+ if ($commit !~ /^$::oid$/o) {
+ die "Failed to commit, invalid oid: $commit\n";
}
$self->rev_map_set($log_entry->{revision}, $commit, 1);
@@ -2087,10 +2087,10 @@ sub rebuild_from_rev_db {
open my $fh, '<', $path or croak "open: $!";
binmode $fh or croak "binmode: $!";
while (<$fh>) {
- length($_) == 41 or croak "inconsistent size in ($_) != 41";
+ length($_) == $::oid_length + 1 or croak "inconsistent size in ($_)";
chomp($_);
++$r;
- next if $_ eq ('0' x 40);
+ next if $_ eq ('0' x $::oid_length);
$self->rev_map_set($r, $_);
print "r$r = $_\n";
}
@@ -2150,7 +2150,7 @@ sub rebuild {
my $svn_uuid = $self->rewrite_uuid || $self->ra_uuid;
my $c;
while (<$log>) {
- if ( m{^commit ($::sha1)$} ) {
+ if ( m{^commit ($::oid)$} ) {
$c = $1;
next;
}
@@ -2196,9 +2196,9 @@ sub rebuild {
# (mainly tags)
#
# The format is this:
-# - 24 bytes for every record,
+# - 24 or 36 bytes for every record,
# * 4 bytes for the integer representing an SVN revision number
-# * 20 bytes representing the sha1 of a git commit
+# * 20 or 32 bytes representing the oid of a git commit
# - No empty padding records like the old format
# (except the last record, which can be overwritten)
# - new records are written append-only since SVN revision numbers
@@ -2207,7 +2207,7 @@ sub rebuild {
# - Piping the file to xxd -c24 is a good way of dumping it for
# viewing or editing (piped back through xxd -r), should the need
# ever arise.
-# - The last record can be padding revision with an all-zero sha1
+# - The last record can be padding revision with an all-zero oid
# This is used to optimize fetch performance when using multiple
# "fetch" directives in .git/config
#
@@ -2215,38 +2215,39 @@ sub rebuild {
sub _rev_map_set {
my ($fh, $rev, $commit) = @_;
+ my $record_size = ($::oid_length / 2) + 4;
binmode $fh or croak "binmode: $!";
my $size = (stat($fh))[7];
- ($size % 24) == 0 or croak "inconsistent size: $size";
+ ($size % $record_size) == 0 or croak "inconsistent size: $size";
my $wr_offset = 0;
if ($size > 0) {
- sysseek($fh, -24, SEEK_END) or croak "seek: $!";
- my $read = sysread($fh, my $buf, 24) or croak "read: $!";
- $read == 24 or croak "read only $read bytes (!= 24)";
+ sysseek($fh, -$record_size, SEEK_END) or croak "seek: $!";
+ my $read = sysread($fh, my $buf, $record_size) or croak "read: $!";
+ $read == $record_size or croak "read only $read bytes (!= $record_size)";
my ($last_rev, $last_commit) = unpack(rev_map_fmt, $buf);
- if ($last_commit eq ('0' x40)) {
- if ($size >= 48) {
- sysseek($fh, -48, SEEK_END) or croak "seek: $!";
- $read = sysread($fh, $buf, 24) or
+ if ($last_commit eq ('0' x $::oid_length)) {
+ if ($size >= ($record_size * 2)) {
+ sysseek($fh, -($record_size * 2), SEEK_END) or croak "seek: $!";
+ $read = sysread($fh, $buf, $record_size) or
croak "read: $!";
- $read == 24 or
- croak "read only $read bytes (!= 24)";
+ $read == $record_size or
+ croak "read only $read bytes (!= $record_size)";
($last_rev, $last_commit) =
unpack(rev_map_fmt, $buf);
- if ($last_commit eq ('0' x40)) {
+ if ($last_commit eq ('0' x $::oid_length)) {
croak "inconsistent .rev_map\n";
}
}
if ($last_rev >= $rev) {
croak "last_rev is higher!: $last_rev >= $rev";
}
- $wr_offset = -24;
+ $wr_offset = -$record_size;
}
}
sysseek($fh, $wr_offset, SEEK_END) or croak "seek: $!";
- syswrite($fh, pack(rev_map_fmt, $rev, $commit), 24) == 24 or
+ syswrite($fh, pack(rev_map_fmt, $rev, $commit), $record_size) == $record_size or
croak "write: $!";
}
@@ -2271,7 +2272,7 @@ sub mkfile {
sub rev_map_set {
my ($self, $rev, $commit, $update_ref, $uuid) = @_;
defined $commit or die "missing arg3\n";
- length $commit == 40 or die "arg3 must be a full SHA1 hexsum\n";
+ $commit =~ /^$::oid$/ or die "arg3 must be a full hex object ID\n";
my $db = $self->map_path($uuid);
my $db_lock = "$db.lock";
my $sigmask;
@@ -2344,29 +2345,30 @@ sub rev_map_max {
sub rev_map_max_norebuild {
my ($self, $want_commit) = @_;
+ my $record_size = ($::oid_length / 2) + 4;
my $map_path = $self->map_path;
stat $map_path or return $want_commit ? (0, undef) : 0;
sysopen(my $fh, $map_path, O_RDONLY) or croak "open: $!";
binmode $fh or croak "binmode: $!";
my $size = (stat($fh))[7];
- ($size % 24) == 0 or croak "inconsistent size: $size";
+ ($size % $record_size) == 0 or croak "inconsistent size: $size";
if ($size == 0) {
close $fh or croak "close: $!";
return $want_commit ? (0, undef) : 0;
}
- sysseek($fh, -24, SEEK_END) or croak "seek: $!";
- sysread($fh, my $buf, 24) == 24 or croak "read: $!";
+ sysseek($fh, -$record_size, SEEK_END) or croak "seek: $!";
+ sysread($fh, my $buf, $record_size) == $record_size or croak "read: $!";
my ($r, $c) = unpack(rev_map_fmt, $buf);
- if ($want_commit && $c eq ('0' x40)) {
- if ($size < 48) {
+ if ($want_commit && $c eq ('0' x $::oid_length)) {
+ if ($size < $record_size * 2) {
return $want_commit ? (0, undef) : 0;
}
- sysseek($fh, -48, SEEK_END) or croak "seek: $!";
- sysread($fh, $buf, 24) == 24 or croak "read: $!";
+ sysseek($fh, -($record_size * 2), SEEK_END) or croak "seek: $!";
+ sysread($fh, $buf, $record_size) == $record_size or croak "read: $!";
($r, $c) = unpack(rev_map_fmt, $buf);
- if ($c eq ('0'x40)) {
+ if ($c eq ('0' x $::oid_length)) {
croak "Penultimate record is all-zeroes in $map_path";
}
}
@@ -2387,30 +2389,31 @@ sub rev_map_get {
sub _rev_map_get {
my ($fh, $rev) = @_;
+ my $record_size = ($::oid_length / 2) + 4;
binmode $fh or croak "binmode: $!";
my $size = (stat($fh))[7];
- ($size % 24) == 0 or croak "inconsistent size: $size";
+ ($size % $record_size) == 0 or croak "inconsistent size: $size";
if ($size == 0) {
return undef;
}
- my ($l, $u) = (0, $size - 24);
+ my ($l, $u) = (0, $size - $record_size);
my ($r, $c, $buf);
while ($l <= $u) {
- my $i = int(($l/24 + $u/24) / 2) * 24;
+ my $i = int(($l/$record_size + $u/$record_size) / 2) * $record_size;
sysseek($fh, $i, SEEK_SET) or croak "seek: $!";
- sysread($fh, my $buf, 24) == 24 or croak "read: $!";
+ sysread($fh, my $buf, $record_size) == $record_size or croak "read: $!";
my ($r, $c) = unpack(rev_map_fmt, $buf);
if ($r < $rev) {
- $l = $i + 24;
+ $l = $i + $record_size;
} elsif ($r > $rev) {
- $u = $i - 24;
+ $u = $i - $record_size;
} else { # $r == $rev
- return $c eq ('0' x 40) ? undef : $c;
+ return $c eq ('0' x $::oid_length) ? undef : $c;
}
}
undef;
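
As the updated comment block explains, .rev_map is an append-only array of fixed-size records sorted by revision: a 4-byte big-endian SVN revision number followed by the raw commit oid, so 24 bytes per record for SHA-1 and 36 for SHA-256, with an all-zero oid marking a padding entry. _rev_map_get() binary-searches that array; the same lookup, sketched in C for concreteness (hypothetical standalone reader, not part of git-svn):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Look up the raw oid recorded for "rev" in an in-memory .rev_map image.
     * Returns NULL if the revision is absent; a caller should also treat an
     * all-zero oid as "padding only", as the Perl code above does.
     */
    static const unsigned char *rev_map_lookup(const unsigned char *map,
                                               size_t map_len, size_t oid_len,
                                               uint32_t rev)
    {
        size_t record_size = 4 + oid_len;   /* 24 for SHA-1, 36 for SHA-256 */
        size_t lo = 0, hi = map_len / record_size;

        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            const unsigned char *rec = map + mid * record_size;
            uint32_t r = ((uint32_t)rec[0] << 24) | ((uint32_t)rec[1] << 16) |
                         ((uint32_t)rec[2] << 8) | (uint32_t)rec[3];

            if (r < rev)
                lo = mid + 1;
            else if (r > rev)
                hi = mid;
            else
                return rec + 4;             /* the raw oid bytes */
        }
        return NULL;
    }
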
diff --git a/perl/Git/SVN/Editor.pm b/perl/Git/SVN/Editor.pm
index 0df16ed726..c961444d4c 100644
--- a/perl/Git/SVN/Editor.pm
+++ b/perl/Git/SVN/Editor.pm
@@ -63,7 +63,7 @@ sub generate_diff {
my @mods;
while (defined($_ = get_record($diff_fh, "\0"))) {
if ($state eq 'meta' && /^:(\d{6})\s(\d{6})\s
- ($::sha1)\s($::sha1)\s
+ ($::oid)\s($::oid)\s
([MTCRAD])\d*$/xo) {
push @mods, { mode_a => $1, mode_b => $2,
sha1_a => $3, sha1_b => $4,
@@ -400,12 +400,12 @@ sub T {
($m->{mode_b} !~ /^120/ && $m->{mode_a} =~ /^120/)) {
$self->D({
mode_a => $m->{mode_a}, mode_b => '000000',
- sha1_a => $m->{sha1_a}, sha1_b => '0' x 40,
+ sha1_a => $m->{sha1_a}, sha1_b => '0' x $::oid_length,
chg => 'D', file_b => $m->{file_b}
}, $deletions);
$self->A({
mode_a => '000000', mode_b => $m->{mode_b},
- sha1_a => '0' x 40, sha1_b => $m->{sha1_b},
+ sha1_a => '0' x $::oid_length, sha1_b => $m->{sha1_b},
chg => 'A', file_b => $m->{file_b}
}, $deletions);
return;
@@ -434,7 +434,7 @@ sub _chg_file_get_blob ($$$$) {
$self->change_file_prop($fbat,'svn:special',undef);
}
my $blob = $m->{"sha1_$which"};
- return ($fh,) if ($blob =~ /^0{40}$/);
+ return ($fh,) if ($blob =~ /^0+$/);
my $size = $::_repository->cat_blob($blob, $fh);
croak "Failed to read object $blob" if ($size < 0);
$fh->flush == 0 or croak $!;
diff --git a/perl/Git/SVN/Fetcher.pm b/perl/Git/SVN/Fetcher.pm
index 64e900a0e9..729e5337df 100644
--- a/perl/Git/SVN/Fetcher.pm
+++ b/perl/Git/SVN/Fetcher.pm
@@ -173,7 +173,7 @@ sub delete_entry {
# remove entire directories.
my ($tree) = (command('ls-tree', '-z', $self->{c}, "./$gpath")
- =~ /\A040000 tree ([a-f\d]{40})\t\Q$gpath\E\0/);
+ =~ /\A040000 tree ($::oid)\t\Q$gpath\E\0/);
if ($tree) {
my ($ls, $ctx) = command_output_pipe(qw/ls-tree
-r --name-only -z/,
@@ -203,7 +203,7 @@ sub open_file {
my $gpath = $self->git_path($path);
($mode, $blob) = (command('ls-tree', '-z', $self->{c}, "./$gpath")
- =~ /\A(\d{6}) blob ([a-f\d]{40})\t\Q$gpath\E\0/);
+ =~ /\A(\d{6}) blob ($::oid)\t\Q$gpath\E\0/);
unless (defined $mode && defined $blob) {
die "$path was not found in commit $self->{c} (r$rev)\n";
}
@@ -413,7 +413,7 @@ sub close_file {
$hash = $::_repository->hash_and_insert_object(
Git::temp_path($fh));
- $hash =~ /^[a-f\d]{40}$/ or die "not a sha1: $hash\n";
+ $hash =~ /^$::oid$/ or die "not an object ID: $hash\n";
Git::temp_release($fb->{base}, 1);
Git::temp_release($fh, 1);
diff --git a/perl/Git/SVN/Log.pm b/perl/Git/SVN/Log.pm
index 664105357c..3858fcf27d 100644
--- a/perl/Git/SVN/Log.pm
+++ b/perl/Git/SVN/Log.pm
@@ -285,7 +285,7 @@ sub cmd_show_log {
my (@k, $c, $d, $stat);
my $esc_color = qr/(?:\033\[(?:(?:\d+;)*\d*)?m)*/;
while (<$log>) {
- if (/^${esc_color}commit (?:- )?($::sha1_short)/o) {
+ if (/^${esc_color}commit (?:- )?($::oid_short)/o) {
my $cmt = $1;
if ($c && cmt_showable($c) && $c->{r} != $r_last) {
$r_last = $c->{r};
diff --git a/perl/Git/SVN/Ra.pm b/perl/Git/SVN/Ra.pm
index 56ad9870bc..2cfe055a9a 100644
--- a/perl/Git/SVN/Ra.pm
+++ b/perl/Git/SVN/Ra.pm
@@ -486,11 +486,11 @@ sub gs_fetch_loop_common {
$reload_ra->() if $ra_invalid;
}
# pre-fill the .rev_db since it'll eventually get filled in
- # with '0' x40 if something new gets committed
+ # with '0' x $oid_length if something new gets committed
foreach my $gs (@$gsv) {
next if $gs->rev_map_max >= $max;
next if defined $gs->rev_map_get($max);
- $gs->rev_map_set($max, 0 x40);
+ $gs->rev_map_set($max, 0 x $::oid_length);
}
foreach my $g (@$globs) {
my $k = "svn-remote.$g->{remote}.$g->{t}-maxRev";
diff --git a/pkt-line.c b/pkt-line.c
index 8f9bc68ee2..844c253ccd 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -490,6 +490,7 @@ void packet_reader_init(struct packet_reader *reader, int fd,
reader->buffer_size = sizeof(packet_buffer);
reader->options = options;
reader->me = "git";
+ reader->hash_algo = &hash_algos[GIT_HASH_SHA1];
}
enum packet_read_status packet_reader_read(struct packet_reader *reader)
diff --git a/pkt-line.h b/pkt-line.h
index 5b373fe4cd..8c90daa59e 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -177,6 +177,9 @@ struct packet_reader {
unsigned use_sideband : 1;
const char *me;
+
+ /* hash algorithm in use */
+ const struct git_hash_algo *hash_algo;
};
/*
diff --git a/ref-filter.c b/ref-filter.c
index bf7b70299b..8447cb09be 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1579,7 +1579,7 @@ static void lazy_init_worktree_map(void)
if (ref_to_worktree_map.worktrees)
return;
- ref_to_worktree_map.worktrees = get_worktrees(0);
+ ref_to_worktree_map.worktrees = get_worktrees();
hashmap_init(&(ref_to_worktree_map.map), ref_to_worktree_map_cmpfnc, NULL, 0);
populate_worktree_map(&(ref_to_worktree_map.map), ref_to_worktree_map.worktrees);
}
diff --git a/refs.c b/refs.c
index b98dea5217..639cba93b4 100644
--- a/refs.c
+++ b/refs.c
@@ -9,6 +9,7 @@
#include "iterator.h"
#include "refs.h"
#include "refs/refs-internal.h"
+#include "run-command.h"
#include "object-store.h"
#include "object.h"
#include "tag.h"
@@ -16,6 +17,7 @@
#include "worktree.h"
#include "argv-array.h"
#include "repository.h"
+#include "sigchain.h"
/*
* List of all available backends
@@ -339,7 +341,7 @@ enum peel_status peel_object(const struct object_id *name, struct object_id *oid
if (o->type == OBJ_NONE) {
int type = oid_object_info(the_repository, name, NULL);
- if (type < 0 || !object_as_type(the_repository, o, type, 0))
+ if (type < 0 || !object_as_type(o, type, 0))
return PEEL_INVALID;
}
@@ -2016,10 +2018,65 @@ int ref_update_reject_duplicates(struct string_list *refnames,
return 0;
}
+static const char hook_not_found;
+static const char *hook;
+
+static int run_transaction_hook(struct ref_transaction *transaction,
+ const char *state)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ int ret = 0, i;
+
+ if (hook == &hook_not_found)
+ return ret;
+ if (!hook)
+ hook = find_hook("reference-transaction");
+ if (!hook) {
+ hook = &hook_not_found;
+ return ret;
+ }
+
+ argv_array_pushl(&proc.args, hook, state, NULL);
+ proc.in = -1;
+ proc.stdout_to_stderr = 1;
+ proc.trace2_hook_name = "reference-transaction";
+
+ ret = start_command(&proc);
+ if (ret)
+ return ret;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s %s %s\n",
+ oid_to_hex(&update->old_oid),
+ oid_to_hex(&update->new_oid),
+ update->refname);
+
+ if (write_in_full(proc.in, buf.buf, buf.len) < 0) {
+ if (errno != EPIPE)
+ ret = -1;
+ break;
+ }
+ }
+
+ close(proc.in);
+ sigchain_pop(SIGPIPE);
+ strbuf_release(&buf);
+
+ ret |= finish_command(&proc);
+ return ret;
+}
+
int ref_transaction_prepare(struct ref_transaction *transaction,
struct strbuf *err)
{
struct ref_store *refs = transaction->ref_store;
+ int ret;
switch (transaction->state) {
case REF_TRANSACTION_OPEN:
@@ -2042,7 +2099,17 @@ int ref_transaction_prepare(struct ref_transaction *transaction,
return -1;
}
- return refs->be->transaction_prepare(refs, transaction, err);
+ ret = refs->be->transaction_prepare(refs, transaction, err);
+ if (ret)
+ return ret;
+
+ ret = run_transaction_hook(transaction, "prepared");
+ if (ret) {
+ ref_transaction_abort(transaction, err);
+ die(_("ref updates aborted by hook"));
+ }
+
+ return 0;
}
int ref_transaction_abort(struct ref_transaction *transaction,
@@ -2066,6 +2133,8 @@ int ref_transaction_abort(struct ref_transaction *transaction,
break;
}
+ run_transaction_hook(transaction, "aborted");
+
ref_transaction_free(transaction);
return ret;
}
@@ -2094,7 +2163,10 @@ int ref_transaction_commit(struct ref_transaction *transaction,
break;
}
- return refs->be->transaction_finish(refs, transaction, err);
+ ret = refs->be->transaction_finish(refs, transaction, err);
+ if (!ret)
+ run_transaction_hook(transaction, "committed");
+ return ret;
}
int refs_verify_refname_available(struct ref_store *refs,
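
For reference, run_transaction_hook() above gives the new reference-transaction hook a single state argument ("prepared", "committed" or "aborted") and writes one "<old-oid> <new-oid> <refname>" line per queued update to its stdin; a non-zero exit while in the prepared state makes git abort the transaction. A hypothetical hook that consumes that stream, written in C purely for illustration (real hooks are usually shell scripts, as in t1416 below):

    #include <stdio.h>
    #include <string.h>

    /*
     * Hypothetical reference-transaction hook: reject any update that touches
     * refs/heads/protected while the transaction is in the "prepared" state.
     * Input lines look like: "<old-oid> <new-oid> <refname>\n".
     */
    int main(int argc, char **argv)
    {
        char line[4096];
        int prepared = argc > 1 && !strcmp(argv[1], "prepared");

        while (fgets(line, sizeof(line), stdin)) {
            char old_oid[128], new_oid[128], refname[4096];

            if (sscanf(line, "%127s %127s %4095s", old_oid, new_oid, refname) != 3)
                continue;   /* ignore malformed lines */
            if (prepared && !strcmp(refname, "refs/heads/protected"))
                return 1;   /* non-zero exit aborts the transaction */
        }
        return 0;
    }
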
diff --git a/refs.h b/refs.h
index 2e5146fd45..f212f8945e 100644
--- a/refs.h
+++ b/refs.h
@@ -441,19 +441,35 @@ int delete_refs(const char *msg, struct string_list *refnames,
int refs_delete_reflog(struct ref_store *refs, const char *refname);
int delete_reflog(const char *refname);
-/* iterate over reflog entries */
+/*
+ * Callback to process a reflog entry found by the iteration functions (see
+ * below)
+ */
typedef int each_reflog_ent_fn(
struct object_id *old_oid, struct object_id *new_oid,
const char *committer, timestamp_t timestamp,
int tz, const char *msg, void *cb_data);
+/* Iterate over reflog entries in the log for `refname`. */
+
+/* oldest entry first */
int refs_for_each_reflog_ent(struct ref_store *refs, const char *refname,
each_reflog_ent_fn fn, void *cb_data);
+
+/* youngest entry first */
int refs_for_each_reflog_ent_reverse(struct ref_store *refs,
const char *refname,
each_reflog_ent_fn fn,
void *cb_data);
+
+/*
+ * Iterate over reflog entries in the log for `refname` in the main ref store.
+ */
+
+/* oldest entry first */
int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn, void *cb_data);
+
+/* youngest entry first */
int for_each_reflog_ent_reverse(const char *refname, each_reflog_ent_fn fn, void *cb_data);
/*
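
The comments added above pin down the iteration order: refs_for_each_reflog_ent() and for_each_reflog_ent() visit entries oldest first, the _reverse variants youngest first, and iteration stops early if the callback returns non-zero. A minimal callback sketch (assumes it is built inside the git tree against refs.h):

    #include "cache.h"
    #include "refs.h"

    /* Print one reflog entry; returning non-zero would stop the iteration. */
    static int show_ent(struct object_id *old_oid, struct object_id *new_oid,
                        const char *committer, timestamp_t timestamp,
                        int tz, const char *msg, void *cb_data)
    {
        printf("%s -> %s by %s: %s",
               oid_to_hex(old_oid), oid_to_hex(new_oid), committer, msg);
        return 0;
    }

    static void dump_head_reflog(void)
    {
        /* oldest entry first; use for_each_reflog_ent_reverse() for newest first */
        for_each_reflog_ent("HEAD", show_ent, NULL);
    }
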
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index ff2436c0fb..4271362d26 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -347,9 +347,13 @@ int is_empty_ref_iterator(struct ref_iterator *ref_iterator);
/*
* Return an iterator that goes over each reference in `refs` for
* which the refname begins with prefix. If trim is non-zero, then
- * trim that many characters off the beginning of each refname. flags
- * can be DO_FOR_EACH_INCLUDE_BROKEN to include broken references in
- * the iteration. The output is ordered by refname.
+ * trim that many characters off the beginning of each refname.
+ * The output is ordered by refname. The following flags are supported:
+ *
+ * DO_FOR_EACH_INCLUDE_BROKEN: include broken references in
+ * the iteration.
+ *
+ * DO_FOR_EACH_PER_WORKTREE_ONLY: only produce REF_TYPE_PER_WORKTREE refs.
*/
struct ref_iterator *refs_ref_iterator_begin(
struct ref_store *refs,
@@ -438,6 +442,14 @@ void base_ref_iterator_free(struct ref_iterator *iter);
/* Virtual function declarations for ref_iterators: */
+/*
+ * backend-specific implementation of ref_iterator_advance. For symrefs, the
+ * function should set REF_ISSYMREF, and it should also dereference the symref
+ * to provide the OID referent. If DO_FOR_EACH_INCLUDE_BROKEN is set, symrefs
+ * with non-existent referents and refs pointing to non-existent object names
+ * should also be returned. If DO_FOR_EACH_PER_WORKTREE_ONLY, only
+ * REF_TYPE_PER_WORKTREE refs should be returned.
+ */
typedef int ref_iterator_advance_fn(struct ref_iterator *ref_iterator);
typedef int ref_iterator_peel_fn(struct ref_iterator *ref_iterator,
diff --git a/remote-curl.c b/remote-curl.c
index 75532a8bae..5cbc6e5002 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -41,7 +41,9 @@ struct options {
deepen_relative : 1,
from_promisor : 1,
no_dependents : 1,
- atomic : 1;
+ atomic : 1,
+ object_format : 1;
+ const struct git_hash_algo *hash_algo;
};
static struct options options;
static struct string_list cas_options = STRING_LIST_INIT_DUP;
@@ -190,6 +192,16 @@ static int set_option(const char *name, const char *value)
} else if (!strcmp(name, "filter")) {
options.filter = xstrdup(value);
return 0;
+ } else if (!strcmp(name, "object-format")) {
+ int algo;
+ options.object_format = 1;
+ if (strcmp(value, "true")) {
+ algo = hash_algo_by_name(value);
+ if (algo == GIT_HASH_UNKNOWN)
+ die("unknown object format '%s'", value);
+ options.hash_algo = &hash_algos[algo];
+ }
+ return 0;
} else {
return 1 /* unsupported */;
}
@@ -231,6 +243,7 @@ static struct ref *parse_git_refs(struct discovery *heads, int for_push)
case protocol_v0:
get_remote_heads(&reader, &list, for_push ? REF_NORMAL : 0,
NULL, &heads->shallow);
+ options.hash_algo = reader.hash_algo;
break;
case protocol_unknown_version:
BUG("unknown protocol version");
@@ -239,6 +252,19 @@ static struct ref *parse_git_refs(struct discovery *heads, int for_push)
return list;
}
+static const struct git_hash_algo *detect_hash_algo(struct discovery *heads)
+{
+ const char *p = memchr(heads->buf, '\t', heads->len);
+ int algo;
+ if (!p)
+ return the_hash_algo;
+
+ algo = hash_algo_by_length((p - heads->buf) / 2);
+ if (algo == GIT_HASH_UNKNOWN)
+ return NULL;
+ return &hash_algos[algo];
+}
+
static struct ref *parse_info_refs(struct discovery *heads)
{
char *data, *start, *mid;
@@ -249,6 +275,12 @@ static struct ref *parse_info_refs(struct discovery *heads)
struct ref *ref = NULL;
struct ref *last_ref = NULL;
+ options.hash_algo = detect_hash_algo(heads);
+ if (!options.hash_algo)
+ die("%sinfo/refs not valid: could not determine hash algorithm; "
+ "is this a git repository?",
+ transport_anonymize_url(url.buf));
+
data = heads->buf;
start = NULL;
mid = data;
@@ -259,13 +291,13 @@ static struct ref *parse_info_refs(struct discovery *heads)
if (data[i] == '\t')
mid = &data[i];
if (data[i] == '\n') {
- if (mid - start != the_hash_algo->hexsz)
+ if (mid - start != options.hash_algo->hexsz)
die(_("%sinfo/refs not valid: is this a git repository?"),
transport_anonymize_url(url.buf));
data[i] = 0;
ref_name = mid + 1;
ref = alloc_ref(ref_name);
- get_oid_hex(start, &ref->old_oid);
+ get_oid_hex_algop(start, &ref->old_oid, options.hash_algo);
if (!refs)
refs = ref;
if (last_ref)
@@ -509,11 +541,16 @@ static struct ref *get_refs(int for_push)
static void output_refs(struct ref *refs)
{
struct ref *posn;
+ if (options.object_format && options.hash_algo) {
+ printf(":object-format %s\n", options.hash_algo->name);
+ }
for (posn = refs; posn; posn = posn->next) {
if (posn->symref)
printf("@%s %s\n", posn->symref, posn->name);
else
- printf("%s %s\n", oid_to_hex(&posn->old_oid), posn->name);
+ printf("%s %s\n", hash_to_hex_algop(posn->old_oid.hash,
+ options.hash_algo),
+ posn->name);
}
printf("\n");
fflush(stdout);
@@ -1499,6 +1536,7 @@ int cmd_main(int argc, const char **argv)
printf("option\n");
printf("push\n");
printf("check-connectivity\n");
+ printf("object-format\n");
printf("\n");
fflush(stdout);
} else if (skip_prefix(buf.buf, "stateless-connect ", &arg)) {
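
detect_hash_algo() above infers a dumb-HTTP server's object format from the first info/refs line: the hex oid before the first tab is 40 characters for SHA-1 and 64 for SHA-256, and half that length is passed to hash_algo_by_length(). The same idea as a standalone sketch (illustrative only; the real code falls back to the_hash_algo when no tab is present):

    #include <stdio.h>
    #include <string.h>

    /* Guess the object format from one "<hex-oid>\t<refname>" line. */
    static const char *guess_object_format(const char *info_refs_line)
    {
        const char *tab = strchr(info_refs_line, '\t');
        size_t hexlen = tab ? (size_t)(tab - info_refs_line) : 0;

        switch (hexlen) {
        case 40: return "sha1";     /* 20-byte hashes */
        case 64: return "sha256";   /* 32-byte hashes */
        default: return NULL;       /* not a recognizable info/refs payload */
        }
    }

    int main(void)
    {
        /* prints: sha1 */
        printf("%s\n", guess_object_format(
            "2ecc4c1c93e363e6ce2c26b5c7e624c8a82ac02a\trefs/heads/master"));
        return 0;
    }
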
diff --git a/repository.h b/repository.h
index 6534fbb7b3..3c1f7d54bd 100644
--- a/repository.h
+++ b/repository.h
@@ -196,4 +196,10 @@ void repo_update_index_if_able(struct repository *, struct lock_file *);
void prepare_repo_settings(struct repository *r);
+/*
+ * Return 1 if upgrade repository format to target_version succeeded,
+ * 0 if no upgrade is necessary, and -1 when upgrade is not possible.
+ */
+int upgrade_repository_format(int target_version);
+
#endif /* REPOSITORY_H */
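
The return-value contract documented above is what partial clone relies on in list-objects-filter-options.c earlier in this patch: anything negative is fatal, while 0 and 1 both mean the repository now satisfies the requested version. A caller sketch (assumes git's internal headers; the wrapper and its "feature" argument are hypothetical):

    #include "cache.h"
    #include "repository.h"

    /*
     * Sketch: make sure the repository is at format version 1 before enabling
     * an extension. upgrade_repository_format() returns 1 if it upgraded,
     * 0 if the repository was already new enough, and -1 if the upgrade is
     * not possible (e.g. a version-0 repository that already has extensions).
     */
    static void require_format_v1(const char *feature)
    {
        if (upgrade_repository_format(1) < 0)
            die(_("unable to upgrade repository format to support %s"), feature);
    }
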
diff --git a/revision.c b/revision.c
index ebb4d2a0f2..223e99db5d 100644
--- a/revision.c
+++ b/revision.c
@@ -725,7 +725,7 @@ static int check_maybe_different_in_bloom_filter(struct rev_info *revs,
if (!revs->repo->objects->commit_graph)
return -1;
- if (commit->generation == GENERATION_NUMBER_INFINITY)
+ if (commit_graph_generation(commit) == GENERATION_NUMBER_INFINITY)
return -1;
filter = get_bloom_filter(revs->repo, commit, 0);
@@ -1609,7 +1609,7 @@ static void add_other_reflogs_to_pending(struct all_refs_cb *cb)
{
struct worktree **worktrees, **p;
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
@@ -1697,7 +1697,7 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags)
if (revs->single_worktree)
return;
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
struct index_state istate = { NULL };
@@ -3320,7 +3320,7 @@ static void explore_to_depth(struct rev_info *revs,
struct topo_walk_info *info = revs->topo_walk_info;
struct commit *c;
while ((c = prio_queue_peek(&info->explore_queue)) &&
- c->generation >= gen_cutoff)
+ commit_graph_generation(c) >= gen_cutoff)
explore_walk_step(revs);
}
@@ -3336,7 +3336,7 @@ static void indegree_walk_step(struct rev_info *revs)
if (parse_commit_gently(c, 1) < 0)
return;
- explore_to_depth(revs, c->generation);
+ explore_to_depth(revs, commit_graph_generation(c));
for (p = c->parents; p; p = p->next) {
struct commit *parent = p->item;
@@ -3360,7 +3360,7 @@ static void compute_indegrees_to_depth(struct rev_info *revs,
struct topo_walk_info *info = revs->topo_walk_info;
struct commit *c;
while ((c = prio_queue_peek(&info->indegree_queue)) &&
- c->generation >= gen_cutoff)
+ commit_graph_generation(c) >= gen_cutoff)
indegree_walk_step(revs);
}
@@ -3413,6 +3413,7 @@ static void init_topo_walk(struct rev_info *revs)
info->min_generation = GENERATION_NUMBER_INFINITY;
for (list = revs->commits; list; list = list->next) {
struct commit *c = list->item;
+ uint32_t generation;
if (parse_commit_gently(c, 1))
continue;
@@ -3420,8 +3421,9 @@ static void init_topo_walk(struct rev_info *revs)
test_flag_and_insert(&info->explore_queue, c, TOPO_WALK_EXPLORED);
test_flag_and_insert(&info->indegree_queue, c, TOPO_WALK_INDEGREE);
- if (c->generation < info->min_generation)
- info->min_generation = c->generation;
+ generation = commit_graph_generation(c);
+ if (generation < info->min_generation)
+ info->min_generation = generation;
*(indegree_slab_at(&info->indegree, c)) = 1;
@@ -3472,6 +3474,7 @@ static void expand_topo_walk(struct rev_info *revs, struct commit *commit)
for (p = commit->parents; p; p = p->next) {
struct commit *parent = p->item;
int *pi;
+ uint32_t generation;
if (parent->object.flags & UNINTERESTING)
continue;
@@ -3479,8 +3482,9 @@ static void expand_topo_walk(struct rev_info *revs, struct commit *commit)
if (parse_commit_gently(parent, 1) < 0)
continue;
- if (parent->generation < info->min_generation) {
- info->min_generation = parent->generation;
+ generation = commit_graph_generation(parent);
+ if (generation < info->min_generation) {
+ info->min_generation = generation;
compute_indegrees_to_depth(revs, info->min_generation);
}
diff --git a/revision.h b/revision.h
index 93491b79d4..f412ae85eb 100644
--- a/revision.h
+++ b/revision.h
@@ -37,6 +37,10 @@
/* WARNING: This is also used as REACHABLE in commit-graph.c. */
#define PULL_MERGE (1u<<15)
+
+#define TOPO_WALK_EXPLORED (1u<<23)
+#define TOPO_WALK_INDEGREE (1u<<24)
+
/*
* Indicates object was reached by traversal. i.e. not given by user on
* command-line or stdin.
@@ -48,9 +52,6 @@
#define TRACK_LINEAR (1u<<26)
#define ALL_REV_FLAGS (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR | PULL_MERGE)
-#define TOPO_WALK_EXPLORED (1u<<27)
-#define TOPO_WALK_INDEGREE (1u<<28)
-
#define DECORATE_SHORT_REFS 1
#define DECORATE_FULL_REFS 2
diff --git a/send-pack.c b/send-pack.c
index 19eb9b04e4..d671ab5d05 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -363,6 +363,7 @@ int send_pack(struct send_pack_args *args,
int atomic_supported = 0;
int use_push_options = 0;
int push_options_supported = 0;
+ int object_format_supported = 0;
unsigned cmds_sent = 0;
int ret;
struct async demux;
@@ -389,6 +390,9 @@ int send_pack(struct send_pack_args *args,
if (server_supports("push-options"))
push_options_supported = 1;
+ if (!server_supports_hash(the_hash_algo->name, &object_format_supported))
+ die(_("the receiving end does not support this repository's hash algorithm"));
+
if (args->push_cert != SEND_PACK_PUSH_CERT_NEVER) {
int len;
push_cert_nonce = server_feature_value("push-cert", &len);
@@ -429,6 +433,8 @@ int send_pack(struct send_pack_args *args,
strbuf_addstr(&cap_buf, " atomic");
if (use_push_options)
strbuf_addstr(&cap_buf, " push-options");
+ if (object_format_supported)
+ strbuf_addf(&cap_buf, " object-format=%s", the_hash_algo->name);
if (agent_supported)
strbuf_addf(&cap_buf, " agent=%s", git_user_agent_sanitized());
diff --git a/serve.c b/serve.c
index c046926ba1..fbd2fcdfb5 100644
--- a/serve.c
+++ b/serve.c
@@ -22,6 +22,14 @@ static int agent_advertise(struct repository *r,
return 1;
}
+static int object_format_advertise(struct repository *r,
+ struct strbuf *value)
+{
+ if (value)
+ strbuf_addstr(value, r->hash_algo->name);
+ return 1;
+}
+
struct protocol_capability {
/*
* The name of the capability. The server uses this name when
@@ -57,6 +65,7 @@ static struct protocol_capability capabilities[] = {
{ "ls-refs", always_advertise, ls_refs },
{ "fetch", upload_pack_advertise, upload_pack_v2 },
{ "server-option", always_advertise, NULL },
+ { "object-format", object_format_advertise, NULL },
};
static void advertise_capabilities(void)
@@ -153,6 +162,22 @@ int has_capability(const struct argv_array *keys, const char *capability,
return 0;
}
+static void check_algorithm(struct repository *r, struct argv_array *keys)
+{
+ int client = GIT_HASH_SHA1, server = hash_algo_by_ptr(r->hash_algo);
+ const char *algo_name;
+
+ if (has_capability(keys, "object-format", &algo_name)) {
+ client = hash_algo_by_name(algo_name);
+ if (client == GIT_HASH_UNKNOWN)
+ die("unknown object format '%s'", algo_name);
+ }
+
+ if (client != server)
+ die("mismatched object format: server %s; client %s\n",
+ r->hash_algo->name, hash_algos[client].name);
+}
+
enum request_state {
PROCESS_REQUEST_KEYS,
PROCESS_REQUEST_DONE,
@@ -225,6 +250,8 @@ static int process_request(void)
if (!command)
die("no command requested");
+ check_algorithm(the_repository, &keys);
+
command->command(the_repository, &keys, &reader);
argv_array_clear(&keys);
diff --git a/setup.c b/setup.c
index 65fe5ecefb..dbac2eabe8 100644
--- a/setup.c
+++ b/setup.c
@@ -455,6 +455,7 @@ static int check_repo_format(const char *var, const char *value, void *vdata)
if (strcmp(var, "core.repositoryformatversion") == 0)
data->version = git_config_int(var, value);
else if (skip_prefix(var, "extensions.", &ext)) {
+ data->has_extensions = 1;
/*
* record any known extensions here; otherwise,
* we fall through to recording it as unknown, and
@@ -506,9 +507,15 @@ static int check_repository_format_gently(const char *gitdir, struct repository_
die("%s", err.buf);
}
- repository_format_precious_objects = candidate->precious_objects;
- set_repository_format_partial_clone(candidate->partial_clone);
- repository_format_worktree_config = candidate->worktree_config;
+ if (candidate->version >= 1) {
+ repository_format_precious_objects = candidate->precious_objects;
+ set_repository_format_partial_clone(candidate->partial_clone);
+ repository_format_worktree_config = candidate->worktree_config;
+ } else {
+ repository_format_precious_objects = 0;
+ set_repository_format_partial_clone(NULL);
+ repository_format_worktree_config = 0;
+ }
string_list_clear(&candidate->unknown_extensions, 0);
if (repository_format_worktree_config) {
@@ -538,6 +545,34 @@ static int check_repository_format_gently(const char *gitdir, struct repository_
return 0;
}
+int upgrade_repository_format(int target_version)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ struct strbuf repo_version = STRBUF_INIT;
+ struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
+
+ strbuf_git_common_path(&sb, the_repository, "config");
+ read_repository_format(&repo_fmt, sb.buf);
+ strbuf_release(&sb);
+
+ if (repo_fmt.version >= target_version)
+ return 0;
+
+ if (verify_repository_format(&repo_fmt, &err) < 0 ||
+ (!repo_fmt.version && repo_fmt.has_extensions)) {
+ warning("unable to upgrade repository format from %d to %d: %s",
+ repo_fmt.version, target_version, err.buf);
+ strbuf_release(&err);
+ return -1;
+ }
+
+ strbuf_addf(&repo_version, "%d", target_version);
+ git_config_set("core.repositoryformatversion", repo_version.buf);
+ strbuf_release(&repo_version);
+ return 1;
+}
+
static void init_repository_format(struct repository_format *format)
{
const struct repository_format fresh = REPOSITORY_FORMAT_INIT;
@@ -1273,6 +1308,7 @@ void check_repository_format(struct repository_format *fmt)
fmt = &repo_fmt;
check_repository_format_gently(get_git_dir(), fmt, NULL);
startup_info->have_repository = 1;
+ repo_set_hash_algo(the_repository, fmt->hash_algo);
clear_repository_format(&repo_fmt);
}
diff --git a/strbuf.c b/strbuf.c
index 2f1a7d3209..e3397cc4c7 100644
--- a/strbuf.c
+++ b/strbuf.c
@@ -556,11 +556,6 @@ ssize_t strbuf_write(struct strbuf *sb, FILE *f)
return sb->len ? fwrite(sb->buf, 1, sb->len, f) : 0;
}
-ssize_t strbuf_write_fd(struct strbuf *sb, int fd)
-{
- return sb->len ? write(fd, sb->buf, sb->len) : 0;
-}
-
#define STRBUF_MAXLINK (2*PATH_MAX)
int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
diff --git a/strbuf.h b/strbuf.h
index 7062eb6410..223ee2094a 100644
--- a/strbuf.h
+++ b/strbuf.h
@@ -473,7 +473,6 @@ int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint);
* NUL bytes.
*/
ssize_t strbuf_write(struct strbuf *sb, FILE *stream);
-ssize_t strbuf_write_fd(struct strbuf *sb, int fd);
/**
* Read a line from a FILE *, overwriting the existing contents of
diff --git a/t/README b/t/README
index cf863837ab..70ec61cf88 100644
--- a/t/README
+++ b/t/README
@@ -1,7 +1,7 @@
-Core GIT Tests
+Core Git Tests
==============
-This directory holds many test scripts for core GIT tools. The
+This directory holds many test scripts for core Git tools. The
first part of this short document describes how to run the tests
and read their output.
@@ -1117,21 +1117,21 @@ Tips for Writing Tests
As with any programming projects, existing programs are the best
source of the information. However, do _not_ emulate
t0000-basic.sh when writing your tests. The test is special in
-that it tries to validate the very core of GIT. For example, it
+that it tries to validate the very core of Git. For example, it
knows that there will be 256 subdirectories under .git/objects/,
and it knows that the object ID of an empty tree is a certain
40-byte string. This is deliberately done so in t0000-basic.sh
because the things the very basic core test tries to achieve is
-to serve as a basis for people who are changing the GIT internal
+to serve as a basis for people who are changing the Git internals
drastically. For these people, after making certain changes,
not seeing failures from the basic test _is_ a failure. And
-such drastic changes to the core GIT that even changes these
+such drastic changes to the core Git that even changes these
otherwise supposedly stable object IDs should be accompanied by
an update to t0000-basic.sh.
However, other tests that simply rely on basic parts of the core
-GIT working properly should not have that level of intimate
-knowledge of the core GIT internals. If all the test scripts
+Git working properly should not have that level of intimate
+knowledge of the core Git internals. If all the test scripts
hardcoded the object IDs like t0000-basic.sh does, that defeats
the purpose of t0000-basic.sh, which is to isolate that level of
validation in one place. Your test also ends up needing
diff --git a/t/helper/test-oid-array.c b/t/helper/test-oid-array.c
index ce9fd5f091..b16cd0b11b 100644
--- a/t/helper/test-oid-array.c
+++ b/t/helper/test-oid-array.c
@@ -12,6 +12,9 @@ int cmd__oid_array(int argc, const char **argv)
{
struct oid_array array = OID_ARRAY_INIT;
struct strbuf line = STRBUF_INIT;
+ int nongit_ok;
+
+ setup_git_directory_gently(&nongit_ok);
while (strbuf_getline(&line, stdin) != EOF) {
const char *arg;
diff --git a/t/helper/test-reach.c b/t/helper/test-reach.c
index a0272178b7..14a3655442 100644
--- a/t/helper/test-reach.c
+++ b/t/helper/test-reach.c
@@ -67,7 +67,7 @@ int cmd__reach(int ac, const char **av)
die("failed to load commit for input %s resulting in oid %s\n",
buf.buf, oid_to_hex(&oid));
- c = object_as_type(r, peeled, OBJ_COMMIT, 0);
+ c = object_as_type(peeled, OBJ_COMMIT, 0);
if (!c)
die("failed to load commit for input %s resulting in oid %s\n",
@@ -108,7 +108,7 @@ int cmd__reach(int ac, const char **av)
else if (!strcmp(av[1], "in_merge_bases"))
printf("%s(A,B):%d\n", av[1], in_merge_bases(A, B));
else if (!strcmp(av[1], "is_descendant_of"))
- printf("%s(A,X):%d\n", av[1], is_descendant_of(A, X));
+ printf("%s(A,X):%d\n", av[1], repo_is_descendant_of(r, A, X));
else if (!strcmp(av[1], "get_merge_bases_many")) {
struct commit_list *list = get_merge_bases_many(A, X_nr, X_array);
printf("%s(A,X):\n", av[1]);
diff --git a/t/helper/test-ref-store.c b/t/helper/test-ref-store.c
index 799fc00aa1..759e69dc54 100644
--- a/t/helper/test-ref-store.c
+++ b/t/helper/test-ref-store.c
@@ -37,7 +37,7 @@ static const char **get_store(const char **argv, struct ref_store **refs)
*refs = get_submodule_ref_store(gitdir);
} else if (skip_prefix(argv[0], "worktree:", &gitdir)) {
- struct worktree **p, **worktrees = get_worktrees(0);
+ struct worktree **p, **worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
diff --git a/t/lib-git-svn.sh b/t/lib-git-svn.sh
index 7d248e6588..547eb3c31a 100644
--- a/t/lib-git-svn.sh
+++ b/t/lib-git-svn.sh
@@ -78,21 +78,24 @@ maybe_start_httpd () {
}
convert_to_rev_db () {
- perl -w -- - "$@" <<\EOF
+ perl -w -- - "$(test_oid rawsz)" "$@" <<\EOF
use strict;
+my $oidlen = shift;
@ARGV == 2 or die "usage: convert_to_rev_db <input> <output>";
+my $record_size = $oidlen + 4;
+my $hexlen = $oidlen * 2;
open my $wr, '+>', $ARGV[1] or die "$!: couldn't open: $ARGV[1]";
open my $rd, '<', $ARGV[0] or die "$!: couldn't open: $ARGV[0]";
my $size = (stat($rd))[7];
-($size % 24) == 0 or die "Inconsistent size: $size";
-while (sysread($rd, my $buf, 24) == 24) {
- my ($r, $c) = unpack('NH40', $buf);
- my $offset = $r * 41;
+($size % $record_size) == 0 or die "Inconsistent size: $size";
+while (sysread($rd, my $buf, $record_size) == $record_size) {
+ my ($r, $c) = unpack("NH$hexlen", $buf);
+ my $offset = $r * ($hexlen + 1);
seek $wr, 0, 2 or die $!;
my $pos = tell $wr;
if ($pos < $offset) {
- for (1 .. (($offset - $pos) / 41)) {
- print $wr (('0' x 40),"\n") or die $!;
+ for (1 .. (($offset - $pos) / ($hexlen + 1))) {
+ print $wr (('0' x $hexlen),"\n") or die $!;
}
}
seek $wr, $offset, 0 or die $!;
diff --git a/t/perf/p1400-update-ref.sh b/t/perf/p1400-update-ref.sh
new file mode 100755
index 0000000000..d275a81248
--- /dev/null
+++ b/t/perf/p1400-update-ref.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+test_description="Tests performance of update-ref"
+
+. ./perf-lib.sh
+
+test_perf_fresh_repo
+
+test_expect_success "setup" '
+ test_commit PRE &&
+ test_commit POST &&
+ printf "create refs/heads/%d PRE\n" $(test_seq 1000) >create &&
+ printf "update refs/heads/%d POST PRE\n" $(test_seq 1000) >update &&
+ printf "delete refs/heads/%d POST\n" $(test_seq 1000) >delete
+'
+
+test_perf "update-ref" '
+ for i in $(test_seq 1000)
+ do
+ git update-ref refs/heads/branch PRE &&
+ git update-ref refs/heads/branch POST PRE &&
+ git update-ref -d refs/heads/branch
+ done
+'
+
+test_perf "update-ref --stdin" '
+ git update-ref --stdin <create &&
+ git update-ref --stdin <update &&
+ git update-ref --stdin <delete
+'
+
+test_done
diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh
index 0aa9908ea1..960ed150cb 100755
--- a/t/t0002-gitfile.sh
+++ b/t/t0002-gitfile.sh
@@ -62,7 +62,7 @@ test_expect_success 'check commit-tree' '
'
test_expect_success 'check rev-list' '
- echo $SHA >"$REAL/HEAD" &&
+ git update-ref "HEAD" "$SHA" &&
test "$SHA" = "$(git rev-list HEAD)"
'
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
index a3988bd4b8..463dc3a8be 100755
--- a/t/t0410-partial-clone.sh
+++ b/t/t0410-partial-clone.sh
@@ -30,6 +30,29 @@ test_expect_success 'extensions.partialclone without filter' '
git -C client fetch origin
'
+test_expect_success 'convert shallow clone to partial clone' '
+ rm -fr server client &&
+ test_create_repo server &&
+ test_commit -C server my_commit 1 &&
+ test_commit -C server my_commit2 1 &&
+ git clone --depth=1 "file://$(pwd)/server" client &&
+ git -C client fetch --unshallow --filter="blob:none" &&
+ test_cmp_config -C client true remote.origin.promisor &&
+ test_cmp_config -C client blob:none remote.origin.partialclonefilter &&
+ test_cmp_config -C client 1 core.repositoryformatversion
+'
+
+test_expect_success 'convert shallow clone to partial clone must fail with any extension' '
+ rm -fr server client &&
+ test_create_repo server &&
+ test_commit -C server my_commit 1 &&
+ test_commit -C server my_commit2 1 &&
+ git clone --depth=1 "file://$(pwd)/server" client &&
+ test_cmp_config -C client 0 core.repositoryformatversion &&
+ git -C client config extensions.partialclone origin &&
+ test_must_fail git -C client fetch --unshallow --filter="blob:none"
+'
+
test_expect_success 'missing reflog object, but promised by a commit, passes fsck' '
rm -rf repo &&
test_create_repo repo &&
diff --git a/t/t1050-large.sh b/t/t1050-large.sh
index 184b479a21..6a56d1ca24 100755
--- a/t/t1050-large.sh
+++ b/t/t1050-large.sh
@@ -12,6 +12,7 @@ file_size () {
}
test_expect_success setup '
+ test_oid_init &&
# clone does not allow us to pass core.bigfilethreshold to
# new repos, so set core.bigfilethreshold globally
git config --global core.bigfilethreshold 200k &&
@@ -64,7 +65,7 @@ test_expect_success 'add a large file or two' '
test $count = 1 &&
cnt=$(git show-index <"$idx" | wc -l) &&
test $cnt = 2 &&
- for l in .git/objects/??/??????????????????????????????????????
+ for l in .git/objects/$OIDPATH_REGEX
do
test_path_is_file "$l" || continue
bad=t
@@ -177,7 +178,8 @@ test_expect_success 'git-show a large file' '
test_expect_success 'index-pack' '
git clone file://"$(pwd)"/.git foo &&
- GIT_DIR=non-existent git index-pack --strict --verify foo/.git/objects/pack/*.pack
+ GIT_DIR=non-existent git index-pack --object-format=$(test_oid algo) \
+ --strict --verify foo/.git/objects/pack/*.pack
'
test_expect_success 'repack' '
diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh
index 40cc004326..f35a73dd20 100755
--- a/t/t1090-sparse-checkout-scope.sh
+++ b/t/t1090-sparse-checkout-scope.sh
@@ -63,7 +63,6 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs
git -C server commit -m message &&
test_config -C client core.sparsecheckout 1 &&
- test_config -C client extensions.partialclone origin &&
echo "!/*" >client/.git/info/sparse-checkout &&
echo "/a" >>client/.git/info/sparse-checkout &&
git -C client fetch --filter=blob:none origin &&
diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh
index 88cdde255c..7cd45fc139 100755
--- a/t/t1091-sparse-checkout-builtin.sh
+++ b/t/t1091-sparse-checkout-builtin.sh
@@ -100,6 +100,28 @@ test_expect_success 'clone --sparse' '
check_files clone a
'
+test_expect_success 'interaction with clone --no-checkout (unborn index)' '
+ git clone --no-checkout "file://$(pwd)/repo" clone_no_checkout &&
+ git -C clone_no_checkout sparse-checkout init --cone &&
+ git -C clone_no_checkout sparse-checkout set folder1 &&
+
+ git -C clone_no_checkout sparse-checkout list >actual &&
+ cat >expect <<-\EOF &&
+ folder1
+ EOF
+ test_cmp expect actual &&
+
+ # nothing checked out, expect "No such file or directory"
+ ! ls clone_no_checkout/* >actual &&
+ test_must_be_empty actual &&
+ test_path_is_missing clone_no_checkout/.git/index &&
+
+ # No branch is checked out until we manually switch to one
+ git -C clone_no_checkout switch master &&
+ test_path_is_file clone_no_checkout/.git/index &&
+ check_files clone_no_checkout a folder1
+'
+
test_expect_success 'set enables config' '
git init empty-config &&
(
diff --git a/t/t1302-repo-version.sh b/t/t1302-repo-version.sh
index ce4cff13bb..d60c042ce8 100755
--- a/t/t1302-repo-version.sh
+++ b/t/t1302-repo-version.sh
@@ -8,6 +8,10 @@ test_description='Test repository version check'
. ./test-lib.sh
test_expect_success 'setup' '
+ test_oid_cache <<-\EOF &&
+ version sha1:0
+ version sha256:1
+ EOF
cat >test.patch <<-\EOF &&
diff --git a/test.txt b/test.txt
new file mode 100644
@@ -23,7 +27,7 @@ test_expect_success 'setup' '
'
test_expect_success 'gitdir selection on normal repos' '
- echo 0 >expect &&
+ echo $(test_oid version) >expect &&
git config core.repositoryformatversion >actual &&
git -C test config core.repositoryformatversion >actual2 &&
test_cmp expect actual &&
diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh
index e1197ac818..27171f8261 100755
--- a/t/t1400-update-ref.sh
+++ b/t/t1400-update-ref.sh
@@ -37,15 +37,15 @@ test_expect_success setup '
test_expect_success "create $m" '
git update-ref $m $A &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
test_expect_success "create $m with oldvalue verification" '
git update-ref $m $B $A &&
- test $B = $(cat .git/$m)
+ test $B = $(git show-ref -s --verify $m)
'
test_expect_success "fail to delete $m with stale ref" '
test_must_fail git update-ref -d $m $A &&
- test $B = "$(cat .git/$m)"
+ test $B = "$(git show-ref -s --verify $m)"
'
test_expect_success "delete $m" '
test_when_finished "rm -f .git/$m" &&
@@ -56,7 +56,7 @@ test_expect_success "delete $m" '
test_expect_success "delete $m without oldvalue verification" '
test_when_finished "rm -f .git/$m" &&
git update-ref $m $A &&
- test $A = $(cat .git/$m) &&
+ test $A = $(git show-ref -s --verify $m) &&
git update-ref -d $m &&
test_path_is_missing .git/$m
'
@@ -69,15 +69,15 @@ test_expect_success "fail to create $n" '
test_expect_success "create $m (by HEAD)" '
git update-ref HEAD $A &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
test_expect_success "create $m (by HEAD) with oldvalue verification" '
git update-ref HEAD $B $A &&
- test $B = $(cat .git/$m)
+ test $B = $(git show-ref -s --verify $m)
'
test_expect_success "fail to delete $m (by HEAD) with stale ref" '
test_must_fail git update-ref -d HEAD $A &&
- test $B = $(cat .git/$m)
+ test $B = $(git show-ref -s --verify $m)
'
test_expect_success "delete $m (by HEAD)" '
test_when_finished "rm -f .git/$m" &&
@@ -178,14 +178,14 @@ test_expect_success '--no-create-reflog overrides core.logAllRefUpdates=always'
test_expect_success "create $m (by HEAD)" '
git update-ref HEAD $A &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
test_expect_success 'pack refs' '
git pack-refs --all
'
test_expect_success "move $m (by HEAD)" '
git update-ref HEAD $B $A &&
- test $B = $(cat .git/$m)
+ test $B = $(git show-ref -s --verify $m)
'
test_expect_success "delete $m (by HEAD) should remove both packed and loose $m" '
test_when_finished "rm -f .git/$m" &&
@@ -255,7 +255,7 @@ test_expect_success '(not) change HEAD with wrong SHA1' '
'
test_expect_success "(not) changed .git/$m" '
test_when_finished "rm -f .git/$m" &&
- ! test $B = $(cat .git/$m)
+ ! test $B = $(git show-ref -s --verify $m)
'
rm -f .git/logs/refs/heads/master
@@ -263,19 +263,19 @@ test_expect_success "create $m (logged by touch)" '
test_config core.logAllRefUpdates false &&
GIT_COMMITTER_DATE="2005-05-26 23:30" \
git update-ref --create-reflog HEAD $A -m "Initial Creation" &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
test_expect_success "update $m (logged by touch)" '
test_config core.logAllRefUpdates false &&
GIT_COMMITTER_DATE="2005-05-26 23:31" \
git update-ref HEAD $B $A -m "Switch" &&
- test $B = $(cat .git/$m)
+ test $B = $(git show-ref -s --verify $m)
'
test_expect_success "set $m (logged by touch)" '
test_config core.logAllRefUpdates false &&
GIT_COMMITTER_DATE="2005-05-26 23:41" \
git update-ref HEAD $A &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
test_expect_success 'empty directory removal' '
@@ -319,19 +319,19 @@ test_expect_success "create $m (logged by config)" '
test_config core.logAllRefUpdates true &&
GIT_COMMITTER_DATE="2005-05-26 23:32" \
git update-ref HEAD $A -m "Initial Creation" &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
test_expect_success "update $m (logged by config)" '
test_config core.logAllRefUpdates true &&
GIT_COMMITTER_DATE="2005-05-26 23:33" \
git update-ref HEAD'" $B $A "'-m "Switch" &&
- test $B = $(cat .git/$m)
+ test $B = $(git show-ref -s --verify $m)
'
test_expect_success "set $m (logged by config)" '
test_config core.logAllRefUpdates true &&
GIT_COMMITTER_DATE="2005-05-26 23:43" \
git update-ref HEAD $A &&
- test $A = $(cat .git/$m)
+ test $A = $(git show-ref -s --verify $m)
'
cat >expect <<EOF
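Aside: the change above swaps direct reads of loose ref files for git show-ref, which answers from whatever ref storage is in use. A short illustration of why that matters (ref name is illustrative):

A=$(git rev-parse HEAD)
git update-ref refs/heads/topic $A
git pack-refs --all
# the loose file .git/refs/heads/topic is gone now, so "cat" would fail,
# but the ref is still readable through git:
test $A = $(git show-ref -s --verify refs/heads/topic)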
diff --git a/t/t1416-ref-transaction-hooks.sh b/t/t1416-ref-transaction-hooks.sh
new file mode 100755
index 0000000000..da58d867a5
--- /dev/null
+++ b/t/t1416-ref-transaction-hooks.sh
@@ -0,0 +1,109 @@
+#!/bin/sh
+
+test_description='reference transaction hooks'
+
+. ./test-lib.sh
+
+test_expect_success setup '
+ mkdir -p .git/hooks &&
+ test_commit PRE &&
+ test_commit POST &&
+ POST_OID=$(git rev-parse POST)
+'
+
+test_expect_success 'hook allows updating ref if successful' '
+ test_when_finished "rm .git/hooks/reference-transaction" &&
+ git reset --hard PRE &&
+ write_script .git/hooks/reference-transaction <<-\EOF &&
+ echo "$*" >>actual
+ EOF
+ cat >expect <<-EOF &&
+ prepared
+ committed
+ EOF
+ git update-ref HEAD POST &&
+ test_cmp expect actual
+'
+
+test_expect_success 'hook aborts updating ref in prepared state' '
+ test_when_finished "rm .git/hooks/reference-transaction" &&
+ git reset --hard PRE &&
+ write_script .git/hooks/reference-transaction <<-\EOF &&
+ if test "$1" = prepared
+ then
+ exit 1
+ fi
+ EOF
+ test_must_fail git update-ref HEAD POST 2>err &&
+ test_i18ngrep "ref updates aborted by hook" err
+'
+
+test_expect_success 'hook gets all queued updates in prepared state' '
+ test_when_finished "rm .git/hooks/reference-transaction actual" &&
+ git reset --hard PRE &&
+ write_script .git/hooks/reference-transaction <<-\EOF &&
+ if test "$1" = prepared
+ then
+ while read -r line
+ do
+ printf "%s\n" "$line"
+ done >actual
+ fi
+ EOF
+ cat >expect <<-EOF &&
+ $ZERO_OID $POST_OID HEAD
+ $ZERO_OID $POST_OID refs/heads/master
+ EOF
+ git update-ref HEAD POST <<-EOF &&
+ update HEAD $ZERO_OID $POST_OID
+ update refs/heads/master $ZERO_OID $POST_OID
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'hook gets all queued updates in committed state' '
+ test_when_finished "rm .git/hooks/reference-transaction actual" &&
+ git reset --hard PRE &&
+ write_script .git/hooks/reference-transaction <<-\EOF &&
+ if test "$1" = committed
+ then
+ while read -r line
+ do
+ printf "%s\n" "$line"
+ done >actual
+ fi
+ EOF
+ cat >expect <<-EOF &&
+ $ZERO_OID $POST_OID HEAD
+ $ZERO_OID $POST_OID refs/heads/master
+ EOF
+ git update-ref HEAD POST &&
+ test_cmp expect actual
+'
+
+test_expect_success 'hook gets all queued updates in aborted state' '
+ test_when_finished "rm .git/hooks/reference-transaction actual" &&
+ git reset --hard PRE &&
+ write_script .git/hooks/reference-transaction <<-\EOF &&
+ if test "$1" = aborted
+ then
+ while read -r line
+ do
+ printf "%s\n" "$line"
+ done >actual
+ fi
+ EOF
+ cat >expect <<-EOF &&
+ $ZERO_OID $POST_OID HEAD
+ $ZERO_OID $POST_OID refs/heads/master
+ EOF
+ git update-ref --stdin <<-EOF &&
+ start
+ update HEAD POST $ZERO_OID
+ update refs/heads/master POST $ZERO_OID
+ abort
+ EOF
+ test_cmp expect actual
+'
+
+test_done
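Aside: the new script exercises the reference-transaction hook. Git runs .git/hooks/reference-transaction with a single argument naming the state ("prepared", "committed" or "aborted") and feeds it one "<old-oid> <new-oid> <refname>" line per queued update; a non-zero exit while "prepared" aborts the whole transaction. A minimal hook in that spirit (the refs/heads/frozen guard is illustrative):

write_script .git/hooks/reference-transaction <<-\EOF
echo "state=$1" >>ref-txn.log
while read old new ref
do
	echo "$old $new $ref" >>ref-txn.log
	if test "$1" = prepared && test "$ref" = refs/heads/frozen
	then
		exit 1	# rejecting in "prepared" aborts every queued update
	fi
done
EOF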
diff --git a/t/t1506-rev-parse-diagnosis.sh b/t/t1506-rev-parse-diagnosis.sh
index 52edcbdcc3..dbf690b9c1 100755
--- a/t/t1506-rev-parse-diagnosis.sh
+++ b/t/t1506-rev-parse-diagnosis.sh
@@ -207,7 +207,7 @@ test_expect_success 'arg before dashdash must be a revision (ambiguous)' '
{
# we do not want to use rev-parse here, because
# we are testing it
- cat .git/refs/heads/foobar &&
+ git show-ref -s refs/heads/foobar &&
printf "%s\n" --
} >expect &&
git rev-parse foobar -- >actual &&
diff --git a/t/t2203-add-intent.sh b/t/t2203-add-intent.sh
index 5bbe8dcce4..8a5d55054f 100755
--- a/t/t2203-add-intent.sh
+++ b/t/t2203-add-intent.sh
@@ -232,17 +232,54 @@ test_expect_success 'double rename detection in status' '
)
'
-test_expect_success 'diff-files/diff-cached shows ita as new/not-new files' '
+test_expect_success 'i-t-a files shown as new for "diff", "diff-files"; not-new for "diff --cached"' '
git reset --hard &&
- echo new >new-ita &&
- git add -N new-ita &&
+ : >empty &&
+ content="foo" &&
+ echo "$content" >not-empty &&
+
+ hash_e=$(git hash-object empty) &&
+ hash_n=$(git hash-object not-empty) &&
+ hash_t=$(git hash-object -t tree /dev/null) &&
+
+ cat >expect.diff_p <<-EOF &&
+ diff --git a/empty b/empty
+ new file mode 100644
+ index 0000000..$(git rev-parse --short $hash_e)
+ diff --git a/not-empty b/not-empty
+ new file mode 100644
+ index 0000000..$(git rev-parse --short $hash_n)
+ --- /dev/null
+ +++ b/not-empty
+ @@ -0,0 +1 @@
+ +$content
+ EOF
+ cat >expect.diff_s <<-EOF &&
+ create mode 100644 empty
+ create mode 100644 not-empty
+ EOF
+ cat >expect.diff_a <<-EOF &&
+ :000000 100644 0000000 $(git rev-parse --short $hash_t) A$(printf "\t")empty
+ :000000 100644 0000000 $(git rev-parse --short $hash_t) A$(printf "\t")not-empty
+ EOF
+
+ git add -N empty not-empty &&
+
+ git diff >actual &&
+ test_cmp expect.diff_p actual &&
+
git diff --summary >actual &&
- echo " create mode 100644 new-ita" >expected &&
- test_cmp expected actual &&
- git diff --cached --summary >actual2 &&
- test_must_be_empty actual2
-'
+ test_cmp expect.diff_s actual &&
+
+ git diff-files -p >actual &&
+ test_cmp expect.diff_p actual &&
+ git diff-files --abbrev >actual &&
+ test_cmp expect.diff_a actual &&
+
+ git diff --cached >actual &&
+ test_must_be_empty actual
+'
test_expect_success '"diff HEAD" includes ita as new files' '
git reset --hard &&
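Aside: the rewritten test pins down how intent-to-add entries appear in the various diff front ends. Interactively that looks roughly like this (file name illustrative):

echo hello >greeting
git add -N greeting     # intent-to-add: record the path, not its content
git diff                # greeting appears as a new file with its worktree content
git diff-files -p       # same result from the plumbing
git diff --cached       # prints nothing: no content has been staged yet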
diff --git a/t/t2401-worktree-prune.sh b/t/t2401-worktree-prune.sh
index b7d6d5d45a..a6ce7f590b 100755
--- a/t/t2401-worktree-prune.sh
+++ b/t/t2401-worktree-prune.sh
@@ -92,4 +92,28 @@ test_expect_success 'not prune proper checkouts' '
test -d .git/worktrees/nop
'
+test_expect_success 'prune duplicate (linked/linked)' '
+ test_when_finished rm -fr .git/worktrees w1 w2 &&
+ git worktree add --detach w1 &&
+ git worktree add --detach w2 &&
+ sed "s/w2/w1/" .git/worktrees/w2/gitdir >.git/worktrees/w2/gitdir.new &&
+ mv .git/worktrees/w2/gitdir.new .git/worktrees/w2/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "duplicate entry" actual &&
+ test -d .git/worktrees/w1 &&
+ ! test -d .git/worktrees/w2
+'
+
+test_expect_success 'prune duplicate (main/linked)' '
+ test_when_finished rm -fr repo wt &&
+ test_create_repo repo &&
+ test_commit -C repo x &&
+ git -C repo worktree add --detach ../wt &&
+ rm -fr wt &&
+ mv repo wt &&
+ git -C wt worktree prune --verbose >actual &&
+ test_i18ngrep "duplicate entry" actual &&
+ ! test -d .git/worktrees/wt
+'
+
test_done
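Aside: each linked worktree keeps its administrative data under .git/worktrees/<id>/, and the gitdir file there records the path of the worktree's .git file; the tests above fake a duplicate by rewriting that file. Roughly (names illustrative):

git worktree add --detach w1
cat .git/worktrees/w1/gitdir     # absolute path to w1/.git
# if two entries end up pointing at the same working tree, prune keeps one
# and reports "duplicate entry" for the other:
git worktree prune --verbose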
diff --git a/t/t2403-worktree-move.sh b/t/t2403-worktree-move.sh
index 939d18d728..a4e1a178e0 100755
--- a/t/t2403-worktree-move.sh
+++ b/t/t2403-worktree-move.sh
@@ -112,6 +112,27 @@ test_expect_success 'move locked worktree (force)' '
git worktree move --force --force flump ploof
'
+test_expect_success 'refuse to move worktree atop existing path' '
+ >bobble &&
+ git worktree add --detach beeble &&
+ test_must_fail git worktree move beeble bobble
+'
+
+test_expect_success 'move atop existing but missing worktree' '
+ git worktree add --detach gnoo &&
+ git worktree add --detach pneu &&
+ rm -fr pneu &&
+ test_must_fail git worktree move gnoo pneu &&
+ git worktree move --force gnoo pneu &&
+
+ git worktree add --detach nu &&
+ git worktree lock nu &&
+ rm -fr nu &&
+ test_must_fail git worktree move pneu nu &&
+ test_must_fail git worktree --force move pneu nu &&
+ git worktree move --force --force pneu nu
+'
+
test_expect_success 'move a repo with uninitialized submodule' '
git init withsub &&
(
diff --git a/t/t2404-worktree-config.sh b/t/t2404-worktree-config.sh
index 286121d8de..9536d10919 100755
--- a/t/t2404-worktree-config.sh
+++ b/t/t2404-worktree-config.sh
@@ -23,8 +23,10 @@ test_expect_success 'config --worktree without extension' '
'
test_expect_success 'enable worktreeConfig extension' '
+ git config core.repositoryformatversion 1 &&
git config extensions.worktreeConfig true &&
- test_cmp_config true extensions.worktreeConfig
+ test_cmp_config true extensions.worktreeConfig &&
+ test_cmp_config 1 core.repositoryformatversion
'
test_expect_success 'config is shared as before' '
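Aside: the updated test now sets core.repositoryformatversion to 1 alongside extensions.worktreeConfig, matching the rule that extensions.* settings belong to repository format version 1. Enabling the extension by hand would look like this (the status setting is only an example of a per-worktree value):

git config core.repositoryformatversion 1
git config extensions.worktreeConfig true
git config --worktree status.showUntrackedFiles no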
diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh
index 411a70b0ce..b6aa04bbec 100755
--- a/t/t3200-branch.sh
+++ b/t/t3200-branch.sh
@@ -323,11 +323,11 @@ test_expect_success 'git branch --list -v with --abbrev' '
test_expect_success 'git branch --column' '
COLUMNS=81 git branch --column=column >actual &&
- cat >expected <<\EOF &&
+ cat >expect <<\EOF &&
a/b/c bam foo l * master mb o/o q
abc bar j/k m/m master2 n o/p r
EOF
- test_cmp expected actual
+ test_cmp expect actual
'
test_expect_success 'git branch --column with an extremely long branch name' '
@@ -336,7 +336,7 @@ test_expect_success 'git branch --column with an extremely long branch name' '
test_when_finished "git branch -d $long" &&
git branch $long &&
COLUMNS=80 git branch --column=column >actual &&
- cat >expected <<EOF &&
+ cat >expect <<EOF &&
a/b/c
abc
bam
@@ -355,7 +355,7 @@ test_expect_success 'git branch --column with an extremely long branch name' '
r
$long
EOF
- test_cmp expected actual
+ test_cmp expect actual
'
test_expect_success 'git branch with column.*' '
@@ -364,11 +364,11 @@ test_expect_success 'git branch with column.*' '
COLUMNS=80 git branch >actual &&
git config --unset column.branch &&
git config --unset column.ui &&
- cat >expected <<\EOF &&
+ cat >expect <<\EOF &&
a/b/c bam foo l * master mb o/o q
abc bar j/k m/m master2 n o/p r
EOF
- test_cmp expected actual
+ test_cmp expect actual
'
test_expect_success 'git branch --column -v should fail' '
@@ -379,7 +379,7 @@ test_expect_success 'git branch -v with column.ui ignored' '
git config column.ui column &&
COLUMNS=80 git branch -v | cut -c -10 | sed "s/ *$//" >actual &&
git config --unset column.ui &&
- cat >expected <<\EOF &&
+ cat >expect <<\EOF &&
a/b/c
abc
bam
@@ -397,12 +397,12 @@ test_expect_success 'git branch -v with column.ui ignored' '
q
r
EOF
- test_cmp expected actual
+ test_cmp expect actual
'
mv .git/config .git/config-saved
-test_expect_success 'git branch -m q q2 without config should succeed' '
+test_expect_success SHA1 'git branch -m q q2 without config should succeed' '
git branch -m q q2 &&
git branch -m q2 q
'
@@ -835,32 +835,42 @@ test_expect_success 'branch from tag w/--track causes failure' '
'
test_expect_success '--set-upstream-to fails on multiple branches' '
- test_must_fail git branch --set-upstream-to master a b c
+ echo "fatal: too many arguments to set new upstream" >expect &&
+ test_must_fail git branch --set-upstream-to master a b c 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--set-upstream-to fails on detached HEAD' '
git checkout HEAD^{} &&
- test_must_fail git branch --set-upstream-to master &&
- git checkout -
+ test_when_finished git checkout - &&
+ echo "fatal: could not set upstream of HEAD to master when it does not point to any branch." >expect &&
+ test_must_fail git branch --set-upstream-to master 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--set-upstream-to fails on a missing dst branch' '
- test_must_fail git branch --set-upstream-to master does-not-exist
+ echo "fatal: branch '"'"'does-not-exist'"'"' does not exist" >expect &&
+ test_must_fail git branch --set-upstream-to master does-not-exist 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--set-upstream-to fails on a missing src branch' '
- test_must_fail git branch --set-upstream-to does-not-exist master
+ test_must_fail git branch --set-upstream-to does-not-exist master 2>err &&
+ test_i18ngrep "the requested upstream branch '"'"'does-not-exist'"'"' does not exist" err
'
test_expect_success '--set-upstream-to fails on a non-ref' '
- test_must_fail git branch --set-upstream-to HEAD^{}
+ echo "fatal: Cannot setup tracking information; starting point '"'"'HEAD^{}'"'"' is not a branch." >expect &&
+ test_must_fail git branch --set-upstream-to HEAD^{} 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--set-upstream-to fails on locked config' '
test_when_finished "rm -f .git/config.lock" &&
>.git/config.lock &&
git branch locked &&
- test_must_fail git branch --set-upstream-to locked
+ test_must_fail git branch --set-upstream-to locked 2>err &&
+ test_i18ngrep "could not lock config file .git/config: File exists" err
'
test_expect_success 'use --set-upstream-to modify HEAD' '
@@ -881,14 +891,17 @@ test_expect_success 'use --set-upstream-to modify a particular branch' '
'
test_expect_success '--unset-upstream should fail if given a non-existent branch' '
- test_must_fail git branch --unset-upstream i-dont-exist
+ echo "fatal: Branch '"'"'i-dont-exist'"'"' has no upstream information" >expect &&
+ test_must_fail git branch --unset-upstream i-dont-exist 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--unset-upstream should fail if config is locked' '
test_when_finished "rm -f .git/config.lock" &&
git branch --set-upstream-to locked &&
>.git/config.lock &&
- test_must_fail git branch --unset-upstream
+ test_must_fail git branch --unset-upstream 2>err &&
+ test_i18ngrep "could not lock config file .git/config: File exists" err
'
test_expect_success 'test --unset-upstream on HEAD' '
@@ -900,17 +913,23 @@ test_expect_success 'test --unset-upstream on HEAD' '
test_must_fail git config branch.master.remote &&
test_must_fail git config branch.master.merge &&
# fail for a branch without upstream set
- test_must_fail git branch --unset-upstream
+ echo "fatal: Branch '"'"'master'"'"' has no upstream information" >expect &&
+ test_must_fail git branch --unset-upstream 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--unset-upstream should fail on multiple branches' '
- test_must_fail git branch --unset-upstream a b c
+ echo "fatal: too many arguments to unset upstream" >expect &&
+ test_must_fail git branch --unset-upstream a b c 2>err &&
+ test_i18ncmp expect err
'
test_expect_success '--unset-upstream should fail on detached HEAD' '
git checkout HEAD^{} &&
- test_must_fail git branch --unset-upstream &&
- git checkout -
+ test_when_finished git checkout - &&
+ echo "fatal: could not unset upstream of HEAD when it does not point to any branch." >expect &&
+ test_must_fail git branch --unset-upstream 2>err &&
+ test_i18ncmp expect err
'
test_expect_success 'test --unset-upstream on a particular branch' '
@@ -922,17 +941,17 @@ test_expect_success 'test --unset-upstream on a particular branch' '
'
test_expect_success 'disabled option --set-upstream fails' '
- test_must_fail git branch --set-upstream origin/master
+ test_must_fail git branch --set-upstream origin/master
'
test_expect_success '--set-upstream-to notices an error to set branch as own upstream' '
git branch --set-upstream-to refs/heads/my13 my13 2>actual &&
- cat >expected <<-\EOF &&
+ cat >expect <<-\EOF &&
warning: Not setting branch my13 as its own upstream.
EOF
test_expect_code 1 git config branch.my13.remote &&
test_expect_code 1 git config branch.my13.merge &&
- test_i18ncmp expected actual
+ test_i18ncmp expect actual
'
# Keep this test last, as it changes the current branch
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
index a1bc3e2001..b454f400eb 100755
--- a/t/t3430-rebase-merges.sh
+++ b/t/t3430-rebase-merges.sh
@@ -420,7 +420,7 @@ test_expect_success 'with --autosquash and --exec' '
git commit --fixup B B.t &&
write_script show.sh <<-\EOF &&
subject="$(git show -s --format=%s HEAD)"
- content="$(git diff HEAD^! | tail -n 1)"
+ content="$(git diff HEAD^ HEAD | tail -n 1)"
echo "$subject: $content"
EOF
test_tick &&
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index 575e079cc2..958c2da56e 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -81,16 +81,16 @@ test_expect_success 'format-patch --ignore-if-in-upstream handles tags' '
'
test_expect_success "format-patch doesn't consider merge commits" '
- git checkout -b slave master &&
+ git checkout -b feature master &&
echo "Another line" >>file &&
test_tick &&
- git commit -am "Slave change #1" &&
+ git commit -am "Feature branch change #1" &&
echo "Yet another line" >>file &&
test_tick &&
- git commit -am "Slave change #2" &&
+ git commit -am "Feature branch change #2" &&
git checkout -b merger master &&
test_tick &&
- git merge --no-ff slave &&
+ git merge --no-ff feature &&
git format-patch -3 --stdout >patch &&
grep "^From " patch >from &&
test_line_count = 3 from
diff --git a/t/t4068-diff-symmetric.sh b/t/t4068-diff-symmetric.sh
new file mode 100755
index 0000000000..31d17a5af0
--- /dev/null
+++ b/t/t4068-diff-symmetric.sh
@@ -0,0 +1,91 @@
+#!/bin/sh
+
+test_description='behavior of diff with symmetric-diff setups'
+
+. ./test-lib.sh
+
+# build these situations:
+# - normal merge with one merge base (br1...br2);
+# - criss-cross merge, i.e. two merge bases (br1...master);
+# - disjoint subgraph (orphan branch, br3...master).
+#
+# B---E <-- master
+# / \ /
+# A X
+# \ / \
+# C---D--G <-- br1
+# \ /
+# ---F <-- br2
+#
+# H <-- br3
+#
+# We put files into a few commits so that we can verify the
+# output as well.
+
+test_expect_success setup '
+ git commit --allow-empty -m A &&
+ echo b >b &&
+ git add b &&
+ git commit -m B &&
+ git checkout -b br1 HEAD^ &&
+ echo c >c &&
+ git add c &&
+ git commit -m C &&
+ git tag commit-C &&
+ git merge -m D master &&
+ git tag commit-D &&
+ git checkout master &&
+ git merge -m E commit-C &&
+ git checkout -b br2 commit-C &&
+ echo f >f &&
+ git add f &&
+ git commit -m F &&
+ git checkout br1 &&
+ git merge -m G br2 &&
+ git checkout --orphan br3 &&
+ git commit -m H
+'
+
+test_expect_success 'diff with one merge base' '
+ git diff commit-D...br1 >tmp &&
+ tail -n 1 tmp >actual &&
+ echo +f >expect &&
+ test_cmp expect actual
+'
+
+# The output (in tmp) can have +b or +c depending
+# on which merge base (commit B or C) is picked.
+# It should have one of those two, which comes out
+# to seven lines.
+test_expect_success 'diff with two merge bases' '
+ git diff br1...master >tmp 2>err &&
+ test_line_count = 7 tmp &&
+ test_line_count = 1 err
+'
+
+test_expect_success 'diff with no merge bases' '
+ test_must_fail git diff br2...br3 >tmp 2>err &&
+ test_i18ngrep "fatal: br2...br3: no merge base" err
+'
+
+test_expect_success 'diff with too many symmetric differences' '
+ test_must_fail git diff br1...master br2...br3 >tmp 2>err &&
+ test_i18ngrep "usage" err
+'
+
+test_expect_success 'diff with symmetric difference and extraneous arg' '
+ test_must_fail git diff master br1...master >tmp 2>err &&
+ test_i18ngrep "usage" err
+'
+
+test_expect_success 'diff with two ranges' '
+ test_must_fail git diff master br1..master br2..br3 >tmp 2>err &&
+ test_i18ngrep "usage" err
+'
+
+test_expect_success 'diff with ranges and extra arg' '
+ test_must_fail git diff master br1..master commit-D >tmp 2>err &&
+ test_i18ngrep "usage" err
+'
+
+test_done
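Aside: git diff A...B (a symmetric-difference range) diffs from a merge base of A and B to B, and the new script checks the interesting corner cases. Roughly, against the history built above:

git diff commit-D...br1   # about the same as: git diff $(git merge-base commit-D br1) br1
git diff br1...master     # two merge bases: one is picked and a warning is printed
git diff br2...br3        # no common history: "fatal: br2...br3: no merge base"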
diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh
index 410a09b0dd..746cdb626e 100755
--- a/t/t5300-pack-object.sh
+++ b/t/t5300-pack-object.sh
@@ -12,7 +12,8 @@ TRASH=$(pwd)
test_expect_success \
'setup' \
- 'rm -f .git/index* &&
+ 'test_oid_init &&
+ rm -f .git/index* &&
perl -e "print \"a\" x 4096;" > a &&
perl -e "print \"b\" x 4096;" > b &&
perl -e "print \"c\" x 4096;" > c &&
@@ -412,18 +413,18 @@ test_expect_success 'set up pack for non-repo tests' '
'
test_expect_success 'index-pack --stdin complains of non-repo' '
- nongit test_must_fail git index-pack --stdin <foo.pack &&
+ nongit test_must_fail git index-pack --object-format=$(test_oid algo) --stdin <foo.pack &&
test_path_is_missing non-repo/.git
'
test_expect_success 'index-pack <pack> works in non-repo' '
- nongit git index-pack ../foo.pack &&
+ nongit git index-pack --object-format=$(test_oid algo) ../foo.pack &&
test_path_is_file foo.idx
'
test_expect_success 'index-pack --strict <pack> works in non-repo' '
rm -f foo.idx &&
- nongit git index-pack --strict ../foo.pack &&
+ nongit git index-pack --strict --object-format=$(test_oid algo) ../foo.pack &&
test_path_is_file foo.idx
'
diff --git a/t/t5302-pack-index.sh b/t/t5302-pack-index.sh
index ad07f2f7fc..8981c9b90e 100755
--- a/t/t5302-pack-index.sh
+++ b/t/t5302-pack-index.sh
@@ -7,65 +7,65 @@ test_description='pack index with 64-bit offsets and object CRC'
. ./test-lib.sh
test_expect_success 'setup' '
- test_oid_init &&
- rawsz=$(test_oid rawsz) &&
- rm -rf .git &&
- git init &&
- git config pack.threads 1 &&
- i=1 &&
- while test $i -le 100
- do
- iii=$(printf '%03i' $i)
- test-tool genrandom "bar" 200 > wide_delta_$iii &&
- test-tool genrandom "baz $iii" 50 >> wide_delta_$iii &&
- test-tool genrandom "foo"$i 100 > deep_delta_$iii &&
- test-tool genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
- test-tool genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
- echo $iii >file_$iii &&
- test-tool genrandom "$iii" 8192 >>file_$iii &&
- git update-index --add file_$iii deep_delta_$iii wide_delta_$iii &&
- i=$(expr $i + 1) || return 1
- done &&
- { echo 101 && test-tool genrandom 100 8192; } >file_101 &&
- git update-index --add file_101 &&
- tree=$(git write-tree) &&
- commit=$(git commit-tree $tree </dev/null) && {
- echo $tree &&
- git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/"
- } >obj-list &&
- git update-ref HEAD $commit
+ test_oid_init &&
+ rawsz=$(test_oid rawsz) &&
+ rm -rf .git &&
+ git init &&
+ git config pack.threads 1 &&
+ i=1 &&
+ while test $i -le 100
+ do
+ iii=$(printf '%03i' $i)
+ test-tool genrandom "bar" 200 > wide_delta_$iii &&
+ test-tool genrandom "baz $iii" 50 >> wide_delta_$iii &&
+ test-tool genrandom "foo"$i 100 > deep_delta_$iii &&
+ test-tool genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
+ test-tool genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
+ echo $iii >file_$iii &&
+ test-tool genrandom "$iii" 8192 >>file_$iii &&
+ git update-index --add file_$iii deep_delta_$iii wide_delta_$iii &&
+ i=$(expr $i + 1) || return 1
+ done &&
+ { echo 101 && test-tool genrandom 100 8192; } >file_101 &&
+ git update-index --add file_101 &&
+ tree=$(git write-tree) &&
+ commit=$(git commit-tree $tree </dev/null) && {
+ echo $tree &&
+ git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/"
+ } >obj-list &&
+ git update-ref HEAD $commit
'
-test_expect_success \
- 'pack-objects with index version 1' \
- 'pack1=$(git pack-objects --index-version=1 test-1 <obj-list) &&
- git verify-pack -v "test-1-${pack1}.pack"'
+test_expect_success 'pack-objects with index version 1' '
+ pack1=$(git pack-objects --index-version=1 test-1 <obj-list) &&
+ git verify-pack -v "test-1-${pack1}.pack"
+'
-test_expect_success \
- 'pack-objects with index version 2' \
- 'pack2=$(git pack-objects --index-version=2 test-2 <obj-list) &&
- git verify-pack -v "test-2-${pack2}.pack"'
+test_expect_success 'pack-objects with index version 2' '
+ pack2=$(git pack-objects --index-version=2 test-2 <obj-list) &&
+ git verify-pack -v "test-2-${pack2}.pack"
+'
-test_expect_success \
- 'both packs should be identical' \
- 'cmp "test-1-${pack1}.pack" "test-2-${pack2}.pack"'
+test_expect_success 'both packs should be identical' '
+ cmp "test-1-${pack1}.pack" "test-2-${pack2}.pack"
+'
-test_expect_success \
- 'index v1 and index v2 should be different' \
- '! cmp "test-1-${pack1}.idx" "test-2-${pack2}.idx"'
+test_expect_success 'index v1 and index v2 should be different' '
+ ! cmp "test-1-${pack1}.idx" "test-2-${pack2}.idx"
+'
-test_expect_success \
- 'index-pack with index version 1' \
- 'git index-pack --index-version=1 -o 1.idx "test-1-${pack1}.pack"'
+test_expect_success 'index-pack with index version 1' '
+ git index-pack --index-version=1 -o 1.idx "test-1-${pack1}.pack"
+'
-test_expect_success \
- 'index-pack with index version 2' \
- 'git index-pack --index-version=2 -o 2.idx "test-1-${pack1}.pack"'
+test_expect_success 'index-pack with index version 2' '
+ git index-pack --index-version=2 -o 2.idx "test-1-${pack1}.pack"
+'
-test_expect_success \
- 'index-pack results should match pack-objects ones' \
- 'cmp "test-1-${pack1}.idx" "1.idx" &&
- cmp "test-2-${pack2}.idx" "2.idx"'
+test_expect_success 'index-pack results should match pack-objects ones' '
+ cmp "test-1-${pack1}.idx" "1.idx" &&
+ cmp "test-2-${pack2}.idx" "2.idx"
+'
test_expect_success 'index-pack --verify on index version 1' '
git index-pack --verify "test-1-${pack1}.pack"
@@ -75,13 +75,13 @@ test_expect_success 'index-pack --verify on index version 2' '
git index-pack --verify "test-2-${pack2}.pack"
'
-test_expect_success \
- 'pack-objects --index-version=2, is not accepted' \
- 'test_must_fail git pack-objects --index-version=2, test-3 <obj-list'
+test_expect_success 'pack-objects --index-version=2, is not accepted' '
+ test_must_fail git pack-objects --index-version=2, test-3 <obj-list
+'
-test_expect_success \
- 'index v2: force some 64-bit offsets with pack-objects' \
- 'pack3=$(git pack-objects --index-version=2,0x40000 test-3 <obj-list)'
+test_expect_success 'index v2: force some 64-bit offsets with pack-objects' '
+ pack3=$(git pack-objects --index-version=2,0x40000 test-3 <obj-list)
+'
if msg=$(git verify-pack -v "test-3-${pack3}.pack" 2>&1) ||
! (echo "$msg" | grep "pack too large .* off_t")
@@ -91,21 +91,21 @@ else
say "# skipping tests concerning 64-bit offsets"
fi
-test_expect_success OFF64_T \
- 'index v2: verify a pack with some 64-bit offsets' \
- 'git verify-pack -v "test-3-${pack3}.pack"'
+test_expect_success OFF64_T 'index v2: verify a pack with some 64-bit offsets' '
+ git verify-pack -v "test-3-${pack3}.pack"
+'
-test_expect_success OFF64_T \
- '64-bit offsets: should be different from previous index v2 results' \
- '! cmp "test-2-${pack2}.idx" "test-3-${pack3}.idx"'
+test_expect_success OFF64_T '64-bit offsets: should be different from previous index v2 results' '
+ ! cmp "test-2-${pack2}.idx" "test-3-${pack3}.idx"
+'
-test_expect_success OFF64_T \
- 'index v2: force some 64-bit offsets with index-pack' \
- 'git index-pack --index-version=2,0x40000 -o 3.idx "test-1-${pack1}.pack"'
+test_expect_success OFF64_T 'index v2: force some 64-bit offsets with index-pack' '
+ git index-pack --index-version=2,0x40000 -o 3.idx "test-1-${pack1}.pack"
+'
-test_expect_success OFF64_T \
- '64-bit offsets: index-pack result should match pack-objects one' \
- 'cmp "test-3-${pack3}.idx" "3.idx"'
+test_expect_success OFF64_T '64-bit offsets: index-pack result should match pack-objects one' '
+ cmp "test-3-${pack3}.idx" "3.idx"
+'
test_expect_success OFF64_T 'index-pack --verify on 64-bit offset v2 (cheat)' '
# This cheats by knowing which lower offset should still be encoded
@@ -120,135 +120,143 @@ test_expect_success OFF64_T 'index-pack --verify on 64-bit offset v2' '
# returns the object number for given object in given pack index
index_obj_nr()
{
- idx_file=$1
- object_sha1=$2
- nr=0
- git show-index < $idx_file |
- while read offs sha1 extra
- do
- nr=$(($nr + 1))
- test "$sha1" = "$object_sha1" || continue
- echo "$(($nr - 1))"
- break
- done
+ idx_file=$1
+ object_sha1=$2
+ nr=0
+ git show-index < $idx_file |
+ while read offs sha1 extra
+ do
+ nr=$(($nr + 1))
+ test "$sha1" = "$object_sha1" || continue
+ echo "$(($nr - 1))"
+ break
+ done
}
# returns the pack offset for given object as found in given pack index
index_obj_offset()
{
- idx_file=$1
- object_sha1=$2
- git show-index < $idx_file | grep $object_sha1 |
- ( read offs extra && echo "$offs" )
+ idx_file=$1
+ object_sha1=$2
+ git show-index < $idx_file | grep $object_sha1 |
+ ( read offs extra && echo "$offs" )
}
-test_expect_success \
- '[index v1] 1) stream pack to repository' \
- 'git index-pack --index-version=1 --stdin < "test-1-${pack1}.pack" &&
- git prune-packed &&
- git count-objects | ( read nr rest && test "$nr" -eq 1 ) &&
- cmp "test-1-${pack1}.pack" ".git/objects/pack/pack-${pack1}.pack" &&
- cmp "test-1-${pack1}.idx" ".git/objects/pack/pack-${pack1}.idx"'
+test_expect_success '[index v1] 1) stream pack to repository' '
+ git index-pack --index-version=1 --stdin < "test-1-${pack1}.pack" &&
+ git prune-packed &&
+ git count-objects | ( read nr rest && test "$nr" -eq 1 ) &&
+ cmp "test-1-${pack1}.pack" ".git/objects/pack/pack-${pack1}.pack" &&
+ cmp "test-1-${pack1}.idx" ".git/objects/pack/pack-${pack1}.idx"
+'
test_expect_success \
- '[index v1] 2) create a stealth corruption in a delta base reference' \
- '# This test assumes file_101 is a delta smaller than 16 bytes.
- # It should be against file_100 but we substitute its base for file_099
- sha1_101=$(git hash-object file_101) &&
- sha1_099=$(git hash-object file_099) &&
- offs_101=$(index_obj_offset 1.idx $sha1_101) &&
- nr_099=$(index_obj_nr 1.idx $sha1_099) &&
- chmod +w ".git/objects/pack/pack-${pack1}.pack" &&
- recordsz=$((rawsz + 4)) &&
- dd of=".git/objects/pack/pack-${pack1}.pack" seek=$(($offs_101 + 1)) \
- if=".git/objects/pack/pack-${pack1}.idx" \
- skip=$((4 + 256 * 4 + $nr_099 * recordsz)) \
- bs=1 count=$rawsz conv=notrunc &&
- git cat-file blob $sha1_101 > file_101_foo1'
+ '[index v1] 2) create a stealth corruption in a delta base reference' '
+ # This test assumes file_101 is a delta smaller than 16 bytes.
+ # It should be against file_100 but we substitute its base for file_099
+ sha1_101=$(git hash-object file_101) &&
+ sha1_099=$(git hash-object file_099) &&
+ offs_101=$(index_obj_offset 1.idx $sha1_101) &&
+ nr_099=$(index_obj_nr 1.idx $sha1_099) &&
+ chmod +w ".git/objects/pack/pack-${pack1}.pack" &&
+ recordsz=$((rawsz + 4)) &&
+ dd of=".git/objects/pack/pack-${pack1}.pack" seek=$(($offs_101 + 1)) \
+ if=".git/objects/pack/pack-${pack1}.idx" \
+ skip=$((4 + 256 * 4 + $nr_099 * recordsz)) \
+ bs=1 count=$rawsz conv=notrunc &&
+ git cat-file blob $sha1_101 > file_101_foo1
+'
test_expect_success \
- '[index v1] 3) corrupted delta happily returned wrong data' \
- 'test -f file_101_foo1 && ! cmp file_101 file_101_foo1'
+ '[index v1] 3) corrupted delta happily returned wrong data' '
+ test -f file_101_foo1 && ! cmp file_101 file_101_foo1
+'
test_expect_success \
- '[index v1] 4) confirm that the pack is actually corrupted' \
- 'test_must_fail git fsck --full $commit'
+ '[index v1] 4) confirm that the pack is actually corrupted' '
+ test_must_fail git fsck --full $commit
+'
test_expect_success \
- '[index v1] 5) pack-objects happily reuses corrupted data' \
- 'pack4=$(git pack-objects test-4 <obj-list) &&
- test -f "test-4-${pack4}.pack"'
+ '[index v1] 5) pack-objects happily reuses corrupted data' '
+ pack4=$(git pack-objects test-4 <obj-list) &&
+ test -f "test-4-${pack4}.pack"
+'
-test_expect_success \
- '[index v1] 6) newly created pack is BAD !' \
- 'test_must_fail git verify-pack -v "test-4-${pack4}.pack"'
+test_expect_success '[index v1] 6) newly created pack is BAD !' '
+ test_must_fail git verify-pack -v "test-4-${pack4}.pack"
+'
-test_expect_success \
- '[index v2] 1) stream pack to repository' \
- 'rm -f .git/objects/pack/* &&
- git index-pack --index-version=2 --stdin < "test-1-${pack1}.pack" &&
- git prune-packed &&
- git count-objects | ( read nr rest && test "$nr" -eq 1 ) &&
- cmp "test-1-${pack1}.pack" ".git/objects/pack/pack-${pack1}.pack" &&
- cmp "test-2-${pack1}.idx" ".git/objects/pack/pack-${pack1}.idx"'
+test_expect_success '[index v2] 1) stream pack to repository' '
+ rm -f .git/objects/pack/* &&
+ git index-pack --index-version=2 --stdin < "test-1-${pack1}.pack" &&
+ git prune-packed &&
+ git count-objects | ( read nr rest && test "$nr" -eq 1 ) &&
+ cmp "test-1-${pack1}.pack" ".git/objects/pack/pack-${pack1}.pack" &&
+ cmp "test-2-${pack1}.idx" ".git/objects/pack/pack-${pack1}.idx"
+'
test_expect_success \
- '[index v2] 2) create a stealth corruption in a delta base reference' \
- '# This test assumes file_101 is a delta smaller than 16 bytes.
- # It should be against file_100 but we substitute its base for file_099
- sha1_101=$(git hash-object file_101) &&
- sha1_099=$(git hash-object file_099) &&
- offs_101=$(index_obj_offset 1.idx $sha1_101) &&
- nr_099=$(index_obj_nr 1.idx $sha1_099) &&
- chmod +w ".git/objects/pack/pack-${pack1}.pack" &&
- dd of=".git/objects/pack/pack-${pack1}.pack" seek=$(($offs_101 + 1)) \
- if=".git/objects/pack/pack-${pack1}.idx" \
- skip=$((8 + 256 * 4 + $nr_099 * rawsz)) \
- bs=1 count=$rawsz conv=notrunc &&
- git cat-file blob $sha1_101 > file_101_foo2'
+ '[index v2] 2) create a stealth corruption in a delta base reference' '
+ # This test assumes file_101 is a delta smaller than 16 bytes.
+ # It should be against file_100 but we substitute its base for file_099
+ sha1_101=$(git hash-object file_101) &&
+ sha1_099=$(git hash-object file_099) &&
+ offs_101=$(index_obj_offset 1.idx $sha1_101) &&
+ nr_099=$(index_obj_nr 1.idx $sha1_099) &&
+ chmod +w ".git/objects/pack/pack-${pack1}.pack" &&
+ dd of=".git/objects/pack/pack-${pack1}.pack" seek=$(($offs_101 + 1)) \
+ if=".git/objects/pack/pack-${pack1}.idx" \
+ skip=$((8 + 256 * 4 + $nr_099 * rawsz)) \
+ bs=1 count=$rawsz conv=notrunc &&
+ git cat-file blob $sha1_101 > file_101_foo2
+'
test_expect_success \
- '[index v2] 3) corrupted delta happily returned wrong data' \
- 'test -f file_101_foo2 && ! cmp file_101 file_101_foo2'
+ '[index v2] 3) corrupted delta happily returned wrong data' '
+ test -f file_101_foo2 && ! cmp file_101 file_101_foo2
+'
test_expect_success \
- '[index v2] 4) confirm that the pack is actually corrupted' \
- 'test_must_fail git fsck --full $commit'
+ '[index v2] 4) confirm that the pack is actually corrupted' '
+ test_must_fail git fsck --full $commit
+'
test_expect_success \
- '[index v2] 5) pack-objects refuses to reuse corrupted data' \
- 'test_must_fail git pack-objects test-5 <obj-list &&
- test_must_fail git pack-objects --no-reuse-object test-6 <obj-list'
+ '[index v2] 5) pack-objects refuses to reuse corrupted data' '
+ test_must_fail git pack-objects test-5 <obj-list &&
+ test_must_fail git pack-objects --no-reuse-object test-6 <obj-list
+'
test_expect_success \
- '[index v2] 6) verify-pack detects CRC mismatch' \
- 'rm -f .git/objects/pack/* &&
- git index-pack --index-version=2 --stdin < "test-1-${pack1}.pack" &&
- git verify-pack ".git/objects/pack/pack-${pack1}.pack" &&
- obj=$(git hash-object file_001) &&
- nr=$(index_obj_nr ".git/objects/pack/pack-${pack1}.idx" $obj) &&
- chmod +w ".git/objects/pack/pack-${pack1}.idx" &&
- printf xxxx | dd of=".git/objects/pack/pack-${pack1}.idx" conv=notrunc \
- bs=1 count=4 seek=$((8 + 256 * 4 + $(wc -l <obj-list) * rawsz + $nr * 4)) &&
- ( while read obj
- do git cat-file -p $obj >/dev/null || exit 1
- done <obj-list ) &&
- test_must_fail git verify-pack ".git/objects/pack/pack-${pack1}.pack"
+ '[index v2] 6) verify-pack detects CRC mismatch' '
+ rm -f .git/objects/pack/* &&
+ git index-pack --index-version=2 --stdin < "test-1-${pack1}.pack" &&
+ git verify-pack ".git/objects/pack/pack-${pack1}.pack" &&
+ obj=$(git hash-object file_001) &&
+ nr=$(index_obj_nr ".git/objects/pack/pack-${pack1}.idx" $obj) &&
+ chmod +w ".git/objects/pack/pack-${pack1}.idx" &&
+ printf xxxx | dd of=".git/objects/pack/pack-${pack1}.idx" conv=notrunc \
+ bs=1 count=4 seek=$((8 + 256 * 4 + $(wc -l <obj-list) * rawsz + $nr * 4)) &&
+ ( while read obj
+ do git cat-file -p $obj >/dev/null || exit 1
+ done <obj-list ) &&
+ test_must_fail git verify-pack ".git/objects/pack/pack-${pack1}.pack"
'
test_expect_success 'running index-pack in the object store' '
- rm -f .git/objects/pack/* &&
- cp test-1-${pack1}.pack .git/objects/pack/pack-${pack1}.pack &&
- (
- cd .git/objects/pack &&
- git index-pack pack-${pack1}.pack
- ) &&
- test -f .git/objects/pack/pack-${pack1}.idx
+ rm -f .git/objects/pack/* &&
+ cp test-1-${pack1}.pack .git/objects/pack/pack-${pack1}.pack &&
+ (
+ cd .git/objects/pack &&
+ git index-pack pack-${pack1}.pack
+ ) &&
+ test -f .git/objects/pack/pack-${pack1}.idx
'
test_expect_success 'index-pack --strict warns upon missing tagger in tag' '
- sha=$(git rev-parse HEAD) &&
- cat >wrong-tag <<EOF &&
+ sha=$(git rev-parse HEAD) &&
+ cat >wrong-tag <<EOF &&
object $sha
type commit
tag guten tag
@@ -256,18 +264,18 @@ tag guten tag
This is an invalid tag.
EOF
- tag=$(git hash-object -t tag -w --stdin <wrong-tag) &&
- pack1=$(echo $tag $sha | git pack-objects tag-test) &&
- echo remove tag object &&
- thirtyeight=${tag#??} &&
- rm -f .git/objects/${tag%$thirtyeight}/$thirtyeight &&
- git index-pack --strict tag-test-${pack1}.pack 2>err &&
- grep "^warning:.* expected .tagger. line" err
+ tag=$(git hash-object -t tag -w --stdin <wrong-tag) &&
+ pack1=$(echo $tag $sha | git pack-objects tag-test) &&
+ echo remove tag object &&
+ thirtyeight=${tag#??} &&
+ rm -f .git/objects/${tag%$thirtyeight}/$thirtyeight &&
+ git index-pack --strict tag-test-${pack1}.pack 2>err &&
+ grep "^warning:.* expected .tagger. line" err
'
test_expect_success 'index-pack --fsck-objects also warns upon missing tagger in tag' '
- git index-pack --fsck-objects tag-test-${pack1}.pack 2>err &&
- grep "^warning:.* expected .tagger. line" err
+ git index-pack --fsck-objects tag-test-${pack1}.pack 2>err &&
+ grep "^warning:.* expected .tagger. line" err
'
test_done
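Aside: the index_obj_nr and index_obj_offset helpers reindented above both parse git show-index, which reads a pack index on stdin and prints one line per object, roughly "<offset> <object name>" plus a "(<crc32>)" field for version-2 indexes. For example (pack name illustrative):

git show-index <.git/objects/pack/pack-1234abcd.idx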
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
index a79c624875..26f332d6a3 100755
--- a/t/t5318-commit-graph.sh
+++ b/t/t5318-commit-graph.sh
@@ -147,7 +147,7 @@ test_expect_success 'Add more commits' '
test_expect_success 'commit-graph write progress off for redirected stderr' '
cd "$TRASH_DIRECTORY/full" &&
git commit-graph write 2>err &&
- test_line_count = 0 err
+ test_must_be_empty err
'
test_expect_success 'commit-graph write force progress on for stderr' '
@@ -159,13 +159,34 @@ test_expect_success 'commit-graph write force progress on for stderr' '
test_expect_success 'commit-graph write with the --no-progress option' '
cd "$TRASH_DIRECTORY/full" &&
git commit-graph write --no-progress 2>err &&
- test_line_count = 0 err
+ test_must_be_empty err
+'
+
+test_expect_success 'commit-graph write --stdin-commits progress off for redirected stderr' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git rev-parse commits/5 >in &&
+ git commit-graph write --stdin-commits <in 2>err &&
+ test_must_be_empty err
+'
+
+test_expect_success 'commit-graph write --stdin-commits force progress on for stderr' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git rev-parse commits/5 >in &&
+ GIT_PROGRESS_DELAY=0 git commit-graph write --stdin-commits --progress <in 2>err &&
+ test_i18ngrep "Collecting commits from input" err
+'
+
+test_expect_success 'commit-graph write --stdin-commits with the --no-progress option' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git rev-parse commits/5 >in &&
+ git commit-graph write --stdin-commits --no-progress <in 2>err &&
+ test_must_be_empty err
'
test_expect_success 'commit-graph verify progress off for redirected stderr' '
cd "$TRASH_DIRECTORY/full" &&
git commit-graph verify 2>err &&
- test_line_count = 0 err
+ test_must_be_empty err
'
test_expect_success 'commit-graph verify force progress on for stderr' '
@@ -177,7 +198,7 @@ test_expect_success 'commit-graph verify force progress on for stderr' '
test_expect_success 'commit-graph verify with the --no-progress option' '
cd "$TRASH_DIRECTORY/full" &&
git commit-graph verify --no-progress 2>err &&
- test_line_count = 0 err
+ test_must_be_empty err
'
# Current graph structure:
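Aside: the three new tests cover progress output for git commit-graph write --stdin-commits, which takes commit ids on standard input. A plain usage sketch:

git rev-parse HEAD >in
git commit-graph write --stdin-commits --progress <in      # force progress on
git commit-graph write --stdin-commits --no-progress <in   # keep stderr empty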
diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh
index 8c54e34ef1..3557374312 100755
--- a/t/t5500-fetch-pack.sh
+++ b/t/t5500-fetch-pack.sh
@@ -871,9 +871,10 @@ test_expect_success 'shallow since with commit graph and already-seen commit' '
GIT_PROTOCOL=version=2 git upload-pack . <<-EOF >/dev/null
0012command=fetch
+ $(echo "object-format=$(test_oid algo)" | packetize)
00010013deepen-since 1
- 0032want $(git rev-parse other)
- 0032have $(git rev-parse master)
+ $(echo "want $(git rev-parse other)" | packetize)
+ $(echo "have $(git rev-parse master)" | packetize)
0000
EOF
)
@@ -999,7 +1000,6 @@ fetch_filter_blob_limit_zero () {
test_config -C "$SERVER" uploadpack.allowfilter 1 &&
git clone "$URL" client &&
- test_config -C client extensions.partialclone origin &&
test_commit -C "$SERVER" two &&
diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh
index dda81b7d07..8d62edd98b 100755
--- a/t/t5505-remote.sh
+++ b/t/t5505-remote.sh
@@ -988,7 +988,7 @@ test_expect_success 'remote set-branches' '
+refs/heads/maint:refs/remotes/scratch/maint
+refs/heads/master:refs/remotes/scratch/master
+refs/heads/next:refs/remotes/scratch/next
- +refs/heads/pu:refs/remotes/scratch/pu
+ +refs/heads/seen:refs/remotes/scratch/seen
+refs/heads/t/topic:refs/remotes/scratch/t/topic
EOF
sort <<-\EOF >expect.setup-ffonly &&
@@ -998,7 +998,7 @@ test_expect_success 'remote set-branches' '
sort <<-\EOF >expect.respect-ffonly &&
refs/heads/master:refs/remotes/scratch/master
+refs/heads/next:refs/remotes/scratch/next
- +refs/heads/pu:refs/remotes/scratch/pu
+ +refs/heads/seen:refs/remotes/scratch/seen
EOF
git clone .git/ setbranches &&
@@ -1016,7 +1016,7 @@ test_expect_success 'remote set-branches' '
git config --get-all remote.scratch.fetch >config-result &&
sort <config-result >../actual.replace &&
- git remote set-branches --add scratch pu t/topic &&
+ git remote set-branches --add scratch seen t/topic &&
git config --get-all remote.scratch.fetch >config-result &&
sort <config-result >../actual.add-two &&
@@ -1028,7 +1028,7 @@ test_expect_success 'remote set-branches' '
git config --get-all remote.scratch.fetch >config-result &&
sort <config-result >../actual.setup-ffonly &&
- git remote set-branches --add scratch pu &&
+ git remote set-branches --add scratch seen &&
git config --get-all remote.scratch.fetch >config-result &&
sort <config-result >../actual.respect-ffonly
) &&
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 9c6218f568..36ad20a849 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -747,42 +747,42 @@ test_expect_success 'deletion of a non-existent ref alone does trigger post-rece
'
test_expect_success 'mixed ref updates, deletes, invalid deletes trigger hooks with correct input' '
- mk_test_with_hooks testrepo heads/master heads/next heads/pu &&
+ mk_test_with_hooks testrepo heads/master heads/next heads/seen &&
orgmaster=$(cd testrepo && git show-ref -s --verify refs/heads/master) &&
newmaster=$(git show-ref -s --verify refs/heads/master) &&
orgnext=$(cd testrepo && git show-ref -s --verify refs/heads/next) &&
newnext=$ZERO_OID &&
- orgpu=$(cd testrepo && git show-ref -s --verify refs/heads/pu) &&
- newpu=$(git show-ref -s --verify refs/heads/master) &&
+ orgseen=$(cd testrepo && git show-ref -s --verify refs/heads/seen) &&
+ newseen=$(git show-ref -s --verify refs/heads/master) &&
git push testrepo refs/heads/master:refs/heads/master \
- refs/heads/master:refs/heads/pu :refs/heads/next \
+ refs/heads/master:refs/heads/seen :refs/heads/next \
:refs/heads/nonexistent &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
$orgmaster $newmaster refs/heads/master
$orgnext $newnext refs/heads/next
- $orgpu $newpu refs/heads/pu
+ $orgseen $newseen refs/heads/seen
$ZERO_OID $ZERO_OID refs/heads/nonexistent
EOF
cat >update.expect <<-EOF &&
refs/heads/master $orgmaster $newmaster
refs/heads/next $orgnext $newnext
- refs/heads/pu $orgpu $newpu
+ refs/heads/seen $orgseen $newseen
refs/heads/nonexistent $ZERO_OID $ZERO_OID
EOF
cat >post-receive.expect <<-EOF &&
$orgmaster $newmaster refs/heads/master
$orgnext $newnext refs/heads/next
- $orgpu $newpu refs/heads/pu
+ $orgseen $newseen refs/heads/seen
EOF
cat >post-update.expect <<-EOF &&
refs/heads/master
refs/heads/next
- refs/heads/pu
+ refs/heads/seen
EOF
test_cmp pre-receive.expect pre-receive.actual &&
diff --git a/t/t5540-http-push-webdav.sh b/t/t5540-http-push-webdav.sh
index d476c33509..450321fddb 100755
--- a/t/t5540-http-push-webdav.sh
+++ b/t/t5540-http-push-webdav.sh
@@ -126,6 +126,22 @@ test_expect_success 'create and delete remote branch' '
test_must_fail git show-ref --verify refs/remotes/origin/dev
'
+test_expect_success 'non-force push fails if not up to date' '
+ git init --bare "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo_conflict.git &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo_conflict.git update-server-info &&
+ git clone $HTTPD_URL/dumb/test_repo_conflict.git "$ROOT_PATH"/c1 &&
+ git clone $HTTPD_URL/dumb/test_repo_conflict.git "$ROOT_PATH"/c2 &&
+ test_commit -C "$ROOT_PATH/c1" path1 &&
+ git -C "$ROOT_PATH/c1" push origin HEAD &&
+ git -C "$ROOT_PATH/c2" pull &&
+ test_commit -C "$ROOT_PATH/c1" path2 &&
+ git -C "$ROOT_PATH/c1" push origin HEAD &&
+ test_commit -C "$ROOT_PATH/c2" path3 &&
+ git -C "$ROOT_PATH/c1" log --graph --all &&
+ git -C "$ROOT_PATH/c2" log --graph --all &&
+ test_must_fail git -C "$ROOT_PATH/c2" push origin HEAD
+'
+
test_expect_success 'MKCOL sends directory names with trailing slashes' '
! grep "\"MKCOL.*[^/] HTTP/[^ ]*\"" < "$HTTPD_ROOT_PATH"/access.log
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index afc680d5e3..463d0f12e5 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -464,6 +464,21 @@ test_expect_success 'push status output scrubs password' '
grep "^To $HTTPD_URL/smart/test_repo.git" status
'
+test_expect_success 'clone/fetch scrubs password from reflogs' '
+ cd "$ROOT_PATH" &&
+ git clone "$HTTPD_URL_USER_PASS/smart/test_repo.git" \
+ reflog-test &&
+ cd reflog-test &&
+ test_commit prepare-for-force-fetch &&
+ git switch -c away &&
+ git fetch "$HTTPD_URL_USER_PASS/smart/test_repo.git" \
+ +master:master &&
+ # should have been scrubbed down to vanilla URL
+ git log -g master >reflog &&
+ grep "$HTTPD_URL" reflog &&
+ ! grep "$HTTPD_URL_USER_PASS" reflog
+'
+
test_expect_success 'colorize errors/hints' '
cd "$ROOT_PATH"/test_repo_clone &&
test_must_fail git -c color.transport=always -c color.advice=always \
diff --git a/t/t5550-http-fetch-dumb.sh b/t/t5550-http-fetch-dumb.sh
index 50485300eb..483578b2d7 100755
--- a/t/t5550-http-fetch-dumb.sh
+++ b/t/t5550-http-fetch-dumb.sh
@@ -50,6 +50,24 @@ test_expect_success 'create password-protected repository' '
"$HTTPD_DOCUMENT_ROOT_PATH/auth/dumb/repo.git"
'
+test_expect_success 'create empty remote repository' '
+ git init --bare "$HTTPD_DOCUMENT_ROOT_PATH/empty.git" &&
+ (cd "$HTTPD_DOCUMENT_ROOT_PATH/empty.git" &&
+ mkdir -p hooks &&
+ write_script "hooks/post-update" <<-\EOF &&
+ exec git update-server-info
+ EOF
+ hooks/post-update
+ )
+'
+
+test_expect_success 'empty dumb HTTP repository has default hash algorithm' '
+ test_when_finished "rm -fr clone-empty" &&
+ git clone $HTTPD_URL/dumb/empty.git clone-empty &&
+ git -C clone-empty rev-parse --show-object-format >empty-format &&
+ test "$(cat empty-format)" = "$(test_oid algo)"
+'
+
setup_askpass_helper
test_expect_success 'cloning password-protected repository can fail' '
@@ -199,6 +217,28 @@ test_expect_success 'fetch packed objects' '
git clone $HTTPD_URL/dumb/repo_pack.git
'
+test_expect_success 'http-fetch --packfile' '
+ # Arbitrary hash. Use rev-parse so that we get one of the correct
+ # length.
+ ARBITRARY=$(git -C "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git rev-parse HEAD) &&
+
+ git init packfileclient &&
+ p=$(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git && ls objects/pack/pack-*.pack) &&
+ git -C packfileclient http-fetch --packfile=$ARBITRARY "$HTTPD_URL"/dumb/repo_pack.git/$p >out &&
+
+ grep "^keep.[0-9a-f]\{16,\}$" out &&
+ cut -c6- out >packhash &&
+
+ # Ensure that the expected files are generated
+ test -e "packfileclient/.git/objects/pack/pack-$(cat packhash).pack" &&
+ test -e "packfileclient/.git/objects/pack/pack-$(cat packhash).idx" &&
+ test -e "packfileclient/.git/objects/pack/pack-$(cat packhash).keep" &&
+
+ # Ensure that it has the HEAD of repo_pack, at least
+ HASH=$(git -C "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git rev-parse HEAD) &&
+ git -C packfileclient cat-file -e "$HASH"
+'
+
test_expect_success 'fetch notices corrupt pack' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
@@ -214,6 +254,14 @@ test_expect_success 'fetch notices corrupt pack' '
)
'
+test_expect_success 'http-fetch --packfile with corrupt pack' '
+ rm -rf packfileclient &&
+ git init packfileclient &&
+ p=$(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git && ls objects/pack/pack-*.pack) &&
+ test_must_fail git -C packfileclient http-fetch --packfile \
+ "$HTTPD_URL"/dumb/repo_bad1.git/$p
+'
+
test_expect_success 'fetch notices corrupt idx' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index be01cf7bb2..e40e9ed52f 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -209,6 +209,16 @@ test_expect_success 'GIT_CURL_VERBOSE redacts auth details' '
grep "Authorization: Basic <redacted>" trace
'
+test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_REDACT=0' '
+ rm -rf redact-auth trace &&
+ set_askpass user@host pass@host &&
+ GIT_TRACE_REDACT=0 GIT_TRACE_CURL="$(pwd)/trace" \
+ git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth &&
+ expect_askpass both user@host &&
+
+ grep "Authorization: Basic [0-9a-zA-Z+/]" trace
+'
+
test_expect_success 'disable dumb http on server' '
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
config http.getanyfile false
@@ -454,37 +464,37 @@ test_expect_success 'fetch by SHA-1 without tag following' '
--no-tags origin $(cat bar_hash)
'
-test_expect_success 'GIT_REDACT_COOKIES redacts cookies' '
+test_expect_success 'cookies are redacted by default' '
rm -rf clone &&
echo "Set-Cookie: Foo=1" >cookies &&
echo "Set-Cookie: Bar=2" >>cookies &&
- GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Bar,Baz \
+ GIT_TRACE_CURL=true \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
- grep "Cookie:.*Foo=1" err &&
+ grep "Cookie:.*Foo=<redacted>" err &&
grep "Cookie:.*Bar=<redacted>" err &&
+ ! grep "Cookie:.*Foo=1" err &&
! grep "Cookie:.*Bar=2" err
'
-test_expect_success 'GIT_REDACT_COOKIES redacts cookies when GIT_CURL_VERBOSE=1' '
+test_expect_success 'empty values of cookies are also redacted' '
rm -rf clone &&
- echo "Set-Cookie: Foo=1" >cookies &&
- echo "Set-Cookie: Bar=2" >>cookies &&
- GIT_CURL_VERBOSE=1 GIT_REDACT_COOKIES=Bar,Baz \
+ echo "Set-Cookie: Foo=" >cookies &&
+ GIT_TRACE_CURL=true \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
- grep "Cookie:.*Foo=1" err &&
- grep "Cookie:.*Bar=<redacted>" err &&
- ! grep "Cookie:.*Bar=2" err
+ grep "Cookie:.*Foo=<redacted>" err
'
-test_expect_success 'GIT_REDACT_COOKIES handles empty values' '
+test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' '
rm -rf clone &&
- echo "Set-Cookie: Foo=" >cookies &&
- GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Foo \
+ echo "Set-Cookie: Foo=1" >cookies &&
+ echo "Set-Cookie: Bar=2" >>cookies &&
+ GIT_TRACE_REDACT=0 GIT_TRACE_CURL=true \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
- grep "Cookie:.*Foo=<redacted>" err
+ grep "Cookie:.*Foo=1" err &&
+ grep "Cookie:.*Bar=2" err
'
test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
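Aside: these tests pin down the new default: curl traces (GIT_TRACE_CURL, GIT_CURL_VERBOSE) redact Authorization headers and cookie values unless GIT_TRACE_REDACT=0 is set. In the spirit of the tests (URL and clone names illustrative):

GIT_TRACE_CURL="$PWD/trace" git clone --bare "$HTTPD_URL/auth/smart/repo.git" c1
grep "Authorization: Basic <redacted>" trace
GIT_TRACE_REDACT=0 GIT_TRACE_CURL="$PWD/trace-raw" \
	git clone --bare "$HTTPD_URL/auth/smart/repo.git" c2
grep "Authorization: Basic [0-9a-zA-Z+/]" trace-raw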
diff --git a/t/t5562-http-backend-content-length.sh b/t/t5562-http-backend-content-length.sh
index 3f4ac71f83..c6ec625497 100755
--- a/t/t5562-http-backend-content-length.sh
+++ b/t/t5562-http-backend-content-length.sh
@@ -46,6 +46,7 @@ ssize_b100dots() {
}
test_expect_success 'setup' '
+ test_oid_init &&
HTTP_CONTENT_ENCODING="identity" &&
export HTTP_CONTENT_ENCODING &&
git config http.receivepack true &&
@@ -62,8 +63,8 @@ test_expect_success 'setup' '
test_copy_bytes 10 <fetch_body >fetch_body.trunc &&
hash_next=$(git commit-tree -p HEAD -m next HEAD^{tree}) &&
{
- printf "%s %s refs/heads/newbranch\\0report-status\\n" \
- "$ZERO_OID" "$hash_next" | packetize &&
+ printf "%s %s refs/heads/newbranch\\0report-status object-format=%s\\n" \
+ "$ZERO_OID" "$hash_next" "$(test_oid algo)" | packetize &&
printf 0000 &&
echo "$hash_next" | git pack-objects --stdout
} >push_body &&
diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh
index ffb9613885..a1f5fdc9fd 100755
--- a/t/t5701-git-serve.sh
+++ b/t/t5701-git-serve.sh
@@ -5,12 +5,17 @@ test_description='test protocol v2 server commands'
. ./test-lib.sh
test_expect_success 'test capability advertisement' '
+ test_oid_cache <<-EOF &&
+ wrong_algo sha1:sha256
+ wrong_algo sha256:sha1
+ EOF
cat >expect <<-EOF &&
version 2
agent=git/$(git version | cut -d" " -f3)
ls-refs
fetch=shallow
server-option
+ object-format=$(test_oid algo)
0000
EOF
@@ -45,6 +50,7 @@ test_expect_success 'request invalid capability' '
test_expect_success 'request with no command' '
test-tool pkt-line pack >in <<-EOF &&
agent=git/test
+ object-format=$(test_oid algo)
0000
EOF
test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
@@ -54,6 +60,7 @@ test_expect_success 'request with no command' '
test_expect_success 'request invalid command' '
test-tool pkt-line pack >in <<-EOF &&
command=foo
+ object-format=$(test_oid algo)
agent=git/test
0000
EOF
@@ -61,6 +68,17 @@ test_expect_success 'request invalid command' '
test_i18ngrep "invalid command" err
'
+test_expect_success 'wrong object-format' '
+ test-tool pkt-line pack >in <<-EOF &&
+ command=fetch
+ agent=git/test
+ object-format=$(test_oid wrong_algo)
+ 0000
+ EOF
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
+ test_i18ngrep "mismatched object format" err
+'
+
# Test the basics of ls-refs
#
test_expect_success 'setup some refs and tags' '
@@ -74,6 +92,7 @@ test_expect_success 'setup some refs and tags' '
test_expect_success 'basics of ls-refs' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
+ object-format=$(test_oid algo)
0000
EOF
@@ -96,6 +115,7 @@ test_expect_success 'basics of ls-refs' '
test_expect_success 'basic ref-prefixes' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
+ object-format=$(test_oid algo)
0001
ref-prefix refs/heads/master
ref-prefix refs/tags/one
@@ -116,6 +136,7 @@ test_expect_success 'basic ref-prefixes' '
test_expect_success 'refs/heads prefix' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
+ object-format=$(test_oid algo)
0001
ref-prefix refs/heads/
0000
@@ -136,6 +157,7 @@ test_expect_success 'refs/heads prefix' '
test_expect_success 'peel parameter' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
+ object-format=$(test_oid algo)
0001
peel
ref-prefix refs/tags/
@@ -157,6 +179,7 @@ test_expect_success 'peel parameter' '
test_expect_success 'symrefs parameter' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
+ object-format=$(test_oid algo)
0001
symrefs
ref-prefix refs/heads/
@@ -178,6 +201,7 @@ test_expect_success 'symrefs parameter' '
test_expect_success 'sending server-options' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
+ object-format=$(test_oid algo)
server-option=hello
server-option=world
0001
@@ -200,6 +224,7 @@ test_expect_success 'unexpected lines are not allowed in fetch request' '
test-tool pkt-line pack >in <<-EOF &&
command=fetch
+ object-format=$(test_oid algo)
0001
this-is-not-a-command
0000
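Aside: every hand-built protocol v2 request in this script now carries an object-format capability, matching the object-format=<algo> line the server advertises; naming the wrong algorithm is rejected with "mismatched object format". A representative ls-refs request in the same style as the tests:

test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
object-format=$(test_oid algo)
0001
ref-prefix refs/heads/
0000
EOF
test-tool serve-v2 --stateless-rpc <in >out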
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
index 8da65e60de..1b54c35b01 100755
--- a/t/t5702-protocol-v2.sh
+++ b/t/t5702-protocol-v2.sh
@@ -13,6 +13,7 @@ start_git_daemon --export-all --enable=receive-pack
daemon_parent=$GIT_DAEMON_DOCUMENT_ROOT_PATH/parent
test_expect_success 'create repo to be served by git-daemon' '
+ test_oid_init &&
git init "$daemon_parent" &&
test_commit -C "$daemon_parent" one
'
@@ -348,7 +349,6 @@ test_expect_success 'partial fetch' '
rm -rf client "$(pwd)/trace" &&
git init client &&
SERVER="file://$(pwd)/server" &&
- test_config -C client extensions.partialClone "$SERVER" &&
GIT_TRACE_PACKET="$(pwd)/trace" git -C client -c protocol.version=2 \
fetch --filter=blob:none "$SERVER" master:refs/heads/other &&
@@ -394,6 +394,7 @@ test_expect_success 'even with handcrafted request, filter does not work if not
# Custom request that tries to filter even though it is not advertised.
test-tool pkt-line pack >in <<-EOF &&
command=fetch
+ object-format=$(test_oid algo)
0001
want $(git -C server rev-parse master)
filter blob:none
@@ -795,6 +796,94 @@ test_expect_success 'when server does not send "ready", expect FLUSH' '
test_i18ngrep "expected no other sections to be sent after no .ready." err
'
+configure_exclusion () {
+ git -C "$1" hash-object "$2" >objh &&
+ git -C "$1" pack-objects "$HTTPD_DOCUMENT_ROOT_PATH/mypack" <objh >packh &&
+ git -C "$1" config --add \
+ "uploadpack.blobpackfileuri" \
+ "$(cat objh) $(cat packh) $HTTPD_URL/dumb/mypack-$(cat packh).pack" &&
+ cat objh
+}
+
+test_expect_success 'part of packfile response provided as URI' '
+ P="$HTTPD_DOCUMENT_ROOT_PATH/http_parent" &&
+ rm -rf "$P" http_child log &&
+
+ git init "$P" &&
+ git -C "$P" config "uploadpack.allowsidebandall" "true" &&
+
+ echo my-blob >"$P/my-blob" &&
+ git -C "$P" add my-blob &&
+ echo other-blob >"$P/other-blob" &&
+ git -C "$P" add other-blob &&
+ git -C "$P" commit -m x &&
+
+ configure_exclusion "$P" my-blob >h &&
+ configure_exclusion "$P" other-blob >h2 &&
+
+ GIT_TRACE=1 GIT_TRACE_PACKET="$(pwd)/log" GIT_TEST_SIDEBAND_ALL=1 \
+ git -c protocol.version=2 \
+ -c fetch.uriprotocols=http,https \
+ clone "$HTTPD_URL/smart/http_parent" http_child &&
+
+ # Ensure that my-blob and other-blob are in separate packfiles.
+ for idx in http_child/.git/objects/pack/*.idx
+ do
+ git verify-pack --verbose $idx >out &&
+ {
+ grep "^[0-9a-f]\{16,\} " out || :
+ } >out.objectlist &&
+ if test_line_count = 1 out.objectlist
+ then
+ if grep $(cat h) out
+ then
+ >hfound
+ fi &&
+ if grep $(cat h2) out
+ then
+ >h2found
+ fi
+ fi
+ done &&
+ test -f hfound &&
+ test -f h2found &&
+
+ # Ensure that there are exactly 6 files (3 .pack and 3 .idx).
+ ls http_child/.git/objects/pack/* >filelist &&
+ test_line_count = 6 filelist
+'
+
+test_expect_success 'fetching with valid packfile URI but invalid hash fails' '
+ P="$HTTPD_DOCUMENT_ROOT_PATH/http_parent" &&
+ rm -rf "$P" http_child log &&
+
+ git init "$P" &&
+ git -C "$P" config "uploadpack.allowsidebandall" "true" &&
+
+ echo my-blob >"$P/my-blob" &&
+ git -C "$P" add my-blob &&
+ echo other-blob >"$P/other-blob" &&
+ git -C "$P" add other-blob &&
+ git -C "$P" commit -m x &&
+
+ configure_exclusion "$P" my-blob >h &&
+ # Configure a URL for other-blob. Just reuse the hash of the object as
+ # the hash of the packfile, since the hash does not matter for this
+ # test as long as it is not the hash of the pack, and it is of the
+ # expected length.
+ git -C "$P" hash-object other-blob >objh &&
+ git -C "$P" pack-objects "$HTTPD_DOCUMENT_ROOT_PATH/mypack" <objh >packh &&
+ git -C "$P" config --add \
+ "uploadpack.blobpackfileuri" \
+ "$(cat objh) $(cat objh) $HTTPD_URL/dumb/mypack-$(cat packh).pack" &&
+
+ test_must_fail env GIT_TEST_SIDEBAND_ALL=1 \
+ git -c protocol.version=2 \
+ -c fetch.uriprotocols=http,https \
+ clone "$HTTPD_URL/smart/http_parent" http_child 2>err &&
+ test_i18ngrep "pack downloaded from.*does not match expected hash" err
+'
+
# DO NOT add non-httpd-specific tests here, because the last part of this
# test script is only executed when httpd is available and enabled.
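For reference, the configure_exclusion helper above stores one uploadpack.blobpackfileuri entry per excluded blob, in the form '<object-hash> <pack-hash> <URI>', and the clients in these tests opt in via fetch.uriprotocols. A condensed sketch of the two halves, reusing the names from the tests above:

    # server: map an object to a pre-generated pack that clients fetch by URI
    git -C "$P" config --add uploadpack.blobpackfileuri \
        "$(cat objh) $(cat packh) $HTTPD_URL/dumb/mypack-$(cat packh).pack"

    # client: allow http(s) packfile URIs during a protocol v2 clone
    git -c protocol.version=2 -c fetch.uriprotocols=http,https \
        clone "$HTTPD_URL/smart/http_parent" http_child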
diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh
index 92ad5eeec0..748282f058 100755
--- a/t/t5703-upload-pack-ref-in-want.sh
+++ b/t/t5703-upload-pack-ref-in-want.sh
@@ -27,6 +27,15 @@ check_output () {
test_cmp sorted_commits actual_commits
}
+write_command () {
+ echo "command=$1"
+
+ if test "$(test_oid algo)" != sha1
+ then
+ echo "object-format=$(test_oid algo)"
+ fi
+}
+
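write_command keeps these hand-built requests valid for both algorithms: it always prints the command line and appends a matching object-format line only when the repository does not use SHA-1. For example, on a SHA-256 run:

    write_command fetch
    # prints:
    #   command=fetch
    #   object-format=sha256
    # (on a SHA-1 run only the command= line is printed)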
# c(o/foo) d(o/bar)
# \ /
# b e(baz) f(master)
@@ -65,7 +74,7 @@ test_expect_success 'config controls ref-in-want advertisement' '
test_expect_success 'invalid want-ref line' '
test-tool pkt-line pack >in <<-EOF &&
- command=fetch
+ $(write_command fetch)
0001
no-progress
want-ref refs/heads/non-existent
@@ -86,7 +95,7 @@ test_expect_success 'basic want-ref' '
oid=$(git rev-parse a) &&
test-tool pkt-line pack >in <<-EOF &&
- command=fetch
+ $(write_command fetch)
0001
no-progress
want-ref refs/heads/master
@@ -110,7 +119,7 @@ test_expect_success 'multiple want-ref lines' '
oid=$(git rev-parse b) &&
test-tool pkt-line pack >in <<-EOF &&
- command=fetch
+ $(write_command fetch)
0001
no-progress
want-ref refs/heads/o/foo
@@ -132,7 +141,7 @@ test_expect_success 'mix want and want-ref' '
git rev-parse e f >expected_commits &&
test-tool pkt-line pack >in <<-EOF &&
- command=fetch
+ $(write_command fetch)
0001
no-progress
want-ref refs/heads/master
@@ -155,7 +164,7 @@ test_expect_success 'want-ref with ref we already have commit for' '
oid=$(git rev-parse c) &&
test-tool pkt-line pack >in <<-EOF &&
- command=fetch
+ $(write_command fetch)
0001
no-progress
want-ref refs/heads/o/foo
diff --git a/t/t5704-protocol-violations.sh b/t/t5704-protocol-violations.sh
index 950cfb21fe..5c941949b9 100755
--- a/t/t5704-protocol-violations.sh
+++ b/t/t5704-protocol-violations.sh
@@ -9,6 +9,7 @@ making sure that we do not segfault or otherwise behave badly.'
test_expect_success 'extra delim packet in v2 ls-refs args' '
{
packetize command=ls-refs &&
+ packetize "object-format=$(test_oid algo)" &&
printf 0001 &&
# protocol expects 0000 flush here
printf 0001
@@ -21,6 +22,7 @@ test_expect_success 'extra delim packet in v2 ls-refs args' '
test_expect_success 'extra delim packet in v2 fetch args' '
{
packetize command=fetch &&
+ packetize "object-format=$(test_oid algo)" &&
printf 0001 &&
# protocol expects 0000 flush here
printf 0001
diff --git a/t/t5801/git-remote-testgit b/t/t5801/git-remote-testgit
index 6b9f0b5dc7..1544d6dc6b 100755
--- a/t/t5801/git-remote-testgit
+++ b/t/t5801/git-remote-testgit
@@ -52,9 +52,11 @@ do
test -n "$GIT_REMOTE_TESTGIT_SIGNED_TAGS" && echo "signed-tags"
test -n "$GIT_REMOTE_TESTGIT_NO_PRIVATE_UPDATE" && echo "no-private-update"
echo 'option'
+ echo 'object-format'
echo
;;
list)
+ echo ":object-format $(git rev-parse --show-object-format=storage)"
git for-each-ref --format='? %(refname)' 'refs/heads/' 'refs/tags/'
head=$(git symbolic-ref HEAD)
echo "@$head HEAD"
@@ -139,6 +141,10 @@ do
test $val = "true" && force="true" || force=
echo "ok"
;;
+ object-format)
+ test $val = "true" && object_format="true" || object_format=
+ echo "ok"
+ ;;
*)
echo "unsupported"
;;
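Taken together, the helper now advertises an object-format capability and reports the repository's storage format at the start of its list output. A rough transcript of the exchange (helper replies marked '>', capability list abbreviated, assuming a SHA-256 repository with a single master branch):

    capabilities
    > option
    > object-format
    >
    list
    > :object-format sha256
    > ? refs/heads/master
    > @refs/heads/master HEAD
    >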
diff --git a/t/t6050-replace.sh b/t/t6050-replace.sh
index e7e64e085d..c80dc10b8f 100755
--- a/t/t6050-replace.sh
+++ b/t/t6050-replace.sh
@@ -135,7 +135,7 @@ test_expect_success 'tag replaced commit' '
test_expect_success '"git fsck" works' '
git fsck master >fsck_master.out &&
test_i18ngrep "dangling commit $R" fsck_master.out &&
- test_i18ngrep "dangling tag $(cat .git/refs/tags/mytag)" fsck_master.out &&
+ test_i18ngrep "dangling tag $(git show-ref -s refs/tags/mytag)" fsck_master.out &&
test -z "$(git fsck)"
'
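Reading .git/refs/tags/mytag directly only works while the ref is stored as a loose file; git show-ref -s asks the ref machinery instead, so the check keeps working when refs are packed or kept in a different backend. A small sketch of the failure mode being avoided (assuming an annotated tag mytag already exists):

    git pack-refs --all &&
    ! test -f .git/refs/tags/mytag &&      # the loose file is gone after packing
    git show-ref -s refs/tags/mytag        # still prints mytag's object id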
diff --git a/t/t6132-pathspec-exclude.sh b/t/t6132-pathspec-exclude.sh
index 2462b19ddd..30328b87f0 100755
--- a/t/t6132-pathspec-exclude.sh
+++ b/t/t6132-pathspec-exclude.sh
@@ -211,4 +211,37 @@ test_expect_success 't_e_i() exclude case #8' '
)
'
+test_expect_success 'grep --untracked PATTERN' '
+ # This test is not an actual test of exclude patterns; rather, it is
+ # here solely to ensure that, if any tests are inserted, deleted, or
+ # changed above, we still have untracked files with the expected
+ # contents for the NEXT two tests.
+ cat <<-\EOF >expect-grep &&
+ actual
+ expect
+ sub/actual
+ sub/expect
+ EOF
+ git grep -l --untracked file -- >actual-grep &&
+ test_cmp expect-grep actual-grep
+'
+
+test_expect_success 'grep --untracked PATTERN :(exclude)DIR' '
+ cat <<-\EOF >expect-grep &&
+ actual
+ expect
+ EOF
+ git grep -l --untracked file -- ":(exclude)sub" >actual-grep &&
+ test_cmp expect-grep actual-grep
+'
+
+test_expect_success 'grep --untracked PATTERN :(exclude)*FILE' '
+ cat <<-\EOF >expect-grep &&
+ actual
+ sub/actual
+ EOF
+ git grep -l --untracked file -- ":(exclude)*expect" >actual-grep &&
+ test_cmp expect-grep actual-grep
+'
+
test_done
diff --git a/t/t9020-remote-svn.sh b/t/t9020-remote-svn.sh
index 0f97bafb00..754c4a3284 100755
--- a/t/t9020-remote-svn.sh
+++ b/t/t9020-remote-svn.sh
@@ -48,8 +48,8 @@ test_expect_success REMOTE_SVN 'simple fetch' '
'
test_debug '
- cat .git/refs/svn/svnsim/master
- cat .git/refs/remotes/svnsim/master
+ git show-ref -s refs/svn/svnsim/master
+ git show-ref -s refs/remotes/svnsim/master
'
test_expect_success REMOTE_SVN 'repeated fetch, nothing shall change' '
diff --git a/t/t9100-git-svn-basic.sh b/t/t9100-git-svn-basic.sh
index 2c309a57d9..9f2d19ecc4 100755
--- a/t/t9100-git-svn-basic.sh
+++ b/t/t9100-git-svn-basic.sh
@@ -208,9 +208,10 @@ name='check imported tree checksums expected tree checksums'
rm -f expected
if test_have_prereq UTF8
then
- echo tree dc68b14b733e4ec85b04ab6f712340edc5dc936e > expected
+ echo tree dc68b14b733e4ec85b04ab6f712340edc5dc936e > expected.sha1
+ echo tree b95b55b29d771f5eb73aa9b9d52d02fe11a2538c2feb0829f754ce20a91d98eb > expected.sha256
fi
-cat >> expected <<\EOF
+cat >> expected.sha1 <<\EOF
tree c3322890dcf74901f32d216f05c5044f670ce632
tree d3ccd5035feafd17b030c5732e7808cc49122853
tree d03e1630363d4881e68929d532746b20b0986b83
@@ -220,8 +221,20 @@ tree 149d63cd5878155c846e8c55d7d8487de283f89e
tree d667270a1f7b109f5eb3aaea21ede14b56bfdd6e
tree 8f51f74cf0163afc9ad68a4b1537288c4558b5a4
EOF
+cat >> expected.sha256 <<\EOF
+tree 8d12756699d0b5b110514240a0ff141f6cbf8891fd69ab05e5594196fb437c9f
+tree 8187168d33f7d4ccb8c1cc6e99532810aaccb47658f35d19b3803072d1128d7a
+tree 74e535d85da8ee25eb23d7b506790c5ab3ccdb1ba0826bd57625ed44ef361650
+tree 6fd7dd963e3cdca0cbd6368ed3cfcc8037cc154d2e7719d9d369a0952364fd95
+tree 1fd6cec6aa95102d69266e20419bb62ec2a06372d614b9850ef23ff204103bb4
+tree 6fd7dd963e3cdca0cbd6368ed3cfcc8037cc154d2e7719d9d369a0952364fd95
+tree deb2b7ac79cd8ce6f52af6a5a0a08691e94ba74a2ed55966bb27dbec551730eb
+tree 59e2e936761188476a7752034e8aa0a822b34050c8504b0dfd946407f4bc9215
+EOF
-test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp expected a"
+test_expect_success POSIXPERM,SYMLINKS "$name" '
+ test_cmp expected.$(test_oid algo) a
+'
test_expect_success 'exit if remote refs are ambiguous' '
git config --add svn-remote.svn.fetch \
diff --git a/t/t9101-git-svn-props.sh b/t/t9101-git-svn-props.sh
index c26c4b0927..8b5681dd68 100755
--- a/t/t9101-git-svn-props.sh
+++ b/t/t9101-git-svn-props.sh
@@ -160,11 +160,13 @@ cat >create-ignore.expect <<\EOF
/no-such-file*
EOF
-cat >create-ignore-index.expect <<\EOF
-100644 8c52e5dfcd0a8b6b6bcfe6b41b89bcbf493718a5 0 .gitignore
-100644 8c52e5dfcd0a8b6b6bcfe6b41b89bcbf493718a5 0 deeply/.gitignore
-100644 8c52e5dfcd0a8b6b6bcfe6b41b89bcbf493718a5 0 deeply/nested/.gitignore
-100644 8c52e5dfcd0a8b6b6bcfe6b41b89bcbf493718a5 0 deeply/nested/directory/.gitignore
+expectoid=$(git hash-object create-ignore.expect)
+
+cat >create-ignore-index.expect <<EOF
+100644 $expectoid 0 .gitignore
+100644 $expectoid 0 deeply/.gitignore
+100644 $expectoid 0 deeply/nested/.gitignore
+100644 $expectoid 0 deeply/nested/directory/.gitignore
EOF
test_expect_success 'test create-ignore' "
diff --git a/t/t9104-git-svn-follow-parent.sh b/t/t9104-git-svn-follow-parent.sh
index 5e0ad19177..67eed2fefc 100755
--- a/t/t9104-git-svn-follow-parent.sh
+++ b/t/t9104-git-svn-follow-parent.sh
@@ -161,6 +161,7 @@ test_expect_success "track initial change if it was only made to parent" '
'
test_expect_success "follow-parent is atomic" '
+ record_size=$(($(test_oid rawsz) + 4)) &&
(
cd wc &&
svn_cmd up &&
@@ -186,7 +187,7 @@ test_expect_success "follow-parent is atomic" '
mkdir -p "$GIT_DIR"/svn/refs/remotes/flunk@18 &&
rev_map=$(cd "$GIT_DIR"/svn/refs/remotes/stunk && ls .rev_map*) &&
dd if="$GIT_DIR"/svn/refs/remotes/stunk/$rev_map \
- of="$GIT_DIR"/svn/refs/remotes/flunk@18/$rev_map bs=24 count=1 &&
+ of="$GIT_DIR"/svn/refs/remotes/flunk@18/$rev_map bs=$record_size count=1 &&
rm -rf "$GIT_DIR"/svn/refs/remotes/stunk &&
git svn init --minimize-url -i flunk "$svnrepo"/flunk &&
git svn fetch -i flunk &&
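The old bs=24 assumed SHA-1: each .rev_map record is a 4-byte SVN revision number plus the binary object id, so the record width now has to follow the hash in use. A quick sketch of the arithmetic via the test framework's test_oid rawsz:

    record_size=$(($(test_oid rawsz) + 4))
    # SHA-1:   20 + 4 = 24 bytes per record
    # SHA-256: 32 + 4 = 36 bytes per record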
diff --git a/t/t9108-git-svn-glob.sh b/t/t9108-git-svn-glob.sh
index 6990f64364..d5939d4753 100755
--- a/t/t9108-git-svn-glob.sh
+++ b/t/t9108-git-svn-glob.sh
@@ -48,7 +48,7 @@ test_expect_success 'test refspec globbing' '
"tags/*/src/a:refs/remotes/tags/*" &&
git svn multi-fetch &&
git log --pretty=oneline refs/remotes/tags/end >actual &&
- sed -e "s/^.\{41\}//" actual >output.end &&
+ cut -d" " -f2- actual >output.end &&
test_cmp expect.end output.end &&
test "$(git rev-parse refs/remotes/tags/end~1)" = \
"$(git rev-parse refs/remotes/branches/start)" &&
@@ -84,7 +84,7 @@ test_expect_success 'test left-hand-side only globbing' '
test $(git rev-parse refs/remotes/two/tags/end~3) = \
$(git rev-parse refs/remotes/two/branches/start) &&
git log --pretty=oneline refs/remotes/two/tags/end >actual &&
- sed -e "s/^.\{41\}//" actual >output.two &&
+ cut -d" " -f2- actual >output.two &&
test_cmp expect.two output.two
'
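The sed expression hard-coded the width of a SHA-1 oneline prefix (40 hex digits plus the separating space); a 64-digit SHA-256 id would only be partially stripped. Splitting on the first space is hash-agnostic:

    # old: sed -e "s/^.\{41\}//"   (41 = 40-digit SHA-1 id + 1 space; wrong for SHA-256)
    # new: keep everything after the object id, whatever its length
    git log --pretty=oneline refs/remotes/tags/end >actual &&
    cut -d" " -f2- actual >output.end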
diff --git a/t/t9109-git-svn-multi-glob.sh b/t/t9109-git-svn-multi-glob.sh
index c1e7542a37..648dcee1ea 100755
--- a/t/t9109-git-svn-multi-glob.sh
+++ b/t/t9109-git-svn-multi-glob.sh
@@ -48,7 +48,7 @@ test_expect_success 'test refspec globbing' '
"tags/*/src/a:refs/remotes/tags/*" &&
git svn multi-fetch &&
git log --pretty=oneline refs/remotes/tags/end >actual &&
- sed -e "s/^.\{41\}//" actual >output.end &&
+ cut -d" " -f2- actual >output.end &&
test_cmp expect.end output.end &&
test "$(git rev-parse refs/remotes/tags/end~1)" = \
"$(git rev-parse refs/remotes/branches/v1/start)" &&
@@ -84,7 +84,7 @@ test_expect_success 'test left-hand-side only globbing' '
test $(git rev-parse refs/remotes/two/tags/end~3) = \
$(git rev-parse refs/remotes/two/branches/v1/start) &&
git log --pretty=oneline refs/remotes/two/tags/end >actual &&
- sed -e "s/^.\{41\}//" actual >output.two &&
+ cut -d" " -f2- actual >output.two &&
test_cmp expect.two output.two
'
cat > expect.four <<EOF
@@ -135,7 +135,7 @@ test_expect_success 'test another branch' '
test $(git rev-parse refs/remotes/four/tags/next~2) = \
$(git rev-parse refs/remotes/four/branches/v2/start) &&
git log --pretty=oneline refs/remotes/four/tags/next >actual &&
- sed -e "s/^.\{41\}//" actual >output.four &&
+ cut -d" " -f2- actual >output.four &&
test_cmp expect.four output.four
'
diff --git a/t/t9168-git-svn-partially-globbed-names.sh b/t/t9168-git-svn-partially-globbed-names.sh
index bdf6e84999..854b3419b2 100755
--- a/t/t9168-git-svn-partially-globbed-names.sh
+++ b/t/t9168-git-svn-partially-globbed-names.sh
@@ -49,7 +49,7 @@ test_expect_success 'test refspec prefixed globbing' '
"tags/t_*/src/a:refs/remotes/tags/t_*" &&
git svn multi-fetch &&
git log --pretty=oneline refs/remotes/tags/t_end >actual &&
- sed -e "s/^.\{41\}//" actual >output.end &&
+ cut -d" " -f2- actual >output.end &&
test_cmp expect.end output.end &&
test "$(git rev-parse refs/remotes/tags/t_end~1)" = \
"$(git rev-parse refs/remotes/branches/b_start)" &&
@@ -87,7 +87,7 @@ test_expect_success 'test left-hand-side only prefixed globbing' '
test $(git rev-parse refs/remotes/two/tags/t_end~3) = \
$(git rev-parse refs/remotes/two/branches/b_start) &&
git log --pretty=oneline refs/remotes/two/tags/t_end >actual &&
- sed -e "s/^.\{41\}//" actual >output.two &&
+ cut -d" " -f2- actual >output.two &&
test_cmp expect.two output.two
'
@@ -129,7 +129,7 @@ test_expect_success 'test prefixed globs match just prefix' '
test $(git rev-parse refs/remotes/three/tags/t_~1) = \
$(git rev-parse refs/remotes/three/branches/b_) &&
git log --pretty=oneline refs/remotes/three/tags/t_ >actual &&
- sed -e "s/^.\{41\}//" actual >output.three &&
+ cut -d" " -f2- actual >output.three &&
test_cmp expect.three output.three
'
@@ -199,7 +199,7 @@ test_expect_success 'test globbing in the middle of the word' '
test $(git rev-parse refs/remotes/five/tags/fghij~1) = \
$(git rev-parse refs/remotes/five/branches/abcde) &&
git log --pretty=oneline refs/remotes/five/tags/fghij >actual &&
- sed -e "s/^.\{41\}//" actual >output.five &&
+ cut -d" " -f2- actual >output.five &&
test_cmp expect.five output.five
'
diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh
index 3c44af6940..8425b9a531 100755
--- a/t/t9902-completion.sh
+++ b/t/t9902-completion.sh
@@ -494,7 +494,7 @@ test_expect_success '__gitcomp - prefix' '
'
test_expect_success '__gitcomp - suffix' '
- test_gitcomp "branch.me" "master maint next pu" "branch." \
+ test_gitcomp "branch.me" "master maint next seen" "branch." \
"ma" "." <<-\EOF
branch.master.Z
branch.maint.Z
@@ -545,7 +545,7 @@ read -r -d "" refs <<-\EOF
maint
master
next
-pu
+seen
EOF
test_expect_success '__gitcomp_nl - trailing space' '
@@ -1240,6 +1240,461 @@ test_expect_success '__git_complete_fetch_refspecs - fully qualified & prefix' '
test_cmp expected out
'
+test_expect_success 'git switch - with no options, complete local branches and unique remote branch names for DWIM logic' '
+ test_completion "git switch " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - completes refs and unique remote branches for DWIM' '
+ test_completion "git checkout " <<-\EOF
+ HEAD Z
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with --no-guess, complete only local branches' '
+ test_completion "git switch --no-guess " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - with GIT_COMPLETION_CHECKOUT_NO_GUESS=1, complete only local branches' '
+ GIT_COMPLETION_CHECKOUT_NO_GUESS=1 test_completion "git switch " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - --guess overrides GIT_COMPLETION_CHECKOUT_NO_GUESS=1, complete local branches and unique remote names for DWIM logic' '
+ GIT_COMPLETION_CHECKOUT_NO_GUESS=1 test_completion "git switch --guess " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - a later --guess overrides previous --no-guess, complete local and remote unique branches for DWIM' '
+ test_completion "git switch --no-guess --guess " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - a later --no-guess overrides previous --guess, complete only local branches' '
+ test_completion "git switch --guess --no-guess " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - with GIT_COMPLETION_CHECKOUT_NO_GUESS=1, only completes refs' '
+ GIT_COMPLETION_CHECKOUT_NO_GUESS=1 test_completion "git checkout " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - --guess overrides GIT_COMPLETION_CHECKOUT_NO_GUESS=1, complete refs and unique remote branches for DWIM' '
+ GIT_COMPLETION_CHECKOUT_NO_GUESS=1 test_completion "git checkout --guess " <<-\EOF
+ HEAD Z
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with --no-guess, only completes refs' '
+ test_completion "git checkout --no-guess " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - a later --guess overrides previous --no-guess, complete refs and unique remote branches for DWIM' '
+ test_completion "git checkout --no-guess --guess " <<-\EOF
+ HEAD Z
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - a later --no-guess overrides previous --guess, complete only refs' '
+ test_completion "git checkout --guess --no-guess " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with --detach, complete all references' '
+ test_completion "git switch --detach " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with --detach, complete only references' '
+ test_completion "git checkout --detach " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -d, complete all references' '
+ test_completion "git switch -d " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -d, complete only references' '
+ test_completion "git checkout -d " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with --track, complete only remote branches' '
+ test_completion "git switch --track " <<-\EOF
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with --track, complete only remote branches' '
+ test_completion "git checkout --track " <<-\EOF
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with --no-track, complete only local branch names' '
+ test_completion "git switch --no-track " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - with --no-track, complete only local references' '
+ test_completion "git checkout --no-track " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -c, complete all references' '
+ test_completion "git switch -c new-branch " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -C, complete all references' '
+ test_completion "git switch -C new-branch " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -c and --track, complete all references' '
+ test_completion "git switch -c new-branch --track " <<-EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -C and --track, complete all references' '
+ test_completion "git switch -C new-branch --track " <<-EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -c and --no-track, complete all references' '
+ test_completion "git switch -c new-branch --no-track " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - with -C and --no-track, complete all references' '
+ test_completion "git switch -C new-branch --no-track " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -b, complete all references' '
+ test_completion "git checkout -b new-branch " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -B, complete all references' '
+ test_completion "git checkout -B new-branch " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -b and --track, complete all references' '
+ test_completion "git checkout -b new-branch --track " <<-EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -B and --track, complete all references' '
+ test_completion "git checkout -B new-branch --track " <<-EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -b and --no-track, complete all references' '
+ test_completion "git checkout -b new-branch --no-track " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git checkout - with -B and --no-track, complete all references' '
+ test_completion "git checkout -B new-branch --no-track " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
+test_expect_success 'git switch - for -c, complete local branches and unique remote branches' '
+ test_completion "git switch -c " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - for -C, complete local branches and unique remote branches' '
+ test_completion "git switch -C " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - for -c with --no-guess, complete local branches only' '
+ test_completion "git switch --no-guess -c " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - for -C with --no-guess, complete local branches only' '
+ test_completion "git switch --no-guess -C " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - for -c with --no-track, complete local branches only' '
+ test_completion "git switch --no-track -c " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - for -C with --no-track, complete local branches only' '
+ test_completion "git switch --no-track -C " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - for -b, complete local branches and unique remote branches' '
+ test_completion "git checkout -b " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - for -B, complete local branches and unique remote branches' '
+ test_completion "git checkout -B " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - for -b with --no-guess, complete local branches only' '
+ test_completion "git checkout --no-guess -b " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - for -B with --no-guess, complete local branches only' '
+ test_completion "git checkout --no-guess -B " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - for -b with --no-track, complete local branches only' '
+ test_completion "git checkout --no-track -b " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - for -B with --no-track, complete local branches only' '
+ test_completion "git checkout --no-track -B " <<-\EOF
+ master Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - with --orphan completes local branch names and unique remote branch names' '
+ test_completion "git switch --orphan " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git switch - --orphan with branch already provided completes nothing else' '
+ test_completion "git switch --orphan master " <<-\EOF
+
+ EOF
+'
+
+test_expect_success 'git checkout - with --orphan completes local branch names and unique remote branch names' '
+ test_completion "git checkout --orphan " <<-\EOF
+ branch-in-other Z
+ master Z
+ master-in-other Z
+ matching-branch Z
+ EOF
+'
+
+test_expect_success 'git checkout - --orphan with branch already provided completes local refs for a start-point' '
+ test_completion "git checkout --orphan master " <<-\EOF
+ HEAD Z
+ master Z
+ matching-branch Z
+ matching-tag Z
+ other/branch-in-other Z
+ other/master-in-other Z
+ EOF
+'
+
test_expect_success 'teardown after ref completion' '
git branch -d matching-branch &&
git tag -d matching-tag &&
diff --git a/t/test-lib.sh b/t/test-lib.sh
index dbc027ff26..618a7c8d5b 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -1414,6 +1414,7 @@ test_oid_init
ZERO_OID=$(test_oid zero)
OID_REGEX=$(echo $ZERO_OID | sed -e 's/0/[0-9a-f]/g')
+OIDPATH_REGEX=$(test_oid_to_path $ZERO_OID | sed -e 's/0/[0-9a-f]/g')
EMPTY_TREE=$(test_oid empty_tree)
EMPTY_BLOB=$(test_oid empty_blob)
_z40=$ZERO_OID
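OIDPATH_REGEX is the loose-object-path counterpart of OID_REGEX: test_oid_to_path splits the all-zero id after its first two hex digits, and the sed call turns every zero into a hex character class. A hedged usage sketch (the rev-parse and grep lines are illustrative, not part of the patch):

    # matches e.g. "xx/<38 hex digits>" on SHA-1, "xx/<62 hex digits>" on SHA-256
    loose=$(test_oid_to_path "$(git rev-parse HEAD)") &&
    echo "$loose" | grep "^$OIDPATH_REGEX\$"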
diff --git a/tag.c b/tag.c
index 71b544467e..1ed2684e45 100644
--- a/tag.c
+++ b/tag.c
@@ -103,7 +103,7 @@ struct tag *lookup_tag(struct repository *r, const struct object_id *oid)
struct object *obj = lookup_object(r, oid);
if (!obj)
return create_object(r, oid, alloc_tag_node(r));
- return object_as_type(r, obj, OBJ_TAG, 0);
+ return object_as_type(obj, OBJ_TAG, 0);
}
static timestamp_t parse_tag_date(const char *buf, const char *tail)
diff --git a/transport-helper.c b/transport-helper.c
index 8a711cda29..c6b753bfae 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -32,7 +32,8 @@ struct helper_data {
signed_tags : 1,
check_connectivity : 1,
no_disconnect_req : 1,
- no_private_update : 1;
+ no_private_update : 1,
+ object_format : 1;
/*
* As an optimization, the transport code may invoke fetch before
@@ -207,6 +208,8 @@ static struct child_process *get_helper(struct transport *transport)
data->import_marks = xstrdup(arg);
} else if (starts_with(capname, "no-private-update")) {
data->no_private_update = 1;
+ } else if (starts_with(capname, "object-format")) {
+ data->object_format = 1;
} else if (mandatory) {
die(_("unknown mandatory capability %s; this remote "
"helper probably needs newer version of Git"),
@@ -410,10 +413,11 @@ static int fetch_with_fetch(struct transport *transport,
exit(128);
if (skip_prefix(buf.buf, "lock ", &name)) {
- if (transport->pack_lockfile)
+ if (transport->pack_lockfiles.nr)
warning(_("%s also locked %s"), data->name, name);
else
- transport->pack_lockfile = xstrdup(name);
+ string_list_append(&transport->pack_lockfiles,
+ name);
}
else if (data->check_connectivity &&
data->transport_options.check_self_contained_and_connected &&
@@ -1103,6 +1107,12 @@ static struct ref *get_refs_list_using_list(struct transport *transport,
data->get_refs_list_called = 1;
helper = get_helper(transport);
+ if (data->object_format) {
+ write_str_in_full(helper->in, "option object-format\n");
+ if (recvline(data, &buf) || strcmp(buf.buf, "ok"))
+ exit(128);
+ }
+
if (data->push && for_push)
write_str_in_full(helper->in, "list for-push\n");
else
@@ -1115,6 +1125,17 @@ static struct ref *get_refs_list_using_list(struct transport *transport,
if (!*buf.buf)
break;
+ else if (buf.buf[0] == ':') {
+ const char *value;
+ if (skip_prefix(buf.buf, ":object-format ", &value)) {
+ int algo = hash_algo_by_name(value);
+ if (algo == GIT_HASH_UNKNOWN)
+ die(_("unsupported object format '%s'"),
+ value);
+ transport->hash_algo = &hash_algos[algo];
+ }
+ continue;
+ }
eov = strchr(buf.buf, ' ');
if (!eov)
@@ -1127,7 +1148,7 @@ static struct ref *get_refs_list_using_list(struct transport *transport,
if (buf.buf[0] == '@')
(*tail)->symref = xstrdup(buf.buf + 1);
else if (buf.buf[0] != '?')
- get_oid_hex(buf.buf, &(*tail)->old_oid);
+ get_oid_hex_algop(buf.buf, &(*tail)->old_oid, transport->hash_algo);
if (eon) {
if (has_attribute(eon + 1, "unchanged")) {
(*tail)->status |= REF_STATUS_UPTODATE;
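On the Git side, list lines that begin with ':' are now attributes rather than refs: ':object-format <name>' selects the algorithm used to parse the object ids that follow, an unrecognized name makes the command die with "unsupported object format", and any other ':' line is skipped. A helper's list response can therefore look like (ref names illustrative):

    :object-format sha256
    ? refs/heads/master
    @refs/heads/master HEAD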
diff --git a/transport.c b/transport.c
index 7d50c502ad..b41386eccb 100644
--- a/transport.c
+++ b/transport.c
@@ -143,6 +143,9 @@ static struct ref *get_refs_from_bundle(struct transport *transport,
data->fd = read_bundle_header(transport->url, &data->header);
if (data->fd < 0)
die(_("could not read bundle '%s'"), transport->url);
+
+ transport->hash_algo = data->header.hash_algo;
+
for (i = 0; i < data->header.references.nr; i++) {
struct ref_list_entry *e = data->header.references.list + i;
struct ref *ref = alloc_ref(e->name);
@@ -157,11 +160,14 @@ static int fetch_refs_from_bundle(struct transport *transport,
int nr_heads, struct ref **to_fetch)
{
struct bundle_transport_data *data = transport->data;
+ int ret;
if (!data->get_refs_from_bundle_called)
get_refs_from_bundle(transport, 0, NULL);
- return unbundle(the_repository, &data->header, data->fd,
- transport->progress ? BUNDLE_VERBOSE : 0);
+ ret = unbundle(the_repository, &data->header, data->fd,
+ transport->progress ? BUNDLE_VERBOSE : 0);
+ transport->hash_algo = data->header.hash_algo;
+ return ret;
}
static int close_bundle(struct transport *transport)
@@ -312,6 +318,7 @@ static struct ref *handshake(struct transport *transport, int for_push,
BUG("unknown protocol version");
}
data->got_remote_heads = 1;
+ transport->hash_algo = reader.hash_algo;
if (reader.line_peeked)
BUG("buffer must be empty at the end of handshake()");
@@ -378,7 +385,7 @@ static int fetch_refs_via_pack(struct transport *transport,
refs = fetch_pack(&args, data->fd,
refs_tmp ? refs_tmp : transport->remote_refs,
to_fetch, nr_heads, &data->shallow,
- &transport->pack_lockfile, data->version);
+ &transport->pack_lockfiles, data->version);
close(data->fd[0]);
close(data->fd[1]);
@@ -921,6 +928,7 @@ struct transport *transport_get(struct remote *remote, const char *url)
struct transport *ret = xcalloc(1, sizeof(*ret));
ret->progress = isatty(2);
+ string_list_init(&ret->pack_lockfiles, 1);
if (!remote)
BUG("No remote provided to transport_get()");
@@ -988,9 +996,16 @@ struct transport *transport_get(struct remote *remote, const char *url)
ret->smart_options->receivepack = remote->receivepack;
}
+ ret->hash_algo = &hash_algos[GIT_HASH_SHA1];
+
return ret;
}
+const struct git_hash_algo *transport_get_hash_algo(struct transport *transport)
+{
+ return transport->hash_algo;
+}
+
int transport_set_option(struct transport *transport,
const char *name, const char *value)
{
@@ -1316,10 +1331,11 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs)
void transport_unlock_pack(struct transport *transport)
{
- if (transport->pack_lockfile) {
- unlink_or_warn(transport->pack_lockfile);
- FREE_AND_NULL(transport->pack_lockfile);
- }
+ int i;
+
+ for (i = 0; i < transport->pack_lockfiles.nr; i++)
+ unlink_or_warn(transport->pack_lockfiles.items[i].string);
+ string_list_clear(&transport->pack_lockfiles, 0);
}
int transport_connect(struct transport *transport, const char *name,
diff --git a/transport.h b/transport.h
index 4298c855be..b3c30133ea 100644
--- a/transport.h
+++ b/transport.h
@@ -5,8 +5,7 @@
#include "run-command.h"
#include "remote.h"
#include "list-objects-filter-options.h"
-
-struct string_list;
+#include "string-list.h"
struct git_transport_options {
unsigned thin : 1;
@@ -98,7 +97,8 @@ struct transport {
*/
const struct string_list *server_options;
- char *pack_lockfile;
+ struct string_list pack_lockfiles;
+
signed verbose : 3;
/**
* Transports should not set this directly, and should use this
@@ -115,6 +115,8 @@ struct transport {
struct git_transport_options *smart_options;
enum transport_family family;
+
+ const struct git_hash_algo *hash_algo;
};
#define TRANSPORT_PUSH_ALL (1<<0)
@@ -243,6 +245,12 @@ int transport_push(struct repository *repo,
const struct ref *transport_get_remote_refs(struct transport *transport,
const struct argv_array *ref_prefixes);
+/*
+ * Fetch the hash algorithm used by a remote.
+ *
+ * This can only be called after fetching the remote refs.
+ */
+const struct git_hash_algo *transport_get_hash_algo(struct transport *transport);
int transport_fetch_refs(struct transport *transport, struct ref *refs);
void transport_unlock_pack(struct transport *transport);
int transport_disconnect(struct transport *transport);
diff --git a/tree.c b/tree.c
index 1466bcc6a8..e76517f6b1 100644
--- a/tree.c
+++ b/tree.c
@@ -200,7 +200,7 @@ struct tree *lookup_tree(struct repository *r, const struct object_id *oid)
struct object *obj = lookup_object(r, oid);
if (!obj)
return create_object(r, oid, alloc_tree_node(r));
- return object_as_type(r, obj, OBJ_TREE, 0);
+ return object_as_type(obj, OBJ_TREE, 0);
}
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
diff --git a/upload-pack.c b/upload-pack.c
index 401c9e6c4b..951a2b23aa 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -42,60 +42,71 @@
#define ALL_FLAGS (THEY_HAVE | OUR_REF | WANTED | COMMON_KNOWN | SHALLOW | \
NOT_SHALLOW | CLIENT_SHALLOW | HIDDEN_REF)
-static timestamp_t oldest_have;
-
-static int multi_ack;
-static int no_done;
-static int use_thin_pack, use_ofs_delta, use_include_tag;
-static int no_progress, daemon_mode;
-/* Allow specifying sha1 if it is a ref tip. */
-#define ALLOW_TIP_SHA1 01
-/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
-#define ALLOW_REACHABLE_SHA1 02
-/* Allow request of any sha1. Implies ALLOW_TIP_SHA1 and ALLOW_REACHABLE_SHA1. */
-#define ALLOW_ANY_SHA1 07
-static unsigned int allow_unadvertised_object_request;
-static int shallow_nr;
-static struct object_array extra_edge_obj;
-static unsigned int timeout;
-static int keepalive = 5;
-/* 0 for no sideband,
- * otherwise maximum packet size (up to 65520 bytes).
- */
-static int use_sideband;
-static const char *pack_objects_hook;
-
-static int filter_capability_requested;
-static int allow_filter;
-static int allow_ref_in_want;
-
-static int allow_sideband_all;
+/* Enum for allowed unadvertised object request (UOR) */
+enum allow_uor {
+ /* Allow specifying sha1 if it is a ref tip. */
+ ALLOW_TIP_SHA1 = 0x01,
+ /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
+ ALLOW_REACHABLE_SHA1 = 0x02,
+ /* Allow request of any sha1. Implies ALLOW_TIP_SHA1 and ALLOW_REACHABLE_SHA1. */
+ ALLOW_ANY_SHA1 = 0x07
+};
+/*
+ * Please annotate, and if possible group together, fields used only
+ * for protocol v0 or only for protocol v2.
+ */
struct upload_pack_data {
- struct string_list symref;
- struct string_list wanted_refs;
+ struct string_list symref; /* v0 only */
struct object_array want_obj;
struct object_array have_obj;
- struct oid_array haves;
+ struct oid_array haves; /* v2 only */
+ struct string_list wanted_refs; /* v2 only */
struct object_array shallows;
struct string_list deepen_not;
+ struct object_array extra_edge_obj;
int depth;
timestamp_t deepen_since;
int deepen_rev_list;
int deepen_relative;
+ int keepalive;
+ int shallow_nr;
+ timestamp_t oldest_have;
+
+ unsigned int timeout; /* v0 only */
+ enum {
+ NO_MULTI_ACK = 0,
+ MULTI_ACK = 1,
+ MULTI_ACK_DETAILED = 2
+ } multi_ack; /* v0 only */
+
+ /* 0 for no sideband, otherwise DEFAULT_PACKET_MAX or LARGE_PACKET_MAX */
+ int use_sideband;
+
+ struct string_list uri_protocols;
+ enum allow_uor allow_uor;
struct list_objects_filter_options filter_options;
struct packet_writer writer;
- unsigned stateless_rpc : 1;
+ const char *pack_objects_hook;
+
+ unsigned stateless_rpc : 1; /* v0 only */
+ unsigned no_done : 1; /* v0 only */
+ unsigned daemon_mode : 1; /* v0 only */
+ unsigned filter_capability_requested : 1; /* v0 only */
unsigned use_thin_pack : 1;
unsigned use_ofs_delta : 1;
unsigned no_progress : 1;
unsigned use_include_tag : 1;
- unsigned done : 1;
+ unsigned allow_filter : 1;
+
+ unsigned done : 1; /* v2 only */
+ unsigned allow_ref_in_want : 1; /* v2 only */
+ unsigned allow_sideband_all : 1; /* v2 only */
};
static void upload_pack_data_init(struct upload_pack_data *data)
@@ -107,6 +118,8 @@ static void upload_pack_data_init(struct upload_pack_data *data)
struct oid_array haves = OID_ARRAY_INIT;
struct object_array shallows = OBJECT_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
+ struct string_list uri_protocols = STRING_LIST_INIT_DUP;
+ struct object_array extra_edge_obj = OBJECT_ARRAY_INIT;
memset(data, 0, sizeof(*data));
data->symref = symref;
@@ -116,7 +129,11 @@ static void upload_pack_data_init(struct upload_pack_data *data)
data->haves = haves;
data->shallows = shallows;
data->deepen_not = deepen_not;
+ data->uri_protocols = uri_protocols;
+ data->extra_edge_obj = extra_edge_obj;
packet_writer_init(&data->writer, 1);
+
+ data->keepalive = 5;
}
static void upload_pack_data_clear(struct upload_pack_data *data)
@@ -128,15 +145,19 @@ static void upload_pack_data_clear(struct upload_pack_data *data)
oid_array_clear(&data->haves);
object_array_clear(&data->shallows);
string_list_clear(&data->deepen_not, 0);
+ object_array_clear(&data->extra_edge_obj);
list_objects_filter_release(&data->filter_options);
+
+ free((char *)data->pack_objects_hook);
}
-static void reset_timeout(void)
+static void reset_timeout(unsigned int timeout)
{
alarm(timeout);
}
-static void send_client_data(int fd, const char *data, ssize_t sz)
+static void send_client_data(int fd, const char *data, ssize_t sz,
+ int use_sideband)
{
if (use_sideband) {
send_sideband(1, fd, data, sz, use_sideband);
@@ -161,42 +182,115 @@ static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
return 0;
}
-static void create_pack_file(struct upload_pack_data *pack_data)
+struct output_state {
+ char buffer[8193];
+ int used;
+ unsigned packfile_uris_started : 1;
+ unsigned packfile_started : 1;
+};
+
+static int relay_pack_data(int pack_objects_out, struct output_state *os,
+ int use_sideband, int write_packfile_line)
+{
+ /*
+ * We keep the last byte to ourselves
+ * in case we detect broken rev-list, so that we
+ * can leave the stream corrupted. This is
+ * unfortunate -- unpack-objects would happily
+ * accept a valid packdata with trailing garbage,
+ * so appending garbage after we pass all the
+ * pack data is not good enough to signal
+ * breakage to downstream.
+ */
+ ssize_t readsz;
+
+ readsz = xread(pack_objects_out, os->buffer + os->used,
+ sizeof(os->buffer) - os->used);
+ if (readsz < 0) {
+ return readsz;
+ }
+ os->used += readsz;
+
+ while (!os->packfile_started) {
+ char *p;
+ if (os->used >= 4 && !memcmp(os->buffer, "PACK", 4)) {
+ os->packfile_started = 1;
+ if (write_packfile_line) {
+ if (os->packfile_uris_started)
+ packet_delim(1);
+ packet_write_fmt(1, "\1packfile\n");
+ }
+ break;
+ }
+ if ((p = memchr(os->buffer, '\n', os->used))) {
+ if (!os->packfile_uris_started) {
+ os->packfile_uris_started = 1;
+ if (!write_packfile_line)
+ BUG("packfile_uris requires sideband-all");
+ packet_write_fmt(1, "\1packfile-uris\n");
+ }
+ *p = '\0';
+ packet_write_fmt(1, "\1%s\n", os->buffer);
+
+ os->used -= p - os->buffer + 1;
+ memmove(os->buffer, p + 1, os->used);
+ } else {
+ /*
+ * Incomplete line.
+ */
+ return readsz;
+ }
+ }
+
+ if (os->used > 1) {
+ send_client_data(1, os->buffer, os->used - 1, use_sideband);
+ os->buffer[0] = os->buffer[os->used - 1];
+ os->used = 1;
+ } else {
+ send_client_data(1, os->buffer, os->used, use_sideband);
+ os->used = 0;
+ }
+
+ return readsz;
+}
+
+static void create_pack_file(struct upload_pack_data *pack_data,
+ const struct string_list *uri_protocols)
{
struct child_process pack_objects = CHILD_PROCESS_INIT;
- char data[8193], progress[128];
+ struct output_state output_state = { { 0 } };
+ char progress[128];
char abort_msg[] = "aborting due to possible repository "
"corruption on the remote side.";
- int buffered = -1;
ssize_t sz;
int i;
FILE *pipe_fd;
- if (!pack_objects_hook)
+ if (!pack_data->pack_objects_hook)
pack_objects.git_cmd = 1;
else {
- argv_array_push(&pack_objects.args, pack_objects_hook);
+ argv_array_push(&pack_objects.args, pack_data->pack_objects_hook);
argv_array_push(&pack_objects.args, "git");
pack_objects.use_shell = 1;
}
- if (shallow_nr) {
+ if (pack_data->shallow_nr) {
argv_array_push(&pack_objects.args, "--shallow-file");
argv_array_push(&pack_objects.args, "");
}
argv_array_push(&pack_objects.args, "pack-objects");
argv_array_push(&pack_objects.args, "--revs");
- if (use_thin_pack)
+ if (pack_data->use_thin_pack)
argv_array_push(&pack_objects.args, "--thin");
argv_array_push(&pack_objects.args, "--stdout");
- if (shallow_nr)
+ if (pack_data->shallow_nr)
argv_array_push(&pack_objects.args, "--shallow");
- if (!no_progress)
+ if (!pack_data->no_progress)
argv_array_push(&pack_objects.args, "--progress");
- if (use_ofs_delta)
+ if (pack_data->use_ofs_delta)
argv_array_push(&pack_objects.args, "--delta-base-offset");
- if (use_include_tag)
+ if (pack_data->use_include_tag)
argv_array_push(&pack_objects.args, "--include-tag");
if (pack_data->filter_options.choice) {
const char *spec =
@@ -211,6 +305,11 @@ static void create_pack_file(struct upload_pack_data *pack_data)
spec);
}
}
+ if (uri_protocols) {
+ for (i = 0; i < uri_protocols->nr; i++)
+ argv_array_pushf(&pack_objects.args, "--uri-protocol=%s",
+ uri_protocols->items[i].string);
+ }
pack_objects.in = -1;
pack_objects.out = -1;
@@ -221,7 +320,7 @@ static void create_pack_file(struct upload_pack_data *pack_data)
pipe_fd = xfdopen(pack_objects.in, "w");
- if (shallow_nr)
+ if (pack_data->shallow_nr)
for_each_commit_graft(write_one_shallow, pipe_fd);
for (i = 0; i < pack_data->want_obj.nr; i++)
@@ -231,9 +330,9 @@ static void create_pack_file(struct upload_pack_data *pack_data)
for (i = 0; i < pack_data->have_obj.nr; i++)
fprintf(pipe_fd, "%s\n",
oid_to_hex(&pack_data->have_obj.objects[i].item->oid));
- for (i = 0; i < extra_edge_obj.nr; i++)
+ for (i = 0; i < pack_data->extra_edge_obj.nr; i++)
fprintf(pipe_fd, "%s\n",
- oid_to_hex(&extra_edge_obj.objects[i].item->oid));
+ oid_to_hex(&pack_data->extra_edge_obj.objects[i].item->oid));
fprintf(pipe_fd, "\n");
fflush(pipe_fd);
fclose(pipe_fd);
@@ -244,10 +343,10 @@ static void create_pack_file(struct upload_pack_data *pack_data)
while (1) {
struct pollfd pfd[2];
- int pe, pu, pollsize;
+ int pe, pu, pollsize, polltimeout;
int ret;
- reset_timeout();
+ reset_timeout(pack_data->timeout);
pollsize = 0;
pe = pu = -1;
@@ -268,8 +367,11 @@ static void create_pack_file(struct upload_pack_data *pack_data)
if (!pollsize)
break;
- ret = poll(pfd, pollsize,
- keepalive < 0 ? -1 : 1000 * keepalive);
+ polltimeout = pack_data->keepalive < 0
+ ? -1
+ : 1000 * pack_data->keepalive;
+
+ ret = poll(pfd, pollsize, polltimeout);
if (ret < 0) {
if (errno != EINTR) {
@@ -285,7 +387,8 @@ static void create_pack_file(struct upload_pack_data *pack_data)
sz = xread(pack_objects.err, progress,
sizeof(progress));
if (0 < sz)
- send_client_data(2, progress, sz);
+ send_client_data(2, progress, sz,
+ pack_data->use_sideband);
else if (sz == 0) {
close(pack_objects.err);
pack_objects.err = -1;
@@ -296,39 +399,17 @@ static void create_pack_file(struct upload_pack_data *pack_data)
continue;
}
if (0 <= pu && (pfd[pu].revents & (POLLIN|POLLHUP))) {
- /* Data ready; we keep the last byte to ourselves
- * in case we detect broken rev-list, so that we
- * can leave the stream corrupted. This is
- * unfortunate -- unpack-objects would happily
- * accept a valid packdata with trailing garbage,
- * so appending garbage after we pass all the
- * pack data is not good enough to signal
- * breakage to downstream.
- */
- char *cp = data;
- ssize_t outsz = 0;
- if (0 <= buffered) {
- *cp++ = buffered;
- outsz++;
- }
- sz = xread(pack_objects.out, cp,
- sizeof(data) - outsz);
- if (0 < sz)
- ;
- else if (sz == 0) {
+ int result = relay_pack_data(pack_objects.out,
+ &output_state,
+ pack_data->use_sideband,
+ !!uri_protocols);
+
+ if (result == 0) {
close(pack_objects.out);
pack_objects.out = -1;
- }
- else
+ } else if (result < 0) {
goto fail;
- sz += outsz;
- if (1 < sz) {
- buffered = data[sz-1] & 0xFF;
- sz--;
}
- else
- buffered = -1;
- send_client_data(1, data, sz);
}
/*
@@ -341,7 +422,7 @@ static void create_pack_file(struct upload_pack_data *pack_data)
* protocol to say anything, so those clients are just out of
* luck.
*/
- if (!ret && use_sideband) {
+ if (!ret && pack_data->use_sideband) {
static const char buf[] = "0005\1";
write_or_die(1, buf, 5);
}
@@ -353,32 +434,26 @@ static void create_pack_file(struct upload_pack_data *pack_data)
}
/* flush the data */
- if (0 <= buffered) {
- data[0] = buffered;
- send_client_data(1, data, 1);
+ if (output_state.used > 0) {
+ send_client_data(1, output_state.buffer, output_state.used,
+ pack_data->use_sideband);
fprintf(stderr, "flushed.\n");
}
- if (use_sideband)
+ if (pack_data->use_sideband)
packet_flush(1);
return;
fail:
- send_client_data(3, abort_msg, sizeof(abort_msg));
+ send_client_data(3, abort_msg, sizeof(abort_msg),
+ pack_data->use_sideband);
die("git upload-pack: %s", abort_msg);
}
-static int got_oid(const char *hex, struct object_id *oid,
- struct object_array *have_obj)
+static int do_got_oid(struct upload_pack_data *data, const struct object_id *oid)
{
- struct object *o;
int we_knew_they_have = 0;
+ struct object *o = parse_object(the_repository, oid);
- if (get_oid_hex(hex, oid))
- die("git upload-pack: expected SHA1 object, got '%s'", hex);
- if (!has_object_file(oid))
- return -1;
-
- o = parse_object(the_repository, oid);
if (!o)
die("oops (%s)", oid_to_hex(oid));
if (o->type == OBJ_COMMIT) {
@@ -388,30 +463,39 @@ static int got_oid(const char *hex, struct object_id *oid,
we_knew_they_have = 1;
else
o->flags |= THEY_HAVE;
- if (!oldest_have || (commit->date < oldest_have))
- oldest_have = commit->date;
+ if (!data->oldest_have || (commit->date < data->oldest_have))
+ data->oldest_have = commit->date;
for (parents = commit->parents;
parents;
parents = parents->next)
parents->item->object.flags |= THEY_HAVE;
}
if (!we_knew_they_have) {
- add_object_array(o, NULL, have_obj);
+ add_object_array(o, NULL, &data->have_obj);
return 1;
}
return 0;
}
-static int ok_to_give_up(const struct object_array *have_obj,
- struct object_array *want_obj)
+static int got_oid(struct upload_pack_data *data,
+ const char *hex, struct object_id *oid)
+{
+ if (get_oid_hex(hex, oid))
+ die("git upload-pack: expected SHA1 object, got '%s'", hex);
+ if (!has_object_file(oid))
+ return -1;
+ return do_got_oid(data, oid);
+}
+
+static int ok_to_give_up(struct upload_pack_data *data)
{
uint32_t min_generation = GENERATION_NUMBER_ZERO;
- if (!have_obj->nr)
+ if (!data->have_obj.nr)
return 0;
- return can_all_from_reach_with_flag(want_obj, THEY_HAVE,
- COMMON_KNOWN, oldest_have,
+ return can_all_from_reach_with_flag(&data->want_obj, THEY_HAVE,
+ COMMON_KNOWN, data->oldest_have,
min_generation);
}
@@ -429,20 +513,20 @@ static int get_common_commits(struct upload_pack_data *data,
for (;;) {
const char *arg;
- reset_timeout();
+ reset_timeout(data->timeout);
if (packet_reader_read(reader) != PACKET_READ_NORMAL) {
- if (multi_ack == 2
+ if (data->multi_ack == MULTI_ACK_DETAILED
&& got_common
&& !got_other
- && ok_to_give_up(&data->have_obj, &data->want_obj)) {
+ && ok_to_give_up(data)) {
sent_ready = 1;
packet_write_fmt(1, "ACK %s ready\n", last_hex);
}
- if (data->have_obj.nr == 0 || multi_ack)
+ if (data->have_obj.nr == 0 || data->multi_ack)
packet_write_fmt(1, "NAK\n");
- if (no_done && sent_ready) {
+ if (data->no_done && sent_ready) {
packet_write_fmt(1, "ACK %s\n", last_hex);
return 0;
}
@@ -453,13 +537,13 @@ static int get_common_commits(struct upload_pack_data *data,
continue;
}
if (skip_prefix(reader->line, "have ", &arg)) {
- switch (got_oid(arg, &oid, &data->have_obj)) {
+ switch (got_oid(data, arg, &oid)) {
case -1: /* they have what we do not */
got_other = 1;
- if (multi_ack
- && ok_to_give_up(&data->have_obj, &data->want_obj)) {
+ if (data->multi_ack
+ && ok_to_give_up(data)) {
const char *hex = oid_to_hex(&oid);
- if (multi_ack == 2) {
+ if (data->multi_ack == MULTI_ACK_DETAILED) {
sent_ready = 1;
packet_write_fmt(1, "ACK %s ready\n", hex);
} else
@@ -469,9 +553,9 @@ static int get_common_commits(struct upload_pack_data *data,
default:
got_common = 1;
oid_to_hex_r(last_hex, &oid);
- if (multi_ack == 2)
+ if (data->multi_ack == MULTI_ACK_DETAILED)
packet_write_fmt(1, "ACK %s common\n", last_hex);
- else if (multi_ack)
+ else if (data->multi_ack)
packet_write_fmt(1, "ACK %s continue\n", last_hex);
else if (data->have_obj.nr == 1)
packet_write_fmt(1, "ACK %s\n", last_hex);
@@ -481,7 +565,7 @@ static int get_common_commits(struct upload_pack_data *data,
}
if (!strcmp(reader->line, "done")) {
if (data->have_obj.nr > 0) {
- if (multi_ack)
+ if (data->multi_ack)
packet_write_fmt(1, "ACK %s\n", last_hex);
return 0;
}
@@ -492,10 +576,10 @@ static int get_common_commits(struct upload_pack_data *data,
}
}
-static int is_our_ref(struct object *o)
+static int is_our_ref(struct object *o, enum allow_uor allow_uor)
{
- int allow_hidden_ref = (allow_unadvertised_object_request &
- (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
+ int allow_hidden_ref = (allow_uor &
+ (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
return o->flags & ((allow_hidden_ref ? HIDDEN_REF : 0) | OUR_REF);
}
@@ -504,7 +588,8 @@ static int is_our_ref(struct object *o)
*/
static int do_reachable_revlist(struct child_process *cmd,
struct object_array *src,
- struct object_array *reachable)
+ struct object_array *reachable,
+ enum allow_uor allow_uor)
{
static const char *argv[] = {
"rev-list", "--stdin", NULL,
@@ -538,7 +623,7 @@ static int do_reachable_revlist(struct child_process *cmd,
continue;
if (reachable && o->type == OBJ_COMMIT)
o->flags &= ~TMP_MARK;
- if (!is_our_ref(o))
+ if (!is_our_ref(o, allow_uor))
continue;
memcpy(namebuf + 1, oid_to_hex(&o->oid), hexsz);
if (write_in_full(cmd->in, namebuf, hexsz + 2) < 0)
@@ -547,7 +632,7 @@ static int do_reachable_revlist(struct child_process *cmd,
namebuf[hexsz] = '\n';
for (i = 0; i < src->nr; i++) {
o = src->objects[i].item;
- if (is_our_ref(o)) {
+ if (is_our_ref(o, allow_uor)) {
if (reachable)
add_object_array(o, NULL, reachable);
continue;
@@ -574,7 +659,7 @@ error:
return -1;
}
-static int get_reachable_list(struct object_array *src,
+static int get_reachable_list(struct upload_pack_data *data,
struct object_array *reachable)
{
struct child_process cmd = CHILD_PROCESS_INIT;
@@ -583,7 +668,8 @@ static int get_reachable_list(struct object_array *src,
char namebuf[GIT_MAX_HEXSZ + 2]; /* ^ + hash + LF */
const unsigned hexsz = the_hash_algo->hexsz;
- if (do_reachable_revlist(&cmd, src, reachable) < 0)
+ if (do_reachable_revlist(&cmd, &data->shallows, reachable,
+ data->allow_uor) < 0)
return -1;
while ((i = read_in_full(cmd.out, namebuf, hexsz + 1)) == hexsz + 1) {
@@ -614,13 +700,13 @@ static int get_reachable_list(struct object_array *src,
return 0;
}
-static int has_unreachable(struct object_array *src)
+static int has_unreachable(struct object_array *src, enum allow_uor allow_uor)
{
struct child_process cmd = CHILD_PROCESS_INIT;
char buf[1];
int i;
- if (do_reachable_revlist(&cmd, src, NULL) < 0)
+ if (do_reachable_revlist(&cmd, src, NULL, allow_uor) < 0)
return 1;
/*
@@ -660,10 +746,9 @@ static void check_non_tip(struct upload_pack_data *data)
* uploadpack.allowReachableSHA1InWant,
* non-tip requests can never happen.
*/
- if (!data->stateless_rpc
- && !(allow_unadvertised_object_request & ALLOW_REACHABLE_SHA1))
+ if (!data->stateless_rpc && !(data->allow_uor & ALLOW_REACHABLE_SHA1))
goto error;
- if (!has_unreachable(&data->want_obj))
+ if (!has_unreachable(&data->want_obj, data->allow_uor))
/* All the non-tip ones are ancestors of what we advertised */
return;
@@ -671,7 +756,7 @@ error:
/* Pick one of them (we know there at least is one) */
for (i = 0; i < data->want_obj.nr; i++) {
struct object *o = data->want_obj.objects[i].item;
- if (!is_our_ref(o)) {
+ if (!is_our_ref(o, data->allow_uor)) {
packet_writer_error(&data->writer,
"upload-pack: not our ref %s",
oid_to_hex(&o->oid));
@@ -681,32 +766,30 @@ error:
}
}
-static void send_shallow(struct packet_writer *writer,
+static void send_shallow(struct upload_pack_data *data,
struct commit_list *result)
{
while (result) {
struct object *object = &result->item->object;
if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
- packet_writer_write(writer, "shallow %s",
+ packet_writer_write(&data->writer, "shallow %s",
oid_to_hex(&object->oid));
register_shallow(the_repository, &object->oid);
- shallow_nr++;
+ data->shallow_nr++;
}
result = result->next;
}
}
-static void send_unshallow(struct packet_writer *writer,
- const struct object_array *shallows,
- struct object_array *want_obj)
+static void send_unshallow(struct upload_pack_data *data)
{
int i;
- for (i = 0; i < shallows->nr; i++) {
- struct object *object = shallows->objects[i].item;
+ for (i = 0; i < data->shallows.nr; i++) {
+ struct object *object = data->shallows.objects[i].item;
if (object->flags & NOT_SHALLOW) {
struct commit_list *parents;
- packet_writer_write(writer, "unshallow %s",
+ packet_writer_write(&data->writer, "unshallow %s",
oid_to_hex(&object->oid));
object->flags &= ~CLIENT_SHALLOW;
/*
@@ -722,10 +805,10 @@ static void send_unshallow(struct packet_writer *writer,
parents = ((struct commit *)object)->parents;
while (parents) {
add_object_array(&parents->item->object,
- NULL, want_obj);
+ NULL, &data->want_obj);
parents = parents->next;
}
- add_object_array(object, NULL, &extra_edge_obj);
+ add_object_array(object, NULL, &data->extra_edge_obj);
}
/* make sure commit traversal conforms to client */
register_shallow(the_repository, &object->oid);
@@ -734,17 +817,16 @@ static void send_unshallow(struct packet_writer *writer,
static int check_ref(const char *refname_full, const struct object_id *oid,
int flag, void *cb_data);
-static void deepen(struct packet_writer *writer, int depth, int deepen_relative,
- struct object_array *shallows, struct object_array *want_obj)
+static void deepen(struct upload_pack_data *data, int depth)
{
if (depth == INFINITE_DEPTH && !is_repository_shallow(the_repository)) {
int i;
- for (i = 0; i < shallows->nr; i++) {
- struct object *object = shallows->objects[i].item;
+ for (i = 0; i < data->shallows.nr; i++) {
+ struct object *object = data->shallows.objects[i].item;
object->flags |= NOT_SHALLOW;
}
- } else if (deepen_relative) {
+ } else if (data->deepen_relative) {
struct object_array reachable_shallows = OBJECT_ARRAY_INIT;
struct commit_list *result;
@@ -755,87 +837,80 @@ static void deepen(struct packet_writer *writer, int depth, int deepen_relative,
head_ref_namespaced(check_ref, NULL);
for_each_namespaced_ref(check_ref, NULL);
- get_reachable_list(shallows, &reachable_shallows);
+ get_reachable_list(data, &reachable_shallows);
result = get_shallow_commits(&reachable_shallows,
depth + 1,
SHALLOW, NOT_SHALLOW);
- send_shallow(writer, result);
+ send_shallow(data, result);
free_commit_list(result);
object_array_clear(&reachable_shallows);
} else {
struct commit_list *result;
- result = get_shallow_commits(want_obj, depth,
+ result = get_shallow_commits(&data->want_obj, depth,
SHALLOW, NOT_SHALLOW);
- send_shallow(writer, result);
+ send_shallow(data, result);
free_commit_list(result);
}
- send_unshallow(writer, shallows, want_obj);
+ send_unshallow(data);
}
-static void deepen_by_rev_list(struct packet_writer *writer, int ac,
- const char **av,
- struct object_array *shallows,
- struct object_array *want_obj)
+static void deepen_by_rev_list(struct upload_pack_data *data,
+ int ac,
+ const char **av)
{
struct commit_list *result;
disable_commit_graph(the_repository);
result = get_shallow_commits_by_rev_list(ac, av, SHALLOW, NOT_SHALLOW);
- send_shallow(writer, result);
+ send_shallow(data, result);
free_commit_list(result);
- send_unshallow(writer, shallows, want_obj);
+ send_unshallow(data);
}
/* Returns 1 if a shallow list is sent or 0 otherwise */
-static int send_shallow_list(struct packet_writer *writer,
- int depth, int deepen_rev_list,
- timestamp_t deepen_since,
- struct string_list *deepen_not,
- int deepen_relative,
- struct object_array *shallows,
- struct object_array *want_obj)
+static int send_shallow_list(struct upload_pack_data *data)
{
int ret = 0;
- if (depth > 0 && deepen_rev_list)
+ if (data->depth > 0 && data->deepen_rev_list)
die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
- if (depth > 0) {
- deepen(writer, depth, deepen_relative, shallows, want_obj);
+ if (data->depth > 0) {
+ deepen(data, data->depth);
ret = 1;
- } else if (deepen_rev_list) {
+ } else if (data->deepen_rev_list) {
struct argv_array av = ARGV_ARRAY_INIT;
int i;
argv_array_push(&av, "rev-list");
- if (deepen_since)
- argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
- if (deepen_not->nr) {
+ if (data->deepen_since)
+ argv_array_pushf(&av, "--max-age=%"PRItime, data->deepen_since);
+ if (data->deepen_not.nr) {
argv_array_push(&av, "--not");
- for (i = 0; i < deepen_not->nr; i++) {
- struct string_list_item *s = deepen_not->items + i;
+ for (i = 0; i < data->deepen_not.nr; i++) {
+ struct string_list_item *s = data->deepen_not.items + i;
argv_array_push(&av, s->string);
}
argv_array_push(&av, "--not");
}
- for (i = 0; i < want_obj->nr; i++) {
- struct object *o = want_obj->objects[i].item;
+ for (i = 0; i < data->want_obj.nr; i++) {
+ struct object *o = data->want_obj.objects[i].item;
argv_array_push(&av, oid_to_hex(&o->oid));
}
- deepen_by_rev_list(writer, av.argc, av.argv, shallows, want_obj);
+ deepen_by_rev_list(data, av.argc, av.argv);
argv_array_clear(&av);
ret = 1;
} else {
- if (shallows->nr > 0) {
+ if (data->shallows.nr > 0) {
int i;
- for (i = 0; i < shallows->nr; i++)
+ for (i = 0; i < data->shallows.nr; i++)
register_shallow(the_repository,
- &shallows->objects[i].item->oid);
+ &data->shallows.objects[i].item->oid);
}
}
- shallow_nr += shallows->nr;
+ data->shallow_nr += data->shallows.nr;
return ret;
}
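
(For a rough sense of the rev-list argument vector assembled above, with hypothetical request values: a fetch that sent "deepen-since 1528000000", "deepen-not refs/heads/wip" and a single want would hand get_shallow_commits_by_rev_list() something along the lines of

    rev-list --max-age=1528000000 --not refs/heads/wip --not <want-oid>

where the second --not toggles the exclusion off again so the wanted tip itself is traversed positively.)
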
@@ -913,14 +988,14 @@ static void receive_needs(struct upload_pack_data *data,
{
int has_non_tip = 0;
- shallow_nr = 0;
+ data->shallow_nr = 0;
for (;;) {
struct object *o;
const char *features;
struct object_id oid_buf;
const char *arg;
- reset_timeout();
+ reset_timeout(data->timeout);
if (packet_reader_read(reader) != PACKET_READ_NORMAL)
break;
@@ -934,7 +1009,7 @@ static void receive_needs(struct upload_pack_data *data,
continue;
if (skip_prefix(reader->line, "filter ", &arg)) {
- if (!filter_capability_requested)
+ if (!data->filter_capability_requested)
die("git upload-pack: filtering capability not negotiated");
list_objects_filter_die_if_populated(&data->filter_options);
parse_list_objects_filter(&data->filter_options, arg);
@@ -949,25 +1024,26 @@ static void receive_needs(struct upload_pack_data *data,
if (parse_feature_request(features, "deepen-relative"))
data->deepen_relative = 1;
if (parse_feature_request(features, "multi_ack_detailed"))
- multi_ack = 2;
+ data->multi_ack = MULTI_ACK_DETAILED;
else if (parse_feature_request(features, "multi_ack"))
- multi_ack = 1;
+ data->multi_ack = MULTI_ACK;
if (parse_feature_request(features, "no-done"))
- no_done = 1;
+ data->no_done = 1;
if (parse_feature_request(features, "thin-pack"))
- use_thin_pack = 1;
+ data->use_thin_pack = 1;
if (parse_feature_request(features, "ofs-delta"))
- use_ofs_delta = 1;
+ data->use_ofs_delta = 1;
if (parse_feature_request(features, "side-band-64k"))
- use_sideband = LARGE_PACKET_MAX;
+ data->use_sideband = LARGE_PACKET_MAX;
else if (parse_feature_request(features, "side-band"))
- use_sideband = DEFAULT_PACKET_MAX;
+ data->use_sideband = DEFAULT_PACKET_MAX;
if (parse_feature_request(features, "no-progress"))
- no_progress = 1;
+ data->no_progress = 1;
if (parse_feature_request(features, "include-tag"))
- use_include_tag = 1;
- if (allow_filter && parse_feature_request(features, "filter"))
- filter_capability_requested = 1;
+ data->use_include_tag = 1;
+ if (data->allow_filter &&
+ parse_feature_request(features, "filter"))
+ data->filter_capability_requested = 1;
o = parse_object(the_repository, &oid_buf);
if (!o) {
@@ -979,8 +1055,8 @@ static void receive_needs(struct upload_pack_data *data,
}
if (!(o->flags & WANTED)) {
o->flags |= WANTED;
- if (!((allow_unadvertised_object_request & ALLOW_ANY_SHA1) == ALLOW_ANY_SHA1
- || is_our_ref(o)))
+ if (!((data->allow_uor & ALLOW_ANY_SHA1) == ALLOW_ANY_SHA1
+ || is_our_ref(o, data->allow_uor)))
has_non_tip = 1;
add_object_array(o, NULL, &data->want_obj);
}
@@ -996,20 +1072,13 @@ static void receive_needs(struct upload_pack_data *data,
if (has_non_tip)
check_non_tip(data);
- if (!use_sideband && daemon_mode)
- no_progress = 1;
+ if (!data->use_sideband && data->daemon_mode)
+ data->no_progress = 1;
if (data->depth == 0 && !data->deepen_rev_list && data->shallows.nr == 0)
return;
- if (send_shallow_list(&data->writer,
- data->depth,
- data->deepen_rev_list,
- data->deepen_since,
- &data->deepen_not,
- data->deepen_relative,
- &data->shallows,
- &data->want_obj))
+ if (send_shallow_list(data))
packet_flush(1);
}
@@ -1063,16 +1132,17 @@ static int send_ref(const char *refname, const struct object_id *oid,
struct strbuf symref_info = STRBUF_INIT;
format_symref_info(&symref_info, &data->symref);
- packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n",
+ packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s object-format=%s agent=%s\n",
oid_to_hex(oid), refname_nons,
0, capabilities,
- (allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
+ (data->allow_uor & ALLOW_TIP_SHA1) ?
" allow-tip-sha1-in-want" : "",
- (allow_unadvertised_object_request & ALLOW_REACHABLE_SHA1) ?
+ (data->allow_uor & ALLOW_REACHABLE_SHA1) ?
" allow-reachable-sha1-in-want" : "",
data->stateless_rpc ? " no-done" : "",
symref_info.buf,
- allow_filter ? " filter" : "",
+ data->allow_filter ? " filter" : "",
+ the_hash_algo->name,
git_user_agent_sanitized());
strbuf_release(&symref_info);
} else {
@@ -1100,33 +1170,35 @@ static int find_symref(const char *refname, const struct object_id *oid,
return 0;
}
-static int upload_pack_config(const char *var, const char *value, void *unused)
+static int upload_pack_config(const char *var, const char *value, void *cb_data)
{
+ struct upload_pack_data *data = cb_data;
+
if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
if (git_config_bool(var, value))
- allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
+ data->allow_uor |= ALLOW_TIP_SHA1;
else
- allow_unadvertised_object_request &= ~ALLOW_TIP_SHA1;
+ data->allow_uor &= ~ALLOW_TIP_SHA1;
} else if (!strcmp("uploadpack.allowreachablesha1inwant", var)) {
if (git_config_bool(var, value))
- allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+ data->allow_uor |= ALLOW_REACHABLE_SHA1;
else
- allow_unadvertised_object_request &= ~ALLOW_REACHABLE_SHA1;
+ data->allow_uor &= ~ALLOW_REACHABLE_SHA1;
} else if (!strcmp("uploadpack.allowanysha1inwant", var)) {
if (git_config_bool(var, value))
- allow_unadvertised_object_request |= ALLOW_ANY_SHA1;
+ data->allow_uor |= ALLOW_ANY_SHA1;
else
- allow_unadvertised_object_request &= ~ALLOW_ANY_SHA1;
+ data->allow_uor &= ~ALLOW_ANY_SHA1;
} else if (!strcmp("uploadpack.keepalive", var)) {
- keepalive = git_config_int(var, value);
- if (!keepalive)
- keepalive = -1;
+ data->keepalive = git_config_int(var, value);
+ if (!data->keepalive)
+ data->keepalive = -1;
} else if (!strcmp("uploadpack.allowfilter", var)) {
- allow_filter = git_config_bool(var, value);
+ data->allow_filter = git_config_bool(var, value);
} else if (!strcmp("uploadpack.allowrefinwant", var)) {
- allow_ref_in_want = git_config_bool(var, value);
+ data->allow_ref_in_want = git_config_bool(var, value);
} else if (!strcmp("uploadpack.allowsidebandall", var)) {
- allow_sideband_all = git_config_bool(var, value);
+ data->allow_sideband_all = git_config_bool(var, value);
} else if (!strcmp("core.precomposeunicode", var)) {
precomposed_unicode = git_config_bool(var, value);
}
@@ -1134,7 +1206,7 @@ static int upload_pack_config(const char *var, const char *value, void *unused)
if (current_config_scope() != CONFIG_SCOPE_LOCAL &&
current_config_scope() != CONFIG_SCOPE_WORKTREE) {
if (!strcmp("uploadpack.packobjectshook", var))
- return git_config_string(&pack_objects_hook, var, value);
+ return git_config_string(&data->pack_objects_hook, var, value);
}
return parse_hide_refs_config(var, value, "uploadpack");
@@ -1145,19 +1217,18 @@ void upload_pack(struct upload_pack_options *options)
struct packet_reader reader;
struct upload_pack_data data;
- timeout = options->timeout;
- daemon_mode = options->daemon_mode;
-
- git_config(upload_pack_config, NULL);
-
upload_pack_data_init(&data);
+ git_config(upload_pack_config, &data);
+
data.stateless_rpc = options->stateless_rpc;
+ data.daemon_mode = options->daemon_mode;
+ data.timeout = options->timeout;
head_ref_namespaced(find_symref, &data.symref);
if (options->advertise_refs || !data.stateless_rpc) {
- reset_timeout();
+ reset_timeout(data.timeout);
head_ref_namespaced(send_ref, &data);
for_each_namespaced_ref(send_ref, &data);
advertise_shallow_grafts(1);
@@ -1175,7 +1246,7 @@ void upload_pack(struct upload_pack_options *options)
receive_needs(&data, &reader);
if (data.want_obj.nr) {
get_common_commits(&data, &reader);
- create_pack_file(&data);
+ create_pack_file(&data, NULL);
}
}
@@ -1269,7 +1340,7 @@ static void process_args(struct packet_reader *request,
/* process want */
if (parse_want(&data->writer, arg, &data->want_obj))
continue;
- if (allow_ref_in_want &&
+ if (data->allow_ref_in_want &&
parse_want_ref(&data->writer, arg, &data->wanted_refs,
&data->want_obj))
continue;
@@ -1279,19 +1350,19 @@ static void process_args(struct packet_reader *request,
/* process args like thin-pack */
if (!strcmp(arg, "thin-pack")) {
- use_thin_pack = 1;
+ data->use_thin_pack = 1;
continue;
}
if (!strcmp(arg, "ofs-delta")) {
- use_ofs_delta = 1;
+ data->use_ofs_delta = 1;
continue;
}
if (!strcmp(arg, "no-progress")) {
- no_progress = 1;
+ data->no_progress = 1;
continue;
}
if (!strcmp(arg, "include-tag")) {
- use_include_tag = 1;
+ data->use_include_tag = 1;
continue;
}
if (!strcmp(arg, "done")) {
@@ -1315,87 +1386,72 @@ static void process_args(struct packet_reader *request,
continue;
}
- if (allow_filter && skip_prefix(arg, "filter ", &p)) {
+ if (data->allow_filter && skip_prefix(arg, "filter ", &p)) {
list_objects_filter_die_if_populated(&data->filter_options);
parse_list_objects_filter(&data->filter_options, p);
continue;
}
if ((git_env_bool("GIT_TEST_SIDEBAND_ALL", 0) ||
- allow_sideband_all) &&
+ data->allow_sideband_all) &&
!strcmp(arg, "sideband-all")) {
data->writer.use_sideband = 1;
continue;
}
+ if (skip_prefix(arg, "packfile-uris ", &p)) {
+ string_list_split(&data->uri_protocols, p, ',', -1);
+ continue;
+ }
+
/* ignore unknown lines maybe? */
die("unexpected line: '%s'", arg);
}
+ if (data->uri_protocols.nr && !data->writer.use_sideband)
+ string_list_clear(&data->uri_protocols, 0);
+
if (request->status != PACKET_READ_FLUSH)
die(_("expected flush after fetch arguments"));
}
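
The new "packfile-uris" argument carries a comma-separated list of protocols the client accepts; string_list_split() turns it into one list entry per protocol, and the list is cleared again above unless the sideband writer is in use. A minimal sketch of that call with a hypothetical value (assumes string-list.h):

    /* sketch: split a hypothetical "packfile-uris https,http" value */
    static void demo_split(void)
    {
            struct string_list protocols = STRING_LIST_INIT_DUP;

            string_list_split(&protocols, "https,http", ',', -1);
            /* protocols.nr == 2: "https" and "http" */
            string_list_clear(&protocols, 0);
    }
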
-static int process_haves(struct oid_array *haves, struct oid_array *common,
- struct object_array *have_obj)
+static int process_haves(struct upload_pack_data *data, struct oid_array *common)
{
int i;
/* Process haves */
- for (i = 0; i < haves->nr; i++) {
- const struct object_id *oid = &haves->oid[i];
- struct object *o;
- int we_knew_they_have = 0;
+ for (i = 0; i < data->haves.nr; i++) {
+ const struct object_id *oid = &data->haves.oid[i];
if (!has_object_file(oid))
continue;
oid_array_append(common, oid);
- o = parse_object(the_repository, oid);
- if (!o)
- die("oops (%s)", oid_to_hex(oid));
- if (o->type == OBJ_COMMIT) {
- struct commit_list *parents;
- struct commit *commit = (struct commit *)o;
- if (o->flags & THEY_HAVE)
- we_knew_they_have = 1;
- else
- o->flags |= THEY_HAVE;
- if (!oldest_have || (commit->date < oldest_have))
- oldest_have = commit->date;
- for (parents = commit->parents;
- parents;
- parents = parents->next)
- parents->item->object.flags |= THEY_HAVE;
- }
- if (!we_knew_they_have)
- add_object_array(o, NULL, have_obj);
+ do_got_oid(data, oid);
}
return 0;
}
-static int send_acks(struct packet_writer *writer, struct oid_array *acks,
- const struct object_array *have_obj,
- struct object_array *want_obj)
+static int send_acks(struct upload_pack_data *data, struct oid_array *acks)
{
int i;
- packet_writer_write(writer, "acknowledgments\n");
+ packet_writer_write(&data->writer, "acknowledgments\n");
/* Send Acks */
if (!acks->nr)
- packet_writer_write(writer, "NAK\n");
+ packet_writer_write(&data->writer, "NAK\n");
for (i = 0; i < acks->nr; i++) {
- packet_writer_write(writer, "ACK %s\n",
+ packet_writer_write(&data->writer, "ACK %s\n",
oid_to_hex(&acks->oid[i]));
}
- if (ok_to_give_up(have_obj, want_obj)) {
+ if (ok_to_give_up(data)) {
/* Send Ready */
- packet_writer_write(writer, "ready\n");
+ packet_writer_write(&data->writer, "ready\n");
return 1;
}
@@ -1407,11 +1463,10 @@ static int process_haves_and_send_acks(struct upload_pack_data *data)
struct oid_array common = OID_ARRAY_INIT;
int ret = 0;
- process_haves(&data->haves, &common, &data->have_obj);
+ process_haves(data, &common);
if (data->done) {
ret = 1;
- } else if (send_acks(&data->writer, &common,
- &data->have_obj, &data->want_obj)) {
+ } else if (send_acks(data, &common)) {
packet_writer_delim(&data->writer);
ret = 1;
} else {
@@ -1452,14 +1507,9 @@ static void send_shallow_info(struct upload_pack_data *data)
packet_writer_write(&data->writer, "shallow-info\n");
- if (!send_shallow_list(&data->writer, data->depth,
- data->deepen_rev_list,
- data->deepen_since, &data->deepen_not,
- data->deepen_relative,
- &data->shallows, &data->want_obj) &&
+ if (!send_shallow_list(data) &&
is_repository_shallow(the_repository))
- deepen(&data->writer, INFINITE_DEPTH, data->deepen_relative,
- &data->shallows, &data->want_obj);
+ deepen(data, INFINITE_DEPTH);
packet_delim(1);
}
@@ -1479,10 +1529,10 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys,
clear_object_flags(ALL_FLAGS);
- git_config(upload_pack_config, NULL);
-
upload_pack_data_init(&data);
- use_sideband = LARGE_PACKET_MAX;
+ data.use_sideband = LARGE_PACKET_MAX;
+
+ git_config(upload_pack_config, &data);
while (state != FETCH_DONE) {
switch (state) {
@@ -1518,8 +1568,12 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys,
send_wanted_ref_info(&data);
send_shallow_info(&data);
- packet_writer_write(&data.writer, "packfile\n");
- create_pack_file(&data);
+ if (data.uri_protocols.nr) {
+ create_pack_file(&data, &data.uri_protocols);
+ } else {
+ packet_writer_write(&data.writer, "packfile\n");
+ create_pack_file(&data, NULL);
+ }
state = FETCH_DONE;
break;
case FETCH_DONE:
@@ -1538,6 +1592,7 @@ int upload_pack_advertise(struct repository *r,
int allow_filter_value;
int allow_ref_in_want;
int allow_sideband_all_value;
+ char *str = NULL;
strbuf_addstr(value, "shallow");
@@ -1559,6 +1614,14 @@ int upload_pack_advertise(struct repository *r,
&allow_sideband_all_value) &&
allow_sideband_all_value))
strbuf_addstr(value, " sideband-all");
+
+ if (!repo_config_get_string(the_repository,
+ "uploadpack.blobpackfileuri",
+ &str) &&
+ str) {
+ strbuf_addstr(value, " packfile-uris");
+ free(str);
+ }
}
return 1;
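
With the configuration callback now receiving the upload_pack_data pointer through cb_data, per-request settings no longer live in file-scope globals. The shape of that pattern, as a minimal sketch with hypothetical names (assumes the usual cache.h/config.h includes):

    struct demo_opts {
            int allow_filter;
            int keepalive;
    };

    static int demo_config(const char *var, const char *value, void *cb_data)
    {
            struct demo_opts *opts = cb_data;

            if (!strcmp(var, "demo.allowfilter"))
                    opts->allow_filter = git_config_bool(var, value);
            else if (!strcmp(var, "demo.keepalive"))
                    opts->keepalive = git_config_int(var, value);
            return 0;
    }

    /* caller fills a stack struct instead of touching globals: */
    /*   struct demo_opts opts = { 0 };                         */
    /*   git_config(demo_config, &opts);                        */
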
diff --git a/worktree.c b/worktree.c
index ee82235f26..ff9b7d847f 100644
--- a/worktree.c
+++ b/worktree.c
@@ -123,14 +123,7 @@ static void mark_current_worktree(struct worktree **worktrees)
free(git_dir);
}
-static int compare_worktree(const void *a_, const void *b_)
-{
- const struct worktree *const *a = a_;
- const struct worktree *const *b = b_;
- return fspathcmp((*a)->path, (*b)->path);
-}
-
-struct worktree **get_worktrees(unsigned flags)
+struct worktree **get_worktrees(void)
{
struct worktree **list = NULL;
struct strbuf path = STRBUF_INIT;
@@ -161,13 +154,6 @@ struct worktree **get_worktrees(unsigned flags)
ALLOC_GROW(list, counter + 1, alloc);
list[counter] = NULL;
- if (flags & GWT_SORT_LINKED)
- /*
- * don't sort the first item (main worktree), which will
- * always be the first
- */
- QSORT(list + 1, counter - 1, compare_worktree);
-
mark_current_worktree(list);
return list;
}
@@ -418,7 +404,7 @@ const struct worktree *find_shared_symref(const char *symref,
if (worktrees)
free_worktrees(worktrees);
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
for (i = 0; worktrees[i]; i++) {
struct worktree *wt = worktrees[i];
@@ -577,7 +563,7 @@ int other_head_refs(each_ref_fn fn, void *cb_data)
struct worktree **worktrees, **p;
int ret = 0;
- worktrees = get_worktrees(0);
+ worktrees = get_worktrees();
for (p = worktrees; *p; p++) {
struct worktree *wt = *p;
struct object_id oid;
diff --git a/worktree.h b/worktree.h
index d242a6e71c..516744c433 100644
--- a/worktree.h
+++ b/worktree.h
@@ -18,19 +18,14 @@ struct worktree {
int lock_reason_valid; /* private */
};
-/* Functions for acting on the information about worktrees. */
-
-#define GWT_SORT_LINKED (1 << 0) /* keeps linked worktrees sorted */
-
/*
* Get the worktrees. The primary worktree will always be the first returned,
- * and linked worktrees will be pointed to by 'next' in each subsequent
- * worktree. No specific ordering is done on the linked worktrees.
+ * and linked worktrees will follow in no particular order.
*
* The caller is responsible for freeing the memory from the returned
- * worktree(s).
+ * worktrees by calling free_worktrees().
*/
-struct worktree **get_worktrees(unsigned flags);
+struct worktree **get_worktrees(void);
/*
* Returns 1 if linked worktrees exist, 0 otherwise.
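
With the flags argument gone, callers of the simplified API just walk the NULL-terminated array and release it afterwards; a minimal sketch (output made up, assumes worktree.h and the usual compat includes):

    struct worktree **list = get_worktrees();
    int i;

    for (i = 0; list[i]; i++)
            printf("%s\n", list[i]->path); /* list[0] is always the main worktree */
    free_worktrees(list);
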
diff --git a/wrapper.c b/wrapper.c
index 3a1c0e0526..4ff4a9c3db 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -105,6 +105,14 @@ char *xstrndup(const char *str, size_t len)
return xmemdupz(str, p ? p - str : len);
}
+int xstrncmpz(const char *s, const char *t, size_t len)
+{
+ int res = strncmp(s, t, len);
+ if (res)
+ return res;
+ return s[len] == '\0' ? 0 : 1;
+}
+
void *xrealloc(void *ptr, size_t size)
{
void *ret;
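
xstrncmpz() asks whether the NUL-terminated string s is exactly the first len bytes of t, which is convenient when matching against a length-delimited token inside a larger buffer. For example, with a hypothetical buffer:

    const char *buf = "uploadpack.keepalive=32";

    xstrncmpz("uploadpack.keepalive", buf, 20);  /* 0: exact 20-byte match */
    xstrncmpz("uploadpack.keep", buf, 20);       /* non-zero: s too short  */
    xstrncmpz("uploadpack.keepalives", buf, 20); /* non-zero: s too long   */
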
diff --git a/wt-status.c b/wt-status.c
index 98dfa6f73f..c560cbe860 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -1484,6 +1484,18 @@ static void show_bisect_in_progress(struct wt_status *s,
wt_longstatus_print_trailer(s);
}
+static void show_sparse_checkout_in_use(struct wt_status *s,
+ const char *color)
+{
+ if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_DISABLED)
+ return;
+
+ status_printf_ln(s, color,
+ _("You are in a sparse checkout with %d%% of tracked files present."),
+ s->state.sparse_checkout_percentage);
+ wt_longstatus_print_trailer(s);
+}
+
/*
* Extract branch information from rebase/bisect
*/
@@ -1623,6 +1635,31 @@ int wt_status_check_bisect(const struct worktree *wt,
return 0;
}
+static void wt_status_check_sparse_checkout(struct repository *r,
+ struct wt_status_state *state)
+{
+ int skip_worktree = 0;
+ int i;
+
+ if (!core_apply_sparse_checkout || r->index->cache_nr == 0) {
+ /*
+ * Don't compute percentage of checked out files if we
+ * aren't in a sparse checkout or would get division by 0.
+ */
+ state->sparse_checkout_percentage = SPARSE_CHECKOUT_DISABLED;
+ return;
+ }
+
+ for (i = 0; i < r->index->cache_nr; i++) {
+ struct cache_entry *ce = r->index->cache[i];
+ if (ce_skip_worktree(ce))
+ skip_worktree++;
+ }
+
+ state->sparse_checkout_percentage =
+ 100 - (100 * skip_worktree)/r->index->cache_nr;
+}
+
void wt_status_get_state(struct repository *r,
struct wt_status_state *state,
int get_detached_from)
@@ -1658,6 +1695,7 @@ void wt_status_get_state(struct repository *r,
}
if (get_detached_from)
wt_status_get_detached_from(r, state);
+ wt_status_check_sparse_checkout(r, state);
}
static void wt_longstatus_print_state(struct wt_status *s)
@@ -1681,6 +1719,9 @@ static void wt_longstatus_print_state(struct wt_status *s)
show_revert_in_progress(s, state_color);
if (state->bisect_in_progress)
show_bisect_in_progress(s, state_color);
+
+ if (state->sparse_checkout_percentage != SPARSE_CHECKOUT_DISABLED)
+ show_sparse_checkout_in_use(s, state_color);
}
static void wt_longstatus_print(struct wt_status *s)
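
The percentage reported by the new status hint counts index entries that do not carry the skip-worktree bit. As a quick worked example with made-up numbers: an index of 50,000 entries of which 47,500 are skip-worktree yields 100 - (100 * 47500) / 50000 = 5, so "git status" would report a sparse checkout with 5% of tracked files present.
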
diff --git a/wt-status.h b/wt-status.h
index 73ab5d4da1..f1fa0ec1a7 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -79,6 +79,7 @@ enum wt_status_format {
#define HEAD_DETACHED_AT _("HEAD detached at ")
#define HEAD_DETACHED_FROM _("HEAD detached from ")
+#define SPARSE_CHECKOUT_DISABLED -1
struct wt_status_state {
int merge_in_progress;
@@ -90,6 +91,7 @@ struct wt_status_state {
int bisect_in_progress;
int revert_in_progress;
int detached_at;
+ int sparse_checkout_percentage; /* SPARSE_CHECKOUT_DISABLED if not sparse */
char *branch;
char *onto;
char *detached_from;