gitlab.com/gitlab-org/gitaly.git
author    Paul Okstad <pokstad@gitlab.com>          2019-04-12 09:03:10 +0300
committer Zeger-Jan van de Weg <git@zjvandeweg.nl>  2019-04-12 09:03:10 +0300
commit    955926b1bfc47e5e1765471a2099cd84edfb01c6 (patch)
tree      8c87fe5482f7b7c4a7d82ccf2e3d03638d2726fa
parent    62dce810a31330b8e252de7fc5460a70d43a74f3 (diff)
Vendor CI Go tools
-rw-r--r-- NOTICE | 725
-rw-r--r-- _support/makegen.go | 10
-rw-r--r-- internal/tools/main.go | 7
-rw-r--r-- internal/tools/tools.go | 11
-rw-r--r-- vendor/github.com/Bowery/prompt/CONTRIBUTORS.md | 9
-rw-r--r-- vendor/github.com/Bowery/prompt/LICENSE | 21
-rw-r--r-- vendor/github.com/Bowery/prompt/README.md | 38
-rw-r--r-- vendor/github.com/Bowery/prompt/ansi_unix.go | 39
-rw-r--r-- vendor/github.com/Bowery/prompt/ansi_windows.go | 510
-rw-r--r-- vendor/github.com/Bowery/prompt/buffer.go | 152
-rw-r--r-- vendor/github.com/Bowery/prompt/buffer_unix.go | 76
-rw-r--r-- vendor/github.com/Bowery/prompt/buffer_windows.go | 150
-rw-r--r-- vendor/github.com/Bowery/prompt/ioctl_bsd.go | 15
-rw-r--r-- vendor/github.com/Bowery/prompt/ioctl_linux.go | 13
-rw-r--r-- vendor/github.com/Bowery/prompt/ioctl_solaris.go | 41
-rw-r--r-- vendor/github.com/Bowery/prompt/ioctl_unix.go | 62
-rw-r--r-- vendor/github.com/Bowery/prompt/keys.go | 41
-rw-r--r-- vendor/github.com/Bowery/prompt/keys_unix.go | 13
-rw-r--r-- vendor/github.com/Bowery/prompt/keys_windows.go | 34
-rw-r--r-- vendor/github.com/Bowery/prompt/prompt.go | 85
-rw-r--r-- vendor/github.com/Bowery/prompt/term.go | 501
-rw-r--r-- vendor/github.com/Bowery/prompt/term_unix.go | 96
-rw-r--r-- vendor/github.com/Bowery/prompt/term_windows.go | 116
-rw-r--r-- vendor/github.com/dchest/safefile/LICENSE | 26
-rw-r--r-- vendor/github.com/dchest/safefile/README.md | 44
-rw-r--r-- vendor/github.com/dchest/safefile/appveyor.yml | 24
-rw-r--r-- vendor/github.com/dchest/safefile/rename.go | 9
-rw-r--r-- vendor/github.com/dchest/safefile/rename_nonatomic.go | 51
-rw-r--r-- vendor/github.com/dchest/safefile/safefile.go | 197
-rw-r--r-- vendor/github.com/google/shlex/COPYING | 202
-rw-r--r-- vendor/github.com/google/shlex/README | 2
-rw-r--r-- vendor/github.com/google/shlex/shlex.go | 416
-rw-r--r-- vendor/github.com/kardianos/govendor/LICENSE | 27
-rw-r--r-- vendor/github.com/kardianos/govendor/README.md | 206
-rw-r--r-- vendor/github.com/kardianos/govendor/appveyor.yml | 24
-rw-r--r-- vendor/github.com/kardianos/govendor/cliprompt/cliPrompt.go | 154
-rw-r--r-- vendor/github.com/kardianos/govendor/context/context.go | 436
-rw-r--r-- vendor/github.com/kardianos/govendor/context/copy.go | 203
-rw-r--r-- vendor/github.com/kardianos/govendor/context/err.go | 80
-rw-r--r-- vendor/github.com/kardianos/govendor/context/fetch.go | 325
-rw-r--r-- vendor/github.com/kardianos/govendor/context/get.go | 91
-rw-r--r-- vendor/github.com/kardianos/govendor/context/label.go | 240
-rw-r--r-- vendor/github.com/kardianos/govendor/context/license.go | 219
-rw-r--r-- vendor/github.com/kardianos/govendor/context/modify.go | 778
-rw-r--r-- vendor/github.com/kardianos/govendor/context/path.go | 235
-rw-r--r-- vendor/github.com/kardianos/govendor/context/resolve.go | 550
-rw-r--r-- vendor/github.com/kardianos/govendor/context/rewrite.go | 209
-rw-r--r-- vendor/github.com/kardianos/govendor/context/status.go | 271
-rw-r--r-- vendor/github.com/kardianos/govendor/context/sync.go | 390
-rw-r--r-- vendor/github.com/kardianos/govendor/context/syslist.go | 8
-rw-r--r-- vendor/github.com/kardianos/govendor/context/tags.go | 240
-rw-r--r-- vendor/github.com/kardianos/govendor/context/vendorFile.go | 79
-rw-r--r-- vendor/github.com/kardianos/govendor/context/version.go | 47
-rw-r--r-- vendor/github.com/kardianos/govendor/help/gen-license.template | 8
-rw-r--r-- vendor/github.com/kardianos/govendor/help/licenses.go | 395
-rw-r--r-- vendor/github.com/kardianos/govendor/help/msg.go | 62
-rw-r--r-- vendor/github.com/kardianos/govendor/help/text.go | 173
-rw-r--r-- vendor/github.com/kardianos/govendor/help/version.go | 7
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/pathos/path.go | 158
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/vfilepath/prefix.go | 14
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/vfilepath/stub.go | 17
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/vfilepath/switch.go | 1
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/vfilepath/walk.go | 83
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/vos/stub.go | 61
-rw-r--r-- vendor/github.com/kardianos/govendor/internal/vos/switch.go | 13
-rw-r--r-- vendor/github.com/kardianos/govendor/main.go | 55
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/gb.go | 29
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/gdm.go | 78
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/glide.go | 100
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/glock.go | 97
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/godep.go | 129
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/migrate.go | 133
-rw-r--r-- vendor/github.com/kardianos/govendor/migrate/old.go | 97
-rw-r--r-- vendor/github.com/kardianos/govendor/pkgspec/pkg.go | 50
-rw-r--r-- vendor/github.com/kardianos/govendor/pkgspec/pkgspec.go | 99
-rw-r--r-- vendor/github.com/kardianos/govendor/prompt/prompt.go | 117
-rw-r--r-- vendor/github.com/kardianos/govendor/run/command.go | 175
-rw-r--r-- vendor/github.com/kardianos/govendor/run/filter.go | 179
-rw-r--r-- vendor/github.com/kardianos/govendor/run/license.go | 107
-rw-r--r-- vendor/github.com/kardianos/govendor/run/list.go | 132
-rw-r--r-- vendor/github.com/kardianos/govendor/run/modify.go | 161
-rw-r--r-- vendor/github.com/kardianos/govendor/run/run.go | 174
-rw-r--r-- vendor/github.com/kardianos/govendor/run/shell.go | 94
-rw-r--r-- vendor/github.com/kardianos/govendor/run/sync.go | 34
-rw-r--r-- vendor/github.com/kardianos/govendor/vcs/bzr.go | 61
-rw-r--r-- vendor/github.com/kardianos/govendor/vcs/git.go | 64
-rw-r--r-- vendor/github.com/kardianos/govendor/vcs/hg.go | 65
-rw-r--r-- vendor/github.com/kardianos/govendor/vcs/svn.go | 60
-rw-r--r-- vendor/github.com/kardianos/govendor/vcs/vcs.go | 79
-rw-r--r-- vendor/github.com/kardianos/govendor/vendorfile/file.go | 335
-rw-r--r-- vendor/github.com/wadey/gocovmerge/LICENSE | 22
-rw-r--r-- vendor/github.com/wadey/gocovmerge/README.md | 16
-rw-r--r-- vendor/github.com/wadey/gocovmerge/gocovmerge.go | 111
-rw-r--r-- vendor/golang.org/x/lint/CONTRIBUTING.md | 15
-rw-r--r-- vendor/golang.org/x/lint/LICENSE | 27
-rw-r--r-- vendor/golang.org/x/lint/README.md | 88
-rw-r--r-- vendor/golang.org/x/lint/go.mod | 3
-rw-r--r-- vendor/golang.org/x/lint/go.sum | 6
-rwxr-xr-x vendor/golang.org/x/lint/golint/bin/golint | bin 0 -> 5936840 bytes
-rw-r--r-- vendor/golang.org/x/lint/golint/golint.go | 159
-rw-r--r-- vendor/golang.org/x/lint/golint/import.go | 309
-rw-r--r-- vendor/golang.org/x/lint/golint/importcomment.go | 13
-rw-r--r-- vendor/golang.org/x/lint/lint.go | 1693
-rw-r--r-- vendor/golang.org/x/tools/LICENSE | 27
-rw-r--r-- vendor/golang.org/x/tools/PATENTS | 22
-rw-r--r-- vendor/golang.org/x/tools/cmd/goimports/doc.go | 43
-rw-r--r-- vendor/golang.org/x/tools/cmd/goimports/goimports.go | 369
-rw-r--r-- vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go | 26
-rw-r--r-- vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go | 11
-rw-r--r-- vendor/golang.org/x/tools/cover/profile.go | 213
-rw-r--r-- vendor/golang.org/x/tools/go/ast/astutil/enclosing.go | 627
-rw-r--r-- vendor/golang.org/x/tools/go/ast/astutil/imports.go | 481
-rw-r--r-- vendor/golang.org/x/tools/go/ast/astutil/rewrite.go | 477
-rw-r--r-- vendor/golang.org/x/tools/go/ast/astutil/util.go | 14
-rw-r--r-- vendor/golang.org/x/tools/go/ast/inspector/inspector.go | 182
-rw-r--r-- vendor/golang.org/x/tools/go/ast/inspector/typeof.go | 216
-rw-r--r-- vendor/golang.org/x/tools/go/buildutil/allpackages.go | 198
-rw-r--r-- vendor/golang.org/x/tools/go/buildutil/fakecontext.go | 109
-rw-r--r-- vendor/golang.org/x/tools/go/buildutil/overlay.go | 103
-rw-r--r-- vendor/golang.org/x/tools/go/buildutil/tags.go | 75
-rw-r--r-- vendor/golang.org/x/tools/go/buildutil/util.go | 212
-rw-r--r-- vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go | 109
-rw-r--r-- vendor/golang.org/x/tools/go/gcexportdata/importer.go | 73
-rw-r--r-- vendor/golang.org/x/tools/go/internal/cgo/cgo.go | 220
-rw-r--r-- vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go | 39
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go | 852
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go | 1036
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go | 93
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go | 1078
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go | 723
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go | 606
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go | 21
-rw-r--r-- vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go | 13
-rw-r--r-- vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go | 160
-rw-r--r-- vendor/golang.org/x/tools/go/loader/doc.go | 205
-rw-r--r-- vendor/golang.org/x/tools/go/loader/loader.go | 1078
-rw-r--r-- vendor/golang.org/x/tools/go/loader/util.go | 124
-rw-r--r-- vendor/golang.org/x/tools/go/packages/doc.go | 222
-rw-r--r-- vendor/golang.org/x/tools/go/packages/external.go | 79
-rw-r--r-- vendor/golang.org/x/tools/go/packages/golist.go | 828
-rw-r--r-- vendor/golang.org/x/tools/go/packages/golist_overlay.go | 104
-rw-r--r-- vendor/golang.org/x/tools/go/packages/packages.go | 1059
-rw-r--r-- vendor/golang.org/x/tools/go/packages/visit.go | 55
-rw-r--r-- vendor/golang.org/x/tools/go/types/typeutil/callee.go | 46
-rw-r--r-- vendor/golang.org/x/tools/go/types/typeutil/imports.go | 31
-rw-r--r-- vendor/golang.org/x/tools/go/types/typeutil/map.go | 313
-rw-r--r-- vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go | 72
-rw-r--r-- vendor/golang.org/x/tools/go/types/typeutil/ui.go | 52
-rw-r--r-- vendor/golang.org/x/tools/go/vcs/discovery.go | 76
-rw-r--r-- vendor/golang.org/x/tools/go/vcs/env.go | 39
-rw-r--r-- vendor/golang.org/x/tools/go/vcs/http.go | 80
-rw-r--r-- vendor/golang.org/x/tools/go/vcs/vcs.go | 759
-rw-r--r-- vendor/golang.org/x/tools/imports/fix.go | 1259
-rw-r--r-- vendor/golang.org/x/tools/imports/imports.go | 315
-rw-r--r-- vendor/golang.org/x/tools/imports/mod.go | 355
-rw-r--r-- vendor/golang.org/x/tools/imports/sortimports.go | 230
-rw-r--r-- vendor/golang.org/x/tools/imports/zstdlib.go | 10325
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go | 196
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go | 13
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go | 14
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go | 13
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go | 29
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go | 37
-rw-r--r-- vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go | 127
-rw-r--r-- vendor/golang.org/x/tools/internal/gopathwalk/walk.go | 250
-rw-r--r-- vendor/golang.org/x/tools/internal/module/module.go | 540
-rw-r--r-- vendor/golang.org/x/tools/internal/semver/semver.go | 388
-rw-r--r-- vendor/gopkg.in/yaml.v2/LICENSE | 201
-rw-r--r-- vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31
-rw-r--r-- vendor/gopkg.in/yaml.v2/NOTICE | 13
-rw-r--r-- vendor/gopkg.in/yaml.v2/README.md | 133
-rw-r--r-- vendor/gopkg.in/yaml.v2/apic.go | 739
-rw-r--r-- vendor/gopkg.in/yaml.v2/decode.go | 775
-rw-r--r-- vendor/gopkg.in/yaml.v2/emitterc.go | 1685
-rw-r--r-- vendor/gopkg.in/yaml.v2/encode.go | 390
-rw-r--r-- vendor/gopkg.in/yaml.v2/go.mod | 5
-rw-r--r-- vendor/gopkg.in/yaml.v2/parserc.go | 1095
-rw-r--r-- vendor/gopkg.in/yaml.v2/readerc.go | 412
-rw-r--r-- vendor/gopkg.in/yaml.v2/resolve.go | 258
-rw-r--r-- vendor/gopkg.in/yaml.v2/scannerc.go | 2696
-rw-r--r-- vendor/gopkg.in/yaml.v2/sorter.go | 113
-rw-r--r-- vendor/gopkg.in/yaml.v2/writerc.go | 26
-rw-r--r-- vendor/gopkg.in/yaml.v2/yaml.go | 466
-rw-r--r-- vendor/gopkg.in/yaml.v2/yamlh.go | 738
-rw-r--r-- vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173
-rw-r--r-- vendor/honnef.co/go/tools/LICENSE | 20
-rw-r--r-- vendor/honnef.co/go/tools/arg/arg.go | 39
-rw-r--r-- vendor/honnef.co/go/tools/callgraph/callgraph.go | 129
-rw-r--r-- vendor/honnef.co/go/tools/callgraph/static/static.go | 35
-rw-r--r-- vendor/honnef.co/go/tools/callgraph/util.go | 181
-rw-r--r-- vendor/honnef.co/go/tools/cmd/staticcheck/README.md | 15
-rw-r--r-- vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go | 30
-rw-r--r-- vendor/honnef.co/go/tools/config/config.go | 162
-rw-r--r-- vendor/honnef.co/go/tools/config/example.conf | 10
-rw-r--r-- vendor/honnef.co/go/tools/deprecated/stdlib.go | 54
-rw-r--r-- vendor/honnef.co/go/tools/functions/concrete.go | 56
-rw-r--r-- vendor/honnef.co/go/tools/functions/functions.go | 150
-rw-r--r-- vendor/honnef.co/go/tools/functions/loops.go | 50
-rw-r--r-- vendor/honnef.co/go/tools/functions/pure.go | 123
-rw-r--r-- vendor/honnef.co/go/tools/functions/terminates.go | 24
-rw-r--r-- vendor/honnef.co/go/tools/go/types/typeutil/callee.go | 46
-rw-r--r-- vendor/honnef.co/go/tools/go/types/typeutil/identical.go | 29
-rw-r--r-- vendor/honnef.co/go/tools/go/types/typeutil/imports.go | 31
-rw-r--r-- vendor/honnef.co/go/tools/go/types/typeutil/map.go | 315
-rw-r--r-- vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go | 72
-rw-r--r-- vendor/honnef.co/go/tools/go/types/typeutil/ui.go | 52
-rw-r--r-- vendor/honnef.co/go/tools/internal/sharedcheck/lint.go | 68
-rw-r--r-- vendor/honnef.co/go/tools/lint/LICENSE | 28
-rw-r--r-- vendor/honnef.co/go/tools/lint/generated.go | 38
-rw-r--r-- vendor/honnef.co/go/tools/lint/lint.go | 706
-rw-r--r-- vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go | 323
-rw-r--r-- vendor/honnef.co/go/tools/lint/lintutil/format/format.go | 128
-rw-r--r-- vendor/honnef.co/go/tools/lint/lintutil/util.go | 362
-rw-r--r-- vendor/honnef.co/go/tools/simple/CONTRIBUTING.md | 15
-rw-r--r-- vendor/honnef.co/go/tools/simple/lint.go | 1734
-rw-r--r-- vendor/honnef.co/go/tools/ssa/LICENSE | 28
-rw-r--r-- vendor/honnef.co/go/tools/ssa/blockopt.go | 195
-rw-r--r-- vendor/honnef.co/go/tools/ssa/builder.go | 2379
-rw-r--r-- vendor/honnef.co/go/tools/ssa/const.go | 169
-rw-r--r-- vendor/honnef.co/go/tools/ssa/create.go | 263
-rw-r--r-- vendor/honnef.co/go/tools/ssa/doc.go | 123
-rw-r--r-- vendor/honnef.co/go/tools/ssa/dom.go | 341
-rw-r--r-- vendor/honnef.co/go/tools/ssa/emit.go | 468
-rw-r--r-- vendor/honnef.co/go/tools/ssa/func.go | 701
-rw-r--r-- vendor/honnef.co/go/tools/ssa/identical.go | 7
-rw-r--r-- vendor/honnef.co/go/tools/ssa/identical_17.go | 7
-rw-r--r-- vendor/honnef.co/go/tools/ssa/lift.go | 653
-rw-r--r-- vendor/honnef.co/go/tools/ssa/lvalue.go | 123
-rw-r--r-- vendor/honnef.co/go/tools/ssa/methods.go | 239
-rw-r--r-- vendor/honnef.co/go/tools/ssa/mode.go | 100
-rw-r--r-- vendor/honnef.co/go/tools/ssa/print.go | 435
-rw-r--r-- vendor/honnef.co/go/tools/ssa/sanity.go | 523
-rw-r--r-- vendor/honnef.co/go/tools/ssa/source.go | 293
-rw-r--r-- vendor/honnef.co/go/tools/ssa/ssa.go | 1745
-rw-r--r-- vendor/honnef.co/go/tools/ssa/ssautil/load.go | 143
-rw-r--r-- vendor/honnef.co/go/tools/ssa/ssautil/switch.go | 234
-rw-r--r-- vendor/honnef.co/go/tools/ssa/ssautil/visit.go | 79
-rw-r--r-- vendor/honnef.co/go/tools/ssa/testmain.go | 267
-rw-r--r-- vendor/honnef.co/go/tools/ssa/util.go | 119
-rw-r--r-- vendor/honnef.co/go/tools/ssa/wrappers.go | 294
-rw-r--r-- vendor/honnef.co/go/tools/ssa/write.go | 5
-rw-r--r-- vendor/honnef.co/go/tools/ssautil/ssautil.go | 41
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md | 15
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/buildtag.go | 21
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/lint.go | 2818
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/rules.go | 322
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/vrp/channel.go | 73
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/vrp/int.go | 476
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/vrp/slice.go | 273
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/vrp/string.go | 258
-rw-r--r-- vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go | 1049
-rw-r--r-- vendor/honnef.co/go/tools/stylecheck/lint.go | 618
-rw-r--r-- vendor/honnef.co/go/tools/stylecheck/names.go | 263
-rw-r--r-- vendor/honnef.co/go/tools/unused/implements.go | 79
-rw-r--r-- vendor/honnef.co/go/tools/unused/unused.go | 1100
-rw-r--r-- vendor/honnef.co/go/tools/version/version.go | 17
-rw-r--r-- vendor/vendor.json | 360
257 files changed, 75131 insertions, 6 deletions
diff --git a/NOTICE b/NOTICE
index f420323ea..bb2f54e72 100644
--- a/NOTICE
+++ b/NOTICE
@@ -101,6 +101,29 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/Bowery/prompt
+The MIT License (MIT)
+
+Copyright (c) 2013-2015 Bowery, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
COPYING - gitlab.com/gitlab-org/gitaly/vendor/github.com/BurntSushi/toml
The MIT License (MIT)
@@ -181,6 +204,34 @@ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/dchest/safefile
+Copyright (c) 2013 Dmitry Chestnykh <dmitry@codingrobots.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/getsentry/raven-go
Copyright (c) 2013 Apollic Software, LLC. All rights reserved.
Copyright (c) 2015 Functional Software, Inc. All rights reserved.
@@ -241,6 +292,210 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+COPYING - gitlab.com/gitlab-org/gitaly/vendor/github.com/google/shlex
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/grpc-ecosystem/go-grpc-middleware
Apache License
Version 2.0, January 2004
@@ -645,6 +900,35 @@ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/grpc-ecosystem/go-grpc-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/kardianos/govendor
+Copyright (c) 2015 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/kelseyhightower/envconfig
Copyright (c) 2013 Kelsey Hightower
@@ -2937,6 +3221,30 @@ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/uber/jaeger-lib
See the License for the specific language governing permissions and
limitations under the License.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/wadey/gocovmerge
+Copyright (c) 2015, Wade Simmons
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE - gitlab.com/gitlab-org/gitaly/vendor/gitlab.com/gitlab-org/gitaly-proto
The MIT License (MIT)
@@ -3036,6 +3344,35 @@ infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/lint
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/net
Copyright (c) 2009 The Go Authors. All rights reserved.
@@ -3248,6 +3585,59 @@ infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/tools
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+PATENTS - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/tools
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE - gitlab.com/gitlab-org/gitaly/vendor/google.golang.org/genproto
Apache License
@@ -3684,4 +4074,337 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LICENSE-3rdparty.csv - gitlab.com/gitlab-org/gitaly/vendor/gopkg.in/DataDog/dd-trace-go.v1
Component,Origin,License,Copyright
-import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors
+import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/gopkg.in/yaml.v2
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE.libyaml - gitlab.com/gitlab-org/gitaly/vendor/gopkg.in/yaml.v2
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+NOTICE - gitlab.com/gitlab-org/gitaly/vendor/gopkg.in/yaml.v2
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/honnef.co/go/tools
+Copyright (c) 2016 Dominik Honnef
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/honnef.co/go/tools/lint
+Copyright (c) 2013 The Go Authors. All rights reserved.
+Copyright (c) 2016 Dominik Honnef. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - gitlab.com/gitlab-org/gitaly/vendor/honnef.co/go/tools/ssa
+Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright (c) 2016 Dominik Honnef. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/_support/makegen.go b/_support/makegen.go
index 82e924062..146197857 100644
--- a/_support/makegen.go
+++ b/_support/makegen.go
@@ -370,7 +370,7 @@ lint: {{ .GoLint }}
@cd {{ .SourceDir }} && go run _support/lint.go
{{ .GoLint }}:
- go get golang.org/x/lint/golint
+ go install {{ .SourceDir }}/vendor/golang.org/x/lint/golint
.PHONY: check-formatting
check-formatting: {{ .GoImports }}
@@ -378,7 +378,7 @@ check-formatting: {{ .GoImports }}
@cd {{ .SourceDir }} && goimports -e -l {{ join .GoFiles " " }} | awk '{ print } END { if(NR>0) { print "Formatting error, run make format"; exit(1) } }'
{{ .GoImports }}:
- go get golang.org/x/tools/cmd/goimports
+ go install {{ .SourceDir }}/vendor/golang.org/x/tools/cmd/goimports
.PHONY: format
format: {{ .GoImports }}
@@ -393,7 +393,7 @@ staticcheck: {{ .StaticCheck }}
# Install staticcheck
{{ .StaticCheck }}:
- go get honnef.co/go/tools/cmd/staticcheck
+ go install {{ .SourceDir }}/vendor/honnef.co/go/tools/cmd/staticcheck
.PHONY: govendor-status
govendor-status: {{ .GoVendor }}
@@ -401,7 +401,7 @@ govendor-status: {{ .GoVendor }}
@cd {{ .SourceDir }} && govendor status
{{ .GoVendor }}:
- go get github.com/kardianos/govendor
+ go install {{ .SourceDir }}/vendor/github.com/kardianos/govendor
.PHONY: notice-up-to-date
notice-up-to-date: {{ .GoVendor }} clean-ruby-vendor-go
@@ -439,7 +439,7 @@ cover: prepare-tests {{ .GoCovMerge }}
@go tool cover -func "{{ .CoverageDir }}/all.merged"
{{ .GoCovMerge }}:
- go get github.com/wadey/gocovmerge
+ go install {{ .SourceDir }}/vendor/github.com/wadey/gocovmerge
.PHONY: docker
docker:
diff --git a/internal/tools/main.go b/internal/tools/main.go
new file mode 100644
index 000000000..0101ced3e
--- /dev/null
+++ b/internal/tools/main.go
@@ -0,0 +1,7 @@
+// Command tools exists purely to ensure the package manager doesn't prune the
+// CI tools from our vendor folder. This command is not meant for actual usage.
+package main
+
+func main() {
+ panic("this command only exists to help vendor CI tools")
+}
diff --git a/internal/tools/tools.go b/internal/tools/tools.go
new file mode 100644
index 000000000..f8257cb7a
--- /dev/null
+++ b/internal/tools/tools.go
@@ -0,0 +1,11 @@
+// +build tools
+
+package main
+
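+// The blank imports below pin the CI tools in the vendor folder so the
+// package manager (govendor) doesn't prune them and "go install" can build
+// them from vendored source.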
+import (
+ _ "github.com/kardianos/govendor"
+ _ "github.com/wadey/gocovmerge"
+ _ "golang.org/x/lint/golint"
+ _ "golang.org/x/tools/cmd/goimports"
+ _ "honnef.co/go/tools/cmd/staticcheck"
+)
diff --git a/vendor/github.com/Bowery/prompt/CONTRIBUTORS.md b/vendor/github.com/Bowery/prompt/CONTRIBUTORS.md
new file mode 100644
index 000000000..e620b8486
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/CONTRIBUTORS.md
@@ -0,0 +1,9 @@
+- [Larz Conwell](https://github.com/larzconwell)
+- [Steve Kaliski](https://github.com/sjkaliski)
+- [NHOrus](https://github.com/NHOrus)
+- [Attila Fülöp](https://github.com/AttilaFueloep)
+- [Gereon Frey](https://github.com/gfrey)
+- [Aaron Bieber](https://github.com/qbit)
+- [Ricky Medina](https://github.com/r-medina)
+- [sungo](https://github.com/sungo)
+- [Rohan Verma](https://github.com/rhnvrm)
diff --git a/vendor/github.com/Bowery/prompt/LICENSE b/vendor/github.com/Bowery/prompt/LICENSE
new file mode 100644
index 000000000..0cc1fbee7
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2015 Bowery, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Bowery/prompt/README.md b/vendor/github.com/Bowery/prompt/README.md
new file mode 100644
index 000000000..f9db406df
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/README.md
@@ -0,0 +1,38 @@
+# Prompt
+
+[![Circle CI](https://circleci.com/gh/Bowery/prompt/tree/master.png?style=badge)](https://circleci.com/gh/Bowery/prompt/tree/master)
+
+[![GoDoc](https://godoc.org/github.com/Bowery/prompt?status.png)](https://godoc.org/github.com/Bowery/prompt)
+
+Prompt is a cross-platform line-editing prompting library. Read the GoDoc page
+for more info and API details.
+
+## Features
+- Keyboard shortcuts in prompts
+- History support
+- Secure password prompt
+- Custom prompt support
+- Fallback prompt for unsupported terminals
+- ANSI conversion for Windows
+
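+## Example
+
+A minimal usage sketch of the package-level wrappers defined in `prompt.go`
+(error handling here is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Bowery/prompt"
+)
+
+func main() {
+	// Basic prompts for a line of input; true marks the input as required.
+	name, err := prompt.Basic("Name:", true)
+	if err != nil {
+		// Interrupts surface as prompt.ErrCTRLC (CTRL+C) or prompt.ErrEOF (CTRL+D).
+		fmt.Println(err)
+		return
+	}
+
+	// Ask appends "? (y/n)" to the question and reports an affirmative answer.
+	ok, err := prompt.Ask("Save")
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	fmt.Println(name, ok)
+}
+```
+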
+## Todo
+- Multi-line prompt as a Terminal option
+- Make refresh less jittery on Windows ([possible reason](https://github.com/Bowery/prompt/blob/master/output_windows.go#L108))
+- Multi-byte character support on Windows
+- `AnsiWriter` should execute the equivalent ANSI escape code functionality on Windows
+- Support for more ANSI escape codes on Windows
+- More keyboard shortcuts from Readline's shortcut list
+
+## Contributing
+
+Make sure Go is set up and running the latest release version, and make sure your `GOPATH` is set up properly.
+
+Follow the guidelines [here](https://guides.github.com/activities/contributing-to-open-source/#contributing).
+
+Please be sure to `gofmt` any code before doing commits. You can simply run `gofmt -w .` to format all the code in the directory.
+
+Lastly, don't forget to add your name to [`CONTRIBUTORS.md`](https://github.com/Bowery/prompt/blob/master/CONTRIBUTORS.md).
+
+## License
+
+Prompt is MIT licensed; details can be found [here](https://raw.githubusercontent.com/Bowery/prompt/master/LICENSE).
diff --git a/vendor/github.com/Bowery/prompt/ansi_unix.go b/vendor/github.com/Bowery/prompt/ansi_unix.go
new file mode 100644
index 000000000..70adf684c
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/ansi_unix.go
@@ -0,0 +1,39 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly solaris
+
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "os"
+)
+
+// AnsiReader is an io.Reader that wraps an *os.File.
+type AnsiReader struct {
+ file *os.File
+}
+
+// NewAnsiReader creates an AnsiReader from the given input file.
+func NewAnsiReader(in *os.File) *AnsiReader {
+ return &AnsiReader{file: in}
+}
+
+// Read reads data from the input file into b.
+func (ar *AnsiReader) Read(b []byte) (int, error) {
+ return ar.file.Read(b)
+}
+
+// AnsiWriter is an io.Writer that wraps an *os.File.
+type AnsiWriter struct {
+ file *os.File
+}
+
+// NewAnsiWriter creates an AnsiWriter from the given output file.
+func NewAnsiWriter(out *os.File) *AnsiWriter {
+ return &AnsiWriter{file: out}
+}
+
+// Write writes data from b to the output file.
+func (aw *AnsiWriter) Write(b []byte) (int, error) {
+ return aw.file.Write(b)
+}
diff --git a/vendor/github.com/Bowery/prompt/ansi_windows.go b/vendor/github.com/Bowery/prompt/ansi_windows.go
new file mode 100644
index 000000000..9d860a8a0
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/ansi_windows.go
@@ -0,0 +1,510 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "bytes"
+ "os"
+ "unicode/utf8"
+ "unsafe"
+)
+
+// keyEventType is the key event type for an input record.
+const keyEventType = 0x0001
+
+var (
+ readConsoleInput = kernel.NewProc("ReadConsoleInputW")
+)
+
+// inputRecord describes an input event from a console.
+type inputRecord struct {
+ eventType uint16
+	// Magic to get around the C union type; cast
+	// event to the type using unsafe.Pointer.
+ _ [2]byte
+ event [16]byte
+}
+
+// keyEventRecord describes a keyboard event.
+type keyEventRecord struct {
+ keyDown int32
+ repeatCount uint16
+ virtualKeyCode uint16
+ virtualScanCode uint16
+ char uint16
+ controlKeyState uint32
+}
+
+// AnsiReader is an io.Reader that reads from a given file and converts Windows
+// key codes to their equivalent ANSI escape codes.
+type AnsiReader struct {
+ fd uintptr
+ buf []rune
+}
+
+// NewAnsiReader creates an AnsiReader from the given input file.
+func NewAnsiReader(in *os.File) *AnsiReader {
+ return &AnsiReader{fd: in.Fd()}
+}
+
+// Read reads data from the input, converting Windows key codes to ANSI
+// escape codes; a converted code may be returned over multiple Reads.
+func (ar *AnsiReader) Read(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+
+ if len(ar.buf) == 0 {
+ var runes []rune
+ var read uint32
+ rec := new(inputRecord)
+
+ for runes == nil {
+ ret, _, err := readConsoleInput.Call(ar.fd, uintptr(unsafe.Pointer(rec)),
+ 1, uintptr(unsafe.Pointer(&read)))
+ if ret == 0 {
+ return 0, err
+ }
+
+ if rec.eventType != keyEventType {
+ continue
+ }
+
+ ke := (*keyEventRecord)(unsafe.Pointer(&rec.event))
+ if ke.keyDown == 0 {
+ continue
+ }
+
+ shift := false
+ if ke.controlKeyState&shiftKey != 0 {
+ shift = true
+ }
+
+ ctrl := false
+ if ke.controlKeyState&leftCtrlKey != 0 || ke.controlKeyState&rightCtrlKey != 0 {
+ ctrl = true
+ }
+
+ alt := false
+ if ke.controlKeyState&leftAltKey != 0 || ke.controlKeyState&rightAltKey != 0 {
+ alt = true
+ }
+
+ // Backspace, Return, Space.
+ if ke.char == ctrlH || ke.char == returnKey || ke.char == spaceKey {
+ code := string(returnKey)
+ if ke.char == ctrlH {
+ code = string(backKey)
+ } else if ke.char == spaceKey {
+ code = string(spaceKey)
+ }
+
+ if alt {
+ code = string(escKey) + code
+ }
+
+ runes = []rune(code)
+ break
+ }
+
+ // Generate runes for the chars and key codes.
+ if ke.char > 0 {
+ runes = []rune{rune(ke.char)}
+ } else {
+ code := string(escKey)
+
+ switch ke.virtualKeyCode {
+ case f1Key:
+ if ctrl {
+ continue
+ }
+
+ code += ar.shortFunction("P", shift, ctrl, alt)
+ case f2Key:
+ code += ar.shortFunction("Q", shift, ctrl, alt)
+ case f3Key:
+ code += ar.shortFunction("R", shift, ctrl, alt)
+ case f4Key:
+ code += ar.shortFunction("S", shift, ctrl, alt)
+ case f5Key:
+ code += ar.longFunction("15", shift, ctrl, alt)
+ case f6Key:
+ code += ar.longFunction("17", shift, ctrl, alt)
+ case f7Key:
+ code += ar.longFunction("18", shift, ctrl, alt)
+ case f8Key:
+ code += ar.longFunction("19", shift, ctrl, alt)
+ case f9Key:
+ code += ar.longFunction("20", shift, ctrl, alt)
+ case f10Key:
+ code += ar.longFunction("21", shift, ctrl, alt)
+ case f11Key:
+ code += ar.longFunction("23", shift, ctrl, alt)
+ case f12Key:
+ code += ar.longFunction("24", shift, ctrl, alt)
+ case insertKey:
+ if shift || ctrl {
+ continue
+ }
+
+ code += ar.longFunction("2", shift, ctrl, alt)
+ case deleteKey:
+ code += ar.longFunction("3", shift, ctrl, alt)
+ case homeKey:
+ code += "OH"
+ case endKey:
+ code += "OF"
+ case pgupKey:
+ if shift {
+ continue
+ }
+
+ code += ar.longFunction("5", shift, ctrl, alt)
+ case pgdownKey:
+ if shift {
+ continue
+ }
+
+ code += ar.longFunction("6", shift, ctrl, alt)
+ case upKey:
+ code += ar.arrow("A", shift, ctrl, alt)
+ case downKey:
+ code += ar.arrow("B", shift, ctrl, alt)
+ case leftKey:
+ code += ar.arrow("D", shift, ctrl, alt)
+ case rightKey:
+ code += ar.arrow("C", shift, ctrl, alt)
+ default:
+ continue
+ }
+
+ runes = []rune(code)
+ }
+ }
+
+ ar.buf = runes
+ }
+
+ // Get items from the buffer.
+ var n int
+ for i, r := range ar.buf {
+ if utf8.RuneLen(r) > len(b) {
+ ar.buf = ar.buf[i:]
+ return n, nil
+ }
+
+ nr := utf8.EncodeRune(b, r)
+ b = b[nr:]
+ n += nr
+ }
+
+ ar.buf = nil
+ return n, nil
+}
+
+// shortFunction creates a short function code.
+func (ar *AnsiReader) shortFunction(ident string, shift, ctrl, alt bool) string {
+ code := "O"
+
+ if shift {
+ code += "1;2"
+ } else if ctrl {
+ code += "1;5"
+ } else if alt {
+ code += "1;3"
+ }
+
+ return code + ident
+}
+
+// longFunction creates a long function code.
+func (ar *AnsiReader) longFunction(ident string, shift, ctrl, alt bool) string {
+ code := "["
+ code += ident
+
+ if shift {
+ code += ";2"
+ } else if ctrl {
+ code += ";5"
+ } else if alt {
+ code += ";3"
+ }
+
+ return code + "~"
+}
+
+// arrow creates an arrow code.
+func (ar *AnsiReader) arrow(ident string, shift, ctrl, alt bool) string {
+ code := "["
+
+ if shift {
+ code += "1;2"
+ } else if ctrl {
+ code += "1;5"
+ } else if alt {
+ code += "1;3"
+ }
+
+ return code + ident
+}
+
+// AnsiWriter is an io.Writer that writes to a given file and converts ANSI
+// escape codes to their equivalent Windows functionality.
+type AnsiWriter struct {
+ file *os.File
+ buf []byte
+}
+
+// NewAnsiWriter creates an AnsiWriter from the given output.
+func NewAnsiWriter(out *os.File) *AnsiWriter {
+ return &AnsiWriter{file: out}
+}
+
+// Write writes the buffer, filtering out ANSI escape codes and converting
+// them to the equivalent Windows functionality. ANSI escape codes may span
+// multiple Writes.
+func (aw *AnsiWriter) Write(b []byte) (int, error) {
+ needsProcessing := bytes.Contains(b, []byte(string(escKey)))
+ if len(aw.buf) > 0 {
+ needsProcessing = true
+ }
+
+ if !needsProcessing {
+ return aw.file.Write(b)
+ }
+ var p []byte
+
+ for _, char := range b {
+ // Found the beginning of an escape.
+ if char == escKey {
+ aw.buf = append(aw.buf, char)
+ continue
+ }
+
+ // Function identifiers.
+ if len(aw.buf) == 1 && (char == '_' || char == 'P' || char == '[' ||
+ char == ']' || char == '^' || char == ' ' || char == '#' ||
+ char == '%' || char == '(' || char == ')' || char == '*' ||
+ char == '+') {
+ aw.buf = append(aw.buf, char)
+ continue
+ }
+
+ // Cursor functions.
+ if len(aw.buf) == 1 && (char == '7' || char == '8') {
+ // Add another char before because finish skips 2 items.
+ aw.buf = append(aw.buf, '_', char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Keyboard functions.
+ if len(aw.buf) == 1 && (char == '=' || char == '>') {
+ aw.buf = append(aw.buf, char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Bottom left function.
+ if len(aw.buf) == 1 && char == 'F' {
+ // Add extra char for finish.
+ aw.buf = append(aw.buf, '_', char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Reset function.
+ if len(aw.buf) == 1 && char == 'c' {
+ // Add extra char for finish.
+ aw.buf = append(aw.buf, '_', char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Space functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == ' ' && (char == 'F' || char == 'G' ||
+ char == 'L' || char == 'M' || char == 'N') {
+ aw.buf = append(aw.buf, char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Number functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == '#' && (char >= '3' && char <= '6') ||
+ char == '8' {
+ aw.buf = append(aw.buf, char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Percentage functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == '%' && (char == '@' || char == 'G') {
+ aw.buf = append(aw.buf, char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // Character set functions.
+ if len(aw.buf) >= 2 && (aw.buf[1] == '(' || aw.buf[1] == ')' ||
+ aw.buf[1] == '*' || aw.buf[1] == '+') && (char == '0' ||
+ (char >= '4' && char <= '7') || char == '=' || (char >= 'A' &&
+ char <= 'C') || char == 'E' || char == 'H' || char == 'K' ||
+ char == 'Q' || char == 'R' || char == 'Y') {
+ aw.buf = append(aw.buf, char)
+
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // APC functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == '_' {
+ aw.buf = append(aw.buf, char)
+
+ // End of APC.
+ if char == '\\' && aw.buf[len(aw.buf)-1] == escKey {
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ continue
+ }
+
+ // DC functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == 'P' {
+ aw.buf = append(aw.buf, char)
+
+ // End of DC.
+ if char == '\\' && aw.buf[len(aw.buf)-1] == escKey {
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ continue
+ }
+
+ // CSI functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == '[' {
+ aw.buf = append(aw.buf, char)
+
+ // End of CSI.
+ if char == '@' || (char >= 'A' && char <= 'M') || char == 'P' ||
+ char == 'S' || char == 'T' || char == 'X' || char == 'Z' ||
+ char == '`' || (char >= 'b' && char <= 'd') || (char >= 'f' &&
+ char <= 'i') || (char >= 'l' && char <= 'n') || (char >= 'p' &&
+ char <= 't') || char == 'w' || char == 'x' || char == 'z' ||
+ char == '{' || char == '|' {
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ continue
+ }
+
+ // OSC functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == ']' {
+ aw.buf = append(aw.buf, char)
+
+ // Capture incomplete code.
+ if len(aw.buf) == 4 && aw.buf[2] == '0' && char == ';' {
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ continue
+ }
+
+ // End of OSC.
+ if (char == '\\' && aw.buf[len(aw.buf)-1] == escKey) || char == ctrlG {
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ continue
+ }
+
+ // PM functions.
+ if len(aw.buf) >= 2 && aw.buf[1] == '^' {
+ aw.buf = append(aw.buf, char)
+
+ // End of PM.
+ if char == '\\' && aw.buf[len(aw.buf)-1] == escKey {
+ err := aw.finish(nil)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ continue
+ }
+
+ // Normal character, resets escape buffer.
+ if len(aw.buf) > 0 {
+ aw.buf = nil
+ }
+ p = append(p, char)
+ }
+
+ _, err := aw.file.Write(p)
+ return len(b), err
+}
+
+// finish finishes an ANSI escape code and calls the parsing function. Afterwards
+// the escape buffer is emptied.
+func (aw *AnsiWriter) finish(parse func([]byte) error) error {
+ var err error
+
+ if parse != nil {
+ err = parse(aw.buf[2:])
+ }
+
+ aw.buf = nil
+ return err
+}
diff --git a/vendor/github.com/Bowery/prompt/buffer.go b/vendor/github.com/Bowery/prompt/buffer.go
new file mode 100644
index 000000000..917da3e2d
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/buffer.go
@@ -0,0 +1,152 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "os"
+ "unicode/utf8"
+)
+
+// Buffer contains state for line editing and writing.
+type Buffer struct {
+ Out *os.File
+ Prompt string
+ Echo bool
+ Cols int
+ pos int
+ size int
+ data []rune
+}
+
+// NewBuffer creates a buffer writing to out if echo is true.
+func NewBuffer(prompt string, out *os.File, echo bool) *Buffer {
+ return &Buffer{
+ Out: out,
+ Prompt: prompt,
+ Echo: echo,
+ }
+}
+
+// String returns the data as a string.
+func (buf *Buffer) String() string {
+ return string(buf.data[:buf.size])
+}
+
+// Insert inserts characters at the cursor's position.
+func (buf *Buffer) Insert(rs ...rune) error {
+ rsLen := len(rs)
+ total := buf.size + rsLen
+
+ if total > len(buf.data) {
+ buf.data = append(buf.data, make([]rune, rsLen)...)
+ }
+
+ // Shift characters to make room in the correct pos.
+ if buf.size != buf.pos {
+ copy(buf.data[buf.pos+rsLen:buf.size+rsLen], buf.data[buf.pos:buf.size])
+ }
+
+ for _, r := range rs {
+ buf.data[buf.pos] = r
+ buf.pos++
+ buf.size++
+ }
+
+ return buf.Refresh()
+}
+
+// Set sets the content in the buffer.
+func (buf *Buffer) Set(rs ...rune) error {
+ rsLen := len(rs)
+ buf.data = rs
+ buf.pos = rsLen
+ buf.size = rsLen
+
+ return buf.Refresh()
+}
+
+// Start moves the cursor to the start.
+func (buf *Buffer) Start() error {
+ if buf.pos <= 0 {
+ return nil
+ }
+
+ buf.pos = 0
+ return buf.Refresh()
+}
+
+// End moves the cursor to the end.
+func (buf *Buffer) End() error {
+ if buf.pos >= buf.size {
+ return nil
+ }
+
+ buf.pos = buf.size
+ return buf.Refresh()
+}
+
+// Left moves the cursor one character left.
+func (buf *Buffer) Left() error {
+ if buf.pos <= 0 {
+ return nil
+ }
+
+ buf.pos--
+ return buf.Refresh()
+}
+
+// Right moves the cursor one character right.
+func (buf *Buffer) Right() error {
+ if buf.pos >= buf.size {
+ return nil
+ }
+
+ buf.pos++
+ return buf.Refresh()
+}
+
+// Del removes the character at the cursor position.
+func (buf *Buffer) Del() error {
+ if buf.pos >= buf.size {
+ return nil
+ }
+
+ // Shift characters after position back one.
+ copy(buf.data[buf.pos:], buf.data[buf.pos+1:buf.size])
+ buf.size--
+
+ return buf.Refresh()
+}
+
+// DelLeft removes the character to the left.
+func (buf *Buffer) DelLeft() error {
+ if buf.pos <= 0 {
+ return nil
+ }
+
+ // Shift characters from position back one.
+ copy(buf.data[buf.pos-1:], buf.data[buf.pos:buf.size])
+ buf.pos--
+ buf.size--
+
+ return buf.Refresh()
+}
+
+// EndLine ends the line with CRLF.
+func (buf *Buffer) EndLine() error {
+ _, err := buf.Out.Write(crlf)
+ return err
+}
+
+// toBytes converts a slice of runes to its equivalent in bytes.
+func toBytes(runes []rune) []byte {
+ var bytes []byte
+ char := make([]byte, utf8.UTFMax)
+
+ for _, r := range runes {
+ n := utf8.EncodeRune(char, r)
+ bytes = append(bytes, char[:n]...)
+ }
+
+ return bytes
+}
diff --git a/vendor/github.com/Bowery/prompt/buffer_unix.go b/vendor/github.com/Bowery/prompt/buffer_unix.go
new file mode 100644
index 000000000..5464f25e6
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/buffer_unix.go
@@ -0,0 +1,76 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly solaris
+
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "fmt"
+)
+
+// Refresh rewrites the prompt and buffer.
+func (buf *Buffer) Refresh() error {
+	// If we're not echoing, just write the prompt.
+ if !buf.Echo {
+ _, err := buf.Out.Write(mvLeftEdge)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write([]byte(buf.Prompt))
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write(delRight)
+ return err
+ }
+
+ prLen := len(buf.Prompt)
+ start := 0
+ size := buf.size
+ pos := buf.pos
+
+ // Get slice range that should be visible.
+ for prLen+pos >= buf.Cols {
+ start++
+ size--
+ pos--
+ }
+ for prLen+size > buf.Cols {
+ size--
+ }
+
+ _, err := buf.Out.Write(mvLeftEdge)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write([]byte(buf.Prompt))
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write(toBytes(buf.data[start : size+start]))
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write(delRight)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write([]byte(fmt.Sprintf(mvToCol, pos+prLen)))
+ return err
+}
+
+// ClsScreen clears the screen and refreshes.
+func (buf *Buffer) ClsScreen() error {
+ _, err := buf.Out.Write(clsScreen)
+ if err != nil {
+ return err
+ }
+
+ return buf.Refresh()
+}
diff --git a/vendor/github.com/Bowery/prompt/buffer_windows.go b/vendor/github.com/Bowery/prompt/buffer_windows.go
new file mode 100644
index 000000000..e24c0d18e
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/buffer_windows.go
@@ -0,0 +1,150 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "unsafe"
+)
+
+var (
+ fillConsoleOutputCharacter = kernel.NewProc("FillConsoleOutputCharacterW")
+ setConsoleCursorPosition = kernel.NewProc("SetConsoleCursorPosition")
+)
+
+// Refresh rewrites the prompt and buffer.
+func (buf *Buffer) Refresh() error {
+ csbi := new(consoleScreenBufferInfo)
+ ret, _, err := getConsoleScreenBufferInfo.Call(buf.Out.Fd(),
+ uintptr(unsafe.Pointer(csbi)))
+ if ret == 0 {
+ return err
+ }
+
+	// If we're not echoing, just write the prompt.
+ if !buf.Echo {
+ err = buf.delLine(csbi)
+ if err != nil {
+ return err
+ }
+
+ err = buf.mvLeftEdge(csbi)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write([]byte(buf.Prompt))
+ return err
+ }
+
+ prLen := len(buf.Prompt)
+ start := 0
+ size := buf.size
+ pos := buf.pos
+
+ // Get slice range that should be visible.
+ for prLen+pos >= buf.Cols {
+ start++
+ size--
+ pos--
+ }
+ for prLen+size > buf.Cols {
+ size--
+ }
+
+ err = buf.delLine(csbi)
+ if err != nil {
+ return err
+ }
+
+ err = buf.mvLeftEdge(csbi)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write([]byte(buf.Prompt))
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Out.Write(toBytes(buf.data[start : size+start]))
+ if err != nil {
+ return err
+ }
+
+ return buf.mvToCol(csbi, pos+prLen)
+}
+
+// ClsScreen clears the screen and refreshes.
+func (buf *Buffer) ClsScreen() error {
+ var written uint32
+ coords := new(coord)
+
+ csbi := new(consoleScreenBufferInfo)
+ ret, _, err := getConsoleScreenBufferInfo.Call(buf.Out.Fd(),
+ uintptr(unsafe.Pointer(csbi)))
+ if ret == 0 {
+ return err
+ }
+
+ // Clear everything from 0,0.
+ ret, _, err = fillConsoleOutputCharacter.Call(buf.Out.Fd(), uintptr(' '),
+ uintptr(csbi.size.x*csbi.size.y), uintptr(*(*int32)(unsafe.Pointer(coords))),
+ uintptr(unsafe.Pointer(&written)))
+ if ret == 0 {
+ return err
+ }
+
+ // Set cursor at 0,0.
+ ret, _, err = setConsoleCursorPosition.Call(buf.Out.Fd(),
+ uintptr(*(*int32)(unsafe.Pointer(coords))))
+ if ret == 0 {
+ return err
+ }
+
+ return buf.Refresh()
+}
+
+// delLine deletes the line the csbi cursor is positioned on.
+// TODO: Possible cause of refresh jittering; instead we should copy the Unix
+// code, write over the contents, and then remove everything to the right.
+func (buf *Buffer) delLine(csbi *consoleScreenBufferInfo) error {
+ var written uint32
+ coords := &coord{y: csbi.cursorPosition.y}
+
+ ret, _, err := fillConsoleOutputCharacter.Call(buf.Out.Fd(), uintptr(' '),
+ uintptr(csbi.size.x), uintptr(*(*int32)(unsafe.Pointer(coords))),
+ uintptr(unsafe.Pointer(&written)))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
+
+// mvLeftEdge moves the cursor to the beginning of the line the csbi cursor
+// is positioned on.
+func (buf *Buffer) mvLeftEdge(csbi *consoleScreenBufferInfo) error {
+ coords := &coord{y: csbi.cursorPosition.y}
+
+ ret, _, err := setConsoleCursorPosition.Call(buf.Out.Fd(),
+ uintptr(*(*int32)(unsafe.Pointer(coords))))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
+
+// mvToCol moves the cursor to the column on the line the csbi cursor is
+// positioned on.
+func (buf *Buffer) mvToCol(csbi *consoleScreenBufferInfo, x int) error {
+ coords := &coord{x: int16(x), y: csbi.cursorPosition.y}
+
+ ret, _, err := setConsoleCursorPosition.Call(buf.Out.Fd(),
+ uintptr(*(*int32)(unsafe.Pointer(coords))))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Bowery/prompt/ioctl_bsd.go b/vendor/github.com/Bowery/prompt/ioctl_bsd.go
new file mode 100644
index 000000000..71476c889
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/ioctl_bsd.go
@@ -0,0 +1,15 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ tcgets = unix.TIOCGETA
+ tcsets = unix.TIOCSETA
+ tcsetsf = unix.TIOCSETAF
+)
diff --git a/vendor/github.com/Bowery/prompt/ioctl_linux.go b/vendor/github.com/Bowery/prompt/ioctl_linux.go
new file mode 100644
index 000000000..5ca9cdcde
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/ioctl_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ tcgets = unix.TCGETS
+ tcsets = unix.TCSETS
+ tcsetsf = unix.TCSETSF
+)
diff --git a/vendor/github.com/Bowery/prompt/ioctl_solaris.go b/vendor/github.com/Bowery/prompt/ioctl_solaris.go
new file mode 100644
index 000000000..cb00dd159
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/ioctl_solaris.go
@@ -0,0 +1,41 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ tcgets = unix.TCGETS
+ tcsetsf = unix.TCSETSF
+ tcsets = unix.TCSETS
+)
+
+// terminalSize retrieves the cols/rows for the terminal connected to out.
+func terminalSize(out *os.File) (int, int, error) {
+ ws, err := unix.IoctlGetWinsize(int(out.Fd()), unix.TIOCGWINSZ)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ return int(ws.Col), int(ws.Row), nil
+}
+
+// getTermios retrieves the termios settings for the terminal descriptor.
+func getTermios(fd uintptr) (*unix.Termios, error) {
+ return unix.IoctlGetTermios(int(fd), tcgets)
+}
+
+// setTermios sets the termios settings for the terminal descriptor,
+// optionally flushing the buffer before setting.
+func setTermios(fd uintptr, flush bool, mode *unix.Termios) error {
+ req := tcsets
+ if flush {
+ req = tcsetsf
+ }
+
+ return unix.IoctlSetTermios(int(fd), uint(req), mode)
+}
diff --git a/vendor/github.com/Bowery/prompt/ioctl_unix.go b/vendor/github.com/Bowery/prompt/ioctl_unix.go
new file mode 100644
index 000000000..0a9033b9a
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/ioctl_unix.go
@@ -0,0 +1,62 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "os"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// winsize contains the size for the terminal.
+type winsize struct {
+ rows uint16
+ cols uint16
+ _ uint32
+}
+
+// terminalSize retrieves the cols/rows for the terminal connected to out.
+func terminalSize(out *os.File) (int, int, error) {
+ ws := new(winsize)
+
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, out.Fd(),
+ uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+ if err != 0 {
+ return 0, 0, err
+ }
+
+ return int(ws.cols), int(ws.rows), nil
+}
+
+// getTermios retrieves the termios settings for the terminal descriptor.
+func getTermios(fd uintptr) (*unix.Termios, error) {
+ termios := new(unix.Termios)
+
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, tcgets,
+ uintptr(unsafe.Pointer(termios)))
+ if err != 0 {
+ return nil, err
+ }
+
+ return termios, nil
+}
+
+// setTermios sets the termios settings for the terminal descriptor,
+// optionally flushing the buffer before setting.
+func setTermios(fd uintptr, flush bool, mode *unix.Termios) error {
+ req := int64(tcsets)
+ if flush {
+ req = int64(tcsetsf)
+ }
+
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(req),
+ uintptr(unsafe.Pointer(mode)))
+ if err != 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Bowery/prompt/keys.go b/vendor/github.com/Bowery/prompt/keys.go
new file mode 100644
index 000000000..a4e2e40db
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/keys.go
@@ -0,0 +1,41 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+// Line ending in raw mode.
+var crlf = []byte("\r\n")
+
+const (
+ backKey = '\u007f'
+ escKey = '\u001B'
+ spaceKey = '\u0020'
+)
+
+const (
+ ctrlA = iota + 1
+ ctrlB
+ ctrlC
+ ctrlD
+ ctrlE
+ ctrlF
+ ctrlG
+ ctrlH
+ tabKey
+ ctrlJ
+ ctrlK
+ ctrlL
+ returnKey
+ ctrlN
+ ctrlO
+ ctrlP
+ ctrlQ
+ ctrlR
+ ctrlS
+ ctrlT
+ ctrlU
+ ctrlV
+ ctrlW
+ ctrlX
+ ctrlY
+ ctrlZ
+)
diff --git a/vendor/github.com/Bowery/prompt/keys_unix.go b/vendor/github.com/Bowery/prompt/keys_unix.go
new file mode 100644
index 000000000..c6dc854c7
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/keys_unix.go
@@ -0,0 +1,13 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly solaris
+
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+const mvToCol = "\u001b[0G\u001b[%dC"
+
+var (
+ mvLeftEdge = []byte("\u001b[0G")
+ clsScreen = []byte("\u001b[H\u001b[2J")
+ delRight = []byte("\u001b[0K")
+)
diff --git a/vendor/github.com/Bowery/prompt/keys_windows.go b/vendor/github.com/Bowery/prompt/keys_windows.go
new file mode 100644
index 000000000..74db7874a
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/keys_windows.go
@@ -0,0 +1,34 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+const (
+ f1Key = 0x70 + iota
+ f2Key
+ f3Key
+ f4Key
+ f5Key
+ f6Key
+ f7Key
+ f8Key
+ f9Key
+ f10Key
+ f11Key
+ f12Key
+
+ homeKey = 0x24
+ endKey = 0x23
+ upKey = 0x26
+ downKey = 0x28
+ rightKey = 0x27
+ leftKey = 0x25
+ insertKey = 0x2d
+ pgupKey = 0x21
+ pgdownKey = 0x22
+ deleteKey = 0x2e
+ leftAltKey = 0x2
+ rightAltKey = 0x1
+ leftCtrlKey = 0x8
+ rightCtrlKey = 0x4
+ shiftKey = 0x10
+)
diff --git a/vendor/github.com/Bowery/prompt/prompt.go b/vendor/github.com/Bowery/prompt/prompt.go
new file mode 100644
index 000000000..f6037299e
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/prompt.go
@@ -0,0 +1,85 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+// Package prompt implements a cross-platform line-editing prompt. It also
+// provides routines to use ANSI escape sequences across platforms for
+// terminal-connected io.Readers/io.Writers.
+//
+// If os.Stdin isn't connected to a terminal or (on Unix) if the terminal
+// doesn't support the ANSI escape sequences needed, a fallback prompt is
+// provided that doesn't do line editing. Unsupported Unix terminals have
+// the TERM environment variable set to either "dumb" or "cons25".
+//
+// The keyboard shortcuts are similar to those found in the Readline library:
+//
+// - Enter / CTRL+D
+// - End the line.
+// - CTRL+C
+// - End the line, return error `ErrCTRLC`.
+// - Backspace
+// - Remove the character to the left.
+// - CTRL+L
+// - Clear the screen (keeping the current line's content).
+// - Home / End
+// - Jump to the beginning/end of the line.
+// - Up arrow / Down arrow
+// - Go back and forward in the history.
+// - Left arrow / Right arrow
+// - Move left/right one character.
+// - Delete
+// - Remove the character to the right.
+package prompt
+
+// Basic is a wrapper around Terminal.Basic.
+func Basic(prefix string, required bool) (string, error) {
+ term, err := NewTerminal()
+ if err != nil {
+ return "", err
+ }
+ defer term.Close()
+
+ return term.Basic(prefix, required)
+}
+
+// BasicDefault is a wrapper around Terminal.BasicDefault.
+func BasicDefault(prefix, def string) (string, error) {
+ term, err := NewTerminal()
+ if err != nil {
+ return "", err
+ }
+ defer term.Close()
+
+ return term.BasicDefault(prefix, def)
+}
+
+// Ask is a wrapper around Terminal.Ask.
+func Ask(question string) (bool, error) {
+ term, err := NewTerminal()
+ if err != nil {
+ return false, err
+ }
+ defer term.Close()
+
+ return term.Ask(question)
+}
+
+// Custom is a wrapper around Terminal.Custom.
+func Custom(prefix string, test func(string) (string, bool)) (string, error) {
+ term, err := NewTerminal()
+ if err != nil {
+ return "", err
+ }
+ defer term.Close()
+
+ return term.Custom(prefix, test)
+}
+
+// Password is a wrapper around Terminal.Password.
+func Password(prefix string) (string, error) {
+ term, err := NewTerminal()
+ if err != nil {
+ return "", err
+ }
+ defer term.Close()
+
+ return term.Password(prefix)
+}
diff --git a/vendor/github.com/Bowery/prompt/term.go b/vendor/github.com/Bowery/prompt/term.go
new file mode 100644
index 000000000..e5cc87366
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/term.go
@@ -0,0 +1,501 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strings"
+)
+
+var (
+	// ErrCTRLC is returned when CTRL+C is pressed, stopping the prompt.
+	ErrCTRLC = errors.New("Interrupted (CTRL+C)")
+	// ErrEOF is returned when CTRL+D is pressed, stopping the prompt.
+	ErrEOF = errors.New("EOF (CTRL+D)")
+)
+
+// Possible events that may occur when reading from input.
+const (
+ evChar = iota
+ evSkip
+ evReturn
+ evEOF
+ evCtrlC
+ evBack
+ evClear
+ evHome
+ evEnd
+ evUp
+ evDown
+ evRight
+ evLeft
+ evDel
+)
+
+// IsNotTerminal checks if an error is related to the input not being a terminal.
+func IsNotTerminal(err error) bool {
+ return isNotTerminal(err)
+}
+
+// TerminalSize retrieves the columns/rows for the terminal connected to out.
+func TerminalSize(out *os.File) (int, int, error) {
+ return terminalSize(out)
+}
+
+// Terminal contains the state for raw terminal input.
+type Terminal struct {
+ In *os.File
+ Out *os.File
+ History []string
+ histIdx int
+ simpleReader *bufio.Reader
+ t *terminal
+}
+
+// NewTerminal creates a terminal and sets it to raw input mode.
+func NewTerminal() (*Terminal, error) {
+ in := os.Stdin
+
+ term, err := newTerminal(in)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Terminal{
+ In: in,
+ Out: os.Stdout,
+ History: make([]string, 0, 10),
+ histIdx: -1,
+ t: term,
+ }, nil
+}
+
+// Basic gets input and, if required, ensures input was given.
+func (term *Terminal) Basic(prefix string, required bool) (string, error) {
+ return term.Custom(prefix, func(input string) (string, bool) {
+ if required && input == "" {
+ return "", false
+ }
+
+ return input, true
+ })
+}
+
+// BasicDefault gets input and, if empty, uses the given default.
+func (term *Terminal) BasicDefault(prefix, def string) (string, error) {
+ return term.Custom(prefix+" (Default: "+def+")", func(input string) (string, bool) {
+ if input == "" {
+ input = def
+ }
+
+ return input, true
+ })
+}
+
+// Ask gets input and checks whether it's an affirmative answer, returning
+// the result as a boolean.
+func (term *Terminal) Ask(question string) (bool, error) {
+ input, err := term.Custom(question+"? (y/n)", func(input string) (string, bool) {
+ if input == "" {
+ return "", false
+ }
+ input = strings.ToLower(input)
+
+ if input == "y" || input == "yes" {
+ return "yes", true
+ }
+
+ return "", true
+ })
+
+ var ok bool
+ if input != "" {
+ ok = true
+ }
+
+ return ok, err
+}
+
+// Custom gets input and calls the given test function to check if the input
+// is valid; when the test returns true, the input is returned.
+func (term *Terminal) Custom(prefix string, test func(string) (string, bool)) (string, error) {
+ var err error
+ var input string
+ var ok bool
+
+ for !ok {
+ input, err = term.GetPrompt(prefix)
+ if err != nil && err != io.EOF {
+ return "", err
+ }
+
+ input, ok = test(input)
+ }
+
+ return input, nil
+}
+
+// Password retrieves a password from stdin without echoing it.
+func (term *Terminal) Password(prefix string) (string, error) {
+ var err error
+ var input string
+
+ for input == "" {
+ input, err = term.GetPassword(prefix)
+ if err != nil && err != io.EOF {
+ return "", err
+ }
+ }
+
+ return input, nil
+}
+
+// GetPrompt gets a line with the prefix and echoes input.
+func (term *Terminal) GetPrompt(prefix string) (string, error) {
+ prefix += " "
+
+ if !term.t.supportsEditing {
+ return term.simplePrompt(prefix)
+ }
+
+ buf := NewBuffer(prefix, term.Out, true)
+ return term.prompt(buf, NewAnsiReader(term.In))
+}
+
+// GetPassword gets a line with the prefix and doesn't echo input.
+func (term *Terminal) GetPassword(prefix string) (string, error) {
+ prefix += " "
+
+ if !term.t.supportsEditing {
+ return term.simplePrompt(prefix)
+ }
+
+ buf := NewBuffer(prefix, term.Out, false)
+ return term.password(buf, NewAnsiReader(term.In))
+}
+
+// Close calls close on the internal terminal.
+func (term *Terminal) Close() error {
+ return term.t.Close()
+}
+
+// Reopen re-opens an internal terminal.
+func (term *Terminal) Reopen() error {
+ in := os.Stdin
+
+ t, err := newTerminal(in)
+ if err != nil {
+ return err
+ }
+
+ term.t = t
+ return nil
+}
+
+// simplePrompt is a fallback prompt without line editing support.
+func (term *Terminal) simplePrompt(prefix string) (string, error) {
+ if term.simpleReader == nil {
+ term.simpleReader = bufio.NewReader(term.In)
+ }
+
+ _, err := term.Out.Write([]byte(prefix))
+ if err != nil {
+ return "", err
+ }
+
+ line, err := term.simpleReader.ReadString('\n')
+ line = strings.TrimRight(line, "\r\n ")
+ line = strings.TrimLeft(line, " ")
+
+ return line, err
+}
+
+// setup initializes a prompt.
+func (term *Terminal) setup(buf *Buffer, in io.Reader) (*bufio.Reader, error) {
+ cols, _, err := TerminalSize(buf.Out)
+ if err != nil {
+ return nil, err
+ }
+
+ buf.Cols = cols
+ input := bufio.NewReader(in)
+
+ err = buf.Refresh()
+ if err != nil {
+ return nil, err
+ }
+
+ return input, nil
+}
+
+// read reads a rune and parses any ANSI escape sequences found.
+func (term *Terminal) read(in *bufio.Reader) (int, rune, error) {
+ char, _, err := in.ReadRune()
+ if err != nil {
+ return 0, 0, err
+ }
+
+ switch char {
+ default:
+ // Standard chars.
+ return evChar, char, nil
+ case tabKey, ctrlA, ctrlB, ctrlE, ctrlF, ctrlG, ctrlH, ctrlJ, ctrlK, ctrlN,
+ ctrlO, ctrlP, ctrlQ, ctrlR, ctrlS, ctrlT, ctrlU, ctrlV, ctrlW, ctrlX,
+ ctrlY, ctrlZ:
+ // Skip.
+ return evSkip, char, nil
+ case returnKey:
+ // End of line.
+ return evReturn, char, nil
+ case ctrlD:
+ // End of file.
+ return evEOF, char, nil
+ case ctrlC:
+ // End of line, interrupted.
+ return evCtrlC, char, nil
+ case backKey:
+ // Backspace.
+ return evBack, char, nil
+ case ctrlL:
+ // Clear screen.
+ return evClear, char, nil
+ case escKey:
+ // Functions like arrows, home, etc.
+ esc := make([]byte, 2)
+ _, err = in.Read(esc)
+ if err != nil {
+ return -1, char, err
+ }
+
+ // Home, end.
+ if esc[0] == 'O' {
+ switch esc[1] {
+ case 'H':
+ // Home.
+ return evHome, char, nil
+ case 'F':
+ // End.
+ return evEnd, char, nil
+ }
+
+ return evSkip, char, nil
+ }
+
+ // Arrows, delete, pgup, pgdown, insert.
+ if esc[0] == '[' {
+ switch esc[1] {
+ case 'A':
+ // Up.
+ return evUp, char, nil
+ case 'B':
+ // Down.
+ return evDown, char, nil
+ case 'C':
+ // Right.
+ return evRight, char, nil
+ case 'D':
+ // Left.
+ return evLeft, char, nil
+ }
+
+ // Delete, pgup, pgdown, insert.
+ if esc[1] > '0' && esc[1] < '7' {
+ extEsc := make([]byte, 3)
+ _, err = in.Read(extEsc)
+ if err != nil {
+ return -1, char, err
+ }
+
+ if extEsc[0] == '~' {
+ switch esc[1] {
+ case '2', '5', '6':
+ // Insert, pgup, pgdown.
+ return evSkip, char, err
+ case '3':
+ // Delete.
+ return evDel, char, err
+ }
+ }
+ }
+ }
+ }
+
+ return evSkip, char, nil
+}
+
+// prompt reads from in and parses ANSI escapes, writing to buf.
+func (term *Terminal) prompt(buf *Buffer, in io.Reader) (string, error) {
+ input, err := term.setup(buf, in)
+ if err != nil {
+ return "", err
+ }
+ term.History = append(term.History, "")
+ term.histIdx = len(term.History) - 1
+ curHistIdx := term.histIdx
+
+ for {
+ typ, char, err := term.read(input)
+ if err != nil {
+ return buf.String(), err
+ }
+
+ switch typ {
+ case evChar:
+ err = buf.Insert(char)
+ if err != nil {
+ return buf.String(), err
+ }
+
+ term.History[curHistIdx] = buf.String()
+ case evSkip:
+ continue
+ case evReturn:
+ err = buf.EndLine()
+ if err != nil {
+ return buf.String(), err
+ }
+
+ line := buf.String()
+ if line == "" {
+ term.histIdx = curHistIdx - 1
+ term.History = term.History[:curHistIdx]
+ } else {
+ term.History[curHistIdx] = line
+ }
+
+ return line, nil
+ case evEOF:
+ err = buf.EndLine()
+ if err == nil {
+ err = ErrEOF
+ }
+
+ return buf.String(), err
+ case evCtrlC:
+ err = buf.EndLine()
+ if err == nil {
+ err = ErrCTRLC
+ }
+
+ return buf.String(), err
+ case evBack:
+ err = buf.DelLeft()
+ if err != nil {
+ return buf.String(), err
+ }
+
+ term.History[curHistIdx] = buf.String()
+ case evClear:
+ err = buf.ClsScreen()
+ if err != nil {
+ return buf.String(), err
+ }
+ case evHome:
+ err = buf.Start()
+ if err != nil {
+ return buf.String(), err
+ }
+ case evEnd:
+ err = buf.End()
+ if err != nil {
+ return buf.String(), err
+ }
+ case evUp:
+ idx := term.histIdx
+ if term.histIdx > 0 {
+ idx--
+ }
+
+ err = buf.Set([]rune(term.History[idx])...)
+ if err != nil {
+ return buf.String(), err
+ }
+
+ term.histIdx = idx
+ case evDown:
+ idx := term.histIdx
+ if term.histIdx < len(term.History)-1 {
+ idx++
+ }
+
+ err = buf.Set([]rune(term.History[idx])...)
+ if err != nil {
+ return buf.String(), err
+ }
+
+ term.histIdx = idx
+ case evRight:
+ err = buf.Right()
+ if err != nil {
+ return buf.String(), err
+ }
+ case evLeft:
+ err = buf.Left()
+ if err != nil {
+ return buf.String(), err
+ }
+ case evDel:
+ err = buf.Del()
+ if err != nil {
+ return buf.String(), err
+ }
+
+ term.History[curHistIdx] = buf.String()
+ }
+ }
+}
+
+// password reads from in and parses restricted ANSI escapes, writing to buf.
+func (term *Terminal) password(buf *Buffer, in io.Reader) (string, error) {
+ input, err := term.setup(buf, in)
+ if err != nil {
+ return "", err
+ }
+
+ for {
+ typ, char, err := term.read(input)
+ if err != nil {
+ return buf.String(), err
+ }
+
+ switch typ {
+ case evChar:
+ err = buf.Insert(char)
+ if err != nil {
+ return buf.String(), err
+ }
+ case evSkip, evHome, evEnd, evUp, evDown, evRight, evLeft, evDel:
+ continue
+ case evReturn:
+ err = buf.EndLine()
+ return buf.String(), err
+ case evEOF:
+ err = buf.EndLine()
+ if err == nil {
+ err = ErrEOF
+ }
+
+ return buf.String(), err
+ case evCtrlC:
+ err = buf.EndLine()
+ if err == nil {
+ err = ErrCTRLC
+ }
+
+ return buf.String(), err
+ case evBack:
+ err = buf.DelLeft()
+ if err != nil {
+ return buf.String(), err
+ }
+ case evClear:
+ err = buf.ClsScreen()
+ if err != nil {
+ return buf.String(), err
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/Bowery/prompt/term_unix.go b/vendor/github.com/Bowery/prompt/term_unix.go
new file mode 100644
index 000000000..de3265bcf
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/term_unix.go
@@ -0,0 +1,96 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly solaris
+
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// List of unsupported $TERM values.
+var unsupported = []string{"", "dumb", "cons25"}
+
+// supportsEditing checks if the terminal supports ANSI escapes.
+func supportsEditing() bool {
+ term := os.Getenv("TERM")
+
+ for _, t := range unsupported {
+ if t == term {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNotTerminal checks if an error is related to the input not being a terminal.
+func isNotTerminal(err error) bool {
+ return err == unix.ENOTTY
+}
+
+// terminal contains the private fields for a Unix terminal.
+type terminal struct {
+ supportsEditing bool
+ fd uintptr
+ origMode unix.Termios
+}
+
+// newTerminal creates a terminal and sets it to raw input mode.
+func newTerminal(in *os.File) (*terminal, error) {
+ term := &terminal{fd: in.Fd()}
+
+ if !supportsEditing() {
+ return term, nil
+ }
+
+ t, err := getTermios(term.fd)
+ if err != nil {
+ if IsNotTerminal(err) {
+ return term, nil
+ }
+
+ return nil, err
+ }
+ term.origMode = *t
+ mode := term.origMode
+ term.supportsEditing = true
+
+	// Set new mode flags; for reference see cfmakeraw(3).
+ mode.Iflag &^= (unix.BRKINT | unix.IGNBRK | unix.ICRNL |
+ unix.INLCR | unix.IGNCR | unix.ISTRIP | unix.IXON |
+ unix.PARMRK)
+
+ mode.Oflag &^= unix.OPOST
+
+ mode.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON |
+ unix.ISIG | unix.IEXTEN)
+
+ mode.Cflag &^= (unix.CSIZE | unix.PARENB)
+ mode.Cflag |= unix.CS8
+
+ // Set controls; min num of bytes, and timeouts.
+ mode.Cc[unix.VMIN] = 1
+ mode.Cc[unix.VTIME] = 0
+
+ err = setTermios(term.fd, true, &mode)
+ if err != nil {
+ return nil, err
+ }
+
+ return term, nil
+}
+
+// Close disables the terminal's raw input.
+func (term *terminal) Close() error {
+ if term.supportsEditing {
+ err := setTermios(term.fd, false, &term.origMode)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Bowery/prompt/term_windows.go b/vendor/github.com/Bowery/prompt/term_windows.go
new file mode 100644
index 000000000..0ab135244
--- /dev/null
+++ b/vendor/github.com/Bowery/prompt/term_windows.go
@@ -0,0 +1,116 @@
+// Copyright 2013-2015 Bowery, Inc.
+
+package prompt
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// Flags to control the terminal's mode.
+const (
+ echoInputFlag = 0x0004
+ insertModeFlag = 0x0020
+ lineInputFlag = 0x0002
+ mouseInputFlag = 0x0010
+ processedInputFlag = 0x0001
+ windowInputFlag = 0x0008
+)
+
+// Error number returned for an invalid handle.
+const errnoInvalidHandle = 0x6
+
+var (
+ kernel = syscall.NewLazyDLL("kernel32.dll")
+ getConsoleScreenBufferInfo = kernel.NewProc("GetConsoleScreenBufferInfo")
+ setConsoleMode = kernel.NewProc("SetConsoleMode")
+)
+
+// consoleScreenBufferInfo contains various fields for the terminal.
+type consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes uint16
+ window smallRect
+ maximumWindowSize coord
+}
+
+// coord contains coords for positioning.
+type coord struct {
+ x int16
+ y int16
+}
+
+// smallRect contains positions for the window edges.
+type smallRect struct {
+ left int16
+ top int16
+ right int16
+ bottom int16
+}
+
+// terminalSize retrieves the cols/rows for the terminal connected to out.
+func terminalSize(out *os.File) (int, int, error) {
+ csbi := new(consoleScreenBufferInfo)
+
+ ret, _, err := getConsoleScreenBufferInfo.Call(out.Fd(), uintptr(unsafe.Pointer(csbi)))
+ if ret == 0 {
+ return 0, 0, err
+ }
+
+ // Results are always off by one.
+ cols := csbi.window.right - csbi.window.left + 1
+ rows := csbi.window.bottom - csbi.window.top + 1
+
+ return int(cols), int(rows), nil
+}
+
+// isNotTerminal checks if an error is related to the input not being a terminal.
+func isNotTerminal(err error) bool {
+ errno, ok := err.(syscall.Errno)
+
+ return ok && errno == errnoInvalidHandle
+}
+
+// terminal contains the private fields for a Windows terminal.
+type terminal struct {
+ supportsEditing bool
+ fd uintptr
+ origMode uint32
+}
+
+// newTerminal creates a terminal and sets it to raw input mode.
+func newTerminal(in *os.File) (*terminal, error) {
+ term := &terminal{fd: in.Fd()}
+
+ err := syscall.GetConsoleMode(syscall.Handle(term.fd), &term.origMode)
+ if err != nil {
+ return term, nil
+ }
+ mode := term.origMode
+ term.supportsEditing = true
+
+ // Set new mode flags.
+ mode &^= (echoInputFlag | insertModeFlag | lineInputFlag | mouseInputFlag |
+ processedInputFlag | windowInputFlag)
+
+ ret, _, err := setConsoleMode.Call(term.fd, uintptr(mode))
+ if ret == 0 {
+ return nil, err
+ }
+
+ return term, nil
+}
+
+// Close disables the terminal's raw input.
+func (term *terminal) Close() error {
+ if term.supportsEditing {
+ ret, _, err := setConsoleMode.Call(term.fd, uintptr(term.origMode))
+ if ret == 0 {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/dchest/safefile/LICENSE b/vendor/github.com/dchest/safefile/LICENSE
new file mode 100644
index 000000000..e753ecd8a
--- /dev/null
+++ b/vendor/github.com/dchest/safefile/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2013 Dmitry Chestnykh <dmitry@codingrobots.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/dchest/safefile/README.md b/vendor/github.com/dchest/safefile/README.md
new file mode 100644
index 000000000..b1894f35e
--- /dev/null
+++ b/vendor/github.com/dchest/safefile/README.md
@@ -0,0 +1,44 @@
+# safefile
+
+[![Build Status](https://travis-ci.org/dchest/safefile.svg)](https://travis-ci.org/dchest/safefile) [![Windows Build status](https://ci.appveyor.com/api/projects/status/owlifxeekg75t2ho?svg=true)](https://ci.appveyor.com/project/dchest/safefile)
+
+Go package safefile implements safe "atomic" saving of files.
+
+Instead of truncating and overwriting the destination file, it creates a
+temporary file in the same directory, writes to it, and then renames the
+temporary file to the original name when calling Commit.
+
+
+### Installation
+
+```
+$ go get github.com/dchest/safefile
+```
+
+### Documentation
+
+ <https://godoc.org/github.com/dchest/safefile>
+
+### Example
+
+```go
+f, err := safefile.Create("/home/ken/report.txt", 0644)
+if err != nil {
+ // ...
+}
+// Created temporary file /home/ken/sf-ppcyksu5hyw2mfec.tmp
+
+defer f.Close()
+
+_, err = io.WriteString(f, "Hello world")
+if err != nil {
+ // ...
+}
+// Wrote "Hello world" to /home/ken/sf-ppcyksu5hyw2mfec.tmp
+
+err = f.Commit()
+if err != nil {
+ // ...
+}
+// Renamed /home/ken/sf-ppcyksu5hyw2mfec.tmp to /home/ken/report.txt
+```
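+
+If `Commit` is never called (say a write fails and the function returns
+early), the deferred `Close` removes the temporary file, leaving the original
+`report.txt` untouched; after a successful `Commit`, `Close` is a no-op.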
diff --git a/vendor/github.com/dchest/safefile/appveyor.yml b/vendor/github.com/dchest/safefile/appveyor.yml
new file mode 100644
index 000000000..198fb33d6
--- /dev/null
+++ b/vendor/github.com/dchest/safefile/appveyor.yml
@@ -0,0 +1,24 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\projects\src\github.com\dchest\safefile
+
+environment:
+ PATH: c:\projects\bin;%PATH%
+ GOPATH: c:\projects
+ NOTIFY_TIMEOUT: 5s
+
+install:
+ - go version
+ - go get golang.org/x/tools/cmd/vet
+ - go get -v -t ./...
+
+build_script:
+ - go tool vet -all .
+ - go build ./...
+ - go test -v -race ./...
+
+test: off
+
+deploy: off
diff --git a/vendor/github.com/dchest/safefile/rename.go b/vendor/github.com/dchest/safefile/rename.go
new file mode 100644
index 000000000..3193f2203
--- /dev/null
+++ b/vendor/github.com/dchest/safefile/rename.go
@@ -0,0 +1,9 @@
+// +build !plan9,!windows windows,go1.5
+
+package safefile
+
+import "os"
+
+func rename(oldname, newname string) error {
+ return os.Rename(oldname, newname)
+}
diff --git a/vendor/github.com/dchest/safefile/rename_nonatomic.go b/vendor/github.com/dchest/safefile/rename_nonatomic.go
new file mode 100644
index 000000000..8782c28db
--- /dev/null
+++ b/vendor/github.com/dchest/safefile/rename_nonatomic.go
@@ -0,0 +1,51 @@
+// +build plan9 windows,!go1.5
+
+// os.Rename on Windows before Go 1.5 and on Plan 9 will not overwrite existing
+// files, so we cannot guarantee atomic saving of a file by doing a rename.
+// We will have to do some voodoo to minimize data loss on those systems.
+
+package safefile
+
+import (
+ "os"
+ "path/filepath"
+)
+
+func rename(oldname, newname string) error {
+ err := os.Rename(oldname, newname)
+ if err != nil {
+		// If newname exists ("original"), we will try renaming it to a
+		// new temporary name, then renaming oldname to the newname,
+		// and deleting the renamed original. If the system crashes between
+		// renaming and deleting, the original file will still be available
+		// under the temporary name, so users can manually recover data.
+		// (No automatic recovery is possible because after a crash the
+		// temporary name is not known.)
+ var origtmp string
+ for {
+ origtmp, err = makeTempName(newname, filepath.Base(newname))
+ if err != nil {
+ return err
+ }
+ _, err = os.Stat(origtmp)
+ if err == nil {
+ continue // most likely will never happen
+ }
+ break
+ }
+ err = os.Rename(newname, origtmp)
+ if err != nil {
+ return err
+ }
+ err = os.Rename(oldname, newname)
+ if err != nil {
+ // Rename still failed; try to revert the original rename,
+ // ignoring errors.
+ os.Rename(origtmp, newname)
+ return err
+ }
+ // Rename succeeded, now delete original file.
+ os.Remove(origtmp)
+ }
+ return nil
+}
diff --git a/vendor/github.com/dchest/safefile/safefile.go b/vendor/github.com/dchest/safefile/safefile.go
new file mode 100644
index 000000000..e7f21c1da
--- /dev/null
+++ b/vendor/github.com/dchest/safefile/safefile.go
@@ -0,0 +1,197 @@
+// Copyright 2013 Dmitry Chestnykh. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package safefile implements safe "atomic" saving of files.
+//
+// Instead of truncating and overwriting the destination file, it creates a
+// temporary file in the same directory, writes to it, and then renames the
+// temporary file to the original name when calling Commit.
+//
+// Example:
+//
+// f, err := safefile.Create("/home/ken/report.txt", 0644)
+// if err != nil {
+// // ...
+// }
+// // Created temporary file /home/ken/sf-ppcyksu5hyw2mfec.tmp
+//
+// defer f.Close()
+//
+// _, err = io.WriteString(f, "Hello world")
+// if err != nil {
+// // ...
+// }
+// // Wrote "Hello world" to /home/ken/sf-ppcyksu5hyw2mfec.tmp
+//
+// err = f.Commit()
+// if err != nil {
+// // ...
+// }
+// // Renamed /home/ken/sf-ppcyksu5hyw2mfec.tmp to /home/ken/report.txt
+//
+package safefile
+
+import (
+ "crypto/rand"
+ "encoding/base32"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// ErrAlreadyCommitted is returned when calling Commit on a file that
+// has already been successfully committed.
+var ErrAlreadyCommitted = errors.New("file already committed")
+
+type File struct {
+ *os.File
+ origName string
+ closeFunc func(*File) error
+ isClosed bool // if true, temporary file has been closed, but not renamed
+ isCommitted bool // if true, the file has been successfully committed
+}
+
+func makeTempName(origname, prefix string) (tempname string, err error) {
+ origname = filepath.Clean(origname)
+ if len(origname) == 0 || origname[len(origname)-1] == filepath.Separator {
+ return "", os.ErrInvalid
+ }
+ // Generate 10 random bytes.
+ // This gives 80 bits of entropy, good enough
+ // to make the temporary file name unpredictable.
+ var rnd [10]byte
+ if _, err := rand.Read(rnd[:]); err != nil {
+ return "", err
+ }
+ name := prefix + "-" + strings.ToLower(base32.StdEncoding.EncodeToString(rnd[:])) + ".tmp"
+ return filepath.Join(filepath.Dir(origname), name), nil
+}
+
+// Create creates a temporary file in the same directory as filename,
+// which will be renamed to the given filename when calling Commit.
+func Create(filename string, perm os.FileMode) (*File, error) {
+ for {
+ tempname, err := makeTempName(filename, "sf")
+ if err != nil {
+ return nil, err
+ }
+ f, err := os.OpenFile(tempname, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
+ if err != nil {
+ if os.IsExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ return &File{
+ File: f,
+ origName: filename,
+ closeFunc: closeUncommitted,
+ }, nil
+ }
+}
+
+// OrigName returns the original filename given to Create.
+func (f *File) OrigName() string {
+ return f.origName
+}
+
+// Close closes the temporary file and removes it.
+// If the file has been committed, Close is a no-op.
+func (f *File) Close() error {
+ return f.closeFunc(f)
+}
+
+func closeUncommitted(f *File) error {
+ err0 := f.File.Close()
+ err1 := os.Remove(f.Name())
+ f.closeFunc = closeAgainError
+ if err0 != nil {
+ return err0
+ }
+ return err1
+}
+
+func closeAfterFailedRename(f *File) error {
+ // Remove the temporary file.
+ //
+ // The note from the Commit function applies here too, as we may be
+ // removing a different file. However, since we rely on our temporary
+ // names being unpredictable, this should not be a concern.
+ f.closeFunc = closeAgainError
+ return os.Remove(f.Name())
+}
+
+func closeCommitted(f *File) error {
+ // noop
+ return nil
+}
+
+func closeAgainError(f *File) error {
+ return os.ErrInvalid
+}
+
+// Commit safely commits data into the original file by syncing the
+// temporary file to disk, closing it, and renaming it to the original
+// file name.
+//
+// In case of success, the temporary file is closed and no longer exists
+// on disk. It is safe to call Close after Commit: the operation will do
+// nothing.
+//
+// In case of error, the temporary file is still open and exists on disk;
+// it must be closed by callers, either by calling Close or by trying to
+// commit again.
+//
+// Note that when trying to Commit again after a failed Commit when the file
+// has been closed, but not renamed to its original name (the new commit will
+// try again to rename it), safefile cannot guarantee that the temporary file
+// has not been changed, or that it is the same temporary file we were dealing
+// with. However, since the temporary name is unpredictable, it is unlikely
+// that this happened accidentally. If complete atomicity is needed, do not
+// Commit again after an error; write the file again.
+func (f *File) Commit() error {
+ if f.isCommitted {
+ return ErrAlreadyCommitted
+ }
+ if !f.isClosed {
+ // Sync to disk.
+ err := f.Sync()
+ if err != nil {
+ return err
+ }
+ // Close underlying os.File.
+ err = f.File.Close()
+ if err != nil {
+ return err
+ }
+ f.isClosed = true
+ }
+ // Rename.
+ err := rename(f.Name(), f.origName)
+ if err != nil {
+ f.closeFunc = closeAfterFailedRename
+ return err
+ }
+ f.closeFunc = closeCommitted
+ f.isCommitted = true
+ return nil
+}
+
+// WriteFile is a safe analog of ioutil.WriteFile.
+func WriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := Create(filename, perm)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ n, err := f.Write(data)
+ if err != nil {
+ return err
+ }
+ if n < len(data) {
+ return io.ErrShortWrite
+ }
+ return f.Commit()
+}
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 000000000..c86bcc066
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for Go that supports shell-style quoting,
+commenting, and escaping.
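+
+For example, splitting a string with the default ASCII lexer (mirroring
+the package documentation):
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}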
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 000000000..d98308bce
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+ l := NewLexer(os.Stdin)
+ for {
+ token, err := l.Next()
+ if err != nil {
+ break // err is io.EOF once the input is exhausted
+ }
+ // process token
+ }
+
+To access the raw token stream (which includes tokens for comments):
+
+ t := NewTokenizer(os.Stdin)
+ for {
+ token, err := t.Next()
+ if err != nil {
+ break // err is io.EOF once the input is exhausted
+ }
+ // process token
+ }
+
+*/
+package shlex
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TokenType is a top-level token classification: a word, space, comment, or unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: a quote, space, or escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+ tokenType TokenType
+ value string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.tokenType != b.tokenType {
+ return false
+ }
+ return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+ spaceRunes = " \t\r\n"
+ escapingQuoteRunes = `"`
+ nonEscapingQuoteRunes = "'"
+ escapeRunes = `\`
+ commentRunes = "#"
+)
+
+// Classes of rune token
+const (
+ unknownRuneClass runeTokenClass = iota
+ spaceRuneClass
+ escapingQuoteRuneClass
+ nonEscapingQuoteRuneClass
+ escapeRuneClass
+ commentRuneClass
+ eofRuneClass
+)
+
+// Classes of lexical token
+const (
+ UnknownToken TokenType = iota
+ WordToken
+ SpaceToken
+ CommentToken
+)
+
+// Lexer state machine states
+const (
+ startState lexerState = iota // no runes have been seen
+ inWordState // processing regular runes in a word
+ escapingState // we have just consumed an escape rune; the next rune is literal
+ escapingQuotedState // we have just consumed an escape rune within a quoted string
+ quotingEscapingState // we are within a quoted string that supports escaping ("...")
+ quotingState // we are within a string that does not support escaping ('...')
+ commentState // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+ for _, runeChar := range runes {
+ typeMap[runeChar] = tokenType
+ }
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+ t := tokenClassifier{}
+ t.addRuneClass(spaceRunes, spaceRuneClass)
+ t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+ t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+ t.addRuneClass(escapeRunes, escapeRuneClass)
+ t.addRuneClass(commentRunes, commentRuneClass)
+ return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+ return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+ return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+ for {
+ token, err := (*Tokenizer)(l).Next()
+ if err != nil {
+ return "", err
+ }
+ switch token.tokenType {
+ case WordToken:
+ return token.value, nil
+ case CommentToken:
+ // skip comments
+ default:
+ return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+ }
+ }
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens.
+type Tokenizer struct {
+ input bufio.Reader
+ classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ input := bufio.NewReader(r)
+ classifier := newDefaultClassifier()
+ return &Tokenizer{
+ input: *input,
+ classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It returns an error if the state machine reaches an unexpected state.
+func (t *Tokenizer) scanStream() (*Token, error) {
+ state := startState
+ var tokenType TokenType
+ var value []rune
+ var nextRune rune
+ var nextRuneType runeTokenClass
+ var err error
+
+ for {
+ nextRune, _, err = t.input.ReadRune()
+ nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+ if err == io.EOF {
+ nextRuneType = eofRuneClass
+ err = nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch state {
+ case startState: // no runes read yet
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ return nil, io.EOF
+ }
+ case spaceRuneClass:
+ {
+ }
+ case escapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ tokenType = WordToken
+ state = escapingState
+ }
+ case commentRuneClass:
+ {
+ tokenType = CommentToken
+ state = commentState
+ }
+ default:
+ {
+ tokenType = WordToken
+ value = append(value, nextRune)
+ state = inWordState
+ }
+ }
+ }
+ case inWordState: // in a regular word
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingState: // the rune after an escape character
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = inWordState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingQuotedState: // the next rune after an escape character, in double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = quotingEscapingState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingEscapingState: // in escaping double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingQuotedState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingState: // in non-escaping single quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case commentState: // in a comment
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ if nextRune == '\n' {
+ state = startState
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ } else {
+ value = append(value, nextRune)
+ }
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ default:
+ {
+ return nil, fmt.Errorf("Unexpected state: %v", state)
+ }
+ }
+ }
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+ return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings using shell-style rules
+// for quoting, commenting, and escaping.
+func Split(s string) ([]string, error) {
+ l := NewLexer(strings.NewReader(s))
+ subStrings := make([]string, 0)
+ for {
+ word, err := l.Next()
+ if err != nil {
+ if err == io.EOF {
+ return subStrings, nil
+ }
+ return subStrings, err
+ }
+ subStrings = append(subStrings, word)
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/LICENSE b/vendor/github.com/kardianos/govendor/LICENSE
new file mode 100644
index 000000000..d29b37261
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kardianos/govendor/README.md b/vendor/github.com/kardianos/govendor/README.md
new file mode 100644
index 000000000..1815cb9ab
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/README.md
@@ -0,0 +1,206 @@
+## The Vendor Tool for Go
+`go get -u github.com/kardianos/govendor`
+
+# Use Go modules
+
+[Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
+were initially released with Go1.11. Fixes and improvements in Go1.11.2,
+and more to come in the upcoming Go1.12, have made and will continue to
+make Go modules even better.
+
+Go modules as a release and build tool are ready to use now.
+Tool support for Go modules (such as static analysis tools or
+auto-complete tools in editors) should be in a much better state
+when Go1.12 is released.
+
+Please begin thinking about code organization in terms of modules today.
+Please begin to release modules with standardized version tags (vX.Y.Z) today.
+Please consider adding go.mod files to your projects if you consider them
+more of a "library" module. Please use Go modules in small or personal projects.
+
+For larger code bases with many different programmers, you may wish to
+hold off on migrating to Go modules until you have verified that all your
+existing tooling (editors, static analyzers, and code generators) supports
+Go modules.
+
+# Govendor Introduction
+
+New users please read the [FAQ](doc/faq.md).
+
+Package developers should read the [developer guide](doc/dev-guide.md).
+
+For a high level overview read the [whitepaper](doc/whitepaper.md).
+
+Uses the go1.5+ vendor folder. Multiple workflows supported, single tool.
+
+[![Build Status](https://travis-ci.org/kardianos/govendor.svg?branch=master)](https://travis-ci.org/kardianos/govendor)
+[![Build status](https://ci.appveyor.com/api/projects/status/skf1t3363y6tycuc/branch/master?svg=true)](https://ci.appveyor.com/project/kardianos/govendor/branch/master)
+[![GoDoc](https://godoc.org/github.com/kardianos/govendor?status.svg)](https://godoc.org/github.com/kardianos/govendor)
+
+ * Copy existing dependencies from $GOPATH with `govendor add/update`.
+ * If you ignore `vendor/*/`, restore dependencies with `govendor sync`.
+ * Pull in new dependencies or update existing dependencies directly from
+ remotes with `govendor fetch`.
+ * Migrate from legacy systems with `govendor migrate`.
+ * Supports Linux, OS X, Windows, probably all others.
+ * Supports git, hg, svn, bzr (must be installed and on the PATH).
+
+## Notes
+
+ * The project must be within a $GOPATH/src.
+ * If using go1.5, ensure you `set GO15VENDOREXPERIMENT=1`.
+
+### Quick Start, also see the [FAQ](doc/faq.md)
+```
+# Setup your project.
+cd "my project in GOPATH"
+govendor init
+
+# Add existing GOPATH files to vendor.
+govendor add +external
+
+# View your work.
+govendor list
+
+# Look at what is using a package
+govendor list -v fmt
+
+# Specify a specific version or revision to fetch
+govendor fetch golang.org/x/net/context@a4bbce9fcae005b22ae5443f6af064d80a6f5a55
+govendor fetch golang.org/x/net/context@v1 # Get latest v1.*.* tag or branch.
+govendor fetch golang.org/x/net/context@=v1 # Get the tag or branch named "v1".
+
+# Update a package to latest, given any prior version constraint
+govendor fetch golang.org/x/net/context
+
+# Format your repository only
+govendor fmt +local
+
+# Build everything in your repository only
+govendor install +local
+
+# Test your repository only
+govendor test +local
+
+```
+
+## Sub-commands
+```
+ init Create the "vendor" folder and the "vendor.json" file.
+ list List and filter existing dependencies and packages.
+ add Add packages from $GOPATH.
+ update Update packages from $GOPATH.
+ remove Remove packages from the vendor folder.
+ status Lists any packages missing, out-of-date, or modified locally.
+ fetch Add new or update vendor folder packages from remote repository.
+ sync Pull packages into vendor folder from remote repository with revisions
+ from vendor.json file.
+ migrate Move packages from a legacy tool to the vendor folder with metadata.
+ get Like "go get" but copies dependencies into a "vendor" folder.
+ license List discovered licenses for the given status or import paths.
+ shell Run a "shell" to make multiple sub-commands more efficient for large
+ projects.
+
+ go tool commands that are wrapped:
+ `+<status>` package selection may be used with them
+ fmt, build, install, clean, test, vet, generate, tool
+```
+
+## Status
+
+Packages can be specified by their "status".
+```
+ +local (l) packages in your project
+ +external (e) referenced packages in GOPATH but not in current project
+ +vendor (v) packages in the vendor folder
+ +std (s) packages in the standard library
+
+ +excluded (x) external packages explicitly excluded from vendoring
+ +unused (u) packages in the vendor folder, but unused
+ +missing (m) referenced packages but not found
+
+ +program (p) package is a main package
+
+ +outside +external +missing
+ +all +all packages
+```
+
+A status can be referenced by its initial letters.
+
+ * `+std` same as `+s`
+ * `+external` same as `+ext` same as `+e`
+ * `+excluded` same as `+exc` same as `+x`
+
+Status can be logically composed:
+
+ * `+local,program` (local AND program) local packages that are also programs
+ * `+local +vendor` (local OR vendor) local packages or vendor packages
+ * `+vendor,program +std` ((vendor AND program) OR std) vendor packages that are also programs
+ or std library packages
+ * `+vendor,^program` (vendor AND NOT program) vendor packages that are not "main" packages.
+
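+For example, a composed status can be passed to any sub-command that accepts
+status filters (a hypothetical invocation; the expressions above all work the
+same way):
+```
+# List local packages that are also programs.
+govendor list +local,program
+```
+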
+## Package specifier
+
+The full package-spec is:
+`<path>[{/...|/^}][::<origin>][@[<version-spec>]]`
+
+Some examples:
+
+ * `github.com/kardianos/govendor` specifies a single package and single folder.
+ * `github.com/kardianos/govendor/...` specifies `govendor` and all referenced
+ packages under that path.
+ * `github.com/kardianos/govendor/^` specifies the `govendor` folder and all
+ sub-folders. Useful for resources or if you don't want a partial repository.
+ * `github.com/kardianos/govendor/^::github.com/myself/govendor` same as above
+ but fetch from user "myself".
+ * `github.com/kardianos/govendor/...@abc12032` all referenced packages at
+ revision `abc12032`.
+ * `github.com/kardianos/govendor/...@v1` same as above, but get the most recent
+ "v1" tag, such as "v1.4.3".
+ * `github.com/kardianos/govendor/...@=v1` get the exact version "v1".
+
+## Packages and Status
+
+You may specify multiple package-specs and multiple statuses in a single command.
+Commands that accept status and package-spec:
+
+ * list
+ * add
+ * update
+ * remove
+ * fetch
+
+You may pass arguments to govendor through stdin if the last argument is a "-".
+For example `echo +vendor | govendor list -` will list all vendor packages.
+
+## Ignoring build tags and excluding packages
+Ignoring build tags is opt-out, by design the opposite of the build file
+directives, which are opt-in when specified. Typically a developer will
+want to support cross-platform builds, but selectively opt out of tags,
+tests, and architectures as desired.
+
+To ignore additional tags, edit the "vendor.json" file and add the tag to
+the "ignore" field. The field uses spaces to separate the tags to ignore.
+For example, the following will ignore both test and appengine files.
+```
+{
+ "ignore": "test appengine",
+}
+```
+
+Similarly, some specific packages can be excluded from the vendoring process.
+These packages will be listed as `excluded` (`x`), and will not be copied to the
+"vendor" folder when running `govendor add|fetch|update`.
+
+Any sub-package `foo/bar` of an excluded package `foo` is also excluded (but
+package `bar/foo` is not). The import dependencies of excluded packages are not
+listed, and thus not vendored.
+
+To exclude packages, also use the "ignore" field of the "vendor.json" file.
+Packages are identified by their name; to distinguish them from tags they
+must contain a "/" character (possibly at the end):
+```
+{
+ "ignore": "test appengine foo/",
+}
+```
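+
+Packages excluded this way are reported with the `+excluded` (`x`) status
+described above, for example:
+```
+govendor list +excluded
+```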
diff --git a/vendor/github.com/kardianos/govendor/appveyor.yml b/vendor/github.com/kardianos/govendor/appveyor.yml
new file mode 100644
index 000000000..59538976e
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/appveyor.yml
@@ -0,0 +1,24 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+environment:
+ GOPATH: c:\gopath
+ GOVERSION: 1.7
+
+clone_folder: c:\gopath\src\github.com\kardianos\govendor
+
+install:
+ - mkdir c:\tmp
+ - set TMP=C:\tmp
+ - go version
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - rmdir c:\go /s /q
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip
+ - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL
+ - go version
+ - go env
+
+build_script:
+ - go test -i ./...
+ - go test ./...
diff --git a/vendor/github.com/kardianos/govendor/cliprompt/cliPrompt.go b/vendor/github.com/kardianos/govendor/cliprompt/cliPrompt.go
new file mode 100644
index 000000000..609603f46
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/cliprompt/cliPrompt.go
@@ -0,0 +1,154 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cliprompt uses the CLI to prompt for user feedback.
+package cliprompt
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/kardianos/govendor/prompt"
+
+ cp "github.com/Bowery/prompt"
+)
+
+type Prompt struct{}
+
+// Ask the user a question based on the CLI.
+// TODO (DT): Currently can't handle fetching empty responses due to the cancel method.
+func (p *Prompt) Ask(q *prompt.Question) (prompt.Response, error) {
+ term, err := cp.NewTerminal()
+ if err != nil {
+ return prompt.RespCancel, err
+ }
+
+ if len(q.Error) > 0 {
+ fmt.Fprintf(term.Out, "%s\n\n", q.Error)
+ }
+
+ switch q.Type {
+ default:
+ panic("Unknown question type")
+ case prompt.TypeSelectMultiple:
+ return prompt.RespCancel, fmt.Errorf("Selecting multiple isn't currently supported")
+ case prompt.TypeSelectOne:
+ return getSingle(term, q)
+ }
+}
+
+func getSingle(term *cp.Terminal, q *prompt.Question) (prompt.Response, error) {
+ if len(q.Options) == 1 && q.Options[0].Other() {
+ opt := &q.Options[0]
+ opt.Chosen = true
+ return setOther(term, q, opt)
+ }
+
+ chosen := q.AnswerSingle(false)
+ if chosen == nil {
+ return setOption(term, q)
+ }
+ resp, err := setOther(term, q, chosen)
+ if err != nil {
+ return prompt.RespCancel, err
+ }
+ if resp == prompt.RespCancel {
+ chosen.Chosen = false
+ return setOption(term, q)
+ }
+ return resp, nil
+}
+
+func setOther(term *cp.Terminal, q *prompt.Question, opt *prompt.Option) (prompt.Response, error) {
+ var blankCount = 0
+ var internalMessage = ""
+ for {
+ // Write out messages
+ if len(internalMessage) > 0 {
+ fmt.Fprintf(term.Out, "%s\n\n", internalMessage)
+ }
+ if len(q.Prompt) > 0 {
+ fmt.Fprintf(term.Out, "%s\n", q.Prompt)
+ }
+ if len(opt.Validation()) > 0 {
+ fmt.Fprintf(term.Out, " ** %s\n", opt.Validation())
+ }
+ // Reset message.
+ internalMessage = ""
+ ln, err := term.Basic(" > ", false)
+ if err != nil {
+ return prompt.RespCancel, err
+ }
+ if len(ln) == 0 && blankCount > 0 {
+ return prompt.RespCancel, nil
+ }
+ if len(ln) == 0 {
+ internalMessage = "Press enter again to cancel"
+ blankCount++
+ continue
+ }
+ blankCount = 0
+ opt.Value = strings.TrimSpace(ln)
+ return prompt.RespAnswer, nil
+ }
+}
+
+func setOption(term *cp.Terminal, q *prompt.Question) (prompt.Response, error) {
+ var blankCount = 0
+ var internalMessage = ""
+ for {
+ // Write out messages
+ if len(internalMessage) > 0 {
+ fmt.Fprintf(term.Out, "%s\n\n", internalMessage)
+ }
+ if len(q.Prompt) > 0 {
+ fmt.Fprintf(term.Out, "%s\n", q.Prompt)
+ }
+ for index, opt := range q.Options {
+ fmt.Fprintf(term.Out, " (%d) %s\n", index+1, opt.Prompt())
+ if len(opt.Validation()) > 0 {
+ fmt.Fprintf(term.Out, " ** %s\n", opt.Validation())
+ }
+ }
+ // Reset message.
+ internalMessage = ""
+ ln, err := term.Basic(" # ", false)
+ if err != nil {
+ return prompt.RespCancel, err
+ }
+ if len(ln) == 0 && blankCount > 0 {
+ return prompt.RespCancel, nil
+ }
+ if len(ln) == 0 {
+ internalMessage = "Press enter again to cancel"
+ blankCount++
+ continue
+ }
+ blankCount = 0
+ choice, err := strconv.ParseInt(ln, 10, 32)
+ if err != nil {
+ internalMessage = "Not a valid number"
+ continue
+ }
+ index := int(choice - 1)
+ if index < 0 || index >= len(q.Options) {
+ internalMessage = "Not a valid choice."
+ continue
+ }
+ opt := &q.Options[index]
+ opt.Chosen = true
+ if opt.Other() {
+ res, err := setOther(term, q, opt)
+ if err != nil {
+ return prompt.RespCancel, err
+ }
+ if res == prompt.RespCancel {
+ opt.Chosen = false
+ continue
+ }
+ }
+ return prompt.RespAnswer, nil
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/context/context.go b/vendor/github.com/kardianos/govendor/context/context.go
new file mode 100644
index 000000000..f5557e7a7
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/context.go
@@ -0,0 +1,436 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context gathers the status of packages and stores it in Context.
+// A new Context needs to be pointed at the root of the project and at any
+// project-owned vendor file.
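+//
+// A minimal sketch of typical use (RootVendorOrWD and NewContextWD are
+// defined below in this package):
+//
+// ctx, err := NewContextWD(RootVendorOrWD)
+// if err != nil {
+// // ...
+// }
+// _ = ctx // ctx.Package is populated with LoadPackage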
+package context
+
+import (
+ "fmt"
+ "io"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ os "github.com/kardianos/govendor/internal/vos"
+ "github.com/kardianos/govendor/pkgspec"
+ "github.com/kardianos/govendor/vendorfile"
+)
+
+const (
+ debug = false
+ looplimit = 10000
+
+ vendorFilename = "vendor.json"
+)
+
+func dprintf(f string, v ...interface{}) {
+ if debug {
+ fmt.Printf(f, v...)
+ }
+}
+
+// Context represents the current project context.
+type Context struct {
+ Logger io.Writer // Write to the verbose log.
+ Insecure bool // Allow insecure network operations
+
+ GopathList []string // List of GOPATHs in environment. Includes "src" dir.
+ Goroot string // The path to the standard library.
+
+ RootDir string // Full path to the project root.
+ RootGopath string // The GOPATH the project is in.
+ RootImportPath string // The import path to the project.
+
+ VendorFile *vendorfile.File
+ VendorFilePath string // File path to vendor file.
+ VendorFolder string // Store vendor packages in this folder.
+ RootToVendorFile string // The relative path from the project root to the vendor file directory.
+
+ VendorDiscoverFolder string // Normally auto-set to "vendor"
+
+ // Package is a map where the import path is the key.
+ // Populated with LoadPackage.
+ Package map[string]*Package
+ // Change to unknown structure (rename). Maybe...
+
+ // MoveRule provides the translation from original import path to new import path.
+ RewriteRule map[string]string // map[from]to
+
+ TreeImport []*pkgspec.Pkg
+
+ Operation []*Operation
+
+ loaded, dirty bool
+ rewriteImports bool
+
+ ignoreTag []string // list of tags to ignore
+ excludePackage []string // list of package prefixes to exclude
+
+ statusCache []StatusItem
+ added map[string]bool
+}
+
+// Package maintains information pertaining to a package.
+type Package struct {
+ OriginDir string // Origin directory
+ Dir string // Physical directory path of the package.
+
+ Status Status // Status and location of the package.
+ *pkgspec.Pkg
+ Local string // Current location of a package relative to $GOPATH/src.
+ Gopath string // Includes trailing "src".
+ Files []*File
+
+ inVendor bool // Different than Status.Location, this is in *any* vendor tree.
+ inTree bool
+
+ ignoreFile []string
+
+ // used in resolveUnknown function. Not persisted.
+ referenced map[string]*Package
+}
+
+// File holds a reference to the imports in a file and the file location.
+type File struct {
+ Package *Package
+ Path string
+ Imports []string
+
+ ImportComment string
+}
+
+type RootType byte
+
+const (
+ RootVendor RootType = iota
+ RootWD
+ RootVendorOrWD
+ RootVendorOrWDOrFirstGOPATH
+)
+
+func (pkg *Package) String() string {
+ return pkg.Local
+}
+
+type packageList []*Package
+
+func (li packageList) Len() int { return len(li) }
+func (li packageList) Swap(i, j int) { li[i], li[j] = li[j], li[i] }
+func (li packageList) Less(i, j int) bool {
+ if li[i].Path != li[j].Path {
+ return li[i].Path < li[j].Path
+ }
+ return li[i].Local < li[j].Local
+}
+
+type Env map[string]string
+
+func NewEnv() (Env, error) {
+ env := Env{}
+
+ // If GOROOT is not set, get from go cmd.
+ cmd := exec.Command("go", "env")
+ var goEnv []byte
+ goEnv, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(string(goEnv), "\n") {
+ if k, v, ok := pathos.ParseGoEnvLine(line); ok {
+ env[k] = v
+ }
+ }
+
+ return env, nil
+}
+
+// NewContextWD creates a new context. It looks for a root folder by finding
+// a vendor file.
+func NewContextWD(rt RootType) (*Context, error) {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+ rootIndicator := "vendor"
+
+ root := wd
+ switch rt {
+ case RootVendor:
+ tryRoot, err := findRoot(wd, rootIndicator)
+ if err != nil {
+ return nil, err
+ }
+ root = tryRoot
+ case RootVendorOrWD:
+ tryRoot, err := findRoot(wd, rootIndicator)
+ if err == nil {
+ root = tryRoot
+ }
+ case RootVendorOrWDOrFirstGOPATH:
+ root, err = findRoot(wd, rootIndicator)
+ if err != nil {
+ env, err := NewEnv()
+ if err != nil {
+ return nil, err
+ }
+ allgopath := env["GOPATH"]
+
+ if len(allgopath) == 0 {
+ return nil, ErrMissingGOPATH
+ }
+ gopathList := filepath.SplitList(allgopath)
+ root = filepath.Join(gopathList[0], "src")
+ }
+ }
+
+ // Check for old vendor file location.
+ oldLocation := filepath.Join(root, vendorFilename)
+ if _, err := os.Stat(oldLocation); err == nil {
+ return nil, ErrOldVersion{`Use the "migrate" command to update.`}
+ }
+
+ return NewContextRoot(root)
+}
+
+// NewContextRoot creates a new context for the given root folder.
+func NewContextRoot(root string) (*Context, error) {
+ pathToVendorFile := filepath.Join("vendor", vendorFilename)
+ vendorFolder := "vendor"
+
+ return NewContext(root, pathToVendorFile, vendorFolder, false)
+}
+
+// NewContext creates a new context from a given root folder and vendor file path.
+// The vendorFolder is where vendor packages should be placed.
+func NewContext(root, vendorFilePathRel, vendorFolder string, rewriteImports bool) (*Context, error) {
+ dprintf("CTX: %s\n", root)
+ var err error
+
+ env, err := NewEnv()
+ if err != nil {
+ return nil, err
+ }
+ goroot := env["GOROOT"]
+ all := env["GOPATH"]
+
+ if goroot == "" {
+ return nil, ErrMissingGOROOT
+ }
+ goroot = filepath.Join(goroot, "src")
+
+ // Get the GOPATHs. Prepend the GOROOT to the list.
+ if len(all) == 0 {
+ return nil, ErrMissingGOPATH
+ }
+ gopathList := filepath.SplitList(all)
+ gopathGoroot := make([]string, 0, len(gopathList)+1)
+ gopathGoroot = append(gopathGoroot, goroot)
+ for _, gopath := range gopathList {
+ srcPath := filepath.Join(gopath, "src") + string(filepath.Separator)
+ srcPathEvaled, err := filepath.EvalSymlinks(srcPath)
+ if err != nil {
+ return nil, err
+ }
+ gopathGoroot = append(gopathGoroot, srcPath, srcPathEvaled+string(filepath.Separator))
+ }
+
+ rootToVendorFile, _ := filepath.Split(vendorFilePathRel)
+
+ vendorFilePath := filepath.Join(root, vendorFilePathRel)
+
+ ctx := &Context{
+ RootDir: root,
+ GopathList: gopathGoroot,
+ Goroot: goroot,
+
+ VendorFilePath: vendorFilePath,
+ VendorFolder: vendorFolder,
+ RootToVendorFile: pathos.SlashToImportPath(rootToVendorFile),
+
+ VendorDiscoverFolder: "vendor",
+
+ Package: make(map[string]*Package),
+
+ RewriteRule: make(map[string]string, 3),
+
+ rewriteImports: rewriteImports,
+ }
+
+ ctx.RootImportPath, ctx.RootGopath, err = ctx.findImportPath(root)
+ if err != nil {
+ return nil, err
+ }
+
+ vf, err := readVendorFile(path.Join(ctx.RootImportPath, vendorFolder)+"/", vendorFilePath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ vf = &vendorfile.File{}
+ }
+ ctx.VendorFile = vf
+
+ ctx.IgnoreBuildAndPackage(vf.Ignore)
+
+ return ctx, nil
+}
+
+// IgnoreBuildAndPackage takes a space separated list of tags or package prefixes
+// to ignore.
+// Tags are words, packages are folders, containing or ending with a "/".
+// "a b c" will ignore tags "a" OR "b" OR "c".
+// "p/x q/" will ignore packages "p/x" OR "p/x/y" OR "q" OR "q/z", etc.
+func (ctx *Context) IgnoreBuildAndPackage(ignore string) {
+ ctx.dirty = true
+ ors := strings.Fields(ignore)
+ ctx.ignoreTag = make([]string, 0, len(ors))
+ ctx.excludePackage = make([]string, 0, len(ors))
+ for _, or := range ors {
+ if len(or) == 0 {
+ continue
+ }
+ if strings.Index(or, "/") != -1 {
+ // package
+ ctx.excludePackage = append(ctx.excludePackage, strings.Trim(or, "./"))
+ } else {
+ // tag
+ ctx.ignoreTag = append(ctx.ignoreTag, or)
+ }
+ }
+}
+
+// Write writes to the io.Writer set for logging.
+func (ctx *Context) Write(s []byte) (int, error) {
+ if ctx.Logger != nil {
+ return ctx.Logger.Write(s)
+ }
+ return len(s), nil
+}
+
+// VendorFilePackagePath finds a given vendor file package given the import path.
+func (ctx *Context) VendorFilePackagePath(path string) *vendorfile.Package {
+ for _, pkg := range ctx.VendorFile.Package {
+ if pkg.Remove {
+ continue
+ }
+ if pkg.Path == path {
+ return pkg
+ }
+ }
+ return nil
+}
+
+// findPackageChild finds any package under the current package.
+// Used for finding tree overlaps.
+func (ctx *Context) findPackageChild(ck *Package) []*Package {
+ out := make([]*Package, 0, 3)
+ for _, pkg := range ctx.Package {
+ if pkg == ck {
+ continue
+ }
+ if !pkg.inVendor {
+ continue
+ }
+ if pkg.Status.Presence == PresenceTree {
+ continue
+ }
+ if strings.HasPrefix(pkg.Path, ck.Path+"/") {
+ out = append(out, pkg)
+ }
+ }
+ return out
+}
+
+// findPackageParentTree finds any parent tree package that would
+// include the given canonical path.
+func (ctx *Context) findPackageParentTree(ck *Package) []string {
+ out := make([]string, 0, 1)
+ for _, pkg := range ctx.Package {
+ if !pkg.inVendor {
+ continue
+ }
+ if !pkg.IncludeTree || pkg == ck {
+ continue
+ }
+ // pkg.Path = github.com/usera/pkg, tree = true
+ // ck.Path = github.com/usera/pkg/dance
+ if strings.HasPrefix(ck.Path, pkg.Path+"/") {
+ out = append(out, pkg.Local)
+ }
+ }
+ return out
+}
+
+// updatePackageReferences populates the referenced field in each Package.
+func (ctx *Context) updatePackageReferences() {
+ pathUnderDirLookup := make(map[string]map[string]*Package)
+ findCanonicalUnderDir := func(dir, path string) *Package {
+ if importMap, found := pathUnderDirLookup[dir]; found {
+ if pkg, found2 := importMap[path]; found2 {
+ return pkg
+ }
+ } else {
+ pathUnderDirLookup[dir] = make(map[string]*Package)
+ }
+ for _, pkg := range ctx.Package {
+ if !pkg.inVendor {
+ continue
+ }
+
+ removeFromEnd := len(pkg.Path) + len(ctx.VendorDiscoverFolder) + 2
+ nextLen := len(pkg.Dir) - removeFromEnd
+ if nextLen < 0 {
+ continue
+ }
+ checkDir := pkg.Dir[:nextLen]
+ if !pathos.FileHasPrefix(dir, checkDir) {
+ continue
+ }
+ if pkg.Path != path {
+ continue
+ }
+ pathUnderDirLookup[dir][path] = pkg
+ return pkg
+ }
+ pathUnderDirLookup[dir][path] = nil
+ return nil
+ }
+ for _, pkg := range ctx.Package {
+ pkg.referenced = make(map[string]*Package, len(pkg.referenced))
+ }
+ for _, pkg := range ctx.Package {
+ for _, f := range pkg.Files {
+ for _, imp := range f.Imports {
+ if vpkg := findCanonicalUnderDir(pkg.Dir, imp); vpkg != nil {
+ vpkg.referenced[pkg.Local] = pkg
+ continue
+ }
+ if other, found := ctx.Package[imp]; found {
+ other.referenced[pkg.Local] = pkg
+ continue
+ }
+ }
+ }
+ }
+
+ // Transfer all references from the child to the top parent.
+ for _, pkg := range ctx.Package {
+ if parentTrees := ctx.findPackageParentTree(pkg); len(parentTrees) > 0 {
+ if parentPkg := ctx.Package[parentTrees[0]]; parentPkg != nil {
+ for opath, opkg := range pkg.referenced {
+ // Do not transfer internal references.
+ if strings.HasPrefix(opkg.Path, parentPkg.Path+"/") {
+ continue
+ }
+ parentPkg.referenced[opath] = opkg
+ }
+ pkg.referenced = make(map[string]*Package, 0)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/context/copy.go b/vendor/github.com/kardianos/govendor/context/copy.go
new file mode 100644
index 000000000..bf449ea17
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/copy.go
@@ -0,0 +1,203 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ "github.com/pkg/errors"
+)
+
+type fileInfoSort []os.FileInfo
+
+func (l fileInfoSort) Len() int {
+ return len(l)
+}
+func (l fileInfoSort) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ if a.IsDir() == b.IsDir() {
+ return l[i].Name() < l[j].Name()
+ }
+ return !a.IsDir()
+}
+func (l fileInfoSort) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// CopyPackage copies the files from srcPath to destPath; the destPath
+// folder and its parents are created if they don't already exist.
+func (ctx *Context) CopyPackage(destPath, srcPath, lookRoot, pkgPath string, ignoreFiles []string, tree bool, h hash.Hash, beforeCopy func(deps []string) error) error {
+ if pathos.FileStringEquals(destPath, srcPath) {
+ return fmt.Errorf("Attempting to copy package to same location %q.", destPath)
+ }
+ err := os.MkdirAll(destPath, 0777)
+ if err != nil {
+ return err
+ }
+
+ // Ensure the dest is empty of files.
+ destDir, err := os.Open(destPath)
+ if err != nil {
+ return err
+ }
+ ignoreTest := false
+ for _, ignore := range ctx.ignoreTag {
+ if ignore == "test" {
+ ignoreTest = true
+ break
+ }
+ }
+
+ fl, err := destDir.Readdir(-1)
+ destDir.Close()
+ if err != nil {
+ return err
+ }
+ for _, fi := range fl {
+ if fi.IsDir() {
+ if tree {
+ err = errors.Wrap(os.RemoveAll(filepath.Join(destPath, fi.Name())), "remove all existing tree entries")
+ if err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ err = errors.Wrap(os.Remove(filepath.Join(destPath, fi.Name())), "remove existing file")
+ if err != nil {
+ return err
+ }
+ }
+
+ // Copy files into dest.
+ srcDir, err := os.Open(srcPath)
+ if err != nil {
+ return errors.Wrap(err, "open srcPath directory")
+ }
+
+ fl, err = srcDir.Readdir(-1)
+ srcDir.Close()
+ if err != nil {
+ return errors.Wrap(err, "src readdir")
+ }
+ if h != nil {
+ // Write relative path to GOPATH.
+ h.Write([]byte(strings.Trim(pkgPath, "/")))
+ // Sort file list to present a stable hash.
+ sort.Sort(fileInfoSort(fl))
+ }
+fileLoop:
+ for _, fi := range fl {
+ name := fi.Name()
+ if name[0] == '.' {
+ continue
+ }
+ if fi.IsDir() {
+ isTestdata := name == "testdata"
+ if !tree && !isTestdata {
+ continue
+ }
+ if name[0] == '_' {
+ continue
+ }
+ if ignoreTest {
+ if strings.HasSuffix(name, "_test") || isTestdata {
+ continue
+ }
+ }
+ nextDestPath := filepath.Join(destPath, name)
+ nextSrcPath := filepath.Join(srcPath, name)
+ var nextIgnoreFiles, deps []string
+ if !isTestdata && !strings.Contains(pkgPath, "/testdata/") {
+ nextIgnoreFiles, deps, err = ctx.getIgnoreFiles(nextSrcPath)
+ if err != nil {
+ return err
+ }
+ }
+ if beforeCopy != nil {
+ err = beforeCopy(deps)
+ if err != nil {
+ return errors.Wrap(err, "beforeCopy")
+ }
+ }
+ err = ctx.CopyPackage(nextDestPath, nextSrcPath, lookRoot, path.Join(pkgPath, name), nextIgnoreFiles, true, h, beforeCopy)
+ if err != nil {
+ return errors.Wrapf(err,
+ "CopyPackage dest=%q src=%q lookRoot=%q pkgPath=%q ignoreFiles=%q tree=%t has beforeCopy=%t",
+ nextDestPath, nextSrcPath, lookRoot, path.Join(pkgPath, name), nextIgnoreFiles, true, beforeCopy != nil,
+ )
+ }
+ continue
+ }
+ for _, ignore := range ignoreFiles {
+ if pathos.FileStringEquals(name, ignore) {
+ continue fileLoop
+ }
+ }
+ if h != nil {
+ h.Write([]byte(name))
+ }
+ err = copyFile(
+ filepath.Join(destPath, name),
+ filepath.Join(srcPath, name),
+ h,
+ )
+ if err != nil {
+ return errors.Wrapf(err, "copyFile dest=%q src=%q", filepath.Join(destPath, name), filepath.Join(srcPath, name))
+ }
+ }
+
+ return errors.Wrapf(licenseCopy(lookRoot, srcPath, filepath.Join(ctx.RootDir, ctx.VendorFolder), pkgPath), "licenseCopy srcPath=%q", srcPath)
+}
+
+func copyFile(destPath, srcPath string, h hash.Hash) error {
+ ss, err := os.Stat(srcPath)
+ if err != nil {
+ return errors.Wrap(err, "copyFile Stat")
+ }
+ src, err := os.Open(srcPath)
+ if err != nil {
+ return errors.Wrapf(err, "open src=%q", srcPath)
+ }
+ defer src.Close()
+ // Ensure we are not trying to copy a directory. May happen with symlinks.
+ if st, err := src.Stat(); err == nil {
+ if st.IsDir() {
+ return nil
+ }
+ }
+
+ dest, err := os.Create(destPath)
+ if err != nil {
+ return errors.Wrapf(err, "create dest=%q", destPath)
+ }
+
+ r := io.Reader(src)
+
+ if h != nil {
+ r = io.TeeReader(src, h)
+ }
+
+ _, err = io.Copy(dest, r)
+ // Close before setting mod and time.
+ dest.Close()
+ if err != nil {
+ return errors.Wrap(err, "copy")
+ }
+ err = os.Chmod(destPath, ss.Mode())
+ if err != nil {
+ return err
+ }
+ return os.Chtimes(destPath, ss.ModTime(), ss.ModTime())
+}
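
copyFile hashes the payload as a side effect of the copy by routing the source through io.TeeReader. The same pattern in isolation, using only the standard library (the file names are placeholders):

	package main

	import (
		"crypto/sha1"
		"fmt"
		"io"
		"log"
		"os"
	)

	func main() {
		h := sha1.New()
		src, err := os.Open("in.txt")
		if err != nil {
			log.Fatal(err)
		}
		defer src.Close()
		dst, err := os.Create("out.txt")
		if err != nil {
			log.Fatal(err)
		}
		defer dst.Close()
		// Every byte copied into dst is also written into the hash.
		if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("sha1: %x\n", h.Sum(nil))
	}
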
diff --git a/vendor/github.com/kardianos/govendor/context/err.go b/vendor/github.com/kardianos/govendor/context/err.go
new file mode 100644
index 000000000..c5f0cd400
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/err.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ // ErrMissingGOROOT is returned when GOROOT cannot be determined.
+ ErrMissingGOROOT = errors.New("Unable to determine GOROOT.")
+ // ErrMissingGOPATH is returned when no GOPATH is set.
+ ErrMissingGOPATH = errors.New("Missing GOPATH. Check your environment variable GOPATH.")
+)
+
+// ErrNotInGOPATH is returned when a package is not in the GOPATH.
+type ErrNotInGOPATH struct {
+ Missing string
+}
+
+func (err ErrNotInGOPATH) Error() string {
+ return fmt.Sprintf("Package %q not a go package or not in GOPATH.", err.Missing)
+}
+
+// ErrDirtyPackage is returned when a package has uncommitted changes in version control.
+type ErrDirtyPackage struct {
+ ImportPath string
+}
+
+func (err ErrDirtyPackage) Error() string {
+ return fmt.Sprintf("Package %q has uncommitted changes in the vcs.", err.ImportPath)
+}
+
+// ErrPackageExists is returned when the package already exists in the vendor folder.
+type ErrPackageExists struct {
+ Package string
+}
+
+func (err ErrPackageExists) Error() string {
+ return fmt.Sprintf("Package %q already in vendor.", err.Package)
+}
+
+// ErrMissingVendorFile is returned when the vendor file cannot be found.
+type ErrMissingVendorFile struct {
+ Path string
+}
+
+func (err ErrMissingVendorFile) Error() string {
+ return fmt.Sprintf("Vendor file at %q not found.", err.Path)
+}
+
+// ErrOldVersion is returned when the vendor file format is an old version.
+type ErrOldVersion struct {
+ Message string
+}
+
+func (err ErrOldVersion) Error() string {
+ return fmt.Sprintf("The vendor file or is old. %s", err.Message)
+}
+
+type ErrTreeChildren struct {
+ path string
+ children []*Package
+}
+
+func (err ErrTreeChildren) Error() string {
+ return fmt.Sprintf("Cannot have a sub-tree %q contain sub-packages %q", err.path, err.children)
+}
+
+type ErrTreeParents struct {
+ path string
+ parents []string
+}
+
+func (err ErrTreeParents) Error() string {
+ return fmt.Sprintf("Cannot add package %q which is already found in sub-tree %q", err.path, err.parents)
+}
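
Because these are concrete error types rather than sentinel values, callers branch on them with type assertions; ModifyStatus later in this commit does exactly that for the tree errors. A hedged sketch (the ModifyImport call is hypothetical):

	err := ctx.ModifyImport(ps, Add)
	if _, ok := err.(ErrPackageExists); ok {
		// Already vendored: treat as non-fatal for an "add".
		err = nil
	}
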
diff --git a/vendor/github.com/kardianos/govendor/context/fetch.go b/vendor/github.com/kardianos/govendor/context/fetch.go
new file mode 100644
index 000000000..79c391eee
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/fetch.go
@@ -0,0 +1,325 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ "github.com/kardianos/govendor/pkgspec"
+ gvvcs "github.com/kardianos/govendor/vcs"
+ "github.com/kardianos/govendor/vendorfile"
+
+ "golang.org/x/tools/go/vcs"
+)
+
+type fetcher struct {
+ Ctx *Context
+ CacheRoot string
+ HavePkg map[string]bool
+}
+
+func newFetcher(ctx *Context) (*fetcher, error) {
+ // GOPATH here includes the "src" dir, go up one level.
+ cacheRoot := filepath.Join(ctx.RootGopath, "..", ".cache", "govendor")
+ err := os.MkdirAll(cacheRoot, 0700)
+ if err != nil {
+ return nil, err
+ }
+ return &fetcher{
+ Ctx: ctx,
+ CacheRoot: cacheRoot,
+ HavePkg: make(map[string]bool, 30),
+ }, nil
+}
+
+// op fetches the repo locally if it is not already present and
+// transforms the fetch op into a copy op.
+func (f *fetcher) op(op *Operation) ([]*Operation, error) {
+ // vcs.ShowCmd = true
+ var nextOps []*Operation
+ vpkg := f.Ctx.VendorFilePackagePath(op.Pkg.Path)
+ if vpkg == nil {
+ return nextOps, fmt.Errorf("Could not find vendor file package for %q. Internal error.", op.Pkg.Path)
+ }
+
+ op.Type = OpCopy
+ ps, err := pkgspec.Parse("", op.Src)
+ if err != nil {
+ return nextOps, err
+ }
+ if len(ps.Version) == 0 {
+ longest := ""
+ for _, pkg := range f.Ctx.Package {
+ if strings.HasPrefix(ps.Path, pkg.Path+"/") && len(pkg.Path) > len(longest) && pkg.HasVersion {
+ longest = pkg.Path
+ ps.Version = pkg.Version
+ ps.HasVersion = true
+ }
+ }
+ }
+
+ // Don't check for bundle, rather check physical directory.
+ // If no repo in dir, clone.
+ // If there is a repo in dir, update to latest.
+ // Get any tags.
+ // If we have a specific revision, update to that revision.
+
+ pkgDir := filepath.Join(f.CacheRoot, pathos.SlashToFilepath(ps.PathOrigin()))
+ sysVcsCmd, repoRoot, err := vcs.FromDir(pkgDir, f.CacheRoot)
+ var vcsCmd *VCSCmd
+ repoRootDir := filepath.Join(f.CacheRoot, repoRoot)
+ if err != nil {
+ rr, err := vcs.RepoRootForImportPath(ps.PathOrigin(), false)
+ if err != nil {
+ if strings.Contains(err.Error(), "unrecognized import path") {
+ return nextOps, nil
+ }
+ return nextOps, err
+ }
+ if !f.Ctx.Insecure && !vcsIsSecure(rr.Repo) {
+ return nextOps, fmt.Errorf("repo remote not secure")
+ }
+
+ vcsCmd = updateVcsCmd(rr.VCS)
+ repoRoot = rr.Root
+ repoRootDir = filepath.Join(f.CacheRoot, repoRoot)
+
+ err = vcsCmd.Create(repoRootDir, rr.Repo)
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to create repo %q in %q %v", rr.Repo, repoRootDir, err)
+ }
+
+ } else {
+ vcsCmd = updateVcsCmd(sysVcsCmd)
+ err = vcsCmd.Download(repoRootDir)
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to download repo into %q %v", repoRootDir, err)
+ }
+ }
+
+ revision := ""
+ if ps.HasVersion {
+ switch {
+ case len(ps.Version) == 0:
+ vpkg.Version = ""
+ case isVersion(ps.Version):
+ vpkg.Version = ps.Version
+ default:
+ revision = ps.Version
+ }
+ }
+
+ switch {
+ case len(revision) == 0 && len(vpkg.Version) > 0:
+ fmt.Fprintf(f.Ctx, "Get version %q@%s\n", vpkg.Path, vpkg.Version)
+ // Get a list of tags, match to version if possible.
+ var tagNames []string
+ tagNames, err = vcsCmd.Tags(repoRootDir)
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to fetch tags %v", err)
+ }
+ labels := make([]Label, len(tagNames))
+ for i, tag := range tagNames {
+ labels[i].Source = LabelTag
+ labels[i].Text = tag
+ }
+ result := FindLabel(vpkg.Version, labels)
+ if result.Source == LabelNone {
+ return nextOps, fmt.Errorf("No label found for specified version %q from %s", vpkg.Version, ps.String())
+ }
+ vpkg.VersionExact = result.Text
+ fmt.Fprintf(f.Ctx, "\tFound exact version %q\n", vpkg.VersionExact)
+ err = vcsCmd.TagSync(repoRootDir, result.Text)
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to sync repo to tag %q %v", result.Text, err)
+ }
+ case len(revision) > 0:
+ fmt.Fprintf(f.Ctx, "Get specific revision %q@%s\n", vpkg.Path, revision)
+ // Get specific version.
+ vpkg.Version = ""
+ vpkg.VersionExact = ""
+ err = vcsCmd.RevisionSync(repoRootDir, revision)
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to sync repo to revision %q %v", revision, err)
+ }
+ default:
+ fmt.Fprintf(f.Ctx, "Get latest revision %q\n", vpkg.Path)
+ // Get latest version.
+ err = vcsCmd.TagSync(repoRootDir, "")
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to sync to latest revision %v", err)
+ }
+ }
+
+ // Set op.Src to the download dir.
+ // /tmp/cache/1/[[github.com/kardianos/govendor]]context
+ op.Src = pkgDir
+ var deps []string
+ op.IgnoreFile, deps, err = f.Ctx.getIgnoreFiles(op.Src)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nextOps, nil
+ }
+ return nextOps, fmt.Errorf("failed to get ignore files and deps from %q %v", op.Src, err)
+ }
+
+ f.HavePkg[ps.Path] = true
+
+ // Once downloaded, be sure to set the revision and revisionTime
+ // in the vendor file package.
+ // Find the VCS information.
+ system, err := gvvcs.FindVcs(f.CacheRoot, op.Src)
+ if err != nil {
+ return nextOps, fmt.Errorf("failed to find vcs in %q %v", op.Src, err)
+ }
+ if system != nil {
+ if system.Dirty {
+ return nextOps, ErrDirtyPackage{ps.PathOrigin()}
+ }
+ vpkg.Revision = system.Revision
+ if system.RevisionTime != nil {
+ vpkg.RevisionTime = system.RevisionTime.UTC().Format(time.RFC3339)
+ }
+ }
+
+ processDeps := func(deps []string) error {
+ // Queue up any missing package deps.
+ depLoop:
+ for _, dep := range deps {
+ dep = strings.TrimSpace(dep)
+ if len(dep) == 0 {
+ continue
+ }
+
+ // Check for deps we already have.
+ if f.HavePkg[dep] {
+ continue
+ }
+
+ for _, test := range f.Ctx.Package {
+ if test.Path == dep {
+ switch test.Status.Location {
+ case LocationVendor, LocationLocal:
+ continue depLoop
+ }
+ }
+ }
+
+ // Look for std lib deps
+ var yes bool
+ yes, err = f.Ctx.isStdLib(dep)
+ if err != nil {
+ return fmt.Errorf("Failed to check if in stdlib: %v", err)
+ }
+ if yes {
+ continue
+ }
+
+ // Look for tree deps.
+ if op.Pkg.IncludeTree && strings.HasPrefix(dep, op.Pkg.Path+"/") {
+ continue
+ }
+ version := ""
+ hasVersion := false
+ revision := ""
+ hasOrigin := false
+ origin := ""
+ for _, vv := range f.Ctx.VendorFile.Package {
+ if vv.Remove {
+ continue
+ }
+ if strings.HasPrefix(dep, vv.Path+"/") {
+ if len(vv.Origin) > 0 {
+ origin = path.Join(vv.PathOrigin(), strings.TrimPrefix(dep, vv.Path))
+ hasOrigin = true
+ }
+ if len(vv.Version) > 0 {
+ version = vv.Version
+ hasVersion = true
+ revision = vv.Revision
+ break
+ }
+ if len(vv.Revision) > 0 {
+ revision = vv.Revision
+ }
+ }
+ }
+
+ // Look for tree match in explicit imports
+ for _, item := range f.Ctx.TreeImport {
+ if item.Path != dep && !strings.HasPrefix(dep, item.Path+"/") {
+ continue
+ }
+ if len(item.Origin) > 0 {
+ origin = path.Join(item.PathOrigin(), strings.TrimPrefix(dep, item.Path))
+ hasOrigin = true
+ }
+ if len(item.Version) > 0 {
+ version = item.Version
+ hasVersion = true
+ revision = ""
+ }
+ break
+ }
+
+ f.HavePkg[dep] = true
+ dest := filepath.Join(f.Ctx.RootDir, f.Ctx.VendorFolder, dep)
+
+ // Update vendor file with correct Local field.
+ vp := f.Ctx.VendorFilePackagePath(dep)
+ if vp == nil {
+ vp = &vendorfile.Package{
+ Add: true,
+ Path: dep,
+ Revision: revision,
+ Version: version,
+ Origin: origin,
+ }
+ f.Ctx.VendorFile.Package = append(f.Ctx.VendorFile.Package, vp)
+ }
+ if hasVersion {
+ vp.Version = version
+ }
+ if hasOrigin {
+ vp.Origin = origin
+ }
+ if len(vp.Revision) == 0 {
+ vp.Revision = revision
+ }
+ spec := &pkgspec.Pkg{
+ Path: dep,
+ Version: version,
+ HasVersion: hasVersion,
+ Origin: origin,
+ HasOrigin: hasOrigin,
+ }
+ nextOps = append(nextOps, &Operation{
+ Type: OpFetch,
+ Pkg: &Package{Pkg: spec},
+ Src: spec.String(),
+ Dest: dest,
+ })
+ }
+ return nil
+ }
+
+ err = processDeps(deps)
+ if err != nil {
+ return nextOps, err
+ }
+
+ err = f.Ctx.copyOperation(op, processDeps)
+ if err != nil {
+ return nextOps, err
+ }
+
+ return nextOps, nil
+}
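
The version-inheritance rule near the top of op, where an unversioned dependency inherits the version of its longest already-versioned ancestor path, reads in isolation as follows (a sketch with illustrative types, not the real Context fields):

	type versioned struct{ Path, Version string }

	func inherit(dep string, known []versioned) (version string) {
		longest := ""
		for _, p := range known {
			if strings.HasPrefix(dep, p.Path+"/") && len(p.Path) > len(longest) {
				longest, version = p.Path, p.Version
			}
		}
		return version // "" if no versioned ancestor exists
	}
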
diff --git a/vendor/github.com/kardianos/govendor/context/get.go b/vendor/github.com/kardianos/govendor/context/get.go
new file mode 100644
index 000000000..9b08b1661
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/get.go
@@ -0,0 +1,91 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "fmt"
+ "go/build"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/kardianos/govendor/pkgspec"
+ "golang.org/x/tools/go/vcs"
+)
+
+func Get(logger io.Writer, pkgspecName string, insecure bool) (*pkgspec.Pkg, error) {
+ // Get the GOPATHs.
+ gopathList := filepath.SplitList(build.Default.GOPATH)
+ gopath := gopathList[0]
+
+ cwd, err := os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+ ps, err := pkgspec.Parse(cwd, pkgspecName)
+ if err != nil {
+ return nil, err
+ }
+ return ps, get(logger, filepath.Join(gopath, "src"), ps, insecure)
+}
+
+func get(logger io.Writer, gopath string, ps *pkgspec.Pkg, insecure bool) error {
+ pkgDir := filepath.Join(gopath, ps.Path)
+ sysVcsCmd, repoRoot, err := vcs.FromDir(pkgDir, gopath)
+ var vcsCmd *VCSCmd
+ repoRootDir := filepath.Join(gopath, repoRoot)
+ if err != nil {
+ rr, err := vcs.RepoRootForImportPath(ps.PathOrigin(), false)
+ if err != nil {
+ return err
+ }
+ if !insecure && !vcsIsSecure(rr.Repo) {
+ return fmt.Errorf("repo remote not secure")
+ }
+
+ vcsCmd = updateVcsCmd(rr.VCS)
+ repoRoot = rr.Root
+ repoRootDir = filepath.Join(gopath, repoRoot)
+
+ err = vcsCmd.Create(repoRootDir, rr.Repo)
+ if err != nil {
+ return fmt.Errorf("failed to create repo %q in %q %v", rr.Repo, repoRootDir, err)
+ }
+
+ } else {
+ vcsCmd = updateVcsCmd(sysVcsCmd)
+ err = vcsCmd.Download(repoRootDir)
+ if err != nil {
+ return fmt.Errorf("failed to download repo into %q %v", repoRootDir, err)
+ }
+ }
+ err = os.MkdirAll(filepath.Join(repoRootDir, "vendor"), 0777)
+ if err != nil {
+ return err
+ }
+ ctx, err := NewContext(repoRootDir, filepath.Join("vendor", vendorFilename), "vendor", false)
+ if err != nil {
+ return err
+ }
+ ctx.Insecure = insecure
+ ctx.Logger = logger
+ statusList, err := ctx.Status()
+ if err != nil {
+ return err
+ }
+ added := make(map[string]bool, len(statusList))
+ for _, item := range statusList {
+ switch item.Status.Location {
+ case LocationExternal, LocationNotFound:
+ if added[item.Pkg.Path] {
+ continue
+ }
+ ctx.ModifyImport(item.Pkg, Fetch)
+ added[item.Pkg.Path] = true
+ }
+ }
+ defer ctx.WriteVendorFile()
+ return ctx.Alter()
+}
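
Get is the plumbing behind the "govendor get" command. A hypothetical invocation (the package spec and flags are illustrative):

	pkg, err := context.Get(os.Stdout, "github.com/pkg/errors@v0.8.0", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched", pkg.Path)
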
diff --git a/vendor/github.com/kardianos/govendor/context/label.go b/vendor/github.com/kardianos/govendor/context/label.go
new file mode 100644
index 000000000..c98df0e7e
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/label.go
@@ -0,0 +1,240 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type LabelSource byte
+
+const (
+ LabelNone LabelSource = iota
+ LabelBranch
+ LabelTag
+)
+
+func (ls LabelSource) String() string {
+ switch ls {
+ default:
+ panic("unknown label source")
+ case LabelNone:
+ return "none"
+ case LabelBranch:
+ return "branch"
+ case LabelTag:
+ return "tag"
+ }
+}
+
+type Label struct {
+ Text string
+ Source LabelSource
+}
+
+func (l Label) String() string {
+ return fmt.Sprintf("[%s]%s", l.Source, l.Text)
+}
+
+type labelGroup struct {
+ seq string
+ sections []labelSection
+}
+
+type labelSection struct {
+ seq string
+ number int64
+ brokenBy rune
+}
+
+type labelAnalysis struct {
+ Label Label
+ Groups []labelGroup
+}
+
+func (item *labelAnalysis) fillSections(buf *bytes.Buffer) {
+ previousNumber := false
+ number := false
+
+ isBreak := func(r rune) bool {
+ return r == '.'
+ }
+ add := func(r rune, group *labelGroup) {
+ if buf.Len() > 0 {
+ sVal := buf.String()
+ buf.Reset()
+ value, err := strconv.ParseInt(sVal, 10, 64)
+ if err != nil {
+ value = -1
+ }
+ if !isBreak(r) {
+ r = 0
+ }
+ group.sections = append(group.sections, labelSection{
+ seq: sVal,
+ number: value,
+ brokenBy: r,
+ })
+ }
+ }
+ for _, groupText := range strings.Split(item.Label.Text, "-") {
+ group := labelGroup{
+ seq: groupText,
+ }
+ for index, r := range groupText {
+ number = unicode.IsNumber(r)
+ different := number != previousNumber && index > 0
+ previousNumber = number
+ if isBreak(r) {
+ add(r, &group)
+ continue
+ }
+ if different {
+ add(r, &group)
+ buf.WriteRune(r)
+ continue
+ }
+ buf.WriteRune(r)
+ }
+ add(0, &group)
+ buf.Reset()
+ item.Groups = append(item.Groups, group)
+ }
+}
+
+type labelAnalysisList []*labelAnalysis
+
+func (l labelAnalysisList) Len() int {
+ return len(l)
+}
+func (l labelAnalysisList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l labelAnalysisList) Less(i, j int) bool {
+ const debug = false
+ df := func(f string, a ...interface{}) {
+ if debug {
+ fmt.Printf(f, a...)
+ }
+ }
+ a := l[i]
+ b := l[j]
+
+ // The label with fewer groups sorts first (shortest wins).
+ if len(a.Groups) != len(b.Groups) {
+ return len(a.Groups) < len(b.Groups)
+ }
+
+ gct := len(a.Groups)
+ if gct > len(b.Groups) {
+ gct = len(b.Groups)
+ }
+
+ df(":: %s vs %s ::\n", a.Label.Text, b.Label.Text)
+ for ig := 0; ig < gct; ig++ {
+ ga := a.Groups[ig]
+ gb := b.Groups[ig]
+
+ if ga.seq == gb.seq {
+ df("pt 1 %q\n", ga.seq)
+ continue
+ }
+
+ ct := len(ga.sections)
+ if ct > len(gb.sections) {
+ ct = len(gb.sections)
+ }
+
+ // Compare common sections.
+ for i := 0; i < ct; i++ {
+ sa := ga.sections[i]
+ sb := gb.sections[i]
+
+ // Sort each section by number and alpha.
+ if sa.number != sb.number {
+ df("PT A\n")
+ return sa.number > sb.number
+ }
+ if sa.seq != sb.seq {
+ df("PT B\n")
+ return sa.seq > sb.seq
+ }
+ }
+
+ // The sections we could compare are equal; if the section
+ // counts differ, prefer the group with more sections.
+ if len(ga.sections) != len(gb.sections) {
+ return len(ga.sections) > len(gb.sections)
+ }
+ }
+ // At this point we have same number of groups and same number
+ // of sections. We can assume the labels are the same.
+ // Check to see if the source of the label is different.
+ if a.Label.Source != b.Label.Source {
+ if a.Label.Source == LabelBranch {
+ df("PT C\n")
+ return true
+ }
+ }
+ // We ran out of things to check. Assume one is not "less" than the other.
+ df("PT D\n")
+ return false
+}
+
+// FindLabel matches a single label from a list of labels, given a version.
+// If the returned label.Source is LabelNone, then no labels match.
+//
+// Labels are first broken into groups separated by "-"; the fewest groups
+// wins. Labels with the same number of groups are compared
+// further. Number sequences are treated as numbers. Numbers do not need a
+// separator. The "." is a break point as well.
+func FindLabel(version string, labels []Label) Label {
+ list := make([]*labelAnalysis, 0, 6)
+
+ exact := strings.HasPrefix(version, "=")
+ version = strings.TrimPrefix(version, "=")
+
+ for _, label := range labels {
+ if exact {
+ if label.Text == version {
+ return label
+ }
+ continue
+ }
+ if !strings.HasPrefix(label.Text, version) {
+ continue
+ }
+ remain := strings.TrimPrefix(label.Text, version)
+ if len(remain) > 0 {
+ next := remain[0]
+ // The stated version must either be the full label,
+ // followed by a "." or "-".
+ if next != '.' && next != '-' {
+ continue
+ }
+ }
+ list = append(list, &labelAnalysis{
+ Label: label,
+ Groups: make([]labelGroup, 0, 3),
+ })
+ }
+ if len(list) == 0 {
+ return Label{Source: LabelNone}
+ }
+
+ buf := &bytes.Buffer{}
+ for _, item := range list {
+ item.fillSections(buf)
+ buf.Reset()
+ }
+ sort.Sort(labelAnalysisList(list))
+ return list[0].Label
+}
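
The numeric section comparison is what makes FindLabel pick the highest matching tag rather than the lexicographically greatest string. For example (the tags are hypothetical):

	labels := []Label{
		{Text: "v1.2.3", Source: LabelTag},
		{Text: "v1.10.0", Source: LabelTag},
	}
	got := FindLabel("v1", labels)
	// got.Text == "v1.10.0": the section "10" compares as the number 10
	// and beats "2", even though "v1.10.0" < "v1.2.3" as plain strings.
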
diff --git a/vendor/github.com/kardianos/govendor/context/license.go b/vendor/github.com/kardianos/govendor/context/license.go
new file mode 100644
index 000000000..7c0ec2bfe
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/license.go
@@ -0,0 +1,219 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ "github.com/pkg/errors"
+)
+
+type License struct {
+ Path string
+ Filename string
+ Text string
+}
+
+type LicenseSort []License
+
+func (list LicenseSort) Len() int {
+ return len(list)
+}
+func (list LicenseSort) Swap(i, j int) {
+ list[i], list[j] = list[j], list[i]
+}
+func (list LicenseSort) Less(i, j int) bool {
+ a, b := list[i], list[j]
+ if a.Path == b.Path {
+ return a.Filename < b.Filename
+ }
+ return a.Path < b.Path
+}
+
+type licenseSearchType byte
+
+const (
+ licensePrefix licenseSearchType = iota
+ licenseSubstring
+ licenseSuffix
+)
+
+type licenseSearch struct {
+ Text string
+ Search licenseSearchType
+}
+
+func (t licenseSearchType) Test(filename, test string) bool {
+ switch t {
+ case licensePrefix:
+ return strings.HasPrefix(filename, test)
+ case licenseSubstring:
+ return strings.Contains(filename, test)
+ case licenseSuffix:
+ return strings.HasSuffix(filename, test)
+ }
+ return false
+}
+
+type licenseTest interface {
+ Test(filename, test string) bool
+}
+
+// licenses lists the filenames to copy over to the vendor folder.
+var licenses = []licenseSearch{
+ {Text: "license", Search: licensePrefix},
+ {Text: "unlicense", Search: licensePrefix},
+ {Text: "copying", Search: licensePrefix},
+ {Text: "copyright", Search: licensePrefix},
+ {Text: "copyright", Search: licensePrefix},
+ {Text: "legal", Search: licenseSubstring},
+ {Text: "notice", Search: licenseSubstring},
+ {Text: "disclaimer", Search: licenseSubstring},
+ {Text: "patent", Search: licenseSubstring},
+ {Text: "third-party", Search: licenseSubstring},
+ {Text: "thirdparty", Search: licenseSubstring},
+}
+
+var licenseNotExt = []string{
+ ".go",
+ ".c",
+ ".h",
+ ".cpp",
+ ".hpp",
+}
+
+func isLicenseFile(name string) bool {
+ cname := strings.ToLower(name)
+ for _, X := range licenseNotExt {
+ if filepath.Ext(name) == X {
+ return false
+ }
+ }
+ for _, L := range licenses {
+ if L.Search.Test(cname, L.Text) {
+ return true
+ }
+ }
+ return false
+}
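
Illustrative outcomes of the matcher above (the file names are hypothetical):

	_ = isLicenseFile("LICENSE")           // true: prefix "license"
	_ = isLicenseFile("COPYING.txt")       // true: prefix "copying"
	_ = isLicenseFile("ThirdPartyNotices") // true: substring "notice"
	_ = isLicenseFile("license.go")        // false: source extensions are excluded
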
+
+// licenseWalk starts in a folder and searches up the folder tree
+// for license-like files. Found files are reported to the found function.
+func licenseWalk(root, startIn string, found func(folder, name string) error) error {
+ folder := startIn
+ for i := 0; i <= looplimit; i++ {
+ dir, err := os.Open(folder)
+ if err != nil {
+ return err
+ }
+
+ fl, err := dir.Readdir(-1)
+ dir.Close()
+ if err != nil {
+ return err
+ }
+ for _, fi := range fl {
+ name := fi.Name()
+ if name[0] == '.' {
+ continue
+ }
+ if fi.IsDir() {
+ continue
+ }
+ if !isLicenseFile(name) {
+ continue
+ }
+
+ err = found(folder, name)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(folder) <= len(root) {
+ return nil
+ }
+
+ nextFolder := filepath.Clean(filepath.Join(folder, ".."))
+
+ if nextFolder == folder {
+ return nil
+ }
+ folder = nextFolder
+ }
+ panic("licenseFind loop limit")
+}
+
+// licenseCopy starts the search in the parent of the "startIn" folder and
+// looks in each folder up the tree until root is reached. The root itself is
+// not searched.
+func licenseCopy(root, startIn, vendorRoot, pkgPath string) error {
+ addTo, _ := pathos.TrimCommonSuffix(pathos.SlashToFilepath(pkgPath), startIn)
+ startIn = filepath.Clean(filepath.Join(startIn, ".."))
+ return licenseWalk(root, startIn, func(folder, name string) error {
+ srcPath := filepath.Join(folder, name)
+ trimTo := pathos.FileTrimPrefix(getLastVendorRoot(folder), root)
+
+ /*
+ Path: "golang.org/x/tools/go/vcs"
+ Root: "/tmp/govendor-cache280388238/1"
+ StartIn: "/tmp/govendor-cache280388238/1/go/vcs"
+ addTo: "golang.org/x/tools"
+ $PROJ/vendor + addTo + pathos.FileTrimPrefix(folder, root) + "LICENSE"
+ */
+ destPath := filepath.Join(vendorRoot, addTo, trimTo, name)
+
+ // Ensure the source license file exists before copying.
+ _, err := os.Stat(srcPath)
+ if err != nil {
+ return errors.Errorf("Source license path doesn't exist %q", srcPath)
+ }
+ destDir, _ := filepath.Split(destPath)
+ if err = os.MkdirAll(destDir, 0777); err != nil {
+ return errors.Wrapf(err, "Failed to create the directory %q", destDir)
+ }
+ return errors.Wrapf(copyFile(destPath, srcPath, nil), "copyFile dest=%q src=%q", destPath, srcPath)
+ })
+}
+
+func getLastVendorRoot(s string) string {
+ w := strings.Replace(s, "\\", "/", -1)
+ ix := strings.LastIndex(w, "/vendor/")
+ if ix < 0 {
+ return s
+ }
+ return s[ix+len("/vendor"):]
+}
+
+// LicenseDiscover looks for license files in a given path.
+func LicenseDiscover(root, startIn, overridePath string, list map[string]License) error {
+ return licenseWalk(root, startIn, func(folder, name string) error {
+ ipath := pathos.SlashToImportPath(strings.TrimPrefix(folder, root))
+ if len(overridePath) > 0 {
+ ipath = overridePath
+ }
+ if _, found := list[ipath]; found {
+ return nil
+ }
+ p := filepath.Join(folder, name)
+ text, err := ioutil.ReadFile(p)
+ if err != nil {
+ return fmt.Errorf("Failed to read license file %q %v", p, err)
+ }
+ key := path.Join(ipath, name)
+ list[key] = License{
+ Path: ipath,
+ Filename: name,
+ Text: string(text),
+ }
+ return nil
+ })
+}
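
A hedged usage sketch of LicenseDiscover; the paths are placeholders, and the map key is the import path joined with the file name:

	gopathSrc := "/home/user/go/src" // hypothetical GOPATH src dir
	start := filepath.Join(gopathSrc, "github.com/pkg/errors")
	found := map[string]License{}
	if err := LicenseDiscover(gopathSrc, start, "", found); err != nil {
		log.Fatal(err)
	}
	for key, lic := range found {
		fmt.Println(key, lic.Filename) // e.g. key "github.com/pkg/errors/LICENSE"
	}
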
diff --git a/vendor/github.com/kardianos/govendor/context/modify.go b/vendor/github.com/kardianos/govendor/context/modify.go
new file mode 100644
index 000000000..b3efd42c0
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/modify.go
@@ -0,0 +1,778 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context gathers the status of packages and stores it in Context.
+// A new Context needs to be pointed at the root of the project and any
+// project-owned vendor file.
+package context
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ os "github.com/kardianos/govendor/internal/vos"
+ "github.com/kardianos/govendor/pkgspec"
+ "github.com/kardianos/govendor/vcs"
+ "github.com/kardianos/govendor/vendorfile"
+ "github.com/pkg/errors"
+)
+
+// OperationState is the state of the given package move operation.
+type OperationState byte
+
+const (
+ OpReady OperationState = iota // Operation is ready to go.
+ OpIgnore // Operation should be ignored.
+ OpDone // Operation has been completed.
+)
+
+type OperationType byte
+
+const (
+ OpCopy OperationType = iota
+ OpRemove
+ OpFetch
+)
+
+func (t OperationType) String() string {
+ switch t {
+ default:
+ panic("unknown operation type")
+ case OpCopy:
+ return "copy"
+ case OpRemove:
+ return "remove"
+ case OpFetch:
+ return "fetch"
+ }
+}
+
+// Operation defines how packages should be moved.
+//
+// TODO (DT): Remove Pkg field and change Src and Dest to *pkgspec.Pkg types.
+type Operation struct {
+ Type OperationType
+
+ Pkg *Package
+
+ // Source file path to move packages from.
+ // Must not be empty.
+ Src string
+
+ // Destination file path to move package to.
+ // If Dest is empty the package is removed.
+ Dest string
+
+ // Files to ignore for operation.
+ IgnoreFile []string
+
+ State OperationState
+
+ // True if the operation should treat the package as uncommitted.
+ Uncommitted bool
+}
+
+// Conflict reports packages that are scheduled to conflict.
+type Conflict struct {
+ Canonical string
+ Local string
+ Operation []*Operation
+ OpIndex int
+ Resolved bool
+}
+
+// Modify is the type of modification to perform.
+type Modify byte
+
+const (
+ AddUpdate Modify = iota // Add or update the import.
+ Add // Only add, error if it already exists.
+ Update // Only update, error if it doesn't currently exist.
+ Remove // Remove from vendor path.
+ Fetch // Get directly from remote repository.
+)
+
+type ModifyOption byte
+
+const (
+ Uncommitted ModifyOption = iota
+ MatchTree
+ IncludeTree
+)
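
A Modify verb plus optional ModifyOption flags drive ModifyImport below. A hypothetical call vendoring a package and everything under it (ctx would come from NewContext):

	ps, err := pkgspec.Parse("", "github.com/pkg/errors")
	if err != nil {
		log.Fatal(err)
	}
	// AddUpdate: add if missing, update if present; IncludeTree: include sub-packages.
	if err := ctx.ModifyImport(ps, AddUpdate, IncludeTree); err != nil {
		log.Fatal(err)
	}
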
+
+// ModifyStatus adds packages to the context by status.
+func (ctx *Context) ModifyStatus(sg StatusGroup, mod Modify, mops ...ModifyOption) error {
+ if ctx.added == nil {
+ ctx.added = make(map[string]bool, 10)
+ }
+
+ list, err := ctx.Status()
+ if err != nil {
+ return err
+ }
+
+ // Add packages from status.
+statusLoop:
+ for _, item := range list {
+ if !item.Status.MatchGroup(sg) {
+ continue
+ }
+ if ctx.added[item.Pkg.PathOrigin()] {
+ continue
+ }
+ // Do not add excluded packages
+ if item.Status.Presence == PresenceExcluded {
+ continue
+ }
+ // Do not attempt to add any existing status items that are
+ // already present in vendor folder.
+ if mod == Add {
+ if ctx.VendorFilePackagePath(item.Pkg.Path) != nil {
+ continue
+ }
+ for _, pkg := range ctx.Package {
+ if pkg.Status.Location == LocationVendor && item.Pkg.Path == pkg.Path {
+ continue statusLoop
+ }
+ }
+ }
+
+ err = ctx.modify(item.Pkg, mod, mops)
+ if err != nil {
+ // Skip these errors if from status.
+ if _, is := err.(ErrTreeChildren); is {
+ continue
+ }
+ if _, is := err.(ErrTreeParents); is {
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// ModifyImport adds the package to the context.
+func (ctx *Context) ModifyImport(imp *pkgspec.Pkg, mod Modify, mops ...ModifyOption) error {
+ var err error
+ if ctx.added == nil {
+ ctx.added = make(map[string]bool, 10)
+ }
+ // Grab the origin of the pkg spec from the vendor file as needed.
+ if len(imp.Origin) == 0 {
+ for _, vpkg := range ctx.VendorFile.Package {
+ if vpkg.Remove {
+ continue
+ }
+ if vpkg.Path == imp.Path {
+ imp.Origin = vpkg.Origin
+ }
+ }
+ }
+ if !imp.MatchTree {
+ if !ctx.added[imp.PathOrigin()] {
+ err = ctx.modify(imp, mod, mops)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ list, err := ctx.Status()
+ if err != nil {
+ return err
+ }
+ // Add any packages matched by the "..." wildcard.
+ match := imp.Path + "/"
+ for _, item := range list {
+ if ctx.added[item.Pkg.PathOrigin()] {
+ continue
+ }
+ if item.Pkg.Path != imp.Path && !strings.HasPrefix(item.Pkg.Path, match) {
+ continue
+ }
+ if imp.HasVersion {
+ item.Pkg.HasVersion = true
+ item.Pkg.Version = imp.Version
+ }
+ item.Pkg.HasOrigin = imp.HasOrigin
+ item.Pkg.Origin = path.Join(imp.PathOrigin(), strings.TrimPrefix(item.Pkg.Path, imp.Path))
+ err = ctx.modify(item.Pkg, mod, mops)
+ if err != nil {
+ return err
+ }
+ }
+ // cache for later use
+ ctx.TreeImport = append(ctx.TreeImport, imp)
+ return nil
+}
+
+func (ctx *Context) modify(ps *pkgspec.Pkg, mod Modify, mops []ModifyOption) error {
+ ctx.added[ps.PathOrigin()] = true
+ for _, mop := range mops {
+ switch mop {
+ default:
+ panic("unknown case")
+ case Uncommitted:
+ ps.Uncommitted = true
+ case MatchTree:
+ ps.MatchTree = true
+ case IncludeTree:
+ ps.IncludeTree = true
+ }
+ }
+ var err error
+ if !ctx.loaded || ctx.dirty {
+ err = ctx.loadPackage()
+ if err != nil {
+ return err
+ }
+ }
+ tree := ps.IncludeTree
+
+ switch mod {
+ // Determine if we can find the source path from an add or update.
+ case Add, Update, AddUpdate:
+ _, _, err = ctx.findImportDir("", ps.PathOrigin())
+ if err != nil {
+ return err
+ }
+ }
+
+ // Does the local import exist?
+ // If so, either update or just return.
+ // If not, find the disk path from the canonical path, copy locally and rewrite (if needed).
+ var pkg *Package
+ var foundPkg bool
+ if !foundPkg {
+ localPath := path.Join(ctx.RootImportPath, ctx.VendorFolder, ps.Path)
+ pkg, foundPkg = ctx.Package[localPath]
+ foundPkg = foundPkg && pkg.Status.Presence != PresenceMissing
+ }
+ if !foundPkg {
+ pkg, foundPkg = ctx.Package[ps.Path]
+ foundPkg = foundPkg && pkg.Status.Presence != PresenceMissing
+ }
+ if !foundPkg {
+ pkg, foundPkg = ctx.Package[ps.PathOrigin()]
+ foundPkg = foundPkg && pkg.Status.Presence != PresenceMissing
+ }
+ if !foundPkg {
+ pkg, err = ctx.addSingleImport(ctx.RootDir, ps.PathOrigin(), tree)
+ if err != nil {
+ return err
+ }
+ if pkg == nil {
+ return nil
+ }
+ pkg.Origin = ps.PathOrigin()
+ pkg.Path = ps.Path
+ }
+
+ pkg.HasOrigin = ps.HasOrigin
+ if ps.HasOrigin {
+ pkg.Origin = ps.Origin
+ }
+
+ // Do not support setting "tree" on Remove.
+ if tree && mod != Remove {
+ pkg.IncludeTree = true
+ }
+
+ // A restriction where packages cannot live inside a tree package.
+ if mod != Remove {
+ if pkg.IncludeTree {
+ children := ctx.findPackageChild(pkg)
+ if len(children) > 0 {
+ return ErrTreeChildren{path: pkg.Path, children: children}
+ }
+ }
+ treeParents := ctx.findPackageParentTree(pkg)
+ if len(treeParents) > 0 {
+ return ErrTreeParents{path: pkg.Path, parents: treeParents}
+ }
+ }
+
+ // TODO (DT): figure out how to upgrade a non-tree package to a tree package with correct checks.
+ localExists, err := hasGoFileInFolder(filepath.Join(ctx.RootDir, ctx.VendorFolder, pathos.SlashToFilepath(ps.Path)))
+ if err != nil {
+ return err
+ }
+ if mod == Add && localExists {
+ return ErrPackageExists{path.Join(ctx.RootImportPath, ctx.VendorFolder, ps.Path)}
+ }
+ dprintf("stage 2: begin!\n")
+ switch mod {
+ case Add:
+ return ctx.modifyAdd(pkg, ps.Uncommitted)
+ case AddUpdate:
+ return ctx.modifyAdd(pkg, ps.Uncommitted)
+ case Update:
+ return ctx.modifyAdd(pkg, ps.Uncommitted)
+ case Remove:
+ return ctx.modifyRemove(pkg)
+ case Fetch:
+ return ctx.modifyFetch(pkg, ps.Uncommitted, ps.HasVersion, ps.Version)
+ default:
+ panic("mod switch: case not handled")
+ }
+}
+
+func (ctx *Context) getIgnoreFiles(src string) (ignoreFile, imports []string, err error) {
+ srcDir, err := os.Open(src)
+ if err != nil {
+ return nil, nil, err
+ }
+ fl, err := srcDir.Readdir(-1)
+ srcDir.Close()
+ if err != nil {
+ return nil, nil, err
+ }
+ importMap := make(map[string]struct{}, 12)
+ imports = make([]string, 0, 12)
+ for _, fi := range fl {
+ if fi.IsDir() {
+ continue
+ }
+ if fi.Name()[0] == '.' {
+ continue
+ }
+ tags, fileImports, err := ctx.getFileTags(filepath.Join(src, fi.Name()), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if tags.IgnoreItem(ctx.ignoreTag...) {
+ ignoreFile = append(ignoreFile, fi.Name())
+ } else {
+ // Only add imports for non-ignored files.
+ for _, imp := range fileImports {
+ importMap[imp] = struct{}{}
+ }
+ }
+ }
+ for imp := range importMap {
+ imports = append(imports, imp)
+ }
+ return ignoreFile, imports, nil
+}
+
+func (ctx *Context) modifyAdd(pkg *Package, uncommitted bool) error {
+ var err error
+ src := pkg.OriginDir
+ dprintf("found import: %q\n", src)
+ // If the canonical package is also the local package, then the package
+ // isn't copied locally yet and has already been checked for tags.
+ // If it has been vendored, the source still needs to be examined.
+ // Examine it here and add it to the operations list.
+ var ignoreFile []string
+ if cpkg, found := ctx.Package[pkg.Path]; found {
+ ignoreFile = cpkg.ignoreFile
+ } else {
+ var err error
+ ignoreFile, _, err = ctx.getIgnoreFiles(src)
+ if err != nil {
+ return err
+ }
+ }
+ dest := filepath.Join(ctx.RootDir, ctx.VendorFolder, pathos.SlashToFilepath(pkg.Path))
+ // TODO: This might cause other issues or might be hiding the underlying issues. Examine in depth later.
+ if pathos.FileStringEquals(src, dest) {
+ return nil
+ }
+ dprintf("add op: %q\n", src)
+
+ // Update vendor file with correct Local field.
+ vp := ctx.VendorFilePackagePath(pkg.Path)
+ if vp == nil {
+ vp = &vendorfile.Package{
+ Add: true,
+ Path: pkg.Path,
+ }
+ ctx.VendorFile.Package = append(ctx.VendorFile.Package, vp)
+ }
+ if pkg.IncludeTree {
+ vp.Tree = pkg.IncludeTree
+ }
+
+ if pkg.HasOrigin {
+ vp.Origin = pkg.Origin
+ }
+ if pkg.Path != pkg.Local && pkg.inVendor && vp.Add {
+ vp.Origin = pkg.Local
+ }
+
+ // Find the VCS information.
+ system, err := vcs.FindVcs(pkg.Gopath, src)
+ if err != nil {
+ return err
+ }
+ dirtyAndUncommitted := false
+ if system != nil {
+ if system.Dirty {
+ if !uncommitted {
+ return ErrDirtyPackage{pkg.Path}
+ }
+ dirtyAndUncommitted = true
+ if len(vp.ChecksumSHA1) == 0 {
+ vp.ChecksumSHA1 = "uncommitted/version="
+ }
+ } else {
+ vp.Revision = system.Revision
+ if system.RevisionTime != nil {
+ vp.RevisionTime = system.RevisionTime.UTC().Format(time.RFC3339)
+ }
+ }
+ }
+ ctx.Operation = append(ctx.Operation, &Operation{
+ Type: OpCopy,
+ Pkg: pkg,
+ Src: src,
+ Dest: dest,
+ IgnoreFile: ignoreFile,
+
+ Uncommitted: dirtyAndUncommitted,
+ })
+
+ if !ctx.rewriteImports {
+ return nil
+ }
+
+ mvSet := make(map[*Package]struct{}, 3)
+ ctx.makeSet(pkg, mvSet)
+
+ for r := range mvSet {
+ to := path.Join(ctx.RootImportPath, ctx.VendorFolder, r.Path)
+ dprintf("RULE: %s -> %s\n", r.Local, to)
+ ctx.RewriteRule[r.Path] = to
+ ctx.RewriteRule[r.Local] = to
+ }
+
+ return nil
+}
+
+func (ctx *Context) modifyRemove(pkg *Package) error {
+ // Update vendor file with correct Local field.
+ vp := ctx.VendorFilePackagePath(pkg.Path)
+ if vp != nil {
+ vp.Remove = true
+ }
+ if len(pkg.Dir) == 0 {
+ return nil
+ }
+ // Protect non-project paths from being removed.
+ if !pathos.FileHasPrefix(pkg.Dir, ctx.RootDir) {
+ return nil
+ }
+ if pkg.Status.Location == LocationLocal {
+ return nil
+ }
+ ctx.Operation = append(ctx.Operation, &Operation{
+ Type: OpRemove,
+ Pkg: pkg,
+ Src: pkg.Dir,
+ Dest: "",
+ })
+
+ if !ctx.rewriteImports {
+ return nil
+ }
+
+ mvSet := make(map[*Package]struct{}, 3)
+ ctx.makeSet(pkg, mvSet)
+
+ for r := range mvSet {
+ dprintf("RULE: %s -> %s\n", r.Local, r.Path)
+ ctx.RewriteRule[r.Local] = r.Path
+ }
+
+ return nil
+}
+
+// modifyFetch queues a fetch operation for the given package.
+func (ctx *Context) modifyFetch(pkg *Package, uncommitted, hasVersion bool, version string) error {
+ vp := ctx.VendorFilePackagePath(pkg.Path)
+ if vp == nil {
+ vp = &vendorfile.Package{
+ Add: true,
+ Path: pkg.Path,
+ }
+ ctx.VendorFile.Package = append(ctx.VendorFile.Package, vp)
+ }
+ if hasVersion {
+ vp.Version = version
+ pkg.Version = version
+ pkg.HasVersion = true
+ }
+ if pkg.IncludeTree {
+ vp.Tree = pkg.IncludeTree
+ }
+ pkg.Origin = strings.TrimPrefix(pkg.Origin, ctx.RootImportPath+"/"+ctx.VendorFolder+"/")
+ vp.Origin = pkg.Origin
+ origin := vp.Origin
+ if len(vp.Origin) == 0 {
+ origin = vp.Path
+ }
+ ps := &pkgspec.Pkg{
+ Path: pkg.Path,
+ Origin: origin,
+ HasVersion: hasVersion,
+ Version: version,
+ }
+ dest := filepath.Join(ctx.RootDir, ctx.VendorFolder, pathos.SlashToFilepath(pkg.Path))
+ ctx.Operation = append(ctx.Operation, &Operation{
+ Type: OpFetch,
+ Pkg: pkg,
+ Src: ps.String(),
+ Dest: dest,
+ })
+ return nil
+}
+
+// Check returns any conflicts when more than one package can be moved into
+// the same path.
+func (ctx *Context) Check() []*Conflict {
+ // Find duplicate packages that have been marked for moving.
+ findDups := make(map[string][]*Operation, 3) // map[canonical][]local
+ for _, op := range ctx.Operation {
+ if op.State != OpReady {
+ continue
+ }
+ findDups[op.Pkg.Path] = append(findDups[op.Pkg.Path], op)
+ }
+
+ var ret []*Conflict
+ for canonical, lop := range findDups {
+ if len(lop) == 1 {
+ continue
+ }
+ destDir := path.Join(ctx.RootImportPath, ctx.VendorFolder, canonical)
+ ret = append(ret, &Conflict{
+ Canonical: canonical,
+ Local: destDir,
+ Operation: lop,
+ })
+ }
+ return ret
+}
+
+// ResloveApply applies the conflict resolution selected. It chooses the
+// Operation listed in the OpIndex field.
+func (ctx *Context) ResloveApply(cc []*Conflict) {
+ for _, c := range cc {
+ if !c.Resolved {
+ continue
+ }
+ for i, op := range c.Operation {
+ if op.State != OpReady {
+ continue
+ }
+ if i == c.OpIndex {
+ if vp := ctx.VendorFilePackagePath(c.Canonical); vp != nil {
+ vp.Origin = c.Local
+ }
+ continue
+ }
+ op.State = OpIgnore
+ }
+ }
+}
+
+// ResolveAutoLongestPath finds the longest local path in each conflict
+// and sets it to be used.
+func ResolveAutoLongestPath(cc []*Conflict) []*Conflict {
+ for _, c := range cc {
+ if c.Resolved {
+ continue
+ }
+ longestLen := 0
+ longestIndex := 0
+ for i, op := range c.Operation {
+ if op.State != OpReady {
+ continue
+ }
+
+ if len(op.Pkg.Local) > longestLen {
+ longestLen = len(op.Pkg.Local)
+ longestIndex = i
+ }
+ }
+ c.OpIndex = longestIndex
+ c.Resolved = true
+ }
+ return cc
+}
+
+// ResolveAutoShortestPath finds the shortest local path in each conflict
+// and sets it to be used.
+func ResolveAutoShortestPath(cc []*Conflict) []*Conflict {
+ for _, c := range cc {
+ if c.Resolved {
+ continue
+ }
+ shortestLen := math.MaxInt32
+ shortestIndex := 0
+ for i, op := range c.Operation {
+ if op.State != OpReady {
+ continue
+ }
+
+ if len(op.Pkg.Local) < shortestLen {
+ shortestLen = len(op.Pkg.Local)
+ shortestIndex = i
+ }
+ }
+ c.OpIndex = shortestIndex
+ c.Resolved = true
+ }
+ return cc
+}
+
+// ResolveAutoVendorFileOrigin resolves conflicts based on the vendor file
+// if possible.
+func (ctx *Context) ResolveAutoVendorFileOrigin(cc []*Conflict) []*Conflict {
+ for _, c := range cc {
+ if c.Resolved {
+ continue
+ }
+ vp := ctx.VendorFilePackagePath(c.Canonical)
+ if vp == nil {
+ continue
+ }
+ // If this was just added, we still can't rely on it.
+ // We still need to ask user.
+ if vp.Add {
+ continue
+ }
+ lookFor := vp.Path
+ if len(vp.Origin) != 0 {
+ lookFor = vp.Origin
+ }
+ for i, op := range c.Operation {
+ if op.State != OpReady {
+ continue
+ }
+
+ if op.Pkg.Local == lookFor {
+ c.OpIndex = i
+ c.Resolved = true
+ break
+ }
+ }
+ }
+ return cc
+}
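
A typical resolution pipeline before running Alter, as a sketch:

	conflicts := ctx.Check()
	conflicts = ctx.ResolveAutoVendorFileOrigin(conflicts)
	conflicts = ResolveAutoLongestPath(conflicts) // or ResolveAutoShortestPath
	ctx.ResloveApply(conflicts)                   // spelling matches the method above
	if err := ctx.Alter(); err != nil {
		log.Fatal(err)
	}
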
+
+// Alter runs any requested package alterations.
+func (ctx *Context) Alter() error {
+ ctx.added = nil
+ // Ensure there are no conflicts at this time.
+ buf := &bytes.Buffer{}
+ for _, conflict := range ctx.Check() {
+ buf.WriteString(fmt.Sprintf("Different Canonical Packages for %s\n", conflict.Canonical))
+ for _, op := range conflict.Operation {
+ buf.WriteString(fmt.Sprintf("\t%s\n", op.Pkg.Local))
+ }
+ }
+ if buf.Len() != 0 {
+ return errors.New(buf.String())
+ }
+
+ var err error
+ fetch, err := newFetcher(ctx)
+ if err != nil {
+ return err
+ }
+ for {
+ var nextOps []*Operation
+ for _, op := range ctx.Operation {
+ if op.State != OpReady {
+ continue
+ }
+
+ switch op.Type {
+ case OpFetch:
+ var ops []*Operation
+ // Download packages, transform fetch op into a copy op.
+ ops, err = fetch.op(op)
+ if len(ops) > 0 {
+ nextOps = append(nextOps, ops...)
+ }
+ }
+ if err != nil {
+ return errors.Wrapf(err, "Failed to fetch package %q", op.Pkg.Path)
+ }
+ }
+ if len(nextOps) == 0 {
+ break
+ }
+ ctx.Operation = append(ctx.Operation, nextOps...)
+ }
+ // Move and possibly rewrite packages.
+ for _, op := range ctx.Operation {
+ if op.State != OpReady {
+ continue
+ }
+ pkg := op.Pkg
+
+ if pathos.FileStringEquals(op.Dest, op.Src) {
+ panic("For package " + pkg.Local + " attempt to copy to same location: " + op.Src)
+ }
+ dprintf("MV: %s (%q -> %q)\n", pkg.Local, op.Src, op.Dest)
+ // Copy the package or remove.
+ switch op.Type {
+ default:
+ panic("unknown operation type")
+ case OpRemove:
+ ctx.dirty = true
+ err = RemovePackage(op.Src, filepath.Join(ctx.RootDir, ctx.VendorFolder), pkg.IncludeTree)
+ op.State = OpDone
+ case OpCopy:
+ err = ctx.copyOperation(op, nil)
+ if os.IsNotExist(errors.Cause(err)) {
+ // Ignore packages that don't exist, like appengine.
+ err = nil
+ }
+ }
+ if err != nil {
+ return errors.Wrapf(err, "Failed to %v package %q -> %q", op.Type, op.Src, op.Dest)
+ }
+ }
+ if ctx.rewriteImports {
+ return ctx.rewrite()
+ }
+ return nil
+}
+
+func (ctx *Context) copyOperation(op *Operation, beforeCopy func(deps []string) error) error {
+ var err error
+ pkg := op.Pkg
+ ctx.dirty = true
+ h := sha1.New()
+ var checksum []byte
+
+ root, _ := pathos.TrimCommonSuffix(op.Src, pkg.Path)
+
+ err = ctx.CopyPackage(op.Dest, op.Src, root, pkg.Path, op.IgnoreFile, pkg.IncludeTree, h, beforeCopy)
+ if err == nil && !op.Uncommitted {
+ checksum = h.Sum(nil)
+ vpkg := ctx.VendorFilePackagePath(pkg.Path)
+ if vpkg != nil {
+ vpkg.ChecksumSHA1 = base64.StdEncoding.EncodeToString(checksum)
+ }
+ }
+ op.State = OpDone
+ if err != nil {
+ return errors.Wrapf(err, "copy failed. dest: %q, src: %q, pkgPath %q", op.Dest, op.Src, root)
+ }
+ return nil
+}
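
The ChecksumSHA1 recorded in vendor.json is therefore a digest over the trimmed package path, each copied file's name, and each file's bytes, encoded as base64. Schematically (assuming the CopyPackage behavior above):

	h := sha1.New()
	h.Write([]byte("github.com/pkg/errors")) // strings.Trim(pkgPath, "/")
	// CopyPackage then writes each file name into h, and copyFile
	// tees every file's contents through h during the copy.
	checksum := base64.StdEncoding.EncodeToString(h.Sum(nil))
	_ = checksum
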
diff --git a/vendor/github.com/kardianos/govendor/context/path.go b/vendor/github.com/kardianos/govendor/context/path.go
new file mode 100644
index 000000000..e10e82f83
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/path.go
@@ -0,0 +1,235 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "io"
+ "path/filepath"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+// isStdLib reports whether the import path is in GOROOT or is a special package.
+func (ctx *Context) isStdLib(importPath string) (yes bool, err error) {
+ if importPath == "builtin" || importPath == "unsafe" || importPath == "C" {
+ yes = true
+ return
+ }
+
+ dir := filepath.Join(ctx.Goroot, importPath)
+ fi, _ := os.Stat(dir)
+ if fi == nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+
+ yes, err = hasGoFileInFolder(dir)
+ return
+}
+
+// findImportDir finds the absolute directory. If relative is empty, vendor
+// folders are not searched.
+func (ctx *Context) findImportDir(relative, importPath string) (dir, gopath string, err error) {
+ if importPath == "builtin" || importPath == "unsafe" || importPath == "C" {
+ return filepath.Join(ctx.Goroot, importPath), ctx.Goroot, nil
+ }
+ if len(relative) != 0 {
+ rel := relative
+ for {
+ look := filepath.Join(rel, ctx.VendorDiscoverFolder, importPath)
+ nextRel := filepath.Join(rel, "..")
+ if rel == nextRel {
+ break
+ }
+ rel = nextRel
+ fi, err := os.Stat(look)
+ if os.IsNotExist(err) {
+ continue
+ }
+ if err != nil {
+ continue
+ }
+ if !fi.IsDir() {
+ continue
+ }
+ for _, gopath = range ctx.GopathList {
+ if pathos.FileHasPrefix(look, gopath) {
+ hasGo, err := hasGoFileInFolder(look)
+ if err != nil {
+ return "", "", err
+ }
+ if hasGo {
+ return look, gopath, nil
+ }
+ }
+ }
+ }
+
+ }
+ for _, gopath = range ctx.GopathList {
+ dir := filepath.Join(gopath, importPath)
+ fi, err := os.Stat(dir)
+ if os.IsNotExist(err) {
+ continue
+ }
+ if fi == nil {
+ continue
+ }
+ if !fi.IsDir() {
+ continue
+ }
+
+ return dir, gopath, nil
+ }
+ return "", "", ErrNotInGOPATH{importPath}
+}
+
+// findImportPath takes an absolute directory and returns the import path and go path.
+func (ctx *Context) findImportPath(dir string) (importPath, gopath string, err error) {
+ dirResolved, err := filepath.EvalSymlinks(dir)
+ if err != nil {
+ return "", "", err
+ }
+ dirs := make([]string, 0, 2)
+ dirs = append(dirs, dir)
+ if dir != dirResolved {
+ dirs = append(dirs, dirResolved)
+ }
+
+ for _, gopath := range ctx.GopathList {
+ for _, dir := range dirs {
+ if pathos.FileHasPrefix(dir, gopath) || pathos.FileStringEquals(dir, gopath) {
+ importPath = pathos.FileTrimPrefix(dir, gopath)
+ importPath = pathos.SlashToImportPath(importPath)
+ return importPath, gopath, nil
+ }
+ }
+ }
+ return "", "", ErrNotInGOPATH{dir}
+}
+
+func findRoot(folder, vendorPath string) (root string, err error) {
+ for i := 0; i <= looplimit; i++ {
+ test := filepath.Join(folder, vendorPath)
+ _, err := os.Stat(test)
+ if !os.IsNotExist(err) {
+ return folder, nil
+ }
+ nextFolder := filepath.Clean(filepath.Join(folder, ".."))
+
+ // Check for root folder.
+ if nextFolder == folder {
+ return "", ErrMissingVendorFile{vendorPath}
+ }
+ folder = nextFolder
+ }
+ panic("findRoot loop limit")
+}
+
+func hasGoFileInFolder(folder string) (bool, error) {
+ dir, err := os.Open(folder)
+ if err != nil {
+ if os.IsNotExist(err) {
+ // No folder present, no need to check for files.
+ return false, nil
+ }
+ return false, err
+ }
+ fl, err := dir.Readdir(-1)
+ dir.Close()
+ if err != nil {
+ return false, err
+ }
+ for _, fi := range fl {
+ if !fi.IsDir() && filepath.Ext(fi.Name()) == ".go" {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// RemovePackage removes the specified folder's files. If the folder is empty
+// when done (no nested folders), remove the folder and any empty parent folders.
+func RemovePackage(path, root string, tree bool) error {
+ // Ensure the path is empty of files.
+ dir, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // Remove package files.
+ fl, err := dir.Readdir(-1)
+ dir.Close()
+ if err != nil {
+ return err
+ }
+ for _, fi := range fl {
+ fullPath := filepath.Join(path, fi.Name())
+ if fi.IsDir() {
+ if tree {
+ // If tree == true then remove sub-directories too.
+ err = os.RemoveAll(fullPath)
+ if err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ err = os.Remove(fullPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Remove empty parent folders.
+ // Ignore errors here.
+ for i := 0; i <= looplimit; i++ {
+ if pathos.FileStringEquals(path, root) {
+ return nil
+ }
+ dir, err := os.Open(path)
+ if err != nil {
+ // fmt.Fprintf(os.Stderr, "Failedd to open directory %q: %v\n", path, err)
+ return nil
+ }
+
+ fl, err := dir.Readdir(1)
+ dir.Close()
+ if err != nil && err != io.EOF {
+ // fmt.Fprintf(os.Stderr, "Failedd to list directory %q: %v\n", path, err)
+ return nil
+ }
+ if len(fl) > 0 {
+ allAreLicense := true
+ for _, fi := range fl {
+ if !isLicenseFile(fi.Name()) {
+ allAreLicense = false
+ break
+ }
+ }
+ if !allAreLicense {
+ return nil
+ }
+ }
+ err = os.RemoveAll(path)
+ if err != nil {
+ // fmt.Fprintf(os.Stderr, "Failedd to remove empty directory %q: %v\n", path, err)
+ return nil
+ }
+ nextPath := filepath.Clean(filepath.Join(path, ".."))
+ // Check for root.
+ if nextPath == path {
+ return nil
+ }
+ path = nextPath
+ }
+ panic("removePackage() remove parent folders")
+}
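
A hypothetical removal that empties a vendored package and prunes now-empty (or license-only) parents back up to the vendor root:

	err := RemovePackage(
		"/proj/vendor/github.com/pkg/errors", // package folder to empty
		"/proj/vendor",                       // root: pruning stops here
		false,                                // tree=false keeps sub-directories
	)
	if err != nil {
		log.Fatal(err)
	}
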
diff --git a/vendor/github.com/kardianos/govendor/context/resolve.go b/vendor/github.com/kardianos/govendor/context/resolve.go
new file mode 100644
index 000000000..f74df27c7
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/resolve.go
@@ -0,0 +1,550 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ filepath "github.com/kardianos/govendor/internal/vfilepath"
+ os "github.com/kardianos/govendor/internal/vos"
+ "github.com/kardianos/govendor/pkgspec"
+)
+
+var knownOS = make(map[string]bool)
+var knownArch = make(map[string]bool)
+
+func init() {
+ for _, v := range strings.Fields(goosList) {
+ knownOS[v] = true
+ }
+ for _, v := range strings.Fields(goarchList) {
+ knownArch[v] = true
+ }
+}
+
+// loadPackage sets up the context with package information and
+// is called before any initial operation is performed.
+func (ctx *Context) loadPackage() error {
+ ctx.loaded = true
+ ctx.dirty = false
+ ctx.statusCache = nil
+ ctx.Package = make(map[string]*Package, len(ctx.Package))
+ // We follow the root symlink only in case the root of the repo is symlinked into the GOPATH.
+ // This could happen on some CI systems that don't check out into the GOPATH.
+ rootdir, err := filepath.EvalSymlinks(ctx.RootDir)
+ if err != nil {
+ return err
+ }
+ err = filepath.Walk(rootdir, func(path string, info os.FileInfo, err error) error {
+ if info == nil {
+ return err
+ }
+ if !info.IsDir() {
+ // We replace the directory path (resolved through the symlink) with the real Go repo package name/path,
+ // e.g. replace "<somewhere>/govendor.source.repo" with "github.com/kardianos/govendor".
+ path = strings.Replace(path, rootdir, ctx.RootDir, 1)
+ _, err = ctx.addFileImports(path, ctx.RootGopath)
+ return err
+ }
+ name := info.Name()
+ // Still go into "_workspace" to aid godep migration.
+ if name == "_workspace" {
+ return nil
+ }
+ switch name[0] {
+ case '.', '_':
+ return filepath.SkipDir
+ }
+ switch name {
+ case "testdata", "node_modules":
+ return filepath.SkipDir
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ // Finally, set any unset status.
+ return ctx.determinePackageStatus()
+}
+
+func (ctx *Context) getFileTags(pathname string, f *ast.File) (tags *TagSet, imports []string, err error) {
+ _, filenameExt := filepath.Split(pathname)
+
+ if !strings.HasSuffix(pathname, ".go") {
+ return nil, nil, nil
+ }
+ if f == nil {
+ f, _ = parser.ParseFile(token.NewFileSet(), pathname, nil, parser.ImportsOnly|parser.ParseComments)
+ if f == nil {
+ return nil, nil, nil
+ }
+ }
+ tags = &TagSet{}
+ if strings.HasSuffix(f.Name.Name, "_test") {
+ tags.AddFileTag("test")
+ }
+ pkgNameNormalized := strings.TrimSuffix(f.Name.Name, "_test")
+
+ // Files with package name "documentation" should be ignored, per go build tool.
+ if pkgNameNormalized == "documentation" {
+ return nil, nil, nil
+ }
+
+ filename := filenameExt[:len(filenameExt)-3]
+
+ l := strings.Split(filename, "_")
+
+ if n := len(l); n > 1 && l[n-1] == "test" {
+ l = l[:n-1]
+ tags.AddFileTag("test")
+ }
+ n := len(l)
+ if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
+ tags.AddFileTag(l[n-2])
+ tags.AddFileTag(l[n-1])
+ }
+ if n >= 1 && knownOS[l[n-1]] {
+ tags.AddFileTag(l[n-1])
+ }
+ if n >= 1 && knownArch[l[n-1]] {
+ tags.AddFileTag(l[n-1])
+ }
+
+ const buildPrefix = "// +build "
+ for _, cc := range f.Comments {
+ for _, c := range cc.List {
+ if strings.HasPrefix(c.Text, buildPrefix) {
+ text := strings.TrimPrefix(c.Text, buildPrefix)
+ tags.AddBuildTags(text)
+ }
+ }
+ }
+ imports = make([]string, 0, len(f.Imports))
+
+ for i := range f.Imports {
+ imp := f.Imports[i].Path.Value
+ imp, err = strconv.Unquote(imp)
+ if err != nil {
+ // Best effort.
+ continue
+ }
+ imports = append(imports, imp)
+ }
+
+ return tags, imports, nil
+}
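
The filename suffix rules mirror the go tool's implicit build constraints. Some illustrative inputs and the file tags they yield (the paths are hypothetical):

	// conn_linux_amd64.go -> linux, amd64
	// conn_windows.go     -> windows
	// dial_test.go        -> test
	tags, _, _ := ctx.getFileTags("conn_linux_amd64.go", nil)
	_ = tags // would carry the "linux" and "amd64" file tags
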
+
+// addFileImports is called from loadPackage and resolveUnknown.
+func (ctx *Context) addFileImports(pathname, gopath string) (*Package, error) {
+ dir, filenameExt := filepath.Split(pathname)
+ importPath := pathos.FileTrimPrefix(dir, gopath)
+ importPath = pathos.SlashToImportPath(importPath)
+ importPath = strings.Trim(importPath, "/")
+
+ if !strings.HasSuffix(pathname, ".go") {
+ return nil, nil
+ }
+ // No need to add the same file more than once.
+ for _, pkg := range ctx.Package {
+ if !pathos.FileStringEquals(pkg.Dir, dir) {
+ continue
+ }
+ for _, f := range pkg.Files {
+ if pathos.FileStringEquals(f.Path, pathname) {
+ return nil, nil
+ }
+ }
+ for _, f := range pkg.ignoreFile {
+ if pathos.FileStringEquals(f, filenameExt) {
+ return nil, nil
+ }
+ }
+ }
+ // Ignore errors here and continue on a best-effort basis.
+ f, _ := parser.ParseFile(token.NewFileSet(), pathname, nil, parser.ImportsOnly|parser.ParseComments)
+ if f == nil {
+ return nil, nil
+ }
+ pkgNameNormalized := strings.TrimSuffix(f.Name.Name, "_test")
+
+ // Files with package name "documentation" should be ignored, per go build tool.
+ if pkgNameNormalized == "documentation" {
+ return nil, nil
+ }
+
+ tags, _, err := ctx.getFileTags(pathname, f)
+ if err != nil {
+ return nil, err
+ }
+ // If a file has "// +build ignore", it can mix package main with a normal package.
+ // For now, just ignore ignored packages.
+ if tags.IgnoreItem() {
+ return nil, nil
+ }
+
+ pkg, found := ctx.Package[importPath]
+ if !found {
+ status := Status{
+ Type: TypePackage,
+ Location: LocationUnknown,
+ Presence: PresenceFound,
+ }
+ if pkgNameNormalized == "main" {
+ status.Type = TypeProgram
+ }
+ pkg = ctx.setPackage(dir, importPath, importPath, gopath, status)
+ ctx.Package[importPath] = pkg
+ }
+ if pkg.Status.Location != LocationLocal {
+ if tags.IgnoreItem(ctx.ignoreTag...) {
+ pkg.ignoreFile = append(pkg.ignoreFile, filenameExt)
+ return pkg, nil
+ }
+ // A non-local package is excluded if it matches, or is a sub-package of, an excluded package.
+ for _, exclude := range ctx.excludePackage {
+ if importPath == exclude || strings.HasPrefix(importPath, exclude+"/") {
+ pkg.Status.Presence = PresenceExcluded
+ }
+ }
+ }
+ pf := &File{
+ Package: pkg,
+ Path: pathname,
+ Imports: make([]string, len(f.Imports)),
+ }
+ pkg.Files = append(pkg.Files, pf)
+ for i := range f.Imports {
+ imp := f.Imports[i].Path.Value
+ imp, err = strconv.Unquote(imp)
+ if err != nil {
+ // Best effort only.
+ continue
+ }
+ if strings.HasPrefix(imp, "./") {
+ imp = path.Join(importPath, imp)
+ }
+ pf.Imports[i] = imp
+ if pkg.Status.Presence != PresenceExcluded { // do not add package imports if it was explicitly excluded
+ _, err = ctx.addSingleImport(pkg.Dir, imp, pkg.IncludeTree)
+ if err != nil {
+ return pkg, err
+ }
+ }
+ }
+
+ // Record any import comment for file.
+ var ic *ast.Comment
+ if f.Name != nil {
+ pos := f.Name.Pos()
+ big:
+ // Find the next comment after the package name.
+ for _, cblock := range f.Comments {
+ for _, c := range cblock.List {
+ if c.Pos() > pos {
+ ic = c
+ break big
+ }
+ }
+ }
+ }
+ if ic != nil {
+ // If it starts with the import text, assume it is the import comment.
+ if index := strings.Index(ic.Text, " import "); index > 0 && index < 5 {
+ q := strings.TrimSpace(ic.Text[index+len(" import "):])
+ pf.ImportComment, err = strconv.Unquote(q)
+ if err != nil {
+ pf.ImportComment = q
+ }
+ }
+ }
+
+ return pkg, nil
+}
+
+func (ctx *Context) setPackage(dir, canonical, local, gopath string, status Status) *Package {
+ if pkg, exists := ctx.Package[local]; exists {
+ return pkg
+ }
+ at := 0
+ vMiddle := "/" + pathos.SlashToImportPath(ctx.VendorDiscoverFolder) + "/"
+ vStart := pathos.SlashToImportPath(ctx.VendorDiscoverFolder) + "/"
+ switch {
+ case strings.Contains(canonical, vMiddle):
+ at = strings.LastIndex(canonical, vMiddle) + len(vMiddle)
+ case strings.HasPrefix(canonical, vStart):
+ at = strings.LastIndex(canonical, vStart) + len(vStart)
+ }
+
+ originDir := dir
+ inVendor := false
+ tree := false
+ origin := ""
+ if at > 0 {
+ canonical = canonical[at:]
+ inVendor = true
+ if status.Location == LocationUnknown {
+ p := path.Join(ctx.RootImportPath, ctx.VendorDiscoverFolder)
+ if strings.HasPrefix(local, p) {
+ status.Location = LocationVendor
+ od, _, err := ctx.findImportDir("", canonical)
+ if err == nil {
+ originDir = od
+ }
+ }
+ }
+ }
+ if vp := ctx.VendorFilePackagePath(canonical); vp != nil {
+ tree = vp.Tree
+ origin = vp.Origin
+ }
+ // Set originDir correctly if origin is set.
+ if len(origin) > 0 {
+ od, _, err := ctx.findImportDir("", origin)
+ if err == nil {
+ originDir = od
+ }
+ }
+ if status.Location == LocationUnknown && filepath.HasPrefixDir(canonical, ctx.RootImportPath) {
+ status.Location = LocationLocal
+ }
+ spec, err := pkgspec.Parse("", canonical)
+ if err != nil {
+ panic(err)
+ }
+ if len(origin) > 0 && origin != canonical {
+ spec.Origin = origin
+ }
+ spec.IncludeTree = tree
+ pkg := &Package{
+ OriginDir: originDir,
+ Dir: dir,
+ Pkg: spec,
+ Local: local,
+ Gopath: gopath,
+ Status: status,
+ inVendor: inVendor,
+ }
+ ctx.Package[local] = pkg
+ return pkg
+}
+
+var testNeedsSortOrder = false
+
+func (ctx *Context) addSingleImport(pkgInDir, imp string, tree bool) (*Package, error) {
+	// Do not check for an existing package right away. If an external package
+ // has been added and we are looking in a vendor package, this won't work.
+ // We need to search any relative vendor folders first.
+
+ // Also need to check for vendor paths that won't use the local path in import path.
+ for _, pkg := range ctx.Package {
+ if pkg.Path == imp && pkg.inVendor && pathos.FileHasPrefix(pkg.Dir, pkgInDir) {
+ return nil, nil
+ }
+ }
+ dir, gopath, err := ctx.findImportDir(pkgInDir, imp)
+ if err != nil {
+ if _, is := err.(ErrNotInGOPATH); is {
+ presence := PresenceMissing
+ // excluded packages, don't need to be present
+ for _, exclude := range ctx.excludePackage {
+ if imp == exclude || strings.HasPrefix(imp, exclude+"/") {
+ presence = PresenceExcluded
+ }
+ }
+ return ctx.setPackage("", imp, imp, "", Status{
+ Type: TypePackage,
+ Location: LocationNotFound,
+ Presence: presence,
+ }), nil
+ }
+ return nil, err
+ }
+ if pathos.FileStringEquals(gopath, ctx.Goroot) {
+ return ctx.setPackage(dir, imp, imp, ctx.Goroot, Status{
+ Type: TypePackage,
+ Location: LocationStandard,
+ Presence: PresenceFound,
+ }), nil
+ }
+ if tree {
+ return ctx.setPackage(dir, imp, imp, ctx.RootGopath, Status{
+ Type: TypePackage,
+ Location: LocationVendor,
+ Presence: PresenceFound,
+ }), nil
+ }
+ df, err := os.Open(dir)
+ if err != nil {
+ return nil, err
+ }
+ info, err := df.Readdir(-1)
+ df.Close()
+ if err != nil {
+ return nil, err
+ }
+ if testNeedsSortOrder {
+ sort.Sort(fileInfoSort(info))
+ }
+ var pkg *Package
+ for _, fi := range info {
+ if fi.IsDir() {
+ continue
+ }
+ switch fi.Name()[0] {
+ case '.', '_':
+ continue
+ }
+ if pathos.FileStringEquals(dir, pkgInDir) {
+ continue
+ }
+ path := filepath.Join(dir, fi.Name())
+ tryPkg, err := ctx.addFileImports(path, gopath)
+ if tryPkg != nil {
+ pkg = tryPkg
+ }
+ if err != nil {
+ return pkg, err
+ }
+ }
+ return pkg, nil
+}
+
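+// determinePackageStatus resolves the location and presence of each known
+// package, collapses packages under vendor trees, and prunes unused vendor
+// and external packages.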
+func (ctx *Context) determinePackageStatus() error {
+ // Add any packages in the vendor file but not in GOPATH or vendor dir.
+ for _, vp := range ctx.VendorFile.Package {
+ if vp.Remove {
+ continue
+ }
+ if _, found := ctx.Package[vp.Path]; found {
+ continue
+ }
+ pkg, err := ctx.addSingleImport(ctx.RootDir, vp.Path, vp.Tree)
+ if err != nil {
+ return err
+ }
+ if pkg != nil {
+ pkg.Origin = vp.Origin
+ pkg.inTree = vp.Tree
+ pkg.inVendor = true
+ }
+ }
+
+ // Determine the status of remaining imports.
+ for _, pkg := range ctx.Package {
+ if pkg.Status.Location != LocationUnknown {
+ continue
+ }
+ if filepath.HasPrefixDir(pkg.Path, ctx.RootImportPath) {
+ pkg.Status.Location = LocationLocal
+ continue
+ }
+ pkg.Status.Location = LocationExternal
+ }
+
+ ctx.updatePackageReferences()
+
+ // Mark sub-tree packages as "tree", but leave any existing bit (unused) on the
+	// parent-most tree package.
+ for path, pkg := range ctx.Package {
+ if vp := ctx.VendorFilePackagePath(pkg.Path); vp != nil && vp.Tree {
+ // Remove internal tree references.
+ del := make([]string, 0, 6)
+ for opath, opkg := range pkg.referenced {
+ if strings.HasPrefix(opkg.Path, pkg.Path+"/") {
+ del = append(del, opath)
+ }
+ }
+ delete(pkg.referenced, pkg.Local) // remove any self reference
+ for _, d := range del {
+ delete(pkg.referenced, d)
+ }
+ continue
+ }
+
+ if parentTrees := ctx.findPackageParentTree(pkg); len(parentTrees) > 0 {
+ pkg.Status.Presence = PresenceTree
+
+ // Transfer all references from the child to the top parent.
+ if parentPkg := ctx.Package[parentTrees[0]]; parentPkg != nil {
+ for opath, opkg := range pkg.referenced {
+ // Do not transfer internal references.
+ if strings.HasPrefix(opkg.Path, parentPkg.Path+"/") {
+ continue
+ }
+ parentPkg.referenced[opath] = opkg
+ }
+ pkg.referenced = make(map[string]*Package, 0)
+ for _, opkg := range ctx.Package {
+ if _, has := opkg.referenced[path]; has {
+ opkg.referenced[parentPkg.Local] = parentPkg
+ delete(opkg.referenced, path)
+ }
+ }
+ }
+ }
+ }
+
+ ctx.updatePackageReferences()
+
+ // Determine any un-used internal vendor imports.
+ for i := 0; i <= looplimit; i++ {
+ altered := false
+ for path, pkg := range ctx.Package {
+ if pkg.Status.Presence == PresenceUnused || pkg.Status.Presence == PresenceTree || pkg.Status.Type == TypeProgram {
+ continue
+ }
+ if len(pkg.referenced) > 0 || pkg.Status.Location != LocationVendor {
+ continue
+ }
+ altered = true
+ pkg.Status.Presence = PresenceUnused
+ for _, other := range ctx.Package {
+ delete(other.referenced, path)
+ }
+ }
+ if !altered {
+ break
+ }
+ if i == looplimit {
+ panic("determinePackageStatus loop limit")
+ }
+ }
+
+ ctx.updatePackageReferences()
+
+	// Unused external references may have worked their way in through the
+	// vendor file. Remove any external leaves.
+ for i := 0; i <= looplimit; i++ {
+ altered := false
+ for path, pkg := range ctx.Package {
+ if len(pkg.referenced) > 0 || pkg.Status.Location != LocationExternal {
+ continue
+ }
+ altered = true
+ delete(ctx.Package, path)
+ pkg.Status.Presence = PresenceUnused
+ for _, other := range ctx.Package {
+ delete(other.referenced, path)
+ }
+ continue
+ }
+ if !altered {
+ break
+ }
+ if i == looplimit {
+ panic("determinePackageStatus loop limit")
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/kardianos/govendor/context/rewrite.go b/vendor/github.com/kardianos/govendor/context/rewrite.go
new file mode 100644
index 000000000..eff55dad2
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/rewrite.go
@@ -0,0 +1,209 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "strconv"
+ "strings"
+
+ "github.com/dchest/safefile"
+ "github.com/kardianos/govendor/internal/pathos"
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+// rewrite rewrites files to the local path.
+func (ctx *Context) rewrite() error {
+ if !ctx.rewriteImports {
+ return nil
+ }
+ if ctx.dirty {
+ if err := ctx.loadPackage(); err != nil {
+ return err
+ }
+ }
+ ctx.dirty = true
+
+ fileImports := make(map[string]map[string]*File) // map[ImportPath]map[FilePath]File
+ for _, pkg := range ctx.Package {
+ for _, f := range pkg.Files {
+ for _, imp := range f.Imports {
+ fileList := fileImports[imp]
+ if fileList == nil {
+ fileList = make(map[string]*File, 1)
+ fileImports[imp] = fileList
+ }
+ fileList[f.Path] = f
+ }
+ }
+ }
+ filePaths := make(map[string]*File, len(ctx.RewriteRule))
+ for from, to := range ctx.RewriteRule {
+ // Add files that contain an import path to rewrite.
+ for _, f := range fileImports[from] {
+ filePaths[f.Path] = f
+ }
+
+ // Add files that contain import comments to remove.
+ if pkg := ctx.Package[from]; pkg != nil {
+ for _, f := range pkg.Files {
+ if len(f.ImportComment) != 0 {
+ filePaths[f.Path] = f
+ }
+ }
+ }
+ if pkg := ctx.Package[to]; pkg != nil {
+ for _, f := range pkg.Files {
+ if len(f.ImportComment) != 0 {
+ filePaths[f.Path] = f
+ }
+ }
+ }
+ }
+
+ /*
+ RULE: co2/internal/co3/pk3 -> co1/internal/co3/pk3
+
+ i co1/internal/co2/pk2 [co2/pk2] < ["co1/pk1"]
+ i co1/internal/co3/pk3 [co3/pk3] < ["co1/pk1"]
+ e co2/internal/co3/pk3 [co3/pk3] < ["co1/internal/co2/pk2"]
+ l co1/pk1 < []
+ s strings < ["co1/internal/co3/pk3" "co2/internal/co3/pk3"]
+
+	Rewrite the package "co1/internal/co2/pk2" because it references a package covered by a rewrite "from" rule.
+ */
+ ctx.updatePackageReferences()
+ for from := range ctx.RewriteRule {
+ pkg := ctx.Package[from]
+ if pkg == nil {
+ continue
+ }
+ for _, ref := range pkg.referenced {
+ for _, f := range ref.Files {
+ dprintf("REF RW %s\n", f.Path)
+ filePaths[f.Path] = f
+ }
+ }
+ }
+
+ defer func() {
+ ctx.RewriteRule = make(map[string]string, 3)
+ }()
+
+ if len(ctx.RewriteRule) == 0 {
+ return nil
+ }
+ goprint := &printer.Config{
+ Mode: printer.TabIndent | printer.UseSpaces,
+ Tabwidth: 8,
+ }
+ for _, fileInfo := range filePaths {
+ if !pathos.FileHasPrefix(fileInfo.Path, ctx.RootDir) {
+ continue
+ }
+
+ // Read the file into AST, modify the AST.
+ fileset := token.NewFileSet()
+ f, _ := parser.ParseFile(fileset, fileInfo.Path, nil, parser.ParseComments)
+ if f == nil {
+ return nil
+ }
+ pkgNameNormalized := strings.TrimSuffix(f.Name.Name, "_test")
+		// Files with package name "documentation" should be ignored, per the go build tool.
+ if pkgNameNormalized == "documentation" {
+ return nil
+ }
+
+ dprintf("RW:: File: %s\n", fileInfo.Path)
+
+ for _, impNode := range f.Imports {
+ imp, err := strconv.Unquote(impNode.Path.Value)
+ if err != nil {
+ return err
+ }
+ for from, to := range ctx.RewriteRule {
+ if imp != from {
+ continue
+ }
+ impNode.Path.Value = strconv.Quote(to)
+ for i, metaImport := range fileInfo.Imports {
+ if from == metaImport {
+ dprintf("\tImport: %s -> %s\n", from, to)
+ fileInfo.Imports[i] = to
+ }
+ }
+ break
+ }
+ }
+
+ // Remove import comment.
+ st := fileInfo.Package.Status
+ if st.Location == LocationVendor || st.Location == LocationExternal {
+ var ic *ast.Comment
+ if f.Name != nil {
+ pos := f.Name.Pos()
+ big:
+ // Find the next comment after the package name.
+ for _, cblock := range f.Comments {
+ for _, c := range cblock.List {
+ if c.Pos() > pos {
+ ic = c
+ break big
+ }
+ }
+ }
+ }
+ if ic != nil {
+ // If it starts with the import text, assume it is the import comment and remove.
+ if index := strings.Index(ic.Text, " import "); index > 0 && index < 5 {
+ ic.Text = strings.Repeat(" ", len(ic.Text))
+ }
+ }
+ }
+
+ // Don't sort or modify the imports to minimize diffs.
+
+ // Write the AST back to disk.
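+		// safefile writes to a temporary file and only replaces the original
+		// on Commit, so a failed rewrite leaves the source file intact (per
+		// the dchest/safefile package).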
+ fi, err := os.Stat(fileInfo.Path)
+ if err != nil {
+ return err
+ }
+ w, err := safefile.Create(fileInfo.Path, fi.Mode())
+ if err != nil {
+ return err
+ }
+ err = goprint.Fprint(w, fileset, f)
+ if err != nil {
+ w.Close()
+ return err
+ }
+ err = w.Commit()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ctx *Context) makeSet(pkg *Package, mvSet map[*Package]struct{}) {
+ mvSet[pkg] = struct{}{}
+ for _, f := range pkg.Files {
+ for _, imp := range f.Imports {
+ next := ctx.Package[imp]
+ switch {
+ default:
+ if _, has := mvSet[next]; !has {
+ ctx.makeSet(next, mvSet)
+ }
+ case next == nil:
+ case next.Path == next.Local:
+ case next.Status.Location != LocationExternal:
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/context/status.go b/vendor/github.com/kardianos/govendor/context/status.go
new file mode 100644
index 000000000..000d85b28
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/status.go
@@ -0,0 +1,271 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/kardianos/govendor/pkgspec"
+)
+
+type (
+ // Status is the package type, location, and presence indicators.
+ Status struct {
+ Type StatusType // program, package
+ Location StatusLocation // vendor, local, external, stdlib
+ Presence StatusPresence // missing, unused, tree, excluded
+
+ Not bool // Not indicates boolean operation "not" on above.
+ }
+
+ StatusType byte // StatusType is main or not-main.
+ StatusLocation byte // StatusLocation is where the package is.
+ StatusPresence byte // StatusPresence is if it can be found or referenced.
+
+ // StatusGroup is the logical filter for status with "and", "not", and grouping.
+ StatusGroup struct {
+ Status []Status
+ Group []StatusGroup
+ And bool
+ Not bool
+ }
+)
+
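+// String renders the status as a compact code: a "not" marker, then one
+// character each for type, location, and presence. For example, a found
+// vendored package prints " v " and a missing external package " em".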
+func (s Status) String() string {
+ t := ' '
+ l := ' '
+ p := ' '
+ not := ""
+ if s.Not {
+ not = "!"
+ }
+ switch s.Type {
+ default:
+ panic("Unknown Type type")
+ case TypeUnknown:
+ t = '_'
+ case TypePackage:
+ t = ' '
+ case TypeProgram:
+ t = 'p'
+ }
+ switch s.Location {
+ default:
+ panic("Unknown Location type")
+ case LocationUnknown:
+ l = '_'
+ case LocationNotFound:
+ l = ' '
+ case LocationLocal:
+ l = 'l'
+ case LocationExternal:
+ l = 'e'
+ case LocationVendor:
+ l = 'v'
+ case LocationStandard:
+ l = 's'
+ }
+ switch s.Presence {
+ default:
+ panic("Unknown Presence type")
+ case PresenceUnknown:
+ p = '_'
+ case PresenceFound:
+ p = ' '
+ case PresenceMissing:
+ p = 'm'
+ case PresenceUnused:
+ p = 'u'
+ case PresenceTree:
+ p = 't'
+ case PresenceExcluded:
+ p = 'x'
+ }
+ return not + string(t) + string(l) + string(p)
+}
+
+func (sg StatusGroup) String() string {
+ buf := &bytes.Buffer{}
+ if sg.And {
+ buf.WriteString("and")
+ } else {
+ buf.WriteString("or")
+ }
+ buf.WriteRune('(')
+ for i, s := range sg.Status {
+ if i != 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(s.String())
+ }
+ if len(sg.Status) > 0 && len(sg.Group) > 0 {
+ buf.WriteRune(',')
+ }
+ for i, ssg := range sg.Group {
+ if i != 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(ssg.String())
+ }
+ buf.WriteRune(')')
+ return buf.String()
+}
+
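+// Match reports whether pkgSt satisfies filterSt; filterSt.Not inverts each
+// comparison, and unknown fields in the filter match anything.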
+func (pkgSt Status) Match(filterSt Status) bool {
+ // not: true, pkg: A, filter: B
+ // true == (A == B) -> true == false -> false
+ //
+ // not: false, pkg: A, filter: B
+ // false == (A == B) -> false == false -> true
+ //
+ // not: true, pkg: A, filter: A
+	// true == (A == A) -> true == true -> true
+ //
+ // not: false, pkg: A, filter: A
+ // false == (A == A) -> false == true -> false
+ if filterSt.Location != LocationUnknown && filterSt.Not == (pkgSt.Location == filterSt.Location) {
+ return false
+ }
+ if filterSt.Type != TypeUnknown && filterSt.Not == (pkgSt.Type == filterSt.Type) {
+ return false
+ }
+ if filterSt.Presence != PresenceUnknown && filterSt.Not == (pkgSt.Presence == filterSt.Presence) {
+ return false
+ }
+ return true
+}
+
+func (status Status) MatchGroup(filter StatusGroup) bool {
+ or := !filter.And
+ for _, fs := range filter.Status {
+ if status.Match(fs) == or {
+ return or != filter.Not
+ }
+ }
+ for _, fg := range filter.Group {
+ if status.MatchGroup(fg) == or {
+ return or != filter.Not
+ }
+ }
+ return filter.And
+}
+
+const (
+ TypeUnknown StatusType = iota // TypeUnknown is unset StatusType.
+ TypePackage // TypePackage package is a non-main package.
+ TypeProgram // TypeProgram package is a main package.
+)
+
+const (
+ LocationUnknown StatusLocation = iota // LocationUnknown is unset StatusLocation.
+ LocationNotFound // LocationNotFound package is not to be found (use PresenceMissing).
+ LocationStandard // LocationStandard package is in the standard library.
+ LocationLocal // LocationLocal package is in a project, not in a vendor folder.
+ LocationExternal // LocationExternal package is not in a project, in GOPATH.
+ LocationVendor // LocationVendor package is in a vendor folder.
+)
+
+const (
+ PresenceUnknown StatusPresence = iota // PresenceUnknown is unset StatusPresence.
+ PresenceFound // PresenceFound package exists.
+ PresenceMissing // PresenceMissing package is referenced but not found.
+ PresenceUnused // PresenceUnused package is found locally but not referenced.
+ PresenceTree // PresenceTree package is in vendor folder, in a tree, but not referenced.
+ PresenceExcluded // PresenceExcluded package exists, but should not be vendored.
+)
+
+// StatusItem represents a package in the current project.
+type StatusItem struct {
+ Status Status
+ Pkg *pkgspec.Pkg
+ VersionExact string
+ Local string
+ ImportedBy []*Package
+}
+
+func (li StatusItem) String() string {
+ if li.Local == li.Pkg.Path {
+ return fmt.Sprintf("%s %s < %q", li.Status, li.Pkg.Path, li.ImportedBy)
+ }
+ return fmt.Sprintf("%s %s [%s] < %q", li.Status, li.Local, li.Pkg.Path, li.ImportedBy)
+}
+
+type statusItemSort []StatusItem
+
+func (li statusItemSort) Len() int { return len(li) }
+func (li statusItemSort) Swap(i, j int) { li[i], li[j] = li[j], li[i] }
+func (li statusItemSort) Less(i, j int) bool {
+ if li[i].Status.Location != li[j].Status.Location {
+ return li[i].Status.Location > li[j].Status.Location
+ }
+ return li[i].Local < li[j].Local
+}
+
+// updateStatusCache rebuilds the cached package status list.
+func (ctx *Context) updateStatusCache() error {
+ var err error
+ if !ctx.loaded || ctx.dirty {
+ err = ctx.loadPackage()
+ if err != nil {
+ return err
+ }
+ }
+ ctx.updatePackageReferences()
+ list := make([]StatusItem, 0, len(ctx.Package))
+ for _, pkg := range ctx.Package {
+ version := ""
+ versionExact := ""
+ if vp := ctx.VendorFilePackagePath(pkg.Path); vp != nil {
+ version = vp.Version
+ versionExact = vp.VersionExact
+ }
+
+ origin := ""
+ if pkg.Origin != pkg.Path {
+ origin = pkg.Origin
+ }
+ if len(pkg.Origin) == 0 && pkg.Path != pkg.Local {
+ origin = pkg.Local
+ }
+
+ li := StatusItem{
+ Status: pkg.Status,
+ Pkg: &pkgspec.Pkg{Path: pkg.Path, IncludeTree: pkg.IncludeTree, Origin: origin, Version: version, FilePath: pkg.Dir},
+ Local: pkg.Local,
+ VersionExact: versionExact,
+ ImportedBy: make([]*Package, 0, len(pkg.referenced)),
+ }
+ for _, ref := range pkg.referenced {
+ li.ImportedBy = append(li.ImportedBy, ref)
+ }
+ sort.Sort(packageList(li.ImportedBy))
+ list = append(list, li)
+ }
+	// Sort the list by status location, then by local path.
+ sort.Sort(statusItemSort(list))
+
+ ctx.statusCache = list
+ return nil
+}
+
+// Status obtains the current package status list.
+func (ctx *Context) Status() ([]StatusItem, error) {
+ var err error
+ if !ctx.loaded || ctx.dirty {
+ err = ctx.loadPackage()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if ctx.statusCache == nil {
+ err = ctx.updateStatusCache()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ctx.statusCache, nil
+}
diff --git a/vendor/github.com/kardianos/govendor/context/sync.go b/vendor/github.com/kardianos/govendor/context/sync.go
new file mode 100644
index 000000000..a5aa2b0f1
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/sync.go
@@ -0,0 +1,390 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "hash"
+ "io"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/kardianos/govendor/internal/pathos"
+ "github.com/kardianos/govendor/vendorfile"
+
+ "golang.org/x/tools/go/vcs"
+)
+
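+// skipperTree never skips anything; skipperPackage skips sub-directories,
+// so packages not marked as trees are hashed shallowly.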
+func skipperTree(name string, dir bool) bool {
+ return false
+}
+func skipperPackage(name string, dir bool) bool {
+ return dir
+}
+
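+// VerifyVendor re-hashes each vendored package on disk and returns the
+// packages whose contents no longer match the recorded ChecksumSHA1.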
+func (ctx *Context) VerifyVendor() (outOfDate []*vendorfile.Package, err error) {
+ vf := ctx.VendorFile
+ root := filepath.Join(ctx.RootDir, ctx.VendorFolder)
+ add := func(vp *vendorfile.Package) {
+ outOfDate = append(outOfDate, vp)
+ }
+ for _, vp := range vf.Package {
+ if vp.Remove {
+ continue
+ }
+ if len(vp.Path) == 0 {
+ continue
+ }
+ if len(vp.ChecksumSHA1) == 0 {
+ add(vp)
+ continue
+ }
+ fp := filepath.Join(root, pathos.SlashToFilepath(vp.Path))
+ h := sha1.New()
+ sk := skipperPackage
+ if vp.Tree {
+ sk = skipperTree
+ }
+ err = getHash(root, fp, h, sk)
+ if err != nil {
+ return
+ }
+ checksum := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ if vp.ChecksumSHA1 != checksum {
+ add(vp)
+ }
+ }
+ return
+}
+
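+// getHash writes the root-relative path of fp, then each contained file
+// name and its contents (recursing into directories in sorted order),
+// into h.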
+func getHash(root, fp string, h hash.Hash, skipper func(name string, isDir bool) bool) error {
+ rel := pathos.FileTrimPrefix(fp, root)
+ rel = pathos.SlashToImportPath(rel)
+ rel = strings.Trim(rel, "/")
+
+ h.Write([]byte(rel))
+
+ dir, err := os.Open(fp)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("Failed to open dir %q: %v", fp, err)
+ }
+ filelist, err := dir.Readdir(-1)
+ dir.Close()
+ if err != nil {
+ return fmt.Errorf("Failed to read dir %q: %v", fp, err)
+ }
+ sort.Sort(fileInfoSort(filelist))
+ for _, fi := range filelist {
+ if skipper(fi.Name(), fi.IsDir()) {
+ continue
+ }
+ p := filepath.Join(fp, fi.Name())
+ if fi.IsDir() {
+ err = getHash(root, p, h, skipper)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ f, err := os.Open(p)
+ if err != nil {
+ return err
+ }
+ h.Write([]byte(fi.Name()))
+ _, err = io.Copy(h, f)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type remoteFailure struct {
+ Path string
+ Msg string
+ Err error
+}
+
+func (fail remoteFailure) Error() string {
+ return fmt.Sprintf("Failed for %q (%s): %v", fail.Path, fail.Msg, fail.Err)
+}
+
+type remoteFailureList []remoteFailure
+
+func (list remoteFailureList) Error() string {
+ if len(list) == 0 {
+ return "(no remote failure)"
+ }
+ buf := &bytes.Buffer{}
+ buf.WriteString("Remotes failed for:\n")
+ for _, item := range list {
+ buf.WriteString("\t")
+ buf.WriteString(item.Error())
+ buf.WriteString("\n")
+ }
+ return buf.String()
+}
+
+type VCSCmd struct {
+ *vcs.Cmd
+}
+
+func (vcsCmd *VCSCmd) RevisionSync(dir, revision string) error {
+ return vcsCmd.run(dir, vcsCmd.TagSyncCmd, "tag", revision)
+}
+
+func (v *VCSCmd) run(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, true)
+ return err
+}
+
+// run1 is the generalized implementation of run and runOutput.
+func (vcsCmd *VCSCmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) {
+ v := vcsCmd.Cmd
+ m := make(map[string]string)
+ for i := 0; i < len(keyval); i += 2 {
+ m[keyval[i]] = keyval[i+1]
+ }
+ args := strings.Fields(cmdline)
+ for i, arg := range args {
+ args[i] = expand(m, arg)
+ }
+
+ _, err := exec.LookPath(v.Cmd)
+ if err != nil {
+ fmt.Fprintf(os.Stderr,
+ "go: missing %s command. See http://golang.org/s/gogetcmd\n",
+ v.Name)
+ return nil, err
+ }
+
+ cmd := exec.Command(v.Cmd, args...)
+ cmd.Dir = dir
+ cmd.Env = envForDir(cmd.Dir)
+ if vcs.ShowCmd {
+ fmt.Printf("cd %s\n", dir)
+ fmt.Printf("%s %s\n", v.Cmd, strings.Join(args, " "))
+ }
+ var buf bytes.Buffer
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ err = cmd.Run()
+ out := buf.Bytes()
+ if err != nil {
+ if verbose {
+ fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " "))
+ os.Stderr.Write(out)
+ }
+ return nil, err
+ }
+ return out, nil
+}
+
+// expand rewrites s to replace {k} with match[k] for each key k in match.
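+// For example, expand(map[string]string{"tag": "v1.2"}, "reset --hard {tag}")
+// returns "reset --hard v1.2".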
+func expand(match map[string]string, s string) string {
+ for k, v := range match {
+ s = strings.Replace(s, "{"+k+"}", v, -1)
+ }
+ return s
+}
+
+// envForDir returns a copy of the environment
+// suitable for running in the given directory.
+// The environment is the current process's environment
+// but with an updated $PWD, so that an os.Getwd in the
+// child will be faster.
+func envForDir(dir string) []string {
+ env := os.Environ()
+ // Internally we only use rooted paths, so dir is rooted.
+ // Even if dir is not rooted, no harm done.
+ return mergeEnvLists([]string{"PWD=" + dir}, env)
+}
+
+// mergeEnvLists merges the two environment lists such that
+// variables with the same name in "in" replace those in "out".
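+// For example, merging in "PWD=/tmp/x" replaces any existing PWD entry.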
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+ for _, inkv := range in {
+ k := strings.SplitAfterN(inkv, "=", 2)[0]
+ for i, outkv := range out {
+ if strings.HasPrefix(outkv, k) {
+ out[i] = inkv
+ continue NextVar
+ }
+ }
+ out = append(out, inkv)
+ }
+ return out
+}
+
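+// updateVcsCmd adjusts the stock vcs commands; for git it swaps tag syncing
+// for hard resets so the cache can be pinned to an exact revision.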
+func updateVcsCmd(cmd *vcs.Cmd) *VCSCmd {
+ switch cmd.Name {
+ case "Git":
+ cmd.TagSyncCmd = "reset --hard {tag}"
+ cmd.TagSyncDefault = "reset --hard origin/master"
+ cmd.DownloadCmd = "fetch"
+ case "Mercurial":
+ case "Bazaar":
+ case "Subversion":
+ }
+ return &VCSCmd{Cmd: cmd}
+}
+
+var isSecureScheme = map[string]bool{
+ "https": true,
+ "git+ssh": true,
+ "bzr+ssh": true,
+ "svn+ssh": true,
+ "ssh": true,
+}
+
+func vcsIsSecure(repo string) bool {
+ u, err := url.Parse(repo)
+ if err != nil {
+ // If repo is not a URL, it's not secure.
+ return false
+ }
+ return isSecureScheme[u.Scheme]
+}
+
+// Sync checks for outdated packages in the vendor folder and fetches the
+// correct revision from the remote.
+func (ctx *Context) Sync(dryrun bool) (err error) {
+ // vcs.ShowCmd = true
+ outOfDate, err := ctx.VerifyVendor()
+ if err != nil {
+ return fmt.Errorf("Failed to verify checksums: %v", err)
+ }
+ // GOPATH includes the src dir, move up a level.
+ cacheRoot := filepath.Join(ctx.RootGopath, "..", ".cache", "govendor")
+ err = os.MkdirAll(cacheRoot, 0700)
+ if err != nil {
+ return err
+ }
+
+	// Collect errors and proceed where possible.
+ rem := remoteFailureList{}
+
+ h := sha1.New()
+ updatedVendorFile := false
+
+ for _, vp := range outOfDate {
+ // Bundle packages together that have the same revision and share at least one root segment.
+ if len(vp.Revision) == 0 {
+ continue
+ }
+ from := vp.Path
+ if len(vp.Origin) > 0 {
+ from = vp.Origin
+ }
+ if from != vp.Path {
+ fmt.Fprintf(ctx, "fetch %q from %q\n", vp.Path, from)
+ } else {
+ fmt.Fprintf(ctx, "fetch %q\n", vp.Path)
+ }
+ if dryrun {
+ continue
+ }
+ pkgDir := filepath.Join(cacheRoot, from)
+
+ // See if repo exists.
+ sysVcsCmd, repoRoot, err := vcs.FromDir(pkgDir, cacheRoot)
+ var vcsCmd *VCSCmd
+ repoRootDir := filepath.Join(cacheRoot, repoRoot)
+ if err != nil {
+ rr, err := vcs.RepoRootForImportPath(from, false)
+ if err != nil {
+ rem = append(rem, remoteFailure{Msg: "failed to ping remote repo", Path: vp.Path, Err: err})
+ continue
+ }
+ if !ctx.Insecure && !vcsIsSecure(rr.Repo) {
+ rem = append(rem, remoteFailure{Msg: "repo remote not secure", Path: vp.Path, Err: nil})
+ continue
+ }
+
+ vcsCmd = updateVcsCmd(rr.VCS)
+
+ repoRoot = rr.Root
+ repoRootDir = filepath.Join(cacheRoot, repoRoot)
+ err = os.MkdirAll(repoRootDir, 0700)
+ if err != nil {
+ rem = append(rem, remoteFailure{Msg: "failed to make repo root dir", Path: vp.Path, Err: err})
+ continue
+ }
+
+ err = vcsCmd.CreateAtRev(repoRootDir, rr.Repo, vp.Revision)
+ if err != nil {
+ rem = append(rem, remoteFailure{Msg: "failed to clone repo", Path: vp.Path, Err: err})
+ continue
+ }
+ } else {
+ // Use cache.
+ vcsCmd = updateVcsCmd(sysVcsCmd)
+
+ err = vcsCmd.RevisionSync(repoRootDir, vp.Revision)
+ // If revision was not found in the cache, download and try again.
+ if err != nil {
+ err = vcsCmd.Download(repoRootDir)
+ if err != nil {
+ rem = append(rem, remoteFailure{Msg: "failed to download repo", Path: vp.Path, Err: err})
+ continue
+ }
+ err = vcsCmd.RevisionSync(repoRootDir, vp.Revision)
+ if err != nil {
+ rem = append(rem, remoteFailure{Msg: "failed to sync repo to " + vp.Revision, Path: vp.Path, Err: err})
+ continue
+ }
+ }
+ }
+ dest := filepath.Join(ctx.RootDir, ctx.VendorFolder, pathos.SlashToFilepath(vp.Path))
+		// Path handling for single sub-packages and differing origins still needs proper handling.
+ src := pkgDir
+
+ // Scan go files for files that should be ignored based on tags and filenames.
+ ignoreFiles, _, err := ctx.getIgnoreFiles(src)
+ if err != nil {
+ rem = append(rem, remoteFailure{Msg: "failed to get ignore files", Path: vp.Path, Err: err})
+ continue
+ }
+
+ root, _ := pathos.TrimCommonSuffix(src, vp.Path)
+
+ // Need to ensure we copy files from "b.Root/<import-path>" for the following command.
+ err = ctx.CopyPackage(dest, src, root, vp.Path, ignoreFiles, vp.Tree, h, nil)
+ if err != nil {
+ fmt.Fprintf(ctx, "failed to copy package from %q to %q: %+v", src, dest, err)
+ }
+ checksum := h.Sum(nil)
+ h.Reset()
+ vp.ChecksumSHA1 = base64.StdEncoding.EncodeToString(checksum)
+ updatedVendorFile = true
+ }
+
+ // Only write a vendor file if something changes.
+ if updatedVendorFile {
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Return network errors here.
+ if len(rem) > 0 {
+ return rem
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/kardianos/govendor/context/syslist.go b/vendor/github.com/kardianos/govendor/context/syslist.go
new file mode 100644
index 000000000..98df58d16
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/syslist.go
@@ -0,0 +1,8 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+const goosList = "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows "
+const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64 "
diff --git a/vendor/github.com/kardianos/govendor/context/tags.go b/vendor/github.com/kardianos/govendor/context/tags.go
new file mode 100644
index 000000000..b7ff9f0c0
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/tags.go
@@ -0,0 +1,240 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Build tags come in the format "tagA tagB,tagC" -> "tagA OR (tagB AND tagC)".
+// File tags compose with this as "ftag1 AND ftag2 AND (<build-tags>)".
+// However in govendor all questions are reversed. Rather than asking
+// "What should be built?" we ask "What should be ignored?".
+
+type logical struct {
+ and bool
+ tag []logicalTag
+ sub []logical
+}
+type logicalTag struct {
+ not bool
+ tag string
+}
+
+func (lt logicalTag) match(lt2 logicalTag) bool {
+	// Two tags match only when neither is negated and the names are equal;
+	// given the early returns below, the remaining branches always yield false.
+ if lt.not || lt2.not {
+ return false
+ }
+ if lt.tag == lt2.tag {
+ return true
+ }
+
+ if lt.not == lt2.not {
+ return lt.tag == lt2.tag
+ }
+ return lt.tag != lt2.tag
+}
+
+func (lt logicalTag) conflict(lt2 logicalTag) bool {
+ return lt.tag == lt2.tag && lt.not != lt2.not
+}
+
+func (lt logicalTag) String() string {
+ if lt.not {
+ return "!" + lt.tag
+ }
+ return lt.tag
+}
+
+func newLogicalTag(tag string) logicalTag {
+ lt := logicalTag{}
+ lt.not = strings.HasPrefix(tag, "!")
+ lt.tag = strings.TrimPrefix(tag, "!")
+ return lt
+}
+
+func (l logical) empty() bool {
+ if len(l.tag) > 0 {
+ return false
+ }
+ for _, sub := range l.sub {
+ if !sub.empty() {
+ return false
+ }
+ }
+ return true
+}
+
+func (l logical) ignored(ignoreTags []logicalTag) bool {
+	// An AND logical is ignored if any of its conditions is ignored; an OR logical only if all of them are.
+ if len(ignoreTags) == 0 {
+ return false
+ }
+ if l.empty() {
+ return !l.and
+ }
+ if l.and {
+		// Ignored if any tag matches an ignore tag.
+ for _, t := range l.tag {
+ for _, it := range ignoreTags {
+ if t.match(it) {
+ return true
+ }
+ }
+ }
+
+		// Ignored if any sub-logical is ignored.
+ for _, sub := range l.sub {
+ if sub.ignored(ignoreTags) {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ hasOne := false
+ // OR'ing the pieces together.
+	// Every tag must match an ignore tag to be ignored.
+ for _, t := range l.tag {
+ hasOne = true
+ hasIgnoreTag := false
+ for _, it := range ignoreTags {
+ if t.match(it) {
+ hasIgnoreTag = true
+ break
+ }
+ }
+ if !hasIgnoreTag {
+ return false
+ }
+ }
+
+	// Every sub-logical must be ignored to be ignored.
+ for _, sub := range l.sub {
+ hasOne = true
+ if !sub.ignored(ignoreTags) {
+ return false
+ }
+ }
+ return hasOne
+}
+
+func (l logical) conflict(lt logicalTag) bool {
+ for _, t := range l.tag {
+ if t.conflict(lt) {
+ return true
+ }
+ }
+ for _, s := range l.sub {
+ if s.conflict(lt) {
+ return true
+ }
+ }
+ return false
+}
+
+func (l logical) String() string {
+ buf := bytes.Buffer{}
+ if l.and {
+ buf.WriteString(" AND (")
+ } else {
+ buf.WriteString(" OR (")
+ }
+
+ for index, tag := range l.tag {
+ if index != 0 {
+ buf.WriteString(" ")
+ }
+ buf.WriteString(tag.String())
+ }
+ for index, sub := range l.sub {
+ if index != 0 {
+ buf.WriteString(" ")
+ }
+ buf.WriteString(sub.String())
+ }
+
+ buf.WriteRune(')')
+ return buf.String()
+}
+
+type TagSet struct {
+ // ignore comes from a special build tag "ignore".
+ ignore bool
+
+ root logical
+}
+
+func (ts *TagSet) String() string {
+ if ts == nil {
+ return "(nil)"
+ }
+ if ts.ignore {
+ return "ignore"
+ }
+ return ts.root.String()
+}
+
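+// IgnoreItem reports whether the item should be ignored given the listed
+// ignore tags; for example, IgnoreItem("appengine") ignores a file
+// constrained by "// +build appengine".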
+func (ts *TagSet) IgnoreItem(ignoreList ...string) bool {
+ if ts == nil {
+ return false
+ }
+ if ts.ignore {
+ return true
+ }
+ for _, fileTag := range ts.root.tag {
+ for _, buildTag := range ts.root.sub {
+ if buildTag.conflict(fileTag) {
+ return true
+ }
+ }
+ }
+ ts.root.and = true
+ ignoreTags := make([]logicalTag, len(ignoreList))
+ for i := 0; i < len(ignoreList); i++ {
+ ignoreTags[i] = newLogicalTag(ignoreList[i])
+ }
+ return ts.root.ignored(ignoreTags)
+}
+
+func (ts *TagSet) AddFileTag(tag string) {
+ if ts == nil {
+ return
+ }
+ ts.root.and = true
+ ts.root.tag = append(ts.root.tag, newLogicalTag(tag))
+}
+func (ts *TagSet) AddBuildTags(tags string) {
+ if ts == nil {
+ return
+ }
+ ts.root.and = true
+ if len(ts.root.sub) == 0 {
+ ts.root.sub = append(ts.root.sub, logical{})
+ }
+ buildlogical := &ts.root.sub[0]
+ ss := strings.Fields(tags)
+ for _, s := range ss {
+ if s == "ignore" {
+ ts.ignore = true
+ continue
+ }
+ if !strings.ContainsRune(s, ',') {
+ buildlogical.tag = append(buildlogical.tag, newLogicalTag(s))
+ continue
+ }
+ sub := logical{and: true}
+ for _, and := range strings.Split(s, ",") {
+ sub.tag = append(sub.tag, newLogicalTag(and))
+ }
+ buildlogical.sub = append(buildlogical.sub, sub)
+
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/context/vendorFile.go b/vendor/github.com/kardianos/govendor/context/vendorFile.go
new file mode 100644
index 000000000..7f5ac82fc
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/vendorFile.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "bytes"
+ ros "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/dchest/safefile"
+ "github.com/kardianos/govendor/vendorfile"
+
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+// WriteVendorFile writes the current vendor file to the context location.
+func (ctx *Context) WriteVendorFile() (err error) {
+ perm := ros.FileMode(0666)
+ fi, err := os.Stat(ctx.VendorFilePath)
+ if err == nil {
+ perm = fi.Mode()
+ }
+
+ ctx.VendorFile.RootPath = ctx.RootImportPath
+
+ buf := &bytes.Buffer{}
+ err = ctx.VendorFile.Marshal(buf)
+ if err != nil {
+ return
+ }
+ err = buf.WriteByte('\n')
+ if err != nil {
+ return
+ }
+ dir, _ := filepath.Split(ctx.VendorFilePath)
+ err = os.MkdirAll(dir, 0777)
+ if err != nil {
+ return
+ }
+
+ for i := range ctx.VendorFile.Package {
+ vp := ctx.VendorFile.Package[i]
+ vp.Add = false
+ }
+
+ err = safefile.WriteFile(ctx.VendorFilePath, buf.Bytes(), perm)
+ if err == nil {
+ for _, vp := range ctx.VendorFile.Package {
+ vp.Add = false
+ }
+ }
+
+ return
+}
+
+func readVendorFile(vendorRoot, vendorFilePath string) (*vendorfile.File, error) {
+ vf := &vendorfile.File{}
+ f, err := os.Open(vendorFilePath)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ err = vf.Unmarshal(f)
+ if err != nil {
+ return nil, err
+ }
+	// Remove any existing origin field whose prefix matches the context
+	// package root. This repairs vendor files affected by a bug that has
+	// since been fixed.
+ for _, row := range vf.Package {
+ row.Origin = strings.TrimPrefix(row.Origin, vendorRoot)
+ }
+
+ return vf, nil
+}
diff --git a/vendor/github.com/kardianos/govendor/context/version.go b/vendor/github.com/kardianos/govendor/context/version.go
new file mode 100644
index 000000000..7b797b3ad
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/context/version.go
@@ -0,0 +1,47 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "strconv"
+ "unicode"
+)
+
+// isVersion returns true if the string looks like a version rather than a revision.
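+// Examples: "v1.2.3" and "master" are versions, while "a1b2c3d4e5f6" (a
+// long hex string) and "123456" (a large number) are revisions.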
+func isVersion(s string) bool {
+ hasPunct := false
+ onlyNumber := true
+ onlyHexLetter := true
+ for _, r := range s {
+ isNumber := unicode.IsNumber(r)
+ isLetter := unicode.IsLetter(r)
+
+ hasPunct = hasPunct || unicode.IsPunct(r)
+ onlyNumber = onlyNumber && isNumber
+
+ if isLetter {
+ low := unicode.ToLower(r)
+ onlyHexLetter = onlyHexLetter && low <= 'f'
+ }
+ }
+ if hasPunct {
+ return true
+ }
+ if !onlyHexLetter {
+ return true
+ }
+
+ num, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ if num > 100 {
+ return false // numeric revision.
+ }
+ }
+
+ if len(s) > 5 && onlyHexLetter {
+ return false // hex revision
+ }
+ return true
+}
diff --git a/vendor/github.com/kardianos/govendor/help/gen-license.template b/vendor/github.com/kardianos/govendor/help/gen-license.template
new file mode 100644
index 000000000..462ec9fc2
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/help/gen-license.template
@@ -0,0 +1,8 @@
+// Machine generated; DO NOT EDIT.
+
+package help
+
+var msgGovendorLicenses = `{{range $index, $t := .}}{{if ne $index 0}}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+{{end}}{{.Filename}} - {{.Path}}
+{{.Text}}{{end}}
+`
diff --git a/vendor/github.com/kardianos/govendor/help/licenses.go b/vendor/github.com/kardianos/govendor/help/licenses.go
new file mode 100644
index 000000000..9216366fa
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/help/licenses.go
@@ -0,0 +1,395 @@
+// Machine generated; DO NOT EDIT.
+
+package help
+
+var msgGovendorLicenses = `LICENSE - go
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+PATENTS - go
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - github.com/kardianos/govendor
+Copyright (c) 2015 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - github.com/kardianos/govendor/vendor/github.com/Bowery/prompt
+The MIT License (MIT)
+
+Copyright (c) 2013-2015 Bowery, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - github.com/kardianos/govendor/vendor/github.com/dchest/safefile
+Copyright (c) 2013 Dmitry Chestnykh <dmitry@codingrobots.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+COPYING - github.com/kardianos/govendor/vendor/github.com/google/shlex
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+LICENSE - github.com/kardianos/govendor/vendor/golang.org/x/tools
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+PATENTS - github.com/kardianos/govendor/vendor/golang.org/x/tools
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
+
+`
diff --git a/vendor/github.com/kardianos/govendor/help/msg.go b/vendor/github.com/kardianos/govendor/help/msg.go
new file mode 100644
index 000000000..2146e1dfc
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/help/msg.go
@@ -0,0 +1,62 @@
+package help
+
+type HelpMessage byte
+
+const (
+ MsgNone HelpMessage = iota
+ MsgFull
+ MsgInit
+ MsgList
+ MsgAdd
+ MsgUpdate
+ MsgRemove
+ MsgFetch
+ MsgStatus
+ MsgSync
+ MsgMigrate
+ MsgGet
+ MsgLicense
+ MsgShell
+ MsgGovendorLicense
+ MsgGovendorVersion
+)
+
+func (msg HelpMessage) String() string {
+ msgText := ""
+ switch msg {
+ default:
+ panic("Unknown message type")
+ case MsgNone:
+ case MsgFull:
+ msgText = helpFull
+ case MsgInit:
+ msgText = helpInit
+ case MsgList:
+ msgText = helpList
+ case MsgAdd:
+ msgText = helpAdd
+ case MsgUpdate:
+ msgText = helpUpdate
+ case MsgRemove:
+ msgText = helpRemove
+ case MsgFetch:
+ msgText = helpFetch
+ case MsgStatus:
+ msgText = helpStatus
+ case MsgSync:
+ msgText = helpSync
+ case MsgMigrate:
+ msgText = helpMigrate
+ case MsgGet:
+ msgText = helpGet
+ case MsgLicense:
+ msgText = helpLicense
+ case MsgShell:
+ msgText = helpShell
+ case MsgGovendorLicense:
+ msgText = msgGovendorLicenses
+ case MsgGovendorVersion:
+ msgText = msgGovendorVersion
+ }
+ return msgText
+}
diff --git a/vendor/github.com/kardianos/govendor/help/text.go b/vendor/github.com/kardianos/govendor/help/text.go
new file mode 100644
index 000000000..45431b5ad
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/help/text.go
@@ -0,0 +1,173 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package help
+
+import (
+ "strings"
+
+ "github.com/kardianos/govendor/migrate"
+)
+
+//go:generate govendor license -o licenses.go -template gen-license.template
+
+var helpFull = `govendor (` + version + `): record dependencies and copy into vendor folder
+ -govendor-licenses Show govendor's licenses.
+ -version Show govendor version
+ -cpuprofile 'file' Writes a CPU profile to 'file' for debugging.
+ -memprofile 'file' Writes a heap profile to 'file' for debugging.
+
+Sub-Commands
+
+ init Create the "vendor" folder and the "vendor.json" file.
+ list List and filter existing dependencies and packages.
+ add Add packages from $GOPATH.
+ update Update packages from $GOPATH.
+ remove Remove packages from the vendor folder.
+ status List any packages missing, out-of-date, or modified locally.
+ fetch Add new or update vendor folder packages from remote repository.
+ sync Pull packages into vendor folder from remote repository with revisions
+ from vendor.json file.
+ migrate Move packages from a legacy tool to the vendor folder with metadata.
+ get Like "go get" but copies dependencies into a "vendor" folder.
+ license List discovered licenses for the given status or import paths.
+ shell Run a "shell" to make multiple sub-commands more efficient for large
+ projects.
+
+ go tool commands that are wrapped:
+ "+status" package selection may be used with them
+ fmt, build, install, clean, test, vet, generate, tool
+
+Status Types
+
+ +local (l) packages in your project
+ +external (e) referenced packages in GOPATH but not in current project
+ +vendor (v) packages in the vendor folder
+ +std (s) packages in the standard library
+
+ +excluded (x) external packages explicitly excluded from vendoring
+ +unused (u) packages in the vendor folder, but unused
+ +missing (m) referenced packages but not found
+
+ +program (p) package is a main package
+
+ +outside +external +missing
+ +all +all packages
+
+ Statuses can be referenced by their initial letters.
+
+Package specifier
+ <path>[::<origin>][{/...|/^}][@[<version-spec>]]
+
+Ignoring files with build tags, or excluding packages from being vendored:
+ The "vendor.json" file contains a string field named "ignore".
+ It may contain a space separated list of build tags to ignore when
+ listing and copying files.
+ This list may also contain package prefixes (containing a "/", possibly
+ as the last character) to exclude when copying files in the vendor folder.
+ If "foo/" appears in this field, then package "foo" and all its sub-packages
+ ("foo/bar", …) will be excluded (but package "bar/foo" will not).
+ By default the init command adds the "test" tag to the ignore list.
+
+If using go1.5, ensure GO15VENDOREXPERIMENT=1 is set.
+
+`
+
+var helpInit = `govendor init
+ Create a vendor folder in the working directory and a vendor/vendor.json
+ metadata file.
+`
+
+var helpList = `govendor list [options] ( +status or import-path-filter )
+ List all dependencies and packages in folder tree.
+ Options:
+ -v verbose listing, show dependencies of each package
+ -p show file path to package instead of import path
+ -no-status do not prefix status to list, package names only
+Examples:
+ $ govendor list -no-status +local
+ $ govendor list -p -no-status +local
+ $ govendor list +vend,prog +local,program
+ $ govendor list +local,^prog
+`
+
+var helpAdd = `govendor add [options] ( +status or import-path-filter )
+ Copy one or more packages into the vendor folder from GOPATH.
+ Options:
+ -n dry run and print actions that would be taken
+ -tree copy package(s) and all sub-folders under each package
+ -uncommitted allow copying a package with uncommitted changes; doesn't
+ update the revision or checksum, so it will always be out-of-date.
+
+ The following may be replaced with something else in the future.
+ -short if conflict, take short path
+ -long if conflict, take long path
+`
+
+var helpUpdate = `govendor update [options] ( +status or import-path-filter )
+ Update one or more packages in the vendor folder from GOPATH.
+ Options:
+ -n dry run and print actions that would be taken
+ -tree copy package(s) and all sub-folders under each package
+ -uncommitted allow copying a package with uncommitted changes; doesn't
+ update the revision or checksum, so it will always be out-of-date.
+
+ The following may be replaced with something else in the future.
+ -short if conflict, take short path
+ -long if conflict, take long path
+`
+
+var helpRemove = `govendor remove [options] ( +status or import-path-filter )
+ Remove one or more packages from the vendor folder.
+ Options:
+ -n dry run and print actions that would be taken
+`
+
+var helpFetch = `govendor fetch [options] ( +status or package-spec )
+ Fetch packages directly into the vendor folder.
+ package-spec = <path>[::<origin>][{/...|/^}][@[<version-spec>]]
+ Options:
+ -tree copy package(s) and all sub-folders under each package
+ -insecure allow downloading over insecure connection
+ -v verbose mode
+`
+
+var helpSync = `govendor sync
+ Ensure the contents of the vendor folder match the vendor file.
+ Options:
+ -n dry run, print out action only
+ -insecure allow downloading over insecure connection
+ -v verbose output
+`
+
+var helpStatus = `govendor status
+ Show any packages that are missing, out-of-date, or modified locally
+ (according to the checksum) and should be synced.
+`
+
+var helpMigrate = `govendor migrate [` + strings.Join(migrate.SystemList(), ", ") + `]
+ Change from another vendoring schema to the vendor folder. Defaults to auto-detect.
+`
+
+var helpGet = `govendor get [options] (import-path)...
+ Download package into GOPATH, put all dependencies into vendor folder.
+ Options:
+ -insecure allow downloading over insecure connection
+ -v verbose mode
+`
+
+var helpLicense = `govendor license [options] ( +status or package-spec )
+ Attempt to find and list licenses for the specified packages.
+ Options:
+ -o output to file name
+ -template template file to use, input is "[]context.License"
+`
+var helpShell = `govendor shell
+ Open a govendor "shell". Useful for faster queries on large projects.
+ Options:
+ -pprof-handler expose a pprof HTTP server on the given address
+`
+
+var msgGovendorVersion = version + `
+`
diff --git a/vendor/github.com/kardianos/govendor/help/version.go b/vendor/github.com/kardianos/govendor/help/version.go
new file mode 100644
index 000000000..ebfae1a0c
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/help/version.go
@@ -0,0 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package help
+
+var version = "v1.0.9"
diff --git a/vendor/github.com/kardianos/govendor/internal/pathos/path.go b/vendor/github.com/kardianos/govendor/internal/pathos/path.go
new file mode 100644
index 000000000..6d82b7a7e
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/pathos/path.go
@@ -0,0 +1,158 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pathos
+
+import (
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+func SlashToFilepath(path string) string {
+ if filepath.Separator == '/' {
+ return path
+ }
+ return strings.Replace(path, "/", string(filepath.Separator), -1)
+}
+
+func SlashToImportPath(path string) string {
+ return strings.Replace(path, `\`, "/", -1)
+}
+
+func FileHasPrefix(s, prefix string) bool {
+ if len(prefix) > len(s) {
+ return false
+ }
+ return caseInsensitiveEq(s[:len(prefix)], prefix)
+}
+
+func FileTrimPrefix(s, prefix string) string {
+ if FileHasPrefix(s, prefix) {
+ return s[len(prefix):]
+ } else if FileStringEquals(s, prefix) {
+ return ""
+ }
+ return s
+}
+
+func FileHasSuffix(s, suffix string) bool {
+ if len(suffix) > len(s) {
+ return false
+ }
+ return caseInsensitiveEq(s[len(s)-len(suffix):], suffix)
+}
+
+func FileTrimSuffix(s, suffix string) string {
+ if FileHasSuffix(s, suffix) {
+ return s[:len(s)-len(suffix)]
+ } else if FileStringEquals(s, suffix) {
+ return ""
+ }
+ return s
+}
+
+var slashSep = filepath.Separator
+
+func TrimCommonSuffix(base, suffix string) (string, string) {
+ a, b := base, suffix
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ a = strings.ToLower(a)
+ b = strings.ToLower(b)
+ }
+ a = strings.TrimSuffix(strings.TrimSuffix(a, "\\"), "/")
+ b = strings.TrimSuffix(strings.TrimSuffix(b, "\\"), "/")
+ base = strings.TrimSuffix(strings.TrimSuffix(base, "\\"), "/")
+
+ ff := func(r rune) bool {
+ return r == '/' || r == '\\'
+ }
+ aa := strings.FieldsFunc(a, ff)
+ bb := strings.FieldsFunc(b, ff)
+
+ min := len(aa)
+ if min > len(bb) {
+ min = len(bb)
+ }
+ i := 1
+ for ; i <= min; i++ {
+ // fmt.Printf("(%d) end aa: %q, end bb: %q\n", i, aa[len(aa)-i], bb[len(bb)-i])
+ if aa[len(aa)-i] == bb[len(bb)-i] {
+ continue
+ }
+ break
+ }
+ baseParts := strings.FieldsFunc(base, ff)
+ // fmt.Printf("base parts: %q\n", baseParts)
+ base1 := FileTrimSuffix(base, strings.Join(baseParts[len(baseParts)-i+1:], string(slashSep)))
+ base1 = strings.TrimSuffix(strings.TrimSuffix(base1, "\\"), "/")
+ base2 := strings.Trim(base[len(base1):], `\/`)
+ return base1, base2
+}
+
+func FileStringEquals(s1, s2 string) bool {
+ if len(s1) == 0 {
+ return len(s2) == 0
+ }
+ if len(s2) == 0 {
+ return len(s1) == 0
+ }
+ r1End := s1[len(s1)-1]
+ r2End := s2[len(s2)-1]
+ if r1End == '/' || r1End == '\\' {
+ s1 = s1[:len(s1)-1]
+ }
+ if r2End == '/' || r2End == '\\' {
+ s2 = s2[:len(s2)-1]
+ }
+ return caseInsensitiveEq(s1, s2)
+}
+
+func caseInsensitiveEq(s1, s2 string) bool {
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ return strings.EqualFold(s1, s2)
+ }
+ return s1 == s2
+}
+
+// ParseGoEnvLine parses a "go env" line into a key value pair.
+func ParseGoEnvLine(line string) (key, value string, ok bool) {
+ // Remove any leading "set " found on windows.
+ // Match the name to the env var + "=".
+ // Remove any quotes.
+ // Return result.
+ line = strings.TrimPrefix(line, "set ")
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) < 2 {
+ return "", "", false
+ }
+
+ un, err := strconv.Unquote(parts[1])
+ if err != nil {
+ return parts[0], parts[1], true
+ }
+ return parts[0], un, true
+}
+
+// GoEnv parses a "go env" line and checks for a specific
+// variable name.
+func GoEnv(name, line string) (value string, ok bool) {
+ // Remove any leading "set " found on windows.
+ // Match the name to the env var + "=".
+ // Remove any quotes.
+ // Return result.
+ line = strings.TrimPrefix(line, "set ")
+ if len(line) < len(name)+1 {
+ return "", false
+ }
+ if name != line[:len(name)] || line[len(name)] != '=' {
+ return "", false
+ }
+ line = line[len(name)+1:]
+ if un, err := strconv.Unquote(line); err == nil {
+ line = un
+ }
+ return line, true
+}
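
The two "go env" helpers above are easier to follow with concrete input. Below is a minimal sketch of how they behave on Unix- and Windows-style output; the main package and sample values are illustrative, not part of the vendored code:

	package main

	import (
		"fmt"

		"github.com/kardianos/govendor/internal/pathos"
	)

	func main() {
		// Unix-style "go env" output: the quoted value is unquoted.
		k, v, ok := pathos.ParseGoEnvLine(`GOPATH="/home/user/go"`)
		fmt.Println(k, v, ok) // GOPATH /home/user/go true

		// Windows-style output: the leading "set " is stripped, and the
		// raw value is kept because strconv.Unquote fails on it.
		k, v, ok = pathos.ParseGoEnvLine(`set GOPATH=C:\gopath`)
		fmt.Println(k, v, ok) // GOPATH C:\gopath true

		// GoEnv extracts only the named variable.
		if v, ok := pathos.GoEnv("GOPATH", `GOPATH="/home/user/go"`); ok {
			fmt.Println(v) // /home/user/go
		}
	}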
diff --git a/vendor/github.com/kardianos/govendor/internal/vfilepath/prefix.go b/vendor/github.com/kardianos/govendor/internal/vfilepath/prefix.go
new file mode 100644
index 000000000..c736fe8d3
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/vfilepath/prefix.go
@@ -0,0 +1,14 @@
+package vfilepath
+
+import "strings"
+
+func HasPrefixDir(path string, prefix string) bool {
+ return strings.HasPrefix(makeDirPath(path), makeDirPath(prefix))
+}
+
+func makeDirPath(path string) string {
+ if path != "/" {
+ path += "/"
+ }
+ return path
+}
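
The trailing slash appended by makeDirPath is what makes HasPrefixDir match whole path segments rather than raw string prefixes. A small sketch of the assumed usage (not part of the diff):

	package main

	import (
		"fmt"

		"github.com/kardianos/govendor/internal/vfilepath"
	)

	func main() {
		fmt.Println(vfilepath.HasPrefixDir("foo/bar", "foo")) // true: "foo/bar/" begins with "foo/"
		fmt.Println(vfilepath.HasPrefixDir("foobar", "foo"))  // false: "foobar/" does not begin with "foo/"
	}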
diff --git a/vendor/github.com/kardianos/govendor/internal/vfilepath/stub.go b/vendor/github.com/kardianos/govendor/internal/vfilepath/stub.go
new file mode 100644
index 000000000..b2186f2aa
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/vfilepath/stub.go
@@ -0,0 +1,17 @@
+package vfilepath
+
+import (
+ "path/filepath"
+)
+
+func Split(path string) (string, string) {
+ return filepath.Split(path)
+}
+
+func Join(parts ...string) string {
+ return filepath.Join(parts...)
+}
+
+func EvalSymlinks(path string) (string, error) {
+ return filepath.EvalSymlinks(path)
+}
diff --git a/vendor/github.com/kardianos/govendor/internal/vfilepath/switch.go b/vendor/github.com/kardianos/govendor/internal/vfilepath/switch.go
new file mode 100644
index 000000000..3ad613efd
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/vfilepath/switch.go
@@ -0,0 +1 @@
+package vfilepath
diff --git a/vendor/github.com/kardianos/govendor/internal/vfilepath/walk.go b/vendor/github.com/kardianos/govendor/internal/vfilepath/walk.go
new file mode 100644
index 000000000..d13d216b6
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/vfilepath/walk.go
@@ -0,0 +1,83 @@
+package vfilepath
+
+import (
+ "path/filepath"
+ "sort"
+
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+// SkipDir is used as a return value from WalkFuncs to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var SkipDir = filepath.SkipDir
+
+type WalkFunc func(path string, info os.FileInfo, err error) error
+
+// walk recursively descends path, calling w.
+func walk(path string, info os.FileInfo, walkFn WalkFunc) error {
+ err := walkFn(path, info, nil)
+ if err != nil {
+ if info.IsDir() && err == SkipDir {
+ return nil
+ }
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := os.Lstat(filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != SkipDir {
+ return err
+ }
+ } else {
+ err = walk(filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+func readDirNames(dirname string) ([]string, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+func Walk(root string, walkFn WalkFunc) error {
+ info, err := os.Lstat(root)
+ if err != nil {
+ return walkFn(root, nil, err)
+ }
+ return walk(root, info, walkFn)
+}
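
This Walk mirrors the standard library's filepath.Walk but routes all file access through the vos wrappers, so callers use it the same way. A rough sketch, assuming a working directory to scan:

	package main

	import (
		"fmt"
		"strings"

		"github.com/kardianos/govendor/internal/vfilepath"
		os "github.com/kardianos/govendor/internal/vos"
	)

	func main() {
		count := 0
		err := vfilepath.Walk(".", func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err // info may be nil on a stat error, so bail out first
			}
			// Prune vendor trees with the re-exported SkipDir sentinel.
			if info.IsDir() && info.Name() == "vendor" {
				return vfilepath.SkipDir
			}
			if strings.HasSuffix(path, ".go") {
				count++
			}
			return nil
		})
		fmt.Println(count, err)
	}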
diff --git a/vendor/github.com/kardianos/govendor/internal/vos/stub.go b/vendor/github.com/kardianos/govendor/internal/vos/stub.go
new file mode 100644
index 000000000..e8d673a20
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/vos/stub.go
@@ -0,0 +1,61 @@
+package vos
+
+import (
+ "os"
+ "time"
+)
+
+type FileInfo os.FileInfo
+
+func Stat(name string) (FileInfo, error) {
+ l("stat", name)
+ fi, err := os.Stat(name)
+ return FileInfo(fi), err
+}
+func Lstat(name string) (FileInfo, error) {
+ l("lstat", name)
+ fi, err := os.Lstat(name)
+ return FileInfo(fi), err
+}
+func IsNotExist(err error) bool {
+ return os.IsNotExist(err)
+}
+
+func Getwd() (string, error) {
+ return os.Getwd()
+}
+
+func Getenv(key string) string {
+ return os.Getenv(key)
+}
+
+func Open(name string) (*os.File, error) {
+ l("open", name)
+ return os.Open(name)
+}
+
+func MkdirAll(path string, perm os.FileMode) error {
+ l("mkdirall", path)
+ return os.MkdirAll(path, perm)
+}
+
+func Remove(name string) error {
+ l("remove", name)
+ return os.Remove(name)
+}
+func RemoveAll(name string) error {
+ l("removeall", name)
+ return os.RemoveAll(name)
+}
+func Create(name string) (*os.File, error) {
+ l("create", name)
+ return os.Create(name)
+}
+func Chmod(name string, mode os.FileMode) error {
+ l("chmod", name)
+ return os.Chmod(name, mode)
+}
+func Chtimes(name string, atime, mtime time.Time) error {
+ l("chtimes", name)
+ return os.Chtimes(name, atime, mtime)
+}
diff --git a/vendor/github.com/kardianos/govendor/internal/vos/switch.go b/vendor/github.com/kardianos/govendor/internal/vos/switch.go
new file mode 100644
index 000000000..5120996be
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/internal/vos/switch.go
@@ -0,0 +1,13 @@
+package vos
+
+import (
+ "log"
+)
+
+const debugLog = false
+
+func l(fname, path string) {
+ if debugLog {
+ log.Println(fname, path)
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/main.go b/vendor/github.com/kardianos/govendor/main.go
new file mode 100644
index 000000000..6b25144f5
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/main.go
@@ -0,0 +1,55 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// vendor tool to copy external source code from GOPATH or remote location to the
+// local vendor folder. See README.md for usage.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/kardianos/govendor/cliprompt"
+ "github.com/kardianos/govendor/help"
+ "github.com/kardianos/govendor/run"
+)
+
+func main() {
+ prompt := &cliprompt.Prompt{}
+
+ allArgs := os.Args
+
+ if allArgs[len(allArgs)-1] == "-" {
+ stdin := &bytes.Buffer{}
+ if _, err := io.Copy(stdin, os.Stdin); err == nil {
+ stdinArgs := strings.Fields(stdin.String())
+ allArgs = append(allArgs[:len(allArgs)-1], stdinArgs...)
+ }
+ }
+
+ msg, err := run.Run(os.Stdout, allArgs, prompt)
+ if err == flag.ErrHelp {
+ err = nil
+ }
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error: %+v\n", err)
+ }
+ msgText := msg.String()
+ if len(msgText) > 0 {
+ fmt.Fprint(os.Stderr, msgText)
+ }
+ if err != nil {
+ os.Exit(2)
+ }
+ switch msg {
+ case help.MsgNone, help.MsgGovendorVersion, help.MsgGovendorLicense:
+ os.Exit(0)
+ default:
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/kardianos/govendor/migrate/gb.go b/vendor/github.com/kardianos/govendor/migrate/gb.go
new file mode 100644
index 000000000..b90542ba1
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/gb.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migrate
+
+import (
+ "errors"
+ "path/filepath"
+)
+
+func init() {
+ register("gb", sysGb{})
+}
+
+type sysGb struct{}
+
+func (sys sysGb) Check(root string) (system, error) {
+ if hasDirs(root, "src", filepath.Join("vendor", "src")) {
+ return sys, nil
+ }
+ return nil, nil
+}
+func (sysGb) Migrate(root string) error {
+ // Move files from "src" to first GOPATH.
+ // Move vendor files from "vendor/src" to "vendor".
+ // Translate "vendor/manifest" to vendor.json file.
+ return errors.New("Migrate gb not implemented")
+}
diff --git a/vendor/github.com/kardianos/govendor/migrate/gdm.go b/vendor/github.com/kardianos/govendor/migrate/gdm.go
new file mode 100644
index 000000000..84b0d01af
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/gdm.go
@@ -0,0 +1,78 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migrate
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/vendorfile"
+)
+
+func init() {
+ register("gdm", sysGdm{})
+}
+
+type sysGdm struct{}
+
+func (sys sysGdm) Check(root string) (system, error) {
+ if hasFiles(root, "Godeps") {
+ return sys, nil
+ }
+ return nil, nil
+}
+
+func (sys sysGdm) Migrate(root string) error {
+ gdmFilePath := filepath.Join(root, "Godeps")
+
+ ctx, err := context.NewContext(root, filepath.Join("vendor", "vendor.json"), "vendor", false)
+ if err != nil {
+ return err
+ }
+ ctx.VendorFile.Ignore = "test"
+
+ f, err := os.Open(gdmFilePath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ pkgs, err := sys.parseGdmFile(f)
+ if err != nil {
+ return err
+ }
+ ctx.VendorFile.Package = pkgs
+
+ if err := ctx.WriteVendorFile(); err != nil {
+ return err
+ }
+
+ return os.RemoveAll(gdmFilePath)
+}
+
+func (sysGdm) parseGdmFile(r io.Reader) ([]*vendorfile.Package, error) {
+ var pkgs []*vendorfile.Package
+ for {
+ var path, rev string
+ if _, err := fmt.Fscanf(r, "%s %s\n", &path, &rev); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, err
+ }
+
+ pkgs = append(pkgs, &vendorfile.Package{
+ Add: true,
+ Path: path,
+ Revision: rev,
+ Tree: true,
+ })
+ }
+
+ return pkgs, nil
+}
diff --git a/vendor/github.com/kardianos/govendor/migrate/glide.go b/vendor/github.com/kardianos/govendor/migrate/glide.go
new file mode 100644
index 000000000..6381e5ac6
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/glide.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migrate
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/vendorfile"
+ "gopkg.in/yaml.v2"
+)
+
+func init() {
+ register("glide", sysGlide{})
+}
+
+type sysGlide struct{}
+
+func (sys sysGlide) Check(root string) (system, error) {
+ // Glide has two config files: glide.yaml and glide.lock. The
+ // first file is for manual configuration. The second file is
+ // autogenerated from the first one. The migration procedure uses
+ // the autogenerated glide.lock because it already contains the
+ // recursive dependencies that glide resolves from glide.yaml.
+ if hasFiles(root, "glide.lock") {
+ return sys, nil
+ }
+ return nil, nil
+}
+
+func (sys sysGlide) Migrate(root string) error {
+ // Create a new empty config.
+ ctx, err := context.NewContext(root, filepath.Join("vendor", "vendor.json"), "vendor", false)
+ if err != nil {
+ return err
+ }
+ ctx.VendorDiscoverFolder = "vendor"
+ ctx.VendorFile.Ignore = "test"
+
+ // Read and parse glide's lock file.
+ rawConfigData, err := ioutil.ReadFile(filepath.Join(root, "glide.lock"))
+ if err != nil {
+ return err
+ }
+ type (
+ imports struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Repo string `json:"repo,omitempty"`
+ Subpackages []string `json:"subpackages,omitempty"`
+ }
+ glideLock struct {
+ Imports []imports `json:"imports"`
+ }
+ )
+ parsedConfig := glideLock{}
+ err = yaml.Unmarshal(rawConfigData, &parsedConfig)
+ if err != nil {
+ return err
+ }
+
+ // Build a new config.
+ for _, i := range parsedConfig.Imports {
+ pkg := vendorfile.Package{
+ Add: true,
+ Path: i.Name,
+ Revision: i.Version,
+ }
+ if len(i.Subpackages) > 0 {
+ for _, p := range i.Subpackages {
+ subpkg := vendorfile.Package{
+ Add: true,
+ Path: path.Join(i.Name, p),
+ Revision: i.Version,
+ }
+ if i.Repo != "" {
+ subpkg.Origin = path.Join(i.Repo, p)
+ }
+ ctx.VendorFile.Package = append(ctx.VendorFile.Package, &subpkg)
+ }
+ }
+ if i.Repo != "" {
+ pkg.Origin = i.Repo
+ }
+ ctx.VendorFile.Package = append(ctx.VendorFile.Package, &pkg)
+ }
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return err
+ }
+
+ // Cleanup.
+ os.RemoveAll(filepath.Join(root, "glide.yaml"))
+ return os.RemoveAll(filepath.Join(root, "glide.lock"))
+}
diff --git a/vendor/github.com/kardianos/govendor/migrate/glock.go b/vendor/github.com/kardianos/govendor/migrate/glock.go
new file mode 100644
index 000000000..14766ce4a
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/glock.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migrate
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/pkgspec"
+)
+
+func init() {
+ register("glock", sysGlock{})
+}
+
+type sysGlock struct{}
+
+func (sys sysGlock) Check(root string) (system, error) {
+ if hasFiles(root, "GLOCKFILE") {
+ return sys, nil
+ }
+ return nil, nil
+}
+func (sysGlock) Migrate(root string) error {
+ err := os.MkdirAll(filepath.Join(root, "vendor"), 0777)
+ if err != nil {
+ return err
+ }
+ filebytes, err := ioutil.ReadFile(filepath.Join(root, "GLOCKFILE"))
+ if err != nil {
+ return err
+ }
+ lines := strings.Split(string(filebytes), "\n")
+ for i, l := range lines {
+ lines[i] = strings.TrimSpace(l)
+ }
+
+ /*
+ vf := &vendorfile.File{}
+ vf.Package = make([]*vendorfile.Package, 0, len(lines))
+ */
+ ctx, err := context.NewContext(root, filepath.Join("vendor", "vendor.json"), "vendor", false)
+ if err != nil {
+ return err
+ }
+
+ const cmdPrefix = "cmd "
+
+ for _, l := range lines {
+ if len(l) == 0 {
+ continue
+ }
+ isCmd := strings.HasPrefix(l, cmdPrefix)
+ if isCmd {
+ continue
+ }
+ field := strings.Fields(l)
+ if len(field) < 2 {
+ continue
+ }
+ ps, err := pkgspec.Parse("", field[0]+"@"+field[1])
+ if err != nil {
+ return err
+ }
+ ps.IncludeTree = true
+ err = ctx.ModifyImport(ps, context.Fetch)
+ if err != nil {
+ return err
+ }
+ }
+ for _, l := range lines {
+ if len(l) == 0 {
+ continue
+ }
+ isCmd := strings.HasPrefix(l, cmdPrefix)
+ if !isCmd {
+ continue
+ }
+ path := strings.TrimPrefix(l, cmdPrefix)
+ ps, err := pkgspec.Parse("", path)
+ if err != nil {
+ return err
+ }
+ err = ctx.ModifyImport(ps, context.Fetch)
+ if err != nil {
+ return err
+ }
+ }
+ err = ctx.WriteVendorFile()
+ os.Remove(filepath.Join(root, "GLOCKFILE"))
+ return err
+}
diff --git a/vendor/github.com/kardianos/govendor/migrate/godep.go b/vendor/github.com/kardianos/govendor/migrate/godep.go
new file mode 100644
index 000000000..a4f7ae376
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/godep.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migrate
+
+import (
+ "encoding/json"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/vendorfile"
+)
+
+func init() {
+ register("godep", sysGodep{})
+}
+
+type sysGodep struct{}
+
+func (sys sysGodep) Check(root string) (system, error) {
+ if hasDirs(root, "Godeps") {
+ return sys, nil
+ }
+ return nil, nil
+}
+func (sysGodep) Migrate(root string) error {
+ // Determine if import paths are rewritten.
+ // Un-rewrite import paths.
+ // Copy files from Godeps/_workspace/src to "vendor".
+ // Translate Godeps/Godeps.json to vendor.json.
+
+ vendorFilePath := filepath.Join("Godeps", "_workspace", "src")
+ vendorPath := path.Join("Godeps", "_workspace", "src")
+ godepFilePath := filepath.Join(root, "Godeps", "Godeps.json")
+
+ ctx, err := context.NewContext(root, "vendor.json", vendorFilePath, true)
+ if err != nil {
+ return err
+ }
+ ctx.VendorDiscoverFolder = vendorPath
+
+ list, err := ctx.Status()
+ if err != nil {
+ return err
+ }
+
+ remove := make([]string, 0, len(list))
+ for _, item := range list {
+ if item.Status.Location != context.LocationVendor {
+ continue
+ }
+ pkg := ctx.Package[item.Local]
+ ctx.Operation = append(ctx.Operation, &context.Operation{
+ Pkg: pkg,
+ Src: pkg.Dir,
+ Dest: filepath.Join(ctx.RootDir, "vendor", filepath.ToSlash(item.Pkg.Path)),
+ })
+ remove = append(remove, filepath.Join(ctx.RootGopath, filepath.ToSlash(item.Local)))
+ ctx.RewriteRule[item.Local] = item.Pkg.Path
+ }
+ ctx.VendorFilePath = filepath.Join(ctx.RootDir, "vendor", "vendor.json")
+
+ ctx.VendorDiscoverFolder = "vendor"
+ ctx.VendorFile.Ignore = "test"
+
+ // Translate, then remove, the Godeps.json file.
+ type Godeps struct {
+ ImportPath string
+ GoVersion string // Abridged output of 'go version'.
+ Packages []string // Arguments to godep save, if any.
+ Deps []struct {
+ ImportPath string
+ Comment string // Description of commit, if present.
+ Rev string // VCS-specific commit ID.
+ }
+ }
+
+ godeps := Godeps{}
+ f, err := os.Open(godepFilePath)
+ if err != nil {
+ return err
+ }
+ coder := json.NewDecoder(f)
+ err = coder.Decode(&godeps)
+ f.Close()
+ if err != nil {
+ return err
+ }
+
+ for _, d := range godeps.Deps {
+ for _, pkg := range ctx.Package {
+ if !strings.HasPrefix(pkg.Path, d.ImportPath) {
+ continue
+ }
+ vf := ctx.VendorFilePackagePath(pkg.Path)
+ if vf == nil {
+ ctx.VendorFile.Package = append(ctx.VendorFile.Package, &vendorfile.Package{
+ Add: true,
+ Path: pkg.Path,
+ Comment: d.Comment,
+ Revision: d.Rev,
+ })
+ }
+ }
+ }
+
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return err
+ }
+ err = ctx.Alter()
+ if err != nil {
+ return err
+ }
+
+ // Remove existing.
+ for _, r := range remove {
+ err = context.RemovePackage(r, "", false)
+ if err != nil {
+ return err
+ }
+ }
+
+ return os.RemoveAll(filepath.Join(root, "Godeps"))
+}
diff --git a/vendor/github.com/kardianos/govendor/migrate/migrate.go b/vendor/github.com/kardianos/govendor/migrate/migrate.go
new file mode 100644
index 000000000..628ebf7d6
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/migrate.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package migrate transforms a repository from a given vendor schema to
+// the vendor folder schema.
+package migrate
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+type ErrNoSuchSystem struct {
+ NotExist string
+ Has []string
+}
+
+func (err ErrNoSuchSystem) Error() string {
+ return fmt.Sprintf("Migration system for %q doesn't exist. Current systems %q.", err.NotExist, err.Has)
+}
+
+// From is the current vendor schema.
+type From string
+
+// MigrateWD migrates from the given system using the current working directory.
+func MigrateWD(from From) error {
+ wd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+ return Migrate(from, wd)
+}
+
+// SystemList lists the available migration systems.
+func SystemList() []string {
+ list := make([]string, 0, len(registered))
+ for key := range registered {
+ list = append(list, string(key))
+ }
+ sort.Strings(list)
+ return list
+}
+
+// Migrate from the given system using the given root.
+func Migrate(from From, root string) error {
+ sys, found := registered[from]
+ if !found {
+ return ErrNoSuchSystem{
+ NotExist: string(from),
+ Has: SystemList(),
+ }
+ }
+ sys, err := sys.Check(root)
+ if err != nil {
+ return err
+ }
+ if sys == nil {
+ return errors.New("Root not found.")
+ }
+ return sys.Migrate(root)
+}
+
+type system interface {
+ Check(root string) (system, error)
+ Migrate(root string) error
+}
+
+func register(name From, sys system) {
+ _, found := registered[name]
+ if found {
+ panic("system " + name + " already registered.")
+ }
+ registered[name] = sys
+}
+
+var registered = make(map[From]system, 10)
+
+var errAutoSystemNotFound = errors.New("Unable to determine vendor system.")
+
+func init() {
+ register("auto", sysAuto{})
+}
+
+type sysAuto struct{}
+
+func (auto sysAuto) Check(root string) (system, error) {
+ for _, sys := range registered {
+ if sys == auto {
+ continue
+ }
+ out, err := sys.Check(root)
+ if err != nil {
+ return nil, err
+ }
+ if out != nil {
+ return out, nil
+ }
+ }
+ return nil, errAutoSystemNotFound
+}
+func (sysAuto) Migrate(root string) error {
+ return errors.New("Auto.Migrate shouldn't be called")
+}
+
+func hasDirs(root string, dd ...string) bool {
+ for _, d := range dd {
+ fi, err := os.Stat(filepath.Join(root, d))
+ if err != nil {
+ return false
+ }
+ if !fi.IsDir() {
+ return false
+ }
+ }
+ return true
+}
+
+func hasFiles(root string, dd ...string) bool {
+ for _, d := range dd {
+ fi, err := os.Stat(filepath.Join(root, d))
+ if err != nil {
+ return false
+ }
+ if fi.IsDir() {
+ return false
+ }
+ }
+ return true
+}
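
Tying the registry together: a caller either names a system explicitly or passes "auto", which probes every registered Check in turn. A hedged driver sketch; the project path is a placeholder:

	package main

	import (
		"log"

		"github.com/kardianos/govendor/migrate"
	)

	func main() {
		// Migrate a known schema at an explicit root...
		if err := migrate.Migrate(migrate.From("glide"), "/path/to/project"); err != nil {
			log.Fatal(err)
		}
		// ...or let "auto" detect the schema in the working directory.
		// (In practice you would run one or the other, not both.)
		if err := migrate.MigrateWD(migrate.From("auto")); err != nil {
			log.Fatal(err)
		}
	}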
diff --git a/vendor/github.com/kardianos/govendor/migrate/old.go b/vendor/github.com/kardianos/govendor/migrate/old.go
new file mode 100644
index 000000000..e3df59646
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/migrate/old.go
@@ -0,0 +1,97 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package migrate
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/kardianos/govendor/context"
+)
+
+func init() {
+ register("internal", sysInternal{})
+ register("old-vendor", sysOldVendor{})
+}
+
+type sysInternal struct{}
+
+func (sys sysInternal) Check(root string) (system, error) {
+ vendorFolder := "internal"
+ override := os.Getenv("GOVENDORFOLDER")
+ if len(override) != 0 {
+ vendorFolder = override
+ }
+ if hasDirs(root, vendorFolder) && hasFiles(root, filepath.Join(vendorFolder, "vendor.json")) {
+ return sys, nil
+ }
+ return nil, nil
+}
+func (sysInternal) Migrate(root string) error {
+ // Un-rewrite import paths.
+ // Copy files from internal to vendor.
+ // Update and move vendor file from "internal/vendor.json" to "vendor.json".
+ ctx, err := context.NewContext(root, filepath.Join("internal", "vendor.json"), "internal", true)
+ if err != nil {
+ return err
+ }
+ list, err := ctx.Status()
+ if err != nil {
+ return err
+ }
+ remove := make([]string, 0, len(list))
+ for _, item := range list {
+ if item.Status.Location != context.LocationVendor {
+ continue
+ }
+ pkg := ctx.Package[item.Local]
+ ctx.Operation = append(ctx.Operation, &context.Operation{
+ Pkg: pkg,
+ Src: pkg.Dir,
+ Dest: filepath.Join(ctx.RootDir, "vendor", filepath.ToSlash(item.Pkg.Path)),
+ })
+ remove = append(remove, filepath.Join(ctx.RootGopath, filepath.ToSlash(item.Local)))
+ ctx.RewriteRule[item.Local] = item.Pkg.Path
+ }
+ ctx.VendorFilePath = filepath.Join(ctx.RootDir, "vendor", "vendor.json")
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return err
+ }
+ err = ctx.Alter()
+ if err != nil {
+ return err
+ }
+
+ // Remove existing.
+ for _, r := range remove {
+ err = context.RemovePackage(r, "", false)
+ if err != nil {
+ return err
+ }
+ }
+ return os.Remove(filepath.Join(ctx.RootDir, "internal", "vendor.json"))
+}
+
+type sysOldVendor struct{}
+
+func (sys sysOldVendor) Check(root string) (system, error) {
+ if hasDirs(root, "vendor") && hasFiles(root, "vendor.json") {
+ return sys, nil
+ }
+ return nil, nil
+}
+func (sysOldVendor) Migrate(root string) error {
+ ctx, err := context.NewContext(root, "vendor.json", "vendor", false)
+ if err != nil {
+ return err
+ }
+ ctx.VendorFilePath = filepath.Join(ctx.RootDir, "vendor", "vendor.json")
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return err
+ }
+ return os.Remove(filepath.Join(ctx.RootDir, "vendor.json"))
+}
diff --git a/vendor/github.com/kardianos/govendor/pkgspec/pkg.go b/vendor/github.com/kardianos/govendor/pkgspec/pkg.go
new file mode 100644
index 000000000..e2f75eddf
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/pkgspec/pkg.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgspec defines a schema that contains the path, origin, version
+// and other properties.
+package pkgspec
+
+import "bytes"
+
+type Pkg struct {
+ Path string
+ FilePath string
+ Origin string
+ IncludeTree bool
+ MatchTree bool
+ HasVersion bool
+ HasOrigin bool
+ Version string
+
+ Uncommitted bool
+}
+
+func (pkg *Pkg) String() string {
+ buf := &bytes.Buffer{}
+ buf.WriteString(pkg.Path)
+ if pkg.IncludeTree {
+ buf.WriteString(TreeIncludeSuffix)
+ } else if pkg.MatchTree {
+ buf.WriteString(TreeMatchSuffix)
+ }
+ if len(pkg.Origin) > 0 {
+ buf.WriteString(originMatch)
+ buf.WriteString(pkg.Origin)
+ }
+ if pkg.HasVersion {
+ buf.WriteString(versionMatch)
+ if len(pkg.Version) > 0 {
+ buf.WriteString(pkg.Version)
+ }
+ }
+ return buf.String()
+}
+
+func (pkg *Pkg) PathOrigin() string {
+ if len(pkg.Origin) > 0 {
+ return pkg.Origin
+ }
+ return pkg.Path
+}
diff --git a/vendor/github.com/kardianos/govendor/pkgspec/pkgspec.go b/vendor/github.com/kardianos/govendor/pkgspec/pkgspec.go
new file mode 100644
index 000000000..d6ca2d64c
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/pkgspec/pkgspec.go
@@ -0,0 +1,99 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgspec parses the package specification string.
+package pkgspec
+
+import (
+ "errors"
+ "path"
+ "strings"
+)
+
+const (
+ TreeIncludeSuffix = "/^"
+ TreeMatchSuffix = "/..."
+)
+
+const (
+ originMatch = "::"
+ versionMatch = "@"
+ vendorSegment = "/vendor/"
+)
+
+var (
+ ErrEmptyPath = errors.New("Empty package path")
+ ErrEmptyOrigin = errors.New("Empty origin specified")
+ ErrInvalidPath = errors.New("Path contains a vendor folder and an origin")
+)
+
+// Parse a package spec according to:
+// package-spec = <path>[{/...|/^}][::<origin>][@[<version-spec>]]
+func Parse(currentGoPath, s string) (*Pkg, error) {
+ // Clean up the import path before parsing.
+ s = strings.Trim(s, "/\\ \t")
+ if len(s) == 0 {
+ return nil, ErrEmptyPath
+ }
+ s = strings.Replace(s, `\`, `/`, -1)
+
+ originIndex := strings.Index(s, originMatch)
+ versionIndex := strings.LastIndex(s, versionMatch)
+
+ if originIndex == 0 {
+ return nil, ErrEmptyPath
+ }
+
+ // Don't count the origin if it is after the "@" symbol.
+ if originIndex > versionIndex && versionIndex > 0 {
+ originIndex = -1
+ }
+
+ pkg := &Pkg{
+ Path: s,
+ HasOrigin: (originIndex >= 0),
+ }
+
+ if versionIndex > 0 {
+ pkg.Path = s[:versionIndex]
+ pkg.Version = s[versionIndex+len(versionMatch):]
+ pkg.HasVersion = true
+ }
+ if originIndex > 0 {
+ pkg.Path = s[:originIndex]
+ endOrigin := len(s)
+ if versionIndex > 0 {
+ endOrigin = versionIndex
+ }
+ pkg.Origin = s[originIndex+len(originMatch) : endOrigin]
+ if len(pkg.Origin) == 0 {
+ return nil, ErrEmptyOrigin
+ }
+ }
+ // Look for vendor folder in package path.
+ // This is allowed in origin, but not path.
+ vendorIndex := strings.LastIndex(pkg.Path, vendorSegment)
+ if vendorIndex >= 0 {
+ if len(pkg.Origin) > 0 {
+ return nil, ErrInvalidPath
+ }
+ pkg.Origin = pkg.Path
+ pkg.Path = pkg.Path[vendorIndex+len(vendorSegment):]
+ }
+
+ if strings.HasSuffix(pkg.Path, TreeMatchSuffix) {
+ pkg.MatchTree = true
+ pkg.Path = strings.TrimSuffix(pkg.Path, TreeMatchSuffix)
+ } else if strings.HasSuffix(pkg.Path, TreeIncludeSuffix) {
+ pkg.IncludeTree = true
+ pkg.Path = strings.TrimSuffix(pkg.Path, TreeIncludeSuffix)
+ }
+ if strings.HasPrefix(pkg.Path, ".") && len(currentGoPath) != 0 {
+ currentGoPath = strings.Replace(currentGoPath, `\`, `/`, -1)
+ currentGoPath = strings.TrimPrefix(currentGoPath, "/")
+ pkg.Path = path.Join(currentGoPath, pkg.Path)
+ }
+
+ return pkg, nil
+}
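
To make the grammar concrete, here is what Parse yields for a fully loaded spec; the import paths are illustrative, not from the diff:

	package main

	import (
		"fmt"

		"github.com/kardianos/govendor/pkgspec"
	)

	func main() {
		pkg, err := pkgspec.Parse("", "github.com/foo/bar/...::github.com/fork/bar@v1.2.3")
		if err != nil {
			panic(err)
		}
		fmt.Println(pkg.Path)      // github.com/foo/bar
		fmt.Println(pkg.MatchTree) // true, from the "/..." suffix
		fmt.Println(pkg.Origin)    // github.com/fork/bar
		fmt.Println(pkg.Version)   // v1.2.3
		fmt.Println(pkg)           // String() round-trips the spec
	}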
diff --git a/vendor/github.com/kardianos/govendor/prompt/prompt.go b/vendor/github.com/kardianos/govendor/prompt/prompt.go
new file mode 100644
index 000000000..33ccf08fa
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/prompt/prompt.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package prompt prompts user for feedback.
+package prompt
+
+import (
+ "fmt"
+)
+
+type Option struct {
+ key interface{}
+ prompt string
+ validation string
+ other bool
+
+ Chosen bool // Set to true if chosen.
+ Value string // Value used if chosen and option is "other".
+}
+
+type OptionType byte
+
+const (
+ TypeSelectOne OptionType = iota // Allow user to choose single option.
+ TypeSelectMultiple // Allow user to choose multiple options.
+)
+
+type Response byte
+
+const (
+ RespAnswer Response = iota
+ RespCancel
+)
+
+func NewOption(key interface{}, prompt string, other bool) Option {
+ return Option{key: key, prompt: prompt, other: other}
+}
+
+func (opt Option) Key() interface{} {
+ return opt.key
+}
+func (opt Option) Prompt() string {
+ return opt.prompt
+}
+func (opt Option) Other() bool {
+ return opt.other
+}
+func (opt Option) Validation() string {
+ return opt.validation
+}
+func (opt Option) String() string {
+ if opt.other {
+ return opt.Value
+ }
+ return fmt.Sprintf("%v", opt.key)
+}
+
+func ValidateOption(opt Option, validation string) Option {
+ return Option{
+ key: opt.key,
+ prompt: opt.prompt,
+ other: opt.other,
+
+ validation: validation,
+
+ Chosen: opt.Chosen,
+ Value: opt.Value,
+ }
+}
+
+type Question struct {
+ Error string
+ Prompt string
+ Type OptionType
+ Options []Option
+}
+
+func (q *Question) AnswerMultiple(must bool) []*Option {
+ ans := []*Option{}
+ for i := range q.Options {
+ o := &q.Options[i]
+ if o.Chosen {
+ ans = append(ans, o)
+ }
+ }
+ if must && len(ans) == 0 {
+ panic("If no option is chosen, response must be cancelled")
+ }
+ return ans
+}
+
+func (q *Question) AnswerSingle(must bool) *Option {
+ var ans *Option
+ if q.Type != TypeSelectOne {
+ panic("Question Type should match answer type")
+ }
+ found := false
+ for i := range q.Options {
+ o := &q.Options[i]
+ if found && o.Chosen {
+ panic("Must only respond with single option")
+ }
+ if o.Chosen {
+ found = true
+ ans = o
+ }
+ }
+ if must && !found {
+ panic("If no option is chosen, response must be cancelled")
+ }
+ return ans
+}
+
+type Prompt interface {
+ Ask(q *Question) (Response, error)
+}
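
A sketch of how a caller drives this interface. The autoFirst stub stands in for a real implementation such as the cliprompt.Prompt wired up in main.go; it is an assumption for illustration only:

	package main

	import (
		"fmt"

		"github.com/kardianos/govendor/prompt"
	)

	// autoFirst is a stub Prompt that always chooses the first option.
	type autoFirst struct{}

	func (autoFirst) Ask(q *prompt.Question) (prompt.Response, error) {
		q.Options[0].Chosen = true
		return prompt.RespAnswer, nil
	}

	func main() {
		q := &prompt.Question{
			Prompt: "Which version?",
			Type:   prompt.TypeSelectOne,
			Options: []prompt.Option{
				prompt.NewOption("v1.0.0", "tag v1.0.0", false),
				prompt.NewOption(nil, "other (type a version)", true),
			},
		}
		var p prompt.Prompt = autoFirst{}
		if resp, err := p.Ask(q); err == nil && resp == prompt.RespAnswer {
			fmt.Println(q.AnswerSingle(true).String()) // v1.0.0
		}
	}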
diff --git a/vendor/github.com/kardianos/govendor/run/command.go b/vendor/github.com/kardianos/govendor/run/command.go
new file mode 100644
index 000000000..be99accfe
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/command.go
@@ -0,0 +1,175 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/help"
+ "github.com/kardianos/govendor/migrate"
+)
+
+func (r *runner) Init(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("init", flag.ContinueOnError)
+ flags.SetOutput(nullWriter{})
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgInit, err
+ }
+ ctx, err := r.NewContextWD(context.RootWD)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ ctx.VendorFile.Ignore = "test" // Add default ignore rule.
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return help.MsgNone, err
+ }
+ err = os.MkdirAll(filepath.Join(ctx.RootDir, ctx.VendorFolder), 0777)
+ return help.MsgNone, err
+}
+func (r *runner) Migrate(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("migrate", flag.ContinueOnError)
+ flags.SetOutput(nullWriter{})
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgMigrate, err
+ }
+
+ from := migrate.From("auto")
+ if len(flags.Args()) > 0 {
+ from = migrate.From(flags.Arg(0))
+ }
+ err = migrate.MigrateWD(from)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ fmt.Fprintf(w, `You may wish to run "govendor sync" now.%s`, "\n")
+ return help.MsgNone, nil
+}
+
+func (r *runner) Get(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("get", flag.ContinueOnError)
+ flags.SetOutput(nullWriter{})
+
+ insecure := flags.Bool("insecure", false, "allows insecure connection")
+ verbose := flags.Bool("v", false, "verbose")
+
+ flags.Bool("u", false, "update") // For compatibility with "go get".
+
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgGet, err
+ }
+ logger := w
+ if !*verbose {
+ logger = nil
+ }
+ for _, a := range flags.Args() {
+ pkg, err := context.Get(logger, a, *insecure)
+ if err != nil {
+ return help.MsgNone, err
+ }
+
+ helpMessage, err := r.GoCmd("install", []string{pkg.Path})
+ if err != nil {
+ return helpMessage, err
+ }
+ }
+ return help.MsgNone, nil
+}
+
+func (r *runner) GoCmd(subcmd string, args []string) (help.HelpMessage, error) {
+ ctx, err := r.NewContextWD(context.RootVendorOrWDOrFirstGOPATH)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ list, err := ctx.Status()
+ if err != nil {
+ return help.MsgNone, err
+ }
+ cgp, err := currentGoPath(ctx)
+ if err != nil {
+ return help.MsgNone, err
+ }
+
+ otherArgs := make([]string, 1, len(args)+1)
+ otherArgs[0] = subcmd
+
+ // We keep track of whether any filtering was requested, and if so,
+ // whether a filter actually matched any items.
+ var filtersRequested, filtersFound bool
+
+ // Expand any status flags in-place. For some wrapped commands the
+ // argument order is important to the operation of the command.
+ for _, a := range args {
+ if len(a) > 0 && a[0] == '+' {
+ filtersRequested = true
+
+ f, err := parseFilter(cgp, []string{a})
+ if err != nil {
+ return help.MsgNone, err
+ }
+
+ for _, item := range list {
+ if f.HasStatus(item) {
+ filtersFound = true
+
+ add := item.Local
+ // "go tool vet" takes dirs, not pkgs, so special case it.
+ if subcmd == "tool" && len(args) > 0 && args[0] == "vet" {
+ add = filepath.Join(ctx.RootGopath, add)
+ }
+ otherArgs = append(otherArgs, add)
+ }
+ }
+ } else {
+ otherArgs = append(otherArgs, a)
+ }
+ }
+
+ // If at least one filter was requested but we didn't match any packages,
+ // we want to bail out; otherwise, the command will behave as if we ran it
+ // against the current package instead of the requested filters' packages.
+ if filtersRequested && !filtersFound {
+ return help.MsgNone, nil
+ }
+
+ cmd := exec.Command("go", otherArgs...)
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+ return help.MsgNone, cmd.Run()
+}
+
+func (r *runner) Status(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("status", flag.ContinueOnError)
+ flags.SetOutput(nullWriter{})
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgStatus, err
+ }
+ ctx, err := r.NewContextWD(context.RootVendor)
+ if err != nil {
+ return help.MsgStatus, err
+ }
+ outOfDate, err := ctx.VerifyVendor()
+ if err != nil {
+ return help.MsgStatus, err
+ }
+ if len(outOfDate) == 0 {
+ return help.MsgNone, nil
+ }
+ fmt.Fprintf(w, "The following packages are missing or modified locally:\n")
+ for _, pkg := range outOfDate {
+ fmt.Fprintf(w, "\t%s\n", pkg.Path)
+ }
+ return help.MsgNone, fmt.Errorf("status failed for %d package(s)", len(outOfDate))
+}
diff --git a/vendor/github.com/kardianos/govendor/run/filter.go b/vendor/github.com/kardianos/govendor/run/filter.go
new file mode 100644
index 000000000..6bafa77a0
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/filter.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/internal/pathos"
+ "github.com/kardianos/govendor/pkgspec"
+)
+
+var (
+ outside = []context.Status{
+ {Location: context.LocationExternal},
+ {Presence: context.PresenceMissing},
+ }
+ normal = []context.Status{
+ {Location: context.LocationExternal},
+ {Location: context.LocationVendor},
+ {Location: context.LocationLocal},
+ {Location: context.LocationNotFound},
+ }
+ all = []context.Status{
+ {Location: context.LocationStandard},
+ {Location: context.LocationExternal},
+ {Location: context.LocationVendor},
+ {Location: context.LocationLocal},
+ {Location: context.LocationNotFound},
+ }
+)
+
+func statusGroupFromList(list []context.Status, and, not bool) context.StatusGroup {
+ sg := context.StatusGroup{
+ Not: not,
+ And: and,
+ }
+ for _, s := range list {
+ sg.Status = append(sg.Status, s)
+ }
+ return sg
+}
+
+const notOp = "^"
+
+func parseStatusGroup(statusString string) (sg context.StatusGroup, err error) {
+ ss := strings.Split(statusString, ",")
+ sg.And = true
+ for _, s := range ss {
+ st := context.Status{}
+ if strings.HasPrefix(s, notOp) {
+ st.Not = true
+ s = strings.TrimPrefix(s, notOp)
+ }
+ var list []context.Status
+ switch {
+ case strings.HasPrefix("external", s):
+ st.Location = context.LocationExternal
+ case strings.HasPrefix("vendor", s):
+ st.Location = context.LocationVendor
+ case strings.HasPrefix("unused", s):
+ st.Presence = context.PresenceUnused
+ case strings.HasPrefix("missing", s):
+ st.Presence = context.PresenceMissing
+ case strings.HasPrefix("xcluded", s):
+ st.Presence = context.PresenceExcluded
+ case len(s) >= 3 && strings.HasPrefix("excluded", s): // len >= 3 to distinguish from "external"
+ st.Presence = context.PresenceExcluded
+ case strings.HasPrefix("local", s):
+ st.Location = context.LocationLocal
+ case strings.HasPrefix("program", s):
+ st.Type = context.TypeProgram
+ case strings.HasPrefix("std", s):
+ st.Location = context.LocationStandard
+ case strings.HasPrefix("standard", s):
+ st.Location = context.LocationStandard
+ case strings.HasPrefix("all", s):
+ list = all
+ case strings.HasPrefix("normal", s):
+ list = normal
+ case strings.HasPrefix("outside", s):
+ list = outside
+ default:
+ err = fmt.Errorf("unknown status %q", s)
+ return
+ }
+ if len(list) == 0 {
+ sg.Status = append(sg.Status, st)
+ } else {
+ sg.Group = append(sg.Group, statusGroupFromList(list, false, st.Not))
+ }
+ }
+ return
+}
+
+type filter struct {
+ Status context.StatusGroup
+ Import []*pkgspec.Pkg
+}
+
+func (f filter) String() string {
+ return fmt.Sprintf("status %q, import: %q", f.Status, f.Import)
+}
+
+func (f filter) HasStatus(item context.StatusItem) bool {
+ return item.Status.MatchGroup(f.Status)
+}
+func (f filter) FindImport(item context.StatusItem) *pkgspec.Pkg {
+ for _, imp := range f.Import {
+ if imp.Path == item.Local || imp.Path == item.Pkg.Path {
+ return imp
+ }
+ if imp.MatchTree {
+ if strings.HasPrefix(item.Local, imp.Path) || strings.HasPrefix(item.Pkg.Path, imp.Path) {
+ return imp
+ }
+ }
+ }
+ return nil
+}
+
+func currentGoPath(ctx *context.Context) (string, error) {
+ wd, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ wdpath := pathos.FileTrimPrefix(wd, ctx.RootGopath)
+ wdpath = pathos.SlashToFilepath(wdpath)
+ wdpath = strings.Trim(wdpath, "/")
+ return wdpath, nil
+}
+
+func parseFilter(currentGoPath string, args []string) (filter, error) {
+ f := filter{
+ Import: make([]*pkgspec.Pkg, 0, len(args)),
+ }
+ for _, a := range args {
+ if len(a) == 0 {
+ continue
+ }
+ // Check if item is a status.
+ if a[0] == '+' {
+ sg, err := parseStatusGroup(a[1:])
+ if err != nil {
+ return f, err
+ }
+ f.Status.Group = append(f.Status.Group, sg)
+ } else {
+ pkg, err := pkgspec.Parse(currentGoPath, a)
+ if err != nil {
+ return f, err
+ }
+ f.Import = append(f.Import, pkg)
+ }
+ }
+ return f, nil
+}
+
+func insertListToAllNot(sg *context.StatusGroup, list []context.Status) {
+ if len(sg.Group) == 0 {
+ allStatusNot := true
+ for _, s := range sg.Status {
+ if !s.Not {
+ allStatusNot = false
+ break
+ }
+ }
+ if allStatusNot {
+ sg.Group = append(sg.Group, statusGroupFromList(list, false, false))
+ }
+ }
+ for i := range sg.Group {
+ insertListToAllNot(&sg.Group[i], list)
+ }
+}
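
One subtlety in parseStatusGroup above: the strings.HasPrefix arguments are deliberately inverted, so the user's token must be a prefix of the canonical status name. That is what lets +e, +ext, and +external all select the same status, and why the source guards "excluded" with len(s) >= 3 to keep it distinct from "external". A tiny illustration of the idiom:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Canonical name first, user token second.
		fmt.Println(strings.HasPrefix("external", "e"))   // true
		fmt.Println(strings.HasPrefix("external", "ext")) // true
		fmt.Println(strings.HasPrefix("ext", "external")) // false: the token abbreviates the name, not the reverse
	}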
diff --git a/vendor/github.com/kardianos/govendor/run/license.go b/vendor/github.com/kardianos/govendor/run/license.go
new file mode 100644
index 000000000..dc4d6b1df
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/license.go
@@ -0,0 +1,107 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "text/template"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/help"
+)
+
+var defaultLicenseTemplate = `{{range $index, $t := .}}{{if ne $index 0}}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+{{end}}{{.Filename}} - {{.Path}}
+{{.Text}}{{end}}
+`
+
+func (r *runner) License(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("license", flag.ContinueOnError)
+ flags.SetOutput(nullWriter{})
+ outputFilename := flags.String("o", "", "output")
+ templateFilename := flags.String("template", "", "custom template file")
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgLicense, err
+ }
+ args := flags.Args()
+
+ templateText := defaultLicenseTemplate
+ if len(*templateFilename) > 0 {
+ text, err := ioutil.ReadFile(*templateFilename)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ templateText = string(text)
+ }
+ t, err := template.New("").Parse(templateText)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ output := w
+ if len(*outputFilename) > 0 {
+ f, err := os.Create(*outputFilename)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ defer f.Close()
+ output = f
+ }
+
+ ctx, err := r.NewContextWD(context.RootVendorOrWD)
+ if err != nil {
+ return checkNewContextError(err)
+ }
+ cgp, err := currentGoPath(ctx)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ f, err := parseFilter(cgp, args)
+ if err != nil {
+ return help.MsgLicense, err
+ }
+ if len(f.Import) == 0 {
+ insertListToAllNot(&f.Status, normal)
+ } else {
+ insertListToAllNot(&f.Status, all)
+ }
+
+ list, err := ctx.Status()
+ if err != nil {
+ return help.MsgNone, err
+ }
+ var licenseList context.LicenseSort
+ var lmap = make(map[string]context.License, 9)
+
+ err = context.LicenseDiscover(filepath.Clean(filepath.Join(ctx.Goroot, "..")), ctx.Goroot, " go", lmap)
+ if err != nil {
+ return help.MsgNone, fmt.Errorf("Failed to discover license for Go %q %v", ctx.Goroot, err)
+ }
+
+ for _, item := range list {
+ if !f.HasStatus(item) {
+ continue
+ }
+ if len(f.Import) != 0 && f.FindImport(item) == nil {
+ continue
+ }
+ err = context.LicenseDiscover(ctx.RootGopath, filepath.Join(ctx.RootGopath, item.Local), "", lmap)
+ if err != nil {
+ return help.MsgNone, fmt.Errorf("Failed to discover license for %q %v", item.Local, err)
+ }
+ }
+ for _, l := range lmap {
+ licenseList = append(licenseList, l)
+ }
+ sort.Sort(licenseList)
+
+ return help.MsgNone, t.Execute(output, licenseList)
+}
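
The license command is mostly plumbing around `text/template`: `defaultLicenseTemplate` ranges over the discovered licenses and draws a tilde rule between entries. A hedged sketch of how a file passed via `-template` gets rendered — the `lic` struct is an illustrative stand-in assumed to mirror the `Filename`, `Path`, and `Text` fields the template references:

```go
package main

import (
	"os"
	"text/template"
)

// lic mirrors the three fields the license template references.
// It is a stand-in for govendor's context.License, not the real type.
type lic struct {
	Filename string
	Path     string
	Text     string
}

func main() {
	// Same shape as defaultLicenseTemplate: a separator between
	// entries, then "filename - path" and the license text.
	const tmpl = `{{range $i, $t := .}}{{if ne $i 0}}----
{{end}}{{.Filename}} - {{.Path}}
{{.Text}}{{end}}
`
	t := template.Must(template.New("").Parse(tmpl))
	list := []lic{
		{"LICENSE", "github.com/example/a", "MIT ..."},
		{"COPYING", "github.com/example/b", "BSD ..."},
	}
	if err := t.Execute(os.Stdout, list); err != nil {
		panic(err)
	}
}
```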
diff --git a/vendor/github.com/kardianos/govendor/run/list.go b/vendor/github.com/kardianos/govendor/run/list.go
new file mode 100644
index 000000000..97e7a7e9d
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/list.go
@@ -0,0 +1,132 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/help"
+)
+
+func (r *runner) List(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ listFlags := flag.NewFlagSet("list", flag.ContinueOnError)
+ listFlags.SetOutput(nullWriter{})
+ verbose := listFlags.Bool("v", false, "verbose")
+ asFilePath := listFlags.Bool("p", false, "show file path to package instead of import path")
+ noStatus := listFlags.Bool("no-status", false, "do not show the status")
+ err := listFlags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgList, err
+ }
+ args := listFlags.Args()
+ // fmt.Printf("Status: %q\n", f.Status)
+
+ // Print all listed status.
+ ctx, err := r.NewContextWD(context.RootVendorOrWD)
+ if err != nil {
+ return checkNewContextError(err)
+ }
+ cgp, err := currentGoPath(ctx)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ f, err := parseFilter(cgp, args)
+ if err != nil {
+ return help.MsgList, err
+ }
+ if len(f.Import) == 0 {
+ insertListToAllNot(&f.Status, normal)
+ } else {
+ insertListToAllNot(&f.Status, all)
+ }
+
+ list, err := ctx.Status()
+ if err != nil {
+ return help.MsgNone, err
+ }
+
+ // If not verbose, remove any entries that will just confuse people.
+ // For example, one package may reference pkgA inside vendor, another
+	// package may reference pkgA outside vendor, resulting in both an
+	// external reference and a vendor reference.
+ // In the above case, remove the external reference.
+ if !*verbose {
+ next := make([]context.StatusItem, 0, len(list))
+ for checkIndex, check := range list {
+ if check.Status.Location != context.LocationExternal {
+ next = append(next, check)
+ continue
+ }
+ found := false
+ for lookIndex, look := range list {
+ if checkIndex == lookIndex {
+ continue
+ }
+ if check.Pkg.Path != look.Pkg.Path {
+ continue
+ }
+ if look.Status.Location == context.LocationVendor {
+ found = true
+ break
+ }
+ }
+ if !found {
+ next = append(next, check)
+ }
+ }
+ list = next
+ }
+
+ formatSame := "%[1]v %[2]s\t%[3]s\t%[4]s\n"
+ formatDifferent := "%[1]v %[2]s\t%[4]s\t%[5]s\n"
+ if *verbose {
+ formatDifferent = "%[1]v %[2]s ::%[3]s\t%[4]s\t%[5]s\n"
+ }
+ if *noStatus {
+ formatSame = "%[2]s\n"
+ formatDifferent = "%[2]s\n"
+ if *verbose {
+ formatDifferent = "%[2]s ::%[3]s\n"
+ }
+ }
+ tw := tabwriter.NewWriter(w, 0, 4, 2, ' ', 0)
+ defer tw.Flush()
+ for _, item := range list {
+ if !f.HasStatus(item) {
+ continue
+ }
+ if len(f.Import) != 0 && f.FindImport(item) == nil {
+ continue
+ }
+
+ var path string
+ if *asFilePath {
+ path = item.Pkg.FilePath
+ } else {
+ path = item.Pkg.Path
+ }
+
+ if item.Local == item.Pkg.Path {
+ fmt.Fprintf(tw, formatSame, item.Status, path, item.Pkg.Version, item.VersionExact)
+ } else {
+ fmt.Fprintf(tw, formatDifferent, item.Status, path, strings.TrimPrefix(item.Local, ctx.RootImportPath), item.Pkg.Version, item.VersionExact)
+ }
+ if *verbose {
+ for i, imp := range item.ImportedBy {
+ if i != len(item.ImportedBy)-1 {
+ fmt.Fprintf(tw, " ├── %s %s\n", imp.Status, imp)
+ } else {
+ fmt.Fprintf(tw, " └── %s %s\n", imp.Status, imp)
+ }
+ }
+ }
+ }
+ return help.MsgNone, nil
+}
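
The list output combines `text/tabwriter` with indexed format verbs (`%[1]v`, `%[4]s`) so one argument list can feed several layouts — `formatDifferent` simply skips argument 3. A minimal sketch of that combination, with fabricated rows:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same tabwriter settings as List: minwidth 0, tabwidth 4,
	// padding 2, space-padded, no flags.
	tw := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
	defer tw.Flush()

	// Indexed verbs let one argument list serve multiple formats;
	// this layout skips argument 3, as formatDifferent does. With
	// explicit indexes fmt does not flag the unused argument.
	format := "%[1]v %[2]s\t%[4]s\t%[5]s\n"
	rows := [][5]interface{}{
		{"v", "github.com/example/pkg", "ignored", "v1.2.0", "v1.2.0"},
		{"e", "github.com/example/other", "ignored", "", ""},
	}
	for _, r := range rows {
		fmt.Fprintf(tw, format, r[0], r[1], r[2], r[3], r[4])
	}
}
```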
diff --git a/vendor/github.com/kardianos/govendor/run/modify.go b/vendor/github.com/kardianos/govendor/run/modify.go
new file mode 100644
index 000000000..796d4f914
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/modify.go
@@ -0,0 +1,161 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/help"
+ "github.com/kardianos/govendor/prompt"
+)
+
+func (r *runner) Modify(w io.Writer, subCmdArgs []string, mod context.Modify, ask prompt.Prompt) (help.HelpMessage, error) {
+ msg := help.MsgFull
+ switch mod {
+ case context.Add:
+ msg = help.MsgAdd
+ case context.Update, context.AddUpdate:
+ msg = help.MsgUpdate
+ case context.Remove:
+ msg = help.MsgRemove
+ case context.Fetch:
+ msg = help.MsgFetch
+ }
+ var err error
+ /*
+ // Fake example prompt.
+ q := &prompt.Question{
+ Error: "An error goes here",
+ Prompt: "Do you want to do this?",
+ Type: prompt.TypeSelectOne,
+ Options: []prompt.Option{
+ prompt.NewOption("yes", "Yes, continue", false),
+ prompt.NewOption("no", "No, stop", false),
+ prompt.NewOption("", "What?", true),
+ },
+ }
+ q.Options[2].Chosen = true
+ q.Options[2] = prompt.ValidateOption(q.Options[2], "Choose again!")
+ resp, err := ask.Ask(q)
+ if err != nil {
+ return msg, err
+ }
+ if resp == prompt.RespCancel {
+ fmt.Printf("Cancelled\n")
+ return help.MsgNone, nil
+ }
+ chosen := q.AnswerSingle(true)
+
+ fmt.Printf("Chosen: %s\n", chosen.String())
+ */
+
+ listFlags := flag.NewFlagSet("mod", flag.ContinueOnError)
+ listFlags.SetOutput(nullWriter{})
+ dryrun := listFlags.Bool("n", false, "dry-run")
+ verbose := listFlags.Bool("v", false, "verbose")
+ short := listFlags.Bool("short", false, "choose the short path")
+ long := listFlags.Bool("long", false, "choose the long path")
+ tree := listFlags.Bool("tree", false, "copy all folders including and under selected folder")
+ insecure := listFlags.Bool("insecure", false, "allow insecure network updates")
+ uncommitted := listFlags.Bool("uncommitted", false, "allows adding uncommitted changes. Doesn't update revision or checksum")
+ err = listFlags.Parse(subCmdArgs)
+ if err != nil {
+ return msg, err
+ }
+ if *short && *long {
+ return help.MsgNone, errors.New("cannot select both long and short path")
+ }
+ args := listFlags.Args()
+ if len(args) == 0 {
+ return msg, errors.New("missing package or status")
+ }
+ ctx, err := r.NewContextWD(context.RootVendor)
+ if err != nil {
+ return checkNewContextError(err)
+ }
+ if *verbose {
+ ctx.Logger = w
+ }
+ ctx.Insecure = *insecure
+ cgp, err := currentGoPath(ctx)
+ if err != nil {
+ return msg, err
+ }
+ f, err := parseFilter(cgp, args)
+ if err != nil {
+ return msg, err
+ }
+
+ mops := make([]context.ModifyOption, 0, 3)
+ if *uncommitted {
+ mops = append(mops, context.Uncommitted)
+ }
+ if *tree {
+ mops = append(mops, context.IncludeTree)
+ }
+
+ // Add explicit imports.
+ for _, imp := range f.Import {
+ err = ctx.ModifyImport(imp, mod, mops...)
+ if err != nil {
+ return help.MsgNone, err
+ }
+ }
+ err = ctx.ModifyStatus(f.Status, mod, mops...)
+ if err != nil {
+ return help.MsgNone, err
+ }
+
+ // Auto-resolve package conflicts.
+ conflicts := ctx.Check()
+ conflicts = ctx.ResolveAutoVendorFileOrigin(conflicts)
+ if *long {
+ conflicts = context.ResolveAutoLongestPath(conflicts)
+ }
+ if *short {
+ conflicts = context.ResolveAutoShortestPath(conflicts)
+ }
+ ctx.ResloveApply(conflicts)
+
+ // TODO: loop through conflicts to see if there are any remaining conflicts.
+ // Print out any here.
+
+ if *dryrun {
+ for _, op := range ctx.Operation {
+ switch op.Type {
+ case context.OpRemove:
+ fmt.Fprintf(w, "Remove %q\n", op.Src)
+ case context.OpCopy:
+ fmt.Fprintf(w, "Copy %q -> %q\n", op.Src, op.Dest)
+ for _, ignore := range op.IgnoreFile {
+ fmt.Fprintf(w, "\tIgnore %q\n", ignore)
+ }
+ case context.OpFetch:
+ fmt.Fprintf(w, "Fetch %q\n", op.Src)
+ }
+ }
+ return help.MsgNone, nil
+ }
+
+ // Write intent, make the changes, then record any checksums or recursive info.
+ err = ctx.WriteVendorFile()
+ if err != nil {
+ return help.MsgNone, err
+ }
+	// Make the change, then write the vendor file again.
+ err = ctx.Alter()
+ vferr := ctx.WriteVendorFile()
+ if err != nil {
+ return help.MsgNone, err
+ }
+ if vferr != nil {
+ return help.MsgNone, vferr
+ }
+ return help.MsgNone, nil
+}
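
Note the double `WriteVendorFile` at the end of `Modify`: the first write records intent so an interrupted `Alter` still leaves a parseable metadata file, the second records checksums and anything else learned during the copy, and the mutation error outranks the final write error. A minimal sketch of the same two-phase pattern, under illustrative names:

```go
package main

import (
	"fmt"
	"io/ioutil"
)

// apply writes intent, performs the change, then writes again so
// state learned during the change is recorded. The mutation error
// takes precedence over the final write error, matching the
// precedence at the end of Modify.
func apply(path string, state func() []byte, mutate func() error) error {
	if err := ioutil.WriteFile(path, state(), 0644); err != nil {
		return err // intent never recorded; nothing was changed yet
	}
	mutErr := mutate()
	wErr := ioutil.WriteFile(path, state(), 0644)
	if mutErr != nil {
		return mutErr
	}
	return wErr
}

func main() {
	steps := 0
	err := apply("/tmp/vendor-intent.json",
		func() []byte { return []byte(fmt.Sprintf(`{"steps": %d}`, steps)) },
		func() error { steps++; return nil })
	fmt.Println("err:", err, "steps:", steps)
}
```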
diff --git a/vendor/github.com/kardianos/govendor/run/run.go b/vendor/github.com/kardianos/govendor/run/run.go
new file mode 100644
index 000000000..aaca3836c
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/run.go
@@ -0,0 +1,174 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package run is a front-end to govendor.
+package run
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/help"
+ "github.com/kardianos/govendor/prompt"
+)
+
+type nullWriter struct{}
+
+func (nw nullWriter) Write(p []byte) (n int, err error) {
+ return len(p), nil
+}
+
+type runner struct {
+ ctx *context.Context
+}
+
+func (r *runner) NewContextWD(rt context.RootType) (*context.Context, error) {
+ if r.ctx != nil {
+ return r.ctx, nil
+ }
+ var err error
+ r.ctx, err = context.NewContextWD(rt)
+ return r.ctx, err
+}
+
+// Run is isolated from main and os.Args to help with testing.
+// Shouldn't directly print to console, just write through w.
+func Run(w io.Writer, appArgs []string, ask prompt.Prompt) (help.HelpMessage, error) {
+ r := &runner{}
+ return r.run(w, appArgs, ask)
+}
+func (r *runner) run(w io.Writer, appArgs []string, ask prompt.Prompt) (help.HelpMessage, error) {
+ if len(appArgs) == 1 {
+ return help.MsgFull, nil
+ }
+
+ flags := flag.NewFlagSet("govendor", flag.ContinueOnError)
+ licenses := flags.Bool("govendor-licenses", false, "show govendor's licenses")
+ version := flags.Bool("version", false, "show govendor version")
+ cpuProfile := flags.String("cpuprofile", "", "write a CPU profile to `file` to help debug slow operations")
+ heapProfile := flags.String("heapprofile", "", "write a heap profile to `file` to help debug slow operations")
+
+ flags.SetOutput(nullWriter{})
+ err := flags.Parse(appArgs[1:])
+ if err != nil {
+ return help.MsgFull, err
+ }
+ if *licenses {
+ return help.MsgGovendorLicense, nil
+ }
+ if *version {
+ return help.MsgGovendorVersion, nil
+ }
+
+ if *cpuProfile != "" {
+ done := collectCPUProfile(*cpuProfile)
+ defer done()
+ }
+ if *heapProfile != "" {
+		done := collectHeapProfile(*heapProfile)
+ defer done()
+ }
+
+ args := flags.Args()
+
+ cmd := args[0]
+ switch cmd {
+ case "init":
+ return r.Init(w, args[1:])
+ case "list":
+ return r.List(w, args[1:])
+ case "add", "update", "remove", "fetch":
+ var mod context.Modify
+ switch cmd {
+ case "add":
+ mod = context.Add
+ case "update":
+ mod = context.Update
+ case "remove":
+ mod = context.Remove
+ case "fetch":
+ mod = context.Fetch
+ }
+ return r.Modify(w, args[1:], mod, ask)
+ case "sync":
+ return r.Sync(w, args[1:])
+ case "status":
+ return r.Status(w, args[1:])
+ case "migrate":
+ return r.Migrate(w, args[1:])
+ case "get":
+ return r.Get(w, args[1:])
+ case "license":
+ return r.License(w, args[1:])
+ case "shell":
+ return r.Shell(w, args[1:])
+ case "fmt", "build", "install", "clean", "test", "vet", "generate", "tool":
+ return r.GoCmd(cmd, args[1:])
+ default:
+ return help.MsgFull, fmt.Errorf("Unknown command %q", cmd)
+ }
+}
+
+func checkNewContextError(err error) (help.HelpMessage, error) {
+	// Diagnose the error, show the current value of the Go 1.5 vendor setting, and suggest a fix.
+ if err == nil {
+ return help.MsgNone, nil
+ }
+ if _, is := err.(context.ErrMissingVendorFile); is {
+ err = fmt.Errorf(`%v
+
+Ensure the current folder or a parent folder contains a folder named "vendor".
+If in doubt, run "govendor init" in the project root.
+`, err)
+ return help.MsgNone, err
+ }
+ return help.MsgNone, err
+}
+
+// collectCPUProfile collects a CPU profile until the returned
+// `done()` func is invoked.
+func collectCPUProfile(filename string) (done func()) {
+ cpuProf, err := os.Create(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to create file for cpu profile: %v\n", err)
+ return func() {}
+ }
+ if err := pprof.StartCPUProfile(cpuProf); err != nil {
+ _ = cpuProf.Close()
+ fmt.Fprintf(os.Stderr, "failed to write cpu profile to file: %v\n", err)
+ return func() {}
+ }
+ return func() {
+ pprof.StopCPUProfile()
+ if err := cpuProf.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "failed to close file for cpu profile: %v\n", err)
+ }
+ }
+}
+
+// collectHeapProfile collects a heap profile _when_ `done()` is called.
+func collectHeapProfile(filename string) (done func()) {
+ heapProf, err := os.Create(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to create file for heap profile: %v\n", err)
+ return
+ }
+ return func() {
+ runtime.GC() // get up-to-date statistics
+ if err := pprof.WriteHeapProfile(heapProf); err != nil {
+ _ = heapProf.Close()
+ fmt.Fprintf(os.Stderr, "failed to write heap profile to file: %v\n", err)
+ return
+ }
+ if err := heapProf.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "failed to close file for heap profile: %v\n", err)
+ return
+ }
+ }
+}
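
Both profiling helpers hand back a `done` closure so callers can simply `defer done()`, and every setup failure degrades to a logged no-op instead of aborting the run. A condensed sketch of the CPU variant:

```go
package main

import (
	"fmt"
	"os"
	"runtime/pprof"
	"time"
)

// profileCPU mirrors collectCPUProfile: start profiling and hand back
// a closure that stops it and closes the file. On any setup error it
// logs and returns a harmless no-op.
func profileCPU(filename string) (done func()) {
	f, err := os.Create(filename)
	if err != nil {
		fmt.Fprintln(os.Stderr, "cpu profile:", err)
		return func() {}
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		f.Close()
		fmt.Fprintln(os.Stderr, "cpu profile:", err)
		return func() {}
	}
	return func() {
		pprof.StopCPUProfile()
		f.Close()
	}
}

func main() {
	done := profileCPU("/tmp/cpu.prof")
	defer done()
	time.Sleep(50 * time.Millisecond) // stand-in for real work
}
```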
diff --git a/vendor/github.com/kardianos/govendor/run/shell.go b/vendor/github.com/kardianos/govendor/run/shell.go
new file mode 100644
index 000000000..5016c7583
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/shell.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "net"
+ "os"
+
+ "net/http"
+ _ "net/http/pprof" // imported for side effect of registering handler
+
+ "github.com/kardianos/govendor/help"
+
+ "github.com/Bowery/prompt"
+ "github.com/google/shlex"
+)
+
+func (r *runner) Shell(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("shell", flag.ContinueOnError)
+
+ pprofHandlerAddr := flags.String("pprof-handler", "", "if set, turns on an HTTP server that offers pprof handlers")
+
+ flags.SetOutput(nullWriter{})
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgShell, err
+ }
+
+ if *pprofHandlerAddr != "" {
+ tryEnableHTTPPprofHandler(*pprofHandlerAddr)
+ }
+
+ out := os.Stdout
+
+ for {
+ line, err := prompt.Basic("> ", false)
+ if err != nil {
+ break
+ }
+ args, err := shlex.Split(line)
+ if err != nil {
+ fmt.Fprintf(out, "%v", err.Error())
+ }
+ if len(args) == 0 {
+ continue
+ }
+ cmd := args[0]
+ next := make([]string, 0, len(args)+1)
+ next = append(next, "govendor")
+ args = append(next, args...)
+ switch cmd {
+ case "exit", "q", "quit", "/q":
+ return help.MsgNone, nil
+ case "shell":
+ continue
+ }
+ msg, err := r.run(out, args, nil)
+ if err != nil {
+ fmt.Fprintf(out, "%v", err.Error())
+ }
+ msgText := msg.String()
+ if len(msgText) > 0 {
+ fmt.Fprintf(out, "%s\tType \"exit\" to exit.\n", msgText)
+ }
+ }
+
+ return help.MsgNone, nil
+}
+
+// tryEnableHTTPPprofHandler tries to provide an http/pprof handler on `addr`.
+// If it fails, it logs an error but does not otherwise do anything.
+func tryEnableHTTPPprofHandler(addr string) {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "http/pprof handlers failed to create a listener: %v\n", err)
+ return
+ }
+ // port 0 means a randomly allocated one, so we
+ // need to figure out where our listener ended up
+ realAddr := l.Addr()
+
+ fmt.Fprintf(os.Stderr, "http/pprof handlers are available on %v\n", realAddr)
+ go func() {
+ defer l.Close()
+ if err := http.Serve(l, nil); err != nil {
+ fmt.Fprintf(os.Stderr, "http/pprof handlers failed to start: %v\n", err)
+ }
+ }()
+}
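
`tryEnableHTTPPprofHandler` accepts an address like `:0`, letting the OS pick a free port, then reports the real address from `l.Addr()` before serving. The same pattern in isolation:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on DefaultServeMux
	"os"
	"time"
)

func main() {
	// Port 0 asks the kernel for any free port; l.Addr() reveals it.
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("pprof on", l.Addr())
	go func() {
		defer l.Close()
		// A nil handler means DefaultServeMux, where net/http/pprof
		// registered its endpoints as an import side effect.
		_ = http.Serve(l, nil)
	}()
	time.Sleep(100 * time.Millisecond) // stand-in for the shell loop
}
```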
diff --git a/vendor/github.com/kardianos/govendor/run/sync.go b/vendor/github.com/kardianos/govendor/run/sync.go
new file mode 100644
index 000000000..96deb67e5
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/run/sync.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package run
+
+import (
+ "flag"
+ "io"
+
+ "github.com/kardianos/govendor/context"
+ "github.com/kardianos/govendor/help"
+)
+
+func (r *runner) Sync(w io.Writer, subCmdArgs []string) (help.HelpMessage, error) {
+ flags := flag.NewFlagSet("sync", flag.ContinueOnError)
+ insecure := flags.Bool("insecure", false, "allow insecure network updates")
+ dryrun := flags.Bool("n", false, "dry run, print what would be done")
+ verbose := flags.Bool("v", false, "verbose output")
+ flags.SetOutput(nullWriter{})
+ err := flags.Parse(subCmdArgs)
+ if err != nil {
+ return help.MsgSync, err
+ }
+ ctx, err := r.NewContextWD(context.RootVendor)
+ if err != nil {
+ return help.MsgSync, err
+ }
+ ctx.Insecure = *insecure
+ if *dryrun || *verbose {
+ ctx.Logger = w
+ }
+ return help.MsgNone, ctx.Sync(*dryrun)
+}
diff --git a/vendor/github.com/kardianos/govendor/vcs/bzr.go b/vendor/github.com/kardianos/govendor/vcs/bzr.go
new file mode 100644
index 000000000..c911f50d9
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/vcs/bzr.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+type VcsBzr struct{}
+
+func (VcsBzr) Find(dir string) (*VcsInfo, error) {
+ fi, err := os.Stat(filepath.Join(dir, ".bzr"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if !fi.IsDir() {
+ return nil, nil
+ }
+
+ // Get info.
+ info := &VcsInfo{}
+
+ cmd := exec.Command("bzr", "status")
+ cmd.Dir = dir
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ if string(output) != "" {
+ info.Dirty = true
+ }
+
+ cmd = exec.Command("bzr", "log", "-r-1")
+ cmd.Dir = dir
+ output, err = cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(string(output), "\n") {
+ if strings.HasPrefix(line, "revno:") {
+ info.Revision = strings.Split(strings.TrimSpace(strings.TrimPrefix(line, "revno:")), " ")[0]
+ } else if strings.HasPrefix(line, "timestamp:") {
+ tm, err := time.Parse("Mon 2006-01-02 15:04:05 -0700", strings.TrimSpace(strings.TrimPrefix(line, "timestamp:")))
+ if err != nil {
+ return nil, err
+ }
+ info.RevisionTime = &tm
+ }
+ }
+ return info, nil
+}
diff --git a/vendor/github.com/kardianos/govendor/vcs/git.go b/vendor/github.com/kardianos/govendor/vcs/git.go
new file mode 100644
index 000000000..d9b157afb
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/vcs/git.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+type VcsGit struct{}
+
+func (VcsGit) Find(dir string) (*VcsInfo, error) {
+ fi, err := os.Stat(filepath.Join(dir, ".git"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if !fi.IsDir() {
+ return nil, nil
+ }
+
+ // Get info.
+ info := &VcsInfo{}
+
+ cmd := exec.Command("git", "status", "--short")
+ cmd.Dir = dir
+ err = cmd.Run()
+ if err != nil {
+ info.Dirty = true
+ }
+
+ cmd = exec.Command("git", "show", "--pretty=format:%H@%ai", "-s")
+
+ cmd.Dir = dir
+ cmd.Stderr = nil
+ output, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ line := strings.TrimSpace(string(output))
+
+ // remove gpg parts from git show
+ gpgLine := strings.Split(line, "\n")
+ if len(gpgLine) > 1 {
+ line = gpgLine[len(gpgLine)-1]
+ }
+
+ ss := strings.Split(line, "@")
+ info.Revision = ss[0]
+ tm, err := time.Parse("2006-01-02 15:04:05 -0700", ss[1])
+ if err != nil {
+ return nil, err
+ }
+ info.RevisionTime = &tm
+ return info, nil
+}
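
`git show --pretty=format:%H@%ai -s` packs the commit hash and author date into one `@`-separated line, and the gpg workaround keeps only the last line when a signature block precedes it. The parse on its own, against a fabricated sample:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// What `git show --pretty=format:%H@%ai -s` might print when a
	// gpg signature block precedes it (fabricated sample data).
	output := "gpg: Signature made ...\nabc123def@2019-04-12 09:03:10 +0300"

	line := strings.TrimSpace(output)
	if parts := strings.Split(line, "\n"); len(parts) > 1 {
		line = parts[len(parts)-1] // drop gpg noise, keep last line
	}
	ss := strings.Split(line, "@")
	rev := ss[0]
	tm, err := time.Parse("2006-01-02 15:04:05 -0700", ss[1])
	if err != nil {
		panic(err)
	}
	fmt.Println(rev, tm.UTC())
}
```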
diff --git a/vendor/github.com/kardianos/govendor/vcs/hg.go b/vendor/github.com/kardianos/govendor/vcs/hg.go
new file mode 100644
index 000000000..a4ac7b09b
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/vcs/hg.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+type VcsHg struct{}
+
+func (VcsHg) Find(dir string) (*VcsInfo, error) {
+ fi, err := os.Stat(filepath.Join(dir, ".hg"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if !fi.IsDir() {
+ return nil, nil
+ }
+
+ // Get info.
+ info := &VcsInfo{}
+
+ cmd := exec.Command("hg", "identify", "-i")
+ cmd.Dir = dir
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ rev := strings.TrimSpace(string(output))
+ if strings.HasSuffix(rev, "+") {
+ info.Dirty = true
+ rev = strings.TrimSuffix(rev, "+")
+ }
+
+ cmd = exec.Command("hg", "log", "-r", rev)
+ cmd.Dir = dir
+ output, err = cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(string(output), "\n") {
+ if strings.HasPrefix(line, "changeset:") {
+ ss := strings.Split(line, ":")
+ info.Revision = strings.TrimSpace(ss[len(ss)-1])
+ }
+ if strings.HasPrefix(line, "date:") {
+ line = strings.TrimPrefix(line, "date:")
+ tm, err := time.Parse("Mon Jan 02 15:04:05 2006 -0700", strings.TrimSpace(line))
+ if err == nil {
+ info.RevisionTime = &tm
+ }
+ }
+ }
+ return info, nil
+}
diff --git a/vendor/github.com/kardianos/govendor/vcs/svn.go b/vendor/github.com/kardianos/govendor/vcs/svn.go
new file mode 100644
index 000000000..4d758c06d
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/vcs/svn.go
@@ -0,0 +1,60 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "encoding/xml"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ os "github.com/kardianos/govendor/internal/vos"
+)
+
+type VcsSvn struct{}
+
+func (svn VcsSvn) Find(dir string) (*VcsInfo, error) {
+ fi, err := os.Stat(filepath.Join(dir, ".svn"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if !fi.IsDir() {
+ return nil, nil
+ }
+
+ // Get info.
+ info := &VcsInfo{}
+
+ cmd := exec.Command("svn", "info", "--xml")
+ cmd.Dir = dir
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+
+ return info, svn.parseInfo(output, info)
+}
+func (svn VcsSvn) parseInfo(output []byte, info *VcsInfo) error {
+ var err error
+ XX := struct {
+ Commit struct {
+ Revision string `xml:"revision,attr"`
+ RevisionTime string `xml:"date"`
+ } `xml:"entry>commit"`
+ }{}
+ err = xml.Unmarshal(output, &XX)
+ if err != nil {
+ return err
+ }
+ info.Revision = XX.Commit.Revision
+ tm, err := time.Parse(time.RFC3339, XX.Commit.RevisionTime)
+ if err == nil {
+ info.RevisionTime = &tm
+ }
+ return nil
+}
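
The `xml:"entry>commit"` tag descends two levels of the `svn info --xml` document without declaring intermediate structs. The same decode against an abbreviated, fabricated document:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

func main() {
	// Abbreviated `svn info --xml` output (sample data).
	data := []byte(`<info>
  <entry>
    <commit revision="42">
      <date>2019-04-12T06:03:10.000000Z</date>
    </commit>
  </entry>
</info>`)

	var out struct {
		Commit struct {
			Revision string `xml:"revision,attr"`
			Date     string `xml:"date"`
		} `xml:"entry>commit"` // '>' descends through child elements
	}
	if err := xml.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Commit.Revision, out.Commit.Date)
}
```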
diff --git a/vendor/github.com/kardianos/govendor/vcs/vcs.go b/vendor/github.com/kardianos/govendor/vcs/vcs.go
new file mode 100644
index 000000000..d79405947
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/vcs/vcs.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vcs gets version control information from the file system.
+package vcs
+
+import (
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/kardianos/govendor/internal/pathos"
+)
+
+// VcsInfo describes the version control state of a repository.
+type VcsInfo struct {
+ Dirty bool
+ Revision string
+ RevisionTime *time.Time
+}
+
+// Vcs represents a version control system.
+type Vcs interface {
+ // Return nil VcsInfo if unable to determine VCS from directory.
+ Find(dir string) (*VcsInfo, error)
+}
+
+var vcsRegistry = []Vcs{
+ VcsGit{},
+ VcsHg{},
+ VcsSvn{},
+ VcsBzr{},
+}
+var registerSync = sync.Mutex{}
+
+// RegisterVCS adds a new VCS to use.
+func RegisterVCS(vcs Vcs) {
+ registerSync.Lock()
+ defer registerSync.Unlock()
+
+ vcsRegistry = append(vcsRegistry, vcs)
+}
+
+const looplimit = 10000
+
+// FindVcs determines the version control information given a package dir and
+// lowest root dir.
+func FindVcs(root, packageDir string) (info *VcsInfo, err error) {
+ if !filepath.IsAbs(root) {
+ return nil, nil
+ }
+ if !filepath.IsAbs(packageDir) {
+ return nil, nil
+ }
+ path := packageDir
+ for i := 0; i <= looplimit; i++ {
+ for _, vcs := range vcsRegistry {
+ info, err = vcs.Find(path)
+ if err != nil {
+ return nil, err
+ }
+ if info != nil {
+ return info, nil
+ }
+ }
+
+ nextPath := filepath.Clean(filepath.Join(path, ".."))
+ // Check for root.
+ if nextPath == path {
+ return nil, nil
+ }
+ if !pathos.FileHasPrefix(nextPath, root) {
+ return nil, nil
+ }
+ path = nextPath
+ }
+ panic("loop limit")
+}
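
`FindVcs` walks upward from the package directory, probing each parent with every registered VCS, and stops when `filepath.Join(path, "..")` no longer changes the path (the filesystem root) or the walk escapes `root`. A trimmed sketch of the upward walk, assuming a single `.git` probe; `strings.HasPrefix` is a rough stand-in for `pathos.FileHasPrefix`:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// findUp walks from dir toward the filesystem root and returns the
// first directory containing marker (e.g. ".git"), or "" if the walk
// reaches the root or leaves the tree rooted at root.
func findUp(root, dir, marker string) string {
	path := dir
	for {
		if fi, err := os.Stat(filepath.Join(path, marker)); err == nil && fi.IsDir() {
			return path
		}
		next := filepath.Clean(filepath.Join(path, ".."))
		if next == path || !strings.HasPrefix(next, root) {
			return ""
		}
		path = next
	}
}

func main() {
	wd, _ := os.Getwd()
	fmt.Println(findUp(string(filepath.Separator), wd, ".git"))
}
```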
diff --git a/vendor/github.com/kardianos/govendor/vendorfile/file.go b/vendor/github.com/kardianos/govendor/vendorfile/file.go
new file mode 100644
index 000000000..e500447eb
--- /dev/null
+++ b/vendor/github.com/kardianos/govendor/vendorfile/file.go
@@ -0,0 +1,335 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vendorfile manages the meta-data file for vendoring.
+// It round-trips unknown fields and also allows moving the vendor
+// file to new locations.
+package vendorfile
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "sort"
+)
+
+// Name of the vendor file.
+const Name = "vendor.json"
+
+// File is the structure of the vendor file.
+type File struct {
+ RootPath string // Import path of vendor folder
+
+ Comment string
+
+ Ignore string
+
+ Package []*Package
+
+ // all preserves unknown values.
+ all map[string]interface{}
+}
+
+// Package represents each package.
+type Package struct {
+ field map[string]interface{}
+
+	// If Remove is set to true the package will not be written to the vendor file.
+ Remove bool
+
+	// If Add is set to true the package will be treated as a package newly added to the file.
+ Add bool
+
+ // See the vendor spec for definitions.
+ Origin string
+ Path string
+ Tree bool
+ Revision string
+ RevisionTime string
+ Version string
+ VersionExact string
+ ChecksumSHA1 string
+ Comment string
+}
+
+func (pkg *Package) PathOrigin() string {
+ if len(pkg.Origin) > 0 {
+ return pkg.Origin
+ }
+ return pkg.Path
+}
+
+// The following stringer functions are useful for debugging.
+
+type packageList []*Package
+
+func (list packageList) String() string {
+ buf := &bytes.Buffer{}
+ for _, item := range list {
+ buf.WriteString("\t")
+ buf.WriteString(fmt.Sprintf("(%v) ", item.field))
+ if item.Remove {
+ buf.WriteString(" X ")
+ }
+ buf.WriteString(item.Path)
+ buf.WriteRune('\n')
+ }
+ buf.WriteRune('\n')
+ return buf.String()
+}
+
+var (
+ rootPathNames = []string{"rootPath"}
+ packageNames = []string{"package", "Package"}
+ ignoreNames = []string{"ignore"}
+ originNames = []string{"origin"}
+ pathNames = []string{"path", "canonical", "Canonical", "vendor", "Vendor"}
+ treeNames = []string{"tree"}
+ revisionNames = []string{"revision", "Revision", "version", "Version"}
+ revisionTimeNames = []string{"revisionTime", "RevisionTime", "versionTime", "VersionTime"}
+ versionNames = []string{"version"}
+ versionExactNames = []string{"versionExact"}
+ checksumSHA1Names = []string{"checksumSHA1"}
+ commentNames = []string{"comment", "Comment"}
+)
+
+type vendorPackageSort []interface{}
+
+func (vp vendorPackageSort) Len() int { return len(vp) }
+func (vp vendorPackageSort) Swap(i, j int) { vp[i], vp[j] = vp[j], vp[i] }
+func (vp vendorPackageSort) Less(i, j int) bool {
+ a := vp[i].(map[string]interface{})
+ b := vp[j].(map[string]interface{})
+ aPath, _ := a[pathNames[0]].(string)
+ bPath, _ := b[pathNames[0]].(string)
+
+ if aPath == bPath {
+ aOrigin, _ := a[originNames[0]].(string)
+ bOrigin, _ := b[originNames[0]].(string)
+ return len(aOrigin) > len(bOrigin)
+ }
+ return aPath < bPath
+}
+
+func setField(fieldObj interface{}, object map[string]interface{}, names []string) {
+loop:
+ for _, name := range names {
+ raw, found := object[name]
+ if !found {
+ continue
+ }
+ switch field := fieldObj.(type) {
+ default:
+ panic("unknown type")
+ case *string:
+ value, is := raw.(string)
+ if !is {
+ continue loop
+ }
+ *field = value
+ if len(value) != 0 {
+ break loop
+ }
+ case *bool:
+ value, is := raw.(bool)
+ if !is {
+ continue loop
+ }
+ *field = value
+ if value {
+ break loop
+ }
+ }
+ }
+}
+
+func setObject(fieldObj interface{}, object map[string]interface{}, names []string, hideEmpty bool) {
+ switch field := fieldObj.(type) {
+ default:
+ panic("unknown type")
+ case string:
+ for i, name := range names {
+ if i != 0 || (hideEmpty && len(field) == 0) {
+ delete(object, name)
+ continue
+ }
+ object[name] = field
+ }
+ case bool:
+ for i, name := range names {
+ if i != 0 || (hideEmpty && !field) {
+ delete(object, name)
+ continue
+ }
+ object[name] = field
+ }
+ }
+}
+
+// getRawPackageList gets the array of package items from the "all" map.
+func (vf *File) getRawPackageList() []interface{} {
+ var rawPackageList []interface{}
+ for index, name := range packageNames {
+ rawPackageListObject, found := vf.all[name]
+ if !found {
+ continue
+ }
+ if index != 0 {
+ vf.all[packageNames[0]] = rawPackageListObject
+ delete(vf.all, name)
+ }
+ var is bool
+ rawPackageList, is = rawPackageListObject.([]interface{})
+ if is {
+ break
+ }
+ }
+ return rawPackageList
+}
+
+// toFields moves values from "all" to the field values.
+func (vf *File) toFields() {
+ setField(&vf.RootPath, vf.all, rootPathNames)
+ setField(&vf.Comment, vf.all, commentNames)
+ setField(&vf.Ignore, vf.all, ignoreNames)
+
+ rawPackageList := vf.getRawPackageList()
+
+ vf.Package = make([]*Package, len(rawPackageList))
+
+ for index, rawPackage := range rawPackageList {
+ object, is := rawPackage.(map[string]interface{})
+ if !is {
+ continue
+ }
+ pkg := &Package{}
+ vf.Package[index] = pkg
+ pkg.field = object
+ setField(&pkg.Origin, object, originNames)
+ setField(&pkg.Path, object, pathNames)
+ setField(&pkg.Tree, object, treeNames)
+ setField(&pkg.Revision, object, revisionNames)
+ setField(&pkg.RevisionTime, object, revisionTimeNames)
+ setField(&pkg.Version, object, versionNames)
+ setField(&pkg.VersionExact, object, versionExactNames)
+ setField(&pkg.ChecksumSHA1, object, checksumSHA1Names)
+ setField(&pkg.Comment, object, commentNames)
+ }
+}
+
+// toAll moves values from field values to "all".
+func (vf *File) toAll() {
+ delete(vf.all, "Tool")
+
+ setObject(vf.RootPath, vf.all, rootPathNames, true)
+ setObject(vf.Comment, vf.all, commentNames, false)
+ setObject(vf.Ignore, vf.all, ignoreNames, false)
+
+ rawPackageList := vf.getRawPackageList()
+
+ setPkgFields := func(pkg *Package) {
+ if pkg.Origin == pkg.Path {
+ pkg.Origin = ""
+ }
+ if pkg.field == nil {
+ pkg.field = make(map[string]interface{}, 10)
+ }
+ setObject(pkg.Origin, pkg.field, originNames, true)
+ setObject(pkg.Path, pkg.field, pathNames, false)
+ setObject(pkg.Tree, pkg.field, treeNames, true)
+ setObject(pkg.Revision, pkg.field, revisionNames, false)
+ setObject(pkg.RevisionTime, pkg.field, revisionTimeNames, true)
+ setObject(pkg.Version, pkg.field, versionNames, true)
+ setObject(pkg.VersionExact, pkg.field, versionExactNames, true)
+ setObject(pkg.ChecksumSHA1, pkg.field, checksumSHA1Names, true)
+ setObject(pkg.Comment, pkg.field, commentNames, true)
+ }
+
+ for i := len(vf.Package) - 1; i >= 0; i-- {
+ pkg := vf.Package[i]
+ switch {
+ case pkg.Remove:
+ for index, rawObj := range rawPackageList {
+ raw, is := rawObj.(map[string]interface{})
+ if !is {
+ continue
+ }
+ same := true
+ for key, value := range pkg.field {
+ if raw[key] != value {
+ same = false
+ break
+ }
+ }
+ if same {
+ rawPackageList[index] = nil
+ }
+ }
+ case pkg.Add:
+ setPkgFields(pkg)
+ rawPackageList = append(rawPackageList, pkg.field)
+ default:
+ if pkg.field == nil {
+ pkg.field = make(map[string]interface{}, 10)
+ }
+
+ delete(pkg.field, "local")
+ delete(pkg.field, "Local")
+ setPkgFields(pkg)
+ }
+ }
+ nextRawPackageList := make([]interface{}, 0, len(rawPackageList))
+ for _, raw := range rawPackageList {
+ if raw == nil {
+ continue
+ }
+ nextRawPackageList = append(nextRawPackageList, raw)
+ }
+ vf.all[packageNames[0]] = nextRawPackageList
+}
+
+// Marshal the vendor file to the specified writer.
+// Retains read fields.
+func (vf *File) Marshal(w io.Writer) error {
+ if vf.all == nil {
+ vf.all = map[string]interface{}{}
+ }
+ vf.toAll()
+
+ rawList := vf.getRawPackageList()
+ sort.Sort(vendorPackageSort(rawList))
+
+ jb, err := json.Marshal(vf.all)
+ if err != nil {
+ return err
+ }
+ buf := &bytes.Buffer{}
+ err = json.Indent(buf, jb, "", "\t")
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(w, buf)
+ return err
+}
+
+// Unmarshal the vendor file from the specified reader.
+// Stores internally all fields.
+func (vf *File) Unmarshal(r io.Reader) error {
+ bb, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ if vf.all == nil {
+ vf.all = make(map[string]interface{}, 3)
+ }
+ err = json.Unmarshal(bb, &vf.all)
+ if err != nil {
+ return err
+ }
+ vf.toFields()
+ return nil
+}
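
The design pattern carrying this whole file: unmarshal into a generic `map[string]interface{}` (`all`), lift the keys you know into typed fields, and marshal the map back out, so fields this version has never heard of survive a round trip. A compact sketch, with a made-up `futureField` key:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	in := []byte(`{"rootPath":"example.com/repo","futureField":123}`)

	// Keep everything, known or not, in a generic map.
	all := map[string]interface{}{}
	if err := json.Unmarshal(in, &all); err != nil {
		panic(err)
	}

	// Lift the known key into a typed value...
	rootPath, _ := all["rootPath"].(string)
	fmt.Println("rootPath:", rootPath)

	// ...mutate it, push it back, and re-marshal: "futureField"
	// survives untouched even though this program never knew it.
	all["rootPath"] = rootPath + "/v2"
	out, _ := json.MarshalIndent(all, "", "\t")
	fmt.Println(string(out))
}
```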
diff --git a/vendor/github.com/wadey/gocovmerge/LICENSE b/vendor/github.com/wadey/gocovmerge/LICENSE
new file mode 100644
index 000000000..455fb1087
--- /dev/null
+++ b/vendor/github.com/wadey/gocovmerge/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015, Wade Simmons
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/wadey/gocovmerge/README.md b/vendor/github.com/wadey/gocovmerge/README.md
new file mode 100644
index 000000000..982d34913
--- /dev/null
+++ b/vendor/github.com/wadey/gocovmerge/README.md
@@ -0,0 +1,16 @@
+gocovmerge
+==========
+
+gocovmerge takes the results from multiple `go test -coverprofile` runs and
+merges them into one profile.
+
+usage
+-----
+
+ gocovmerge [coverprofiles...]
+
+gocovmerge takes the source coverprofiles as the arguments (output from
+`go test -coverprofile coverage.out`) and outputs a merged version of the
+files to standard out. You can only merge profiles that were generated from the
+same source code. If there are source lines that overlap or do not merge, the
+process will exit with an error code.
diff --git a/vendor/github.com/wadey/gocovmerge/gocovmerge.go b/vendor/github.com/wadey/gocovmerge/gocovmerge.go
new file mode 100644
index 000000000..e8099839e
--- /dev/null
+++ b/vendor/github.com/wadey/gocovmerge/gocovmerge.go
@@ -0,0 +1,111 @@
+// gocovmerge takes the results from multiple `go test -coverprofile` runs and
+// merges them into one profile
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
+func mergeProfiles(p *cover.Profile, merge *cover.Profile) {
+ if p.Mode != merge.Mode {
+ log.Fatalf("cannot merge profiles with different modes")
+ }
+	// Since the blocks are sorted, we can keep track of where the last block
+	// was inserted and only look at the blocks after that as targets for the merge.
+ startIndex := 0
+ for _, b := range merge.Blocks {
+ startIndex = mergeProfileBlock(p, b, startIndex)
+ }
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) int {
+ sortFunc := func(i int) bool {
+ pi := p.Blocks[i+startIndex]
+ return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+ }
+
+ i := 0
+	if !sortFunc(i) {
+ i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+ }
+ i += startIndex
+ if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+ if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+ log.Fatalf("OVERLAP MERGE: %v %v %v", p.FileName, p.Blocks[i], pb)
+ }
+ switch p.Mode {
+ case "set":
+ p.Blocks[i].Count |= pb.Count
+ case "count", "atomic":
+ p.Blocks[i].Count += pb.Count
+ default:
+ log.Fatalf("unsupported covermode: '%s'", p.Mode)
+ }
+ } else {
+ if i > 0 {
+ pa := p.Blocks[i-1]
+ if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+ log.Fatalf("OVERLAP BEFORE: %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ if i < len(p.Blocks)-1 {
+ pa := p.Blocks[i+1]
+ if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+ log.Fatalf("OVERLAP AFTER: %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+ copy(p.Blocks[i+1:], p.Blocks[i:])
+ p.Blocks[i] = pb
+ }
+ return i + 1
+}
+
+func addProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+ i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+ if i < len(profiles) && profiles[i].FileName == p.FileName {
+ mergeProfiles(profiles[i], p)
+ } else {
+ profiles = append(profiles, nil)
+ copy(profiles[i+1:], profiles[i:])
+ profiles[i] = p
+ }
+ return profiles
+}
+
+func dumpProfiles(profiles []*cover.Profile, out io.Writer) {
+ if len(profiles) == 0 {
+ return
+ }
+ fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode)
+ for _, p := range profiles {
+ for _, b := range p.Blocks {
+ fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count)
+ }
+ }
+}
+
+func main() {
+ flag.Parse()
+
+ var merged []*cover.Profile
+
+ for _, file := range flag.Args() {
+ profiles, err := cover.ParseProfiles(file)
+ if err != nil {
+ log.Fatalf("failed to parse profiles: %v", err)
+ }
+ for _, p := range profiles {
+ merged = addProfile(merged, p)
+ }
+ }
+
+ dumpProfiles(merged, os.Stdout)
+}
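
Both `addProfile` and `mergeProfileBlock` keep their slices sorted, using `sort.Search` to find the insertion index and the append-and-copy idiom to shift the tail right by one slot. The idiom in isolation, with ints standing in for profiles:

```go
package main

import (
	"fmt"
	"sort"
)

// insertSorted places v into an already-sorted slice, the same way
// addProfile slots a new profile in by FileName.
func insertSorted(s []int, v int) []int {
	i := sort.Search(len(s), func(i int) bool { return s[i] >= v })
	s = append(s, 0)     // grow by one element
	copy(s[i+1:], s[i:]) // shift the tail right
	s[i] = v
	return s
}

func main() {
	s := []int{1, 3, 5, 7}
	fmt.Println(insertSorted(s, 4)) // [1 3 4 5 7]
}
```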
diff --git a/vendor/golang.org/x/lint/CONTRIBUTING.md b/vendor/golang.org/x/lint/CONTRIBUTING.md
new file mode 100644
index 000000000..1fadda62d
--- /dev/null
+++ b/vendor/golang.org/x/lint/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to Golint
+
+## Before filing an issue:
+
+### Are you having trouble building golint?
+
+Check you have the latest version of its dependencies. Run
+```
+go get -u golang.org/x/lint/golint
+```
+If you still have problems, consider searching for existing issues before filing a new issue.
+
+## Before sending a pull request:
+
+Have you understood the purpose of golint? Make sure to carefully read `README`.
diff --git a/vendor/golang.org/x/lint/LICENSE b/vendor/golang.org/x/lint/LICENSE
new file mode 100644
index 000000000..65d761bc9
--- /dev/null
+++ b/vendor/golang.org/x/lint/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/lint/README.md b/vendor/golang.org/x/lint/README.md
new file mode 100644
index 000000000..487eba710
--- /dev/null
+++ b/vendor/golang.org/x/lint/README.md
@@ -0,0 +1,88 @@
+Golint is a linter for Go source code.
+
+[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)
+
+## Installation
+
+Golint requires a
+[supported release of Go](https://golang.org/doc/devel/release.html#policy).
+
+ go get -u golang.org/x/lint/golint
+
+To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be usable globally, add that directory to your `$PATH` environment variable.
+
+## Usage
+
+Invoke `golint` with one or more filenames, directories, or packages named
+by its import path. Golint uses the same
+[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
+the `go` command and therefore
+also supports relative import paths like `./...`. Additionally, the `...`
+wildcard can be used as a suffix on relative and absolute file paths to recurse
+into them.
+
+The output of this tool is a list of suggestions in Vim quickfix format,
+which is accepted by lots of different editors.
+
+## Purpose
+
+Golint differs from gofmt. Gofmt reformats Go source code, whereas
+golint prints out style mistakes.
+
+Golint differs from govet. Govet is concerned with correctness, whereas
+golint is concerned with coding style. Golint is in use at Google, and it
+seeks to match the accepted style of the open source Go project.
+
+The suggestions made by golint are exactly that: suggestions.
+Golint is not perfect, and has both false positives and false negatives.
+Do not treat its output as a gold standard. We will not be adding pragmas
+or other knobs to suppress specific warnings, so do not expect or require
+code to be completely "lint-free".
+In short, this tool is not, and will never be, trustworthy enough for its
+suggestions to be enforced automatically, for example as part of a build process.
+Golint makes suggestions for many of the mechanically checkable items listed in
+[Effective Go](https://golang.org/doc/effective_go.html) and the
+[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
+
+## Scope
+
+Golint is meant to carry out the stylistic conventions put forth in
+[Effective Go](https://golang.org/doc/effective_go.html) and
+[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
+Changes that are not aligned with those documents will not be considered.
+
+## Contributions
+
+Contributions to this project are welcome provided they are [in scope](#scope),
+though please send mail before starting work on anything major.
+Contributors retain their copyright, so we need you to fill out
+[a short form](https://developers.google.com/open-source/cla/individual)
+before we can accept your contribution.
+
+## Vim
+
+Add this to your ~/.vimrc:
+
+ set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim
+
+If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
+
+Running `:Lint` will run golint on the current file and populate the quickfix list.
+
+Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`
+
+ autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
+
+
+## Emacs
+
+Add this to your `.emacs` file:
+
+ (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs"))
+ (require 'golint)
+
+If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
+
+Running M-x golint will run golint on the current file.
+
+For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).
diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod
new file mode 100644
index 000000000..d5ba4dbfd
--- /dev/null
+++ b/vendor/golang.org/x/lint/go.mod
@@ -0,0 +1,3 @@
+module golang.org/x/lint
+
+require golang.org/x/tools v0.0.0-20190311212946-11955173bddd
diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum
new file mode 100644
index 000000000..7d0e2e618
--- /dev/null
+++ b/vendor/golang.org/x/lint/go.sum
@@ -0,0 +1,6 @@
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
diff --git a/vendor/golang.org/x/lint/golint/bin/golint b/vendor/golang.org/x/lint/golint/bin/golint
new file mode 100755
index 000000000..1b914a1c0
--- /dev/null
+++ b/vendor/golang.org/x/lint/golint/bin/golint
Binary files differ
diff --git a/vendor/golang.org/x/lint/golint/golint.go b/vendor/golang.org/x/lint/golint/golint.go
new file mode 100644
index 000000000..ac024b6d2
--- /dev/null
+++ b/vendor/golang.org/x/lint/golint/golint.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// golint lints the Go source files named on its command line.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/lint"
+)
+
+var (
+ minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
+ setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found")
+ suggestions int
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n")
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n")
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n")
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n")
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ lintDir(".")
+ } else {
+ // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to
+ // directory, file or package targets. The distinction affects which
+		// checks are run. It is not valid to mix target types.
+ var dirsRun, filesRun, pkgsRun int
+ var args []string
+ for _, arg := range flag.Args() {
+ if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) {
+ dirsRun = 1
+ for _, dirname := range allPackagesInFS(arg) {
+ args = append(args, dirname)
+ }
+ } else if isDir(arg) {
+ dirsRun = 1
+ args = append(args, arg)
+ } else if exists(arg) {
+ filesRun = 1
+ args = append(args, arg)
+ } else {
+ pkgsRun = 1
+ args = append(args, arg)
+ }
+ }
+
+ if dirsRun+filesRun+pkgsRun != 1 {
+ usage()
+ os.Exit(2)
+ }
+ switch {
+ case dirsRun == 1:
+ for _, dir := range args {
+ lintDir(dir)
+ }
+ case filesRun == 1:
+ lintFiles(args...)
+ case pkgsRun == 1:
+ for _, pkg := range importPaths(args) {
+ lintPackage(pkg)
+ }
+ }
+ }
+
+ if *setExitStatus && suggestions > 0 {
+ fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions)
+ os.Exit(1)
+ }
+}
+
+func isDir(filename string) bool {
+ fi, err := os.Stat(filename)
+ return err == nil && fi.IsDir()
+}
+
+func exists(filename string) bool {
+ _, err := os.Stat(filename)
+ return err == nil
+}
+
+func lintFiles(filenames ...string) {
+ files := make(map[string][]byte)
+ for _, filename := range filenames {
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ files[filename] = src
+ }
+
+ l := new(lint.Linter)
+ ps, err := l.LintFiles(files)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return
+ }
+ for _, p := range ps {
+ if p.Confidence >= *minConfidence {
+ fmt.Printf("%v: %s\n", p.Position, p.Text)
+ suggestions++
+ }
+ }
+}
+
+func lintDir(dirname string) {
+ pkg, err := build.ImportDir(dirname, 0)
+ lintImportedPackage(pkg, err)
+}
+
+func lintPackage(pkgname string) {
+ pkg, err := build.Import(pkgname, ".", 0)
+ lintImportedPackage(pkg, err)
+}
+
+func lintImportedPackage(pkg *build.Package, err error) {
+ if err != nil {
+ if _, nogo := err.(*build.NoGoError); nogo {
+ // Don't complain if the failure is due to no Go source files.
+ return
+ }
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+
+ var files []string
+ files = append(files, pkg.GoFiles...)
+ files = append(files, pkg.CgoFiles...)
+ files = append(files, pkg.TestGoFiles...)
+ if pkg.Dir != "." {
+ for i, f := range files {
+ files[i] = filepath.Join(pkg.Dir, f)
+ }
+ }
+ // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles)
+
+ lintFiles(files...)
+}
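
golint insists that all command-line targets are the same kind — directories, files, or import paths — counting a flag per kind and bailing out on a mix. A reduced sketch of that classification; the `classify` helper is illustrative, not golint's API:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// classify reports whether args are all dirs, all files, or all
// import paths, mirroring golint's dirsRun/filesRun/pkgsRun check.
func classify(args []string) (string, error) {
	var dirs, files, pkgs int
	for _, a := range args {
		fi, err := os.Stat(a)
		switch {
		case strings.HasSuffix(a, "/..."), err == nil && fi.IsDir():
			dirs = 1
		case err == nil:
			files = 1
		default:
			pkgs = 1
		}
	}
	if dirs+files+pkgs != 1 {
		return "", fmt.Errorf("cannot mix directories, files, and packages")
	}
	switch {
	case dirs == 1:
		return "dirs", nil
	case files == 1:
		return "files", nil
	}
	return "pkgs", nil
}

func main() {
	kind, err := classify([]string{"./...", "."})
	fmt.Println(kind, err)
}
```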
diff --git a/vendor/golang.org/x/lint/golint/import.go b/vendor/golang.org/x/lint/golint/import.go
new file mode 100644
index 000000000..2ba9dea77
--- /dev/null
+++ b/vendor/golang.org/x/lint/golint/import.go
@@ -0,0 +1,309 @@
+package main
+
+/*
+
+This file holds a direct copy of the import path matching code of
+https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be
+replaced when https://golang.org/issue/8768 is resolved.
+
+It has been updated to follow upstream changes in a few ways.
+
+*/
+
+import (
+ "fmt"
+ "go/build"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+var (
+ buildContext = build.Default
+ goroot = filepath.Clean(runtime.GOROOT())
+ gorootSrc = filepath.Join(goroot, "src")
+)
+
+// importPathsNoDotExpansion returns the import paths to use for the given
+// command line, but it does no ... expansion.
+func importPathsNoDotExpansion(args []string) []string {
+ if len(args) == 0 {
+ return []string{"."}
+ }
+ var out []string
+ for _, a := range args {
+ // Arguments are supposed to be import paths, but
+ // as a courtesy to Windows developers, rewrite \ to /
+ // in command-line arguments. Handles .\... and so on.
+ if filepath.Separator == '\\' {
+ a = strings.Replace(a, `\`, `/`, -1)
+ }
+
+ // Put argument in canonical form, but preserve leading ./.
+ if strings.HasPrefix(a, "./") {
+ a = "./" + path.Clean(a)
+ if a == "./." {
+ a = "."
+ }
+ } else {
+ a = path.Clean(a)
+ }
+ if a == "all" || a == "std" {
+ out = append(out, allPackages(a)...)
+ continue
+ }
+ out = append(out, a)
+ }
+ return out
+}
+
+// importPaths returns the import paths to use for the given command line.
+func importPaths(args []string) []string {
+ args = importPathsNoDotExpansion(args)
+ var out []string
+ for _, a := range args {
+ if strings.Contains(a, "...") {
+ if build.IsLocalImport(a) {
+ out = append(out, allPackagesInFS(a)...)
+ } else {
+ out = append(out, allPackages(a)...)
+ }
+ continue
+ }
+ out = append(out, a)
+ }
+ return out
+}
+
+// matchPattern(pattern)(name) reports whether
+// name matches pattern. Pattern is a limited glob
+// pattern in which '...' means 'any string' and there
+// is no other special syntax.
+func matchPattern(pattern string) func(name string) bool {
+ re := regexp.QuoteMeta(pattern)
+ re = strings.Replace(re, `\.\.\.`, `.*`, -1)
+ // Special case: foo/... matches foo too.
+ if strings.HasSuffix(re, `/.*`) {
+ re = re[:len(re)-len(`/.*`)] + `(/.*)?`
+ }
+ reg := regexp.MustCompile(`^` + re + `$`)
+ return func(name string) bool {
+ return reg.MatchString(name)
+ }
+}
+
+// hasPathPrefix reports whether the path s begins with the
+// elements in prefix.
+func hasPathPrefix(s, prefix string) bool {
+ switch {
+ default:
+ return false
+ case len(s) == len(prefix):
+ return s == prefix
+ case len(s) > len(prefix):
+ if prefix != "" && prefix[len(prefix)-1] == '/' {
+ return strings.HasPrefix(s, prefix)
+ }
+ return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
+ }
+}
+
+// treeCanMatchPattern(pattern)(name) reports whether
+// name or children of name can possibly match pattern.
+// Pattern is the same limited glob accepted by matchPattern.
+func treeCanMatchPattern(pattern string) func(name string) bool {
+ wildCard := false
+ if i := strings.Index(pattern, "..."); i >= 0 {
+ wildCard = true
+ pattern = pattern[:i]
+ }
+ return func(name string) bool {
+ return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
+ wildCard && strings.HasPrefix(name, pattern)
+ }
+}
+
+// allPackages returns all the packages that can be found
+// under the $GOPATH directories and $GOROOT matching pattern.
+// The pattern is either "all" (all packages), "std" (standard packages)
+// or a path including "...".
+func allPackages(pattern string) []string {
+ pkgs := matchPackages(pattern)
+ if len(pkgs) == 0 {
+ fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
+ }
+ return pkgs
+}
+
+func matchPackages(pattern string) []string {
+ match := func(string) bool { return true }
+ treeCanMatch := func(string) bool { return true }
+ if pattern != "all" && pattern != "std" {
+ match = matchPattern(pattern)
+ treeCanMatch = treeCanMatchPattern(pattern)
+ }
+
+ have := map[string]bool{
+ "builtin": true, // ignore pseudo-package that exists only for documentation
+ }
+ if !buildContext.CgoEnabled {
+ have["runtime/cgo"] = true // ignore during walk
+ }
+ var pkgs []string
+
+ // Commands
+ cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator)
+ filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error {
+ if err != nil || !fi.IsDir() || path == cmd {
+ return nil
+ }
+ name := path[len(cmd):]
+ if !treeCanMatch(name) {
+ return filepath.SkipDir
+ }
+ // Commands are all in cmd/, not in subdirectories.
+ if strings.Contains(name, string(filepath.Separator)) {
+ return filepath.SkipDir
+ }
+
+ // We use, e.g., cmd/gofmt as the pseudo import path for gofmt.
+ name = "cmd/" + name
+ if have[name] {
+ return nil
+ }
+ have[name] = true
+ if !match(name) {
+ return nil
+ }
+ _, err = buildContext.ImportDir(path, 0)
+ if err != nil {
+ if _, noGo := err.(*build.NoGoError); !noGo {
+ log.Print(err)
+ }
+ return nil
+ }
+ pkgs = append(pkgs, name)
+ return nil
+ })
+
+ for _, src := range buildContext.SrcDirs() {
+ if (pattern == "std" || pattern == "cmd") && src != gorootSrc {
+ continue
+ }
+ src = filepath.Clean(src) + string(filepath.Separator)
+ root := src
+ if pattern == "cmd" {
+ root += "cmd" + string(filepath.Separator)
+ }
+ filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil || !fi.IsDir() || path == src {
+ return nil
+ }
+
+ // Avoid .foo, _foo, and testdata directory trees.
+ _, elem := filepath.Split(path)
+ if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ return filepath.SkipDir
+ }
+
+ name := filepath.ToSlash(path[len(src):])
+ if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") {
+ // The name "std" is only the standard library.
+ // If the name is cmd, it's the root of the command tree.
+ return filepath.SkipDir
+ }
+ if !treeCanMatch(name) {
+ return filepath.SkipDir
+ }
+ if have[name] {
+ return nil
+ }
+ have[name] = true
+ if !match(name) {
+ return nil
+ }
+ _, err = buildContext.ImportDir(path, 0)
+ if err != nil {
+ if _, noGo := err.(*build.NoGoError); noGo {
+ return nil
+ }
+ }
+ pkgs = append(pkgs, name)
+ return nil
+ })
+ }
+ return pkgs
+}
+
+// allPackagesInFS is like allPackages but is passed a pattern
+// beginning ./ or ../, meaning it should scan the tree rooted
+// at the given directory. The pattern also contains one or more "...".
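+// For example, allPackagesInFS("./internal/...") returns "./internal"
+// and every package below it.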
+func allPackagesInFS(pattern string) []string {
+ pkgs := matchPackagesInFS(pattern)
+ if len(pkgs) == 0 {
+ fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
+ }
+ return pkgs
+}
+
+func matchPackagesInFS(pattern string) []string {
+ // Find directory to begin the scan.
+ // Could be smarter but this one optimization
+ // is enough for now, since ... is usually at the
+ // end of a path.
+ i := strings.Index(pattern, "...")
+ dir, _ := path.Split(pattern[:i])
+
+ // pattern begins with ./ or ../.
+ // path.Clean will discard the ./ but not the ../.
+ // We need to preserve the ./ for pattern matching
+ // and in the returned import paths.
+ prefix := ""
+ if strings.HasPrefix(pattern, "./") {
+ prefix = "./"
+ }
+ match := matchPattern(pattern)
+
+ var pkgs []string
+ filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
+ if err != nil || !fi.IsDir() {
+ return nil
+ }
+ if path == dir {
+ // filepath.Walk starts at dir and recurses. For the recursive case,
+ // the path is the result of filepath.Join, which calls filepath.Clean.
+ // The initial case is not Cleaned, though, so we do this explicitly.
+ //
+ // This converts a path like "./io/" to "io". Without this step, running
+ // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io
+ // package, because prepending the prefix "./" to the unclean path would
+ // result in "././io", and match("././io") returns false.
+ path = filepath.Clean(path)
+ }
+
+ // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
+ _, elem := filepath.Split(path)
+ dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
+ if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ return filepath.SkipDir
+ }
+
+ name := prefix + filepath.ToSlash(path)
+ if !match(name) {
+ return nil
+ }
+ if _, err = build.ImportDir(path, 0); err != nil {
+ if _, noGo := err.(*build.NoGoError); !noGo {
+ log.Print(err)
+ }
+ return nil
+ }
+ pkgs = append(pkgs, name)
+ return nil
+ })
+ return pkgs
+}
diff --git a/vendor/golang.org/x/lint/golint/importcomment.go b/vendor/golang.org/x/lint/golint/importcomment.go
new file mode 100644
index 000000000..d5b32f734
--- /dev/null
+++ b/vendor/golang.org/x/lint/golint/importcomment.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2018 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// +build go1.12
+
+// Require use of the correct import path only for Go 1.12+ users, so
+// any breakages coincide with people updating their CI configs or
+// whatnot.
+
+package main // import "golang.org/x/lint/golint"
diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go
new file mode 100644
index 000000000..6b9fd6e2b
--- /dev/null
+++ b/vendor/golang.org/x/lint/lint.go
@@ -0,0 +1,1693 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package lint contains a linter for Go source code.
+package lint // import "golang.org/x/lint"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/gcexportdata"
+)
+
+const styleGuideBase = "https://golang.org/wiki/CodeReviewComments"
+
+// A Linter lints Go source code.
+type Linter struct {
+}
+
+// Problem represents a problem in some source code.
+type Problem struct {
+ Position token.Position // position in source file
+ Text string // the prose that describes the problem
+ Link string // (optional) the link to the style guide for the problem
+ Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness
+ LineText string // the source line
+ Category string // a short name for the general category of the problem
+
+ // If the problem has a suggested fix (the minority case),
+ // ReplacementLine is a full replacement for the relevant line of the source file.
+ ReplacementLine string
+}
+
+func (p *Problem) String() string {
+ if p.Link != "" {
+ return p.Text + "\n\n" + p.Link
+ }
+ return p.Text
+}
+
+type byPosition []Problem
+
+func (p byPosition) Len() int { return len(p) }
+func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p byPosition) Less(i, j int) bool {
+ pi, pj := p[i].Position, p[j].Position
+
+ if pi.Filename != pj.Filename {
+ return pi.Filename < pj.Filename
+ }
+ if pi.Line != pj.Line {
+ return pi.Line < pj.Line
+ }
+ if pi.Column != pj.Column {
+ return pi.Column < pj.Column
+ }
+
+ return p[i].Text < p[j].Text
+}
+
+// Lint lints src.
+func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
+ return l.LintFiles(map[string][]byte{filename: src})
+}
+
+// LintFiles lints a set of files of a single package.
+// The argument is a map of filename to source.
+func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
+ pkg := &pkg{
+ fset: token.NewFileSet(),
+ files: make(map[string]*file),
+ }
+ var pkgName string
+ for filename, src := range files {
+ if isGenerated(src) {
+ continue // See issue #239
+ }
+ f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ if pkgName == "" {
+ pkgName = f.Name.Name
+ } else if f.Name.Name != pkgName {
+ return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName)
+ }
+ pkg.files[filename] = &file{
+ pkg: pkg,
+ f: f,
+ fset: pkg.fset,
+ src: src,
+ filename: filename,
+ }
+ }
+ if len(pkg.files) == 0 {
+ return nil, nil
+ }
+ return pkg.lint(), nil
+}
+
+var (
+ genHdr = []byte("// Code generated ")
+ genFtr = []byte(" DO NOT EDIT.")
+)
+
+// isGenerated reports whether the source file is generated code
+// according to the rules at https://golang.org/s/generatedcode.
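+// For example, a file containing the line
+// "// Code generated by protoc-gen-go. DO NOT EDIT." is treated as generated.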
+func isGenerated(src []byte) bool {
+ sc := bufio.NewScanner(bytes.NewReader(src))
+ for sc.Scan() {
+ b := sc.Bytes()
+ if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
+ return true
+ }
+ }
+ return false
+}
+
+// pkg represents a package being linted.
+type pkg struct {
+ fset *token.FileSet
+ files map[string]*file
+
+ typesPkg *types.Package
+ typesInfo *types.Info
+
+ // sortable is the set of types in the package that implement sort.Interface.
+ sortable map[string]bool
+ // main is whether this is a "main" package.
+ main bool
+
+ problems []Problem
+}
+
+func (p *pkg) lint() []Problem {
+ if err := p.typeCheck(); err != nil {
+ /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages.
+ if e, ok := err.(types.Error); ok {
+ pos := p.fset.Position(e.Pos)
+ conf := 1.0
+ if strings.Contains(e.Msg, "can't find import: ") {
+ // Golint is probably being run in a context that doesn't support
+ // typechecking (e.g. package files aren't found), so don't warn about it.
+ conf = 0
+ }
+ if conf > 0 {
+ p.errorfAt(pos, conf, category("typechecking"), e.Msg)
+ }
+
+ // TODO(dsymonds): Abort if !e.Soft?
+ }
+ */
+ }
+
+ p.scanSortable()
+ p.main = p.isMain()
+
+ for _, f := range p.files {
+ f.lint()
+ }
+
+ sort.Sort(byPosition(p.problems))
+
+ return p.problems
+}
+
+// file represents a file being linted.
+type file struct {
+ pkg *pkg
+ f *ast.File
+ fset *token.FileSet
+ src []byte
+ filename string
+}
+
+func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") }
+
+func (f *file) lint() {
+ f.lintPackageComment()
+ f.lintImports()
+ f.lintBlankImports()
+ f.lintExported()
+ f.lintNames()
+ f.lintVarDecls()
+ f.lintElses()
+ f.lintRanges()
+ f.lintErrorf()
+ f.lintErrors()
+ f.lintErrorStrings()
+ f.lintReceiverNames()
+ f.lintIncDec()
+ f.lintErrorReturn()
+ f.lintUnexportedReturn()
+ f.lintTimeNames()
+ f.lintContextKeyTypes()
+ f.lintContextArgs()
+}
+
+type link string
+type category string
+
+// The variadic arguments may start with link and category types,
+// and must end with a format string and any arguments.
+// It returns the new Problem.
+func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem {
+ pos := f.fset.Position(n.Pos())
+ if pos.Filename == "" {
+ pos.Filename = f.filename
+ }
+ return f.pkg.errorfAt(pos, confidence, args...)
+}
+
+func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem {
+ problem := Problem{
+ Position: pos,
+ Confidence: confidence,
+ }
+ if pos.Filename != "" {
+ // The file might not exist in our mapping if a //line directive was encountered.
+ if f, ok := p.files[pos.Filename]; ok {
+ problem.LineText = srcLine(f.src, pos)
+ }
+ }
+
+argLoop:
+ for len(args) > 1 { // always leave at least the format string in args
+ switch v := args[0].(type) {
+ case link:
+ problem.Link = string(v)
+ case category:
+ problem.Category = string(v)
+ default:
+ break argLoop
+ }
+ args = args[1:]
+ }
+
+ problem.Text = fmt.Sprintf(args[0].(string), args[1:]...)
+
+ p.problems = append(p.problems, problem)
+ return &p.problems[len(p.problems)-1]
+}
+
+var newImporter = func(fset *token.FileSet) types.ImporterFrom {
+ return gcexportdata.NewImporter(fset, make(map[string]*types.Package))
+}
+
+func (p *pkg) typeCheck() error {
+ config := &types.Config{
+ // By setting a no-op error reporter, the type checker does as much work as possible.
+ Error: func(error) {},
+ Importer: newImporter(p.fset),
+ }
+ info := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ }
+ var anyFile *file
+ var astFiles []*ast.File
+ for _, f := range p.files {
+ anyFile = f
+ astFiles = append(astFiles, f.f)
+ }
+ pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info)
+ // Remember the typechecking info, even if config.Check failed,
+ // since we will get partial information.
+ p.typesPkg = pkg
+ p.typesInfo = info
+ return err
+}
+
+func (p *pkg) typeOf(expr ast.Expr) types.Type {
+ if p.typesInfo == nil {
+ return nil
+ }
+ return p.typesInfo.TypeOf(expr)
+}
+
+func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool {
+ n, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ tn := n.Obj()
+ return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name
+}
+
+// scopeOf returns the tightest scope encompassing id.
+func (p *pkg) scopeOf(id *ast.Ident) *types.Scope {
+ var scope *types.Scope
+ if obj := p.typesInfo.ObjectOf(id); obj != nil {
+ scope = obj.Parent()
+ }
+ if scope == p.typesPkg.Scope() {
+ // We were given a top-level identifier.
+ // Use the file-level scope instead of the package-level scope.
+ pos := id.Pos()
+ for _, f := range p.files {
+ if f.f.Pos() <= pos && pos < f.f.End() {
+ scope = p.typesInfo.Scopes[f.f]
+ break
+ }
+ }
+ }
+ return scope
+}
+
+func (p *pkg) scanSortable() {
+ p.sortable = make(map[string]bool)
+
+ // bitfield for which methods exist on each type.
+ const (
+ Len = 1 << iota
+ Less
+ Swap
+ )
+ nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
+ has := make(map[string]int)
+ for _, f := range p.files {
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
+ return true
+ }
+ // TODO(dsymonds): We could check the signature to be more precise.
+ recv := receiverType(fn)
+ if i, ok := nmap[fn.Name.Name]; ok {
+ has[recv] |= i
+ }
+ return false
+ })
+ }
+ for typ, ms := range has {
+ if ms == Len|Less|Swap {
+ p.sortable[typ] = true
+ }
+ }
+}
+
+func (p *pkg) isMain() bool {
+ for _, f := range p.files {
+ if f.isMain() {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *file) isMain() bool {
+ if f.f.Name.Name == "main" {
+ return true
+ }
+ return false
+}
+
+// lintPackageComment checks package comments. It complains if
+// there is no package comment, or if it is not of the right form.
+// This has a notable false positive in that a package comment
+// could rightfully appear in a different file of the same package,
+// but that's not easy to fix since this linter is file-oriented.
+func (f *file) lintPackageComment() {
+ if f.isTest() {
+ return
+ }
+
+ const ref = styleGuideBase + "#package-comments"
+ prefix := "Package " + f.f.Name.Name + " "
+
+ // Look for a detached package comment.
+ // First, scan for the last comment that occurs before the "package" keyword.
+ var lastCG *ast.CommentGroup
+ for _, cg := range f.f.Comments {
+ if cg.Pos() > f.f.Package {
+ // Gone past "package" keyword.
+ break
+ }
+ lastCG = cg
+ }
+ if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) {
+ endPos := f.fset.Position(lastCG.End())
+ pkgPos := f.fset.Position(f.f.Package)
+ if endPos.Line+1 < pkgPos.Line {
+ // There isn't a great place to anchor this error;
+ // the start of the blank lines between the doc and the package statement
+ // is at least pointing at the location of the problem.
+ pos := token.Position{
+ Filename: endPos.Filename,
+ // Offset not set; it is non-trivial, and doesn't appear to be needed.
+ Line: endPos.Line + 1,
+ Column: 1,
+ }
+ f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement")
+ return
+ }
+ }
+
+ if f.f.Doc == nil {
+ f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package")
+ return
+ }
+ s := f.f.Doc.Text()
+ if ts := strings.TrimLeft(s, " \t"); ts != s {
+ f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space")
+ s = ts
+ }
+ // Only non-main packages need to keep to this form.
+ if !f.pkg.main && !strings.HasPrefix(s, prefix) {
+ f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix)
+ }
+}
+
+// lintBlankImports complains if a non-main package has blank imports that are
+// not documented.
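+// For example, a blank import such as
+//	_ "net/http/pprof" // registers /debug/pprof handlers
+// passes because of the trailing comment; without it, the import is flagged.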
+func (f *file) lintBlankImports() {
+ // In package main and in tests, we don't complain about blank imports.
+ if f.pkg.main || f.isTest() {
+ return
+ }
+
+ // The first element of each contiguous group of blank imports should have
+ // an explanatory comment of some kind.
+ for i, imp := range f.f.Imports {
+ pos := f.fset.Position(imp.Pos())
+
+ if !isBlank(imp.Name) {
+ continue // Ignore non-blank imports.
+ }
+ if i > 0 {
+ prev := f.f.Imports[i-1]
+ prevPos := f.fset.Position(prev.Pos())
+ if isBlank(prev.Name) && prevPos.Line+1 == pos.Line {
+ continue // A subsequent blank in a group.
+ }
+ }
+
+ // This is the first blank import of a group.
+ if imp.Doc == nil && imp.Comment == nil {
+ ref := ""
+ f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it")
+ }
+ }
+}
+
+// lintImports examines import blocks.
+func (f *file) lintImports() {
+ for _, is := range f.f.Imports {
+ if is.Name != nil && is.Name.Name == "." && !f.isTest() {
+ f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports")
+ }
+ }
+}
+
+const docCommentsLink = styleGuideBase + "#doc-comments"
+
+// lintExported examines the exported names.
+// It complains if any required doc comments are missing,
+// or if they are not of the right form. The exact rules are in
+// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function
+// also tracks the GenDecl structure being traversed to permit
+// doc comments for constants to be on top of the const block.
+// It also complains if the names stutter when combined with
+// the package name.
+func (f *file) lintExported() {
+ if f.isTest() {
+ return
+ }
+
+ var lastGen *ast.GenDecl // last GenDecl entered.
+
+ // Set of GenDecls that have already had missing comments flagged.
+ genDeclMissingComments := make(map[*ast.GenDecl]bool)
+
+ f.walk(func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.GenDecl:
+ if v.Tok == token.IMPORT {
+ return false
+ }
+ // token.CONST, token.TYPE or token.VAR
+ lastGen = v
+ return true
+ case *ast.FuncDecl:
+ f.lintFuncDoc(v)
+ if v.Recv == nil {
+ // Only check for stutter on functions, not methods.
+ // Method names are not used package-qualified.
+ f.checkStutter(v.Name, "func")
+ }
+ // Don't proceed inside funcs.
+ return false
+ case *ast.TypeSpec:
+ // inside a GenDecl, which usually has the doc
+ doc := v.Doc
+ if doc == nil {
+ doc = lastGen.Doc
+ }
+ f.lintTypeDoc(v, doc)
+ f.checkStutter(v.Name, "type")
+ // Don't proceed inside types.
+ return false
+ case *ast.ValueSpec:
+ f.lintValueSpecDoc(v, lastGen, genDeclMissingComments)
+ return false
+ }
+ return true
+ })
+}
+
+var (
+ allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)
+ anyCapsRE = regexp.MustCompile(`[A-Z]`)
+)
+
+// knownNameExceptions is a set of names that are known to be exempt from naming checks.
+// This is usually because they are constrained by having to match names in the
+// standard library.
+var knownNameExceptions = map[string]bool{
+ "LastInsertId": true, // must match database/sql
+ "kWh": true,
+}
+
+func isInTopLevel(f *ast.File, ident *ast.Ident) bool {
+ path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End())
+ for _, f := range path {
+ switch f.(type) {
+ case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident:
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// lintNames examines all names in the file.
+// It complains if any use underscores or incorrect known initialisms.
+func (f *file) lintNames() {
+ // Package names need slightly different handling than other names.
+ if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") {
+ f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name")
+ }
+ if anyCapsRE.MatchString(f.f.Name.Name) {
+ f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name))
+ }
+
+ check := func(id *ast.Ident, thing string) {
+ if id.Name == "_" {
+ return
+ }
+ if knownNameExceptions[id.Name] {
+ return
+ }
+
+ // Handle two common styles from other languages that don't belong in Go.
+ if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") {
+ capCount := 0
+ for _, c := range id.Name {
+ if 'A' <= c && c <= 'Z' {
+ capCount++
+ }
+ }
+ if capCount >= 2 {
+ f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase")
+ return
+ }
+ }
+ if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) {
+ if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
+ should := string(id.Name[1]+'a'-'A') + id.Name[2:]
+ f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
+ }
+ }
+
+ should := lintName(id.Name)
+ if id.Name == should {
+ return
+ }
+
+ if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") {
+ f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should)
+ return
+ }
+ f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should)
+ }
+ checkList := func(fl *ast.FieldList, thing string) {
+ if fl == nil {
+ return
+ }
+ for _, f := range fl.List {
+ for _, id := range f.Names {
+ check(id, thing)
+ }
+ }
+ }
+ f.walk(func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.AssignStmt:
+ if v.Tok == token.ASSIGN {
+ return true
+ }
+ for _, exp := range v.Lhs {
+ if id, ok := exp.(*ast.Ident); ok {
+ check(id, "var")
+ }
+ }
+ case *ast.FuncDecl:
+ if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
+ return true
+ }
+
+ thing := "func"
+ if v.Recv != nil {
+ thing = "method"
+ }
+
+ // Exclude naming warnings for functions that are exported to C but
+ // not exported in the Go API.
+ // See https://github.com/golang/lint/issues/144.
+ if ast.IsExported(v.Name.Name) || !isCgoExported(v) {
+ check(v.Name, thing)
+ }
+
+ checkList(v.Type.Params, thing+" parameter")
+ checkList(v.Type.Results, thing+" result")
+ case *ast.GenDecl:
+ if v.Tok == token.IMPORT {
+ return true
+ }
+ var thing string
+ switch v.Tok {
+ case token.CONST:
+ thing = "const"
+ case token.TYPE:
+ thing = "type"
+ case token.VAR:
+ thing = "var"
+ }
+ for _, spec := range v.Specs {
+ switch s := spec.(type) {
+ case *ast.TypeSpec:
+ check(s.Name, thing)
+ case *ast.ValueSpec:
+ for _, id := range s.Names {
+ check(id, thing)
+ }
+ }
+ }
+ case *ast.InterfaceType:
+ // Do not check interface method names.
+ // They are often constrained by the method names of concrete types.
+ for _, x := range v.Methods.List {
+ ft, ok := x.Type.(*ast.FuncType)
+ if !ok { // might be an embedded interface name
+ continue
+ }
+ checkList(ft.Params, "interface method parameter")
+ checkList(ft.Results, "interface method result")
+ }
+ case *ast.RangeStmt:
+ if v.Tok == token.ASSIGN {
+ return true
+ }
+ if id, ok := v.Key.(*ast.Ident); ok {
+ check(id, "range var")
+ }
+ if id, ok := v.Value.(*ast.Ident); ok {
+ check(id, "range var")
+ }
+ case *ast.StructType:
+ for _, f := range v.Fields.List {
+ for _, id := range f.Names {
+ check(id, "struct field")
+ }
+ }
+ }
+ return true
+ })
+}
+
+// lintName returns a different name if it should be different.
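+// For example, lintName("foo_id") returns "fooID", and
+// lintName("HttpServer") returns "HTTPServer".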
+func lintName(name string) (should string) {
+ // Fast path for simple cases: "_" and all lowercase.
+ if name == "_" {
+ return name
+ }
+ allLower := true
+ for _, r := range name {
+ if !unicode.IsLower(r) {
+ allLower = false
+ break
+ }
+ }
+ if allLower {
+ return name
+ }
+
+ // Split camelCase at any lower->upper transition, and split on underscores.
+ // Check each word for common initialisms.
+ runes := []rune(name)
+ w, i := 0, 0 // index of start of word, scan
+ for i+1 <= len(runes) {
+ eow := false // whether we hit the end of a word
+ if i+1 == len(runes) {
+ eow = true
+ } else if runes[i+1] == '_' {
+ // underscore; shift the remainder forward over any run of underscores
+ eow = true
+ n := 1
+ for i+n+1 < len(runes) && runes[i+n+1] == '_' {
+ n++
+ }
+
+ // Leave at most one underscore if the underscore is between two digits
+ if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
+ n--
+ }
+
+ copy(runes[i+1:], runes[i+n+1:])
+ runes = runes[:len(runes)-n]
+ } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
+ // lower->non-lower
+ eow = true
+ }
+ i++
+ if !eow {
+ continue
+ }
+
+ // [w,i) is a word.
+ word := string(runes[w:i])
+ if u := strings.ToUpper(word); commonInitialisms[u] {
+ // Keep consistent case, which is lowercase only at the start.
+ if w == 0 && unicode.IsLower(runes[w]) {
+ u = strings.ToLower(u)
+ }
+ // All the common initialisms are ASCII,
+ // so we can replace the bytes exactly.
+ copy(runes[w:], []rune(u))
+ } else if w > 0 && strings.ToLower(word) == word {
+ // already all lowercase, and not the first word, so uppercase the first character.
+ runes[w] = unicode.ToUpper(runes[w])
+ }
+ w = i
+ }
+ return string(runes)
+}
+
+// commonInitialisms is a set of common initialisms.
+// Only add entries that are highly unlikely to be non-initialisms.
+// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
+var commonInitialisms = map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTP": true,
+ "HTTPS": true,
+ "ID": true,
+ "IP": true,
+ "JSON": true,
+ "LHS": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+}
+
+// lintTypeDoc examines the doc comment on a type.
+// It complains if they are missing from an exported type,
+// or if they are not of the standard form.
+func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) {
+ if !ast.IsExported(t.Name.Name) {
+ return
+ }
+ if doc == nil {
+ f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name)
+ return
+ }
+
+ s := doc.Text()
+ articles := [...]string{"A", "An", "The"}
+ for _, a := range articles {
+ if strings.HasPrefix(s, a+" ") {
+ s = s[len(a)+1:]
+ break
+ }
+ }
+ if !strings.HasPrefix(s, t.Name.Name+" ") {
+ f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name)
+ }
+}
+
+var commonMethods = map[string]bool{
+ "Error": true,
+ "Read": true,
+ "ServeHTTP": true,
+ "String": true,
+ "Write": true,
+}
+
+// lintFuncDoc examines doc comments on functions and methods.
+// It complains if they are missing, or not of the right form.
+// It has specific exclusions for well-known methods (see commonMethods above).
+func (f *file) lintFuncDoc(fn *ast.FuncDecl) {
+ if !ast.IsExported(fn.Name.Name) {
+ // func is unexported
+ return
+ }
+ kind := "function"
+ name := fn.Name.Name
+ if fn.Recv != nil && len(fn.Recv.List) > 0 {
+ // method
+ kind = "method"
+ recv := receiverType(fn)
+ if !ast.IsExported(recv) {
+ // receiver is unexported
+ return
+ }
+ if commonMethods[name] {
+ return
+ }
+ switch name {
+ case "Len", "Less", "Swap":
+ if f.pkg.sortable[recv] {
+ return
+ }
+ }
+ name = recv + "." + name
+ }
+ if fn.Doc == nil {
+ f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name)
+ return
+ }
+ s := fn.Doc.Text()
+ prefix := fn.Name.Name + " "
+ if !strings.HasPrefix(s, prefix) {
+ f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
+ }
+}
+
+// lintValueSpecDoc examines package-global variables and constants.
+// It complains if they are not individually declared,
+// or if they are not suitably documented in the right form (unless they are in a block that is commented).
+func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) {
+ kind := "var"
+ if gd.Tok == token.CONST {
+ kind = "const"
+ }
+
+ if len(vs.Names) > 1 {
+ // Check that none are exported except for the first.
+ for _, n := range vs.Names[1:] {
+ if ast.IsExported(n.Name) {
+ f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name)
+ return
+ }
+ }
+ }
+
+ // Only one name.
+ name := vs.Names[0].Name
+ if !ast.IsExported(name) {
+ return
+ }
+
+ if vs.Doc == nil && gd.Doc == nil {
+ if genDeclMissingComments[gd] {
+ return
+ }
+ block := ""
+ if kind == "const" && gd.Lparen.IsValid() {
+ block = " (or a comment on this block)"
+ }
+ f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block)
+ genDeclMissingComments[gd] = true
+ return
+ }
+ // If this GenDecl has parens and a comment, we don't check its comment form.
+ if gd.Lparen.IsValid() && gd.Doc != nil {
+ return
+ }
+ // The relevant text to check will be on either vs.Doc or gd.Doc.
+ // Use vs.Doc preferentially.
+ doc := vs.Doc
+ if doc == nil {
+ doc = gd.Doc
+ }
+ prefix := name + " "
+ if !strings.HasPrefix(doc.Text(), prefix) {
+ f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
+ }
+}
+
+func (f *file) checkStutter(id *ast.Ident, thing string) {
+ pkg, name := f.f.Name.Name, id.Name
+ if !ast.IsExported(name) {
+ // unexported name
+ return
+ }
+ // A name stutters if the package name is a strict prefix
+ // and the next character of the name starts a new word.
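+ // For example, in package http a type named HTTPServer stutters,
+ // since other packages would refer to it as http.HTTPServer.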
+ if len(name) <= len(pkg) {
+ // name is too short to stutter.
+ // This permits the name to be the same as the package name.
+ return
+ }
+ if !strings.EqualFold(pkg, name[:len(pkg)]) {
+ return
+ }
+ // We can assume the name is well-formed UTF-8.
+ // If the next rune after the package name is uppercase or an underscore
+ // then it's starting a new word and thus this name stutters.
+ rem := name[len(pkg):]
+ if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) {
+ f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem)
+ }
+}
+
+// zeroLiteral is a set of ast.BasicLit values that are zero values.
+// It is not exhaustive.
+var zeroLiteral = map[string]bool{
+ "false": true, // bool
+ // runes
+ `'\x00'`: true,
+ `'\000'`: true,
+ // strings
+ `""`: true,
+ "``": true,
+ // numerics
+ "0": true,
+ "0.": true,
+ "0.0": true,
+ "0i": true,
+}
+
+// lintVarDecls examines variable declarations. It complains about declarations with
+// redundant LHS types that can be inferred from the RHS.
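+// For example, `var i int = 42` is flagged because the type can be
+// inferred, and `var s string = ""` because "" is already the zero value.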
+func (f *file) lintVarDecls() {
+ var lastGen *ast.GenDecl // last GenDecl entered.
+
+ f.walk(func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.GenDecl:
+ if v.Tok != token.CONST && v.Tok != token.VAR {
+ return false
+ }
+ lastGen = v
+ return true
+ case *ast.ValueSpec:
+ if lastGen.Tok == token.CONST {
+ return false
+ }
+ if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 {
+ return false
+ }
+ rhs := v.Values[0]
+ // An underscore var appears in a common idiom for compile-time interface satisfaction,
+ // as in "var _ Interface = (*Concrete)(nil)".
+ if isIdent(v.Names[0], "_") {
+ return false
+ }
+ // If the RHS is a zero value, suggest dropping it.
+ zero := false
+ if lit, ok := rhs.(*ast.BasicLit); ok {
+ zero = zeroLiteral[lit.Value]
+ } else if isIdent(rhs, "nil") {
+ zero = true
+ }
+ if zero {
+ f.errorf(rhs, 0.9, category("zero-value"), "should drop = %s from declaration of var %s; it is the zero value", f.render(rhs), v.Names[0])
+ return false
+ }
+ lhsTyp := f.pkg.typeOf(v.Type)
+ rhsTyp := f.pkg.typeOf(rhs)
+
+ if !validType(lhsTyp) || !validType(rhsTyp) {
+ // Type checking failed (often due to missing imports).
+ return false
+ }
+
+ if !types.Identical(lhsTyp, rhsTyp) {
+ // Assignment to a different type is not redundant.
+ return false
+ }
+
+ // The next three conditions are for suppressing the warning in situations
+ // where we were unable to typecheck.
+
+ // If the LHS type is an interface, don't warn, since it is probably a
+ // concrete type on the RHS. Note that our feeble lexical check here
+ // will only pick up interface{} and other literal interface types;
+ // that covers most of the cases we care to exclude right now.
+ if _, ok := v.Type.(*ast.InterfaceType); ok {
+ return false
+ }
+ // If the RHS is an untyped const, only warn if the LHS type is its default type.
+ if defType, ok := f.isUntypedConst(rhs); ok && !isIdent(v.Type, defType) {
+ return false
+ }
+
+ f.errorf(v.Type, 0.8, category("type-inference"), "should omit type %s from declaration of var %s; it will be inferred from the right-hand side", f.render(v.Type), v.Names[0])
+ return false
+ }
+ return true
+ })
+}
+
+func validType(T types.Type) bool {
+ return T != nil &&
+ T != types.Typ[types.Invalid] &&
+ !strings.Contains(T.String(), "invalid type") // good but not foolproof
+}
+
+// lintElses examines else blocks. It complains about any else block whose if block ends in a return.
+func (f *file) lintElses() {
+ // We don't want to flag if { } else if { } else { } constructions.
+ // They will appear as an IfStmt whose Else field is also an IfStmt.
+ // Record such a node so we ignore it when we visit it.
+ ignore := make(map[*ast.IfStmt]bool)
+
+ f.walk(func(node ast.Node) bool {
+ ifStmt, ok := node.(*ast.IfStmt)
+ if !ok || ifStmt.Else == nil {
+ return true
+ }
+ if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok {
+ ignore[elseif] = true
+ return true
+ }
+ if ignore[ifStmt] {
+ return true
+ }
+ if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok {
+ // only care about elses without conditions
+ return true
+ }
+ if len(ifStmt.Body.List) == 0 {
+ return true
+ }
+ shortDecl := false // does the if statement have a ":=" initialization statement?
+ if ifStmt.Init != nil {
+ if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE {
+ shortDecl = true
+ }
+ }
+ lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1]
+ if _, ok := lastStmt.(*ast.ReturnStmt); ok {
+ extra := ""
+ if shortDecl {
+ extra = " (move short variable declaration to its own line if necessary)"
+ }
+ f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra)
+ }
+ return true
+ })
+}
+
+// lintRanges examines range clauses. It complains about redundant constructions.
+func (f *file) lintRanges() {
+ f.walk(func(node ast.Node) bool {
+ rs, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return true
+ }
+
+ if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) {
+ p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`")
+
+ newRS := *rs // shallow copy
+ newRS.Value = nil
+ newRS.Key = nil
+ p.ReplacementLine = f.firstLineOf(&newRS, rs)
+
+ return true
+ }
+
+ if isIdent(rs.Value, "_") {
+ p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)
+
+ newRS := *rs // shallow copy
+ newRS.Value = nil
+ p.ReplacementLine = f.firstLineOf(&newRS, rs)
+ }
+
+ return true
+ })
+}
+
+// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation.
+func (f *file) lintErrorf() {
+ f.walk(func(node ast.Node) bool {
+ ce, ok := node.(*ast.CallExpr)
+ if !ok || len(ce.Args) != 1 {
+ return true
+ }
+ isErrorsNew := isPkgDot(ce.Fun, "errors", "New")
+ var isTestingError bool
+ se, ok := ce.Fun.(*ast.SelectorExpr)
+ if ok && se.Sel.Name == "Error" {
+ if typ := f.pkg.typeOf(se.X); typ != nil {
+ isTestingError = typ.String() == "*testing.T"
+ }
+ }
+ if !isErrorsNew && !isTestingError {
+ return true
+ }
+ if !f.imports("errors") {
+ return true
+ }
+ arg := ce.Args[0]
+ ce, ok = arg.(*ast.CallExpr)
+ if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") {
+ return true
+ }
+ errorfPrefix := "fmt"
+ if isTestingError {
+ errorfPrefix = f.render(se.X)
+ }
+ p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix)
+
+ m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`)
+ if m != nil {
+ p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3]
+ }
+
+ return true
+ })
+}
+
+// lintErrors examines global error vars. It complains if they aren't named in the standard way.
+func (f *file) lintErrors() {
+ for _, decl := range f.f.Decls {
+ gd, ok := decl.(*ast.GenDecl)
+ if !ok || gd.Tok != token.VAR {
+ continue
+ }
+ for _, spec := range gd.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Names) != 1 || len(spec.Values) != 1 {
+ continue
+ }
+ ce, ok := spec.Values[0].(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+ continue
+ }
+
+ id := spec.Names[0]
+ prefix := "err"
+ if id.IsExported() {
+ prefix = "Err"
+ }
+ if !strings.HasPrefix(id.Name, prefix) {
+ f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix)
+ }
+ }
+ }
+}
+
+func lintErrorString(s string) (isClean bool, conf float64) {
+ const basicConfidence = 0.8
+ const capConfidence = basicConfidence - 0.2
+ first, firstN := utf8.DecodeRuneInString(s)
+ last, _ := utf8.DecodeLastRuneInString(s)
+ if last == '.' || last == ':' || last == '!' || last == '\n' {
+ return false, basicConfidence
+ }
+ if unicode.IsUpper(first) {
+ // People use proper nouns and exported Go identifiers in error strings,
+ // so decrease the confidence of warnings for capitalization.
+ if len(s) <= firstN {
+ return false, capConfidence
+ }
+ // Flag strings starting with something that doesn't look like an initialism.
+ if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) {
+ return false, capConfidence
+ }
+ }
+ return true, 0
+}
+
+// lintErrorStrings examines error strings.
+// It complains if they are capitalized or end in punctuation or a newline.
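+// For example, errors.New("Something bad.") is flagged, while
+// errors.New("EOF") is allowed because "EOF" looks like an initialism.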
+func (f *file) lintErrorStrings() {
+ f.walk(func(node ast.Node) bool {
+ ce, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+ return true
+ }
+ if len(ce.Args) < 1 {
+ return true
+ }
+ str, ok := ce.Args[0].(*ast.BasicLit)
+ if !ok || str.Kind != token.STRING {
+ return true
+ }
+ s, _ := strconv.Unquote(str.Value) // can assume well-formed Go
+ if s == "" {
+ return true
+ }
+ clean, conf := lintErrorString(s)
+ if clean {
+ return true
+ }
+
+ f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"),
+ "error strings should not be capitalized or end with punctuation or a newline")
+ return true
+ })
+}
+
+// lintReceiverNames examines receiver names. It complains about inconsistent
+// names used for the same type and names such as "this".
+func (f *file) lintReceiverNames() {
+ typeReceiver := map[string]string{}
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
+ return true
+ }
+ names := fn.Recv.List[0].Names
+ if len(names) < 1 {
+ return true
+ }
+ name := names[0].Name
+ const ref = styleGuideBase + "#receiver-names"
+ if name == "_" {
+ f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`)
+ return true
+ }
+ if name == "this" || name == "self" {
+ f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`)
+ return true
+ }
+ recv := receiverType(fn)
+ if prev, ok := typeReceiver[recv]; ok && prev != name {
+ f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv)
+ return true
+ }
+ typeReceiver[recv] = name
+ return true
+ })
+}
+
+// lintIncDec examines statements that increment or decrement a variable.
+// It complains if they don't use x++ or x--.
+func (f *file) lintIncDec() {
+ f.walk(func(n ast.Node) bool {
+ as, ok := n.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if len(as.Lhs) != 1 {
+ return true
+ }
+ if !isOne(as.Rhs[0]) {
+ return true
+ }
+ var suffix string
+ switch as.Tok {
+ case token.ADD_ASSIGN:
+ suffix = "++"
+ case token.SUB_ASSIGN:
+ suffix = "--"
+ default:
+ return true
+ }
+ f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix)
+ return true
+ })
+}
+
+// lintErrorReturn examines function declarations that return an error.
+// It complains if the error isn't the last parameter.
+func (f *file) lintErrorReturn() {
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Type.Results == nil {
+ return true
+ }
+ ret := fn.Type.Results.List
+ if len(ret) <= 1 {
+ return true
+ }
+ if isIdent(ret[len(ret)-1].Type, "error") {
+ return true
+ }
+ // An error return parameter should be the last parameter.
+ // Flag any error parameters found before the last.
+ for _, r := range ret[:len(ret)-1] {
+ if isIdent(r.Type, "error") {
+ f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items")
+ break // only flag one
+ }
+ }
+ return true
+ })
+}
+
+// lintUnexportedReturn examines exported function declarations.
+// It complains if any return an unexported type.
+func (f *file) lintUnexportedReturn() {
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok {
+ return true
+ }
+ if fn.Type.Results == nil {
+ return false
+ }
+ if !fn.Name.IsExported() {
+ return false
+ }
+ thing := "func"
+ if fn.Recv != nil && len(fn.Recv.List) > 0 {
+ thing = "method"
+ if !ast.IsExported(receiverType(fn)) {
+ // Don't report exported methods of unexported types,
+ // such as private implementations of sort.Interface.
+ return false
+ }
+ }
+ for _, ret := range fn.Type.Results.List {
+ typ := f.pkg.typeOf(ret.Type)
+ if exportedType(typ) {
+ continue
+ }
+ f.errorf(ret.Type, 0.8, category("unexported-type-in-api"),
+ "exported %s %s returns unexported type %s, which can be annoying to use",
+ thing, fn.Name.Name, typ)
+ break // only flag one
+ }
+ return false
+ })
+}
+
+// exportedType reports whether typ is an exported type.
+// It is imprecise, and will err on the side of returning true,
+// such as for composite types.
+func exportedType(typ types.Type) bool {
+ switch T := typ.(type) {
+ case *types.Named:
+ // Builtin types have no package.
+ return T.Obj().Pkg() == nil || T.Obj().Exported()
+ case *types.Map:
+ return exportedType(T.Key()) && exportedType(T.Elem())
+ case interface {
+ Elem() types.Type
+ }: // array, slice, pointer, chan
+ return exportedType(T.Elem())
+ }
+ // Be conservative about other types, such as struct, interface, etc.
+ return true
+}
+
+// timeSuffixes is a list of name suffixes that imply a time unit.
+// This is not an exhaustive list.
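+// For example, a time.Duration variable named pollIntervalSecs would be
+// flagged; name it pollInterval instead.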
+var timeSuffixes = []string{
+ "Sec", "Secs", "Seconds",
+ "Msec", "Msecs",
+ "Milli", "Millis", "Milliseconds",
+ "Usec", "Usecs", "Microseconds",
+ "MS", "Ms",
+}
+
+func (f *file) lintTimeNames() {
+ f.walk(func(node ast.Node) bool {
+ v, ok := node.(*ast.ValueSpec)
+ if !ok {
+ return true
+ }
+ for _, name := range v.Names {
+ origTyp := f.pkg.typeOf(name)
+ // Look for time.Duration or *time.Duration;
+ // the latter is common when using flag.Duration.
+ typ := origTyp
+ if pt, ok := typ.(*types.Pointer); ok {
+ typ = pt.Elem()
+ }
+ if !f.pkg.isNamedType(typ, "time", "Duration") {
+ continue
+ }
+ suffix := ""
+ for _, suf := range timeSuffixes {
+ if strings.HasSuffix(name.Name, suf) {
+ suffix = suf
+ break
+ }
+ }
+ if suffix == "" {
+ continue
+ }
+ f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix)
+ }
+ return true
+ })
+}
+
+// lintContextKeyTypes checks for call expressions to context.WithValue with
+// basic types used for the key argument.
+// See: https://golang.org/issue/17293
+func (f *file) lintContextKeyTypes() {
+ f.walk(func(node ast.Node) bool {
+ switch node := node.(type) {
+ case *ast.CallExpr:
+ f.checkContextKeyType(node)
+ }
+
+ return true
+ })
+}
+
+// checkContextKeyType reports an error if the call expression calls
+// context.WithValue with a key argument of basic type.
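+// For example, context.WithValue(ctx, "userID", id) is flagged because
+// the untyped string key can collide with keys from other packages.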
+func (f *file) checkContextKeyType(x *ast.CallExpr) {
+ sel, ok := x.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return
+ }
+ pkg, ok := sel.X.(*ast.Ident)
+ if !ok || pkg.Name != "context" {
+ return
+ }
+ if sel.Sel.Name != "WithValue" {
+ return
+ }
+
+ // key is second argument to context.WithValue
+ if len(x.Args) != 3 {
+ return
+ }
+ key := f.pkg.typesInfo.Types[x.Args[1]]
+
+ if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid {
+ f.errorf(x, 1.0, category("context"), "should not use basic type %s as key in context.WithValue", key.Type)
+ }
+}
+
+// lintContextArgs examines function declarations that contain an
+// argument with a type of context.Context
+// It complains if that argument isn't the first parameter.
+func (f *file) lintContextArgs() {
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || len(fn.Type.Params.List) <= 1 {
+ return true
+ }
+ // A context.Context should be the first parameter of a function.
+ // Flag any that show up after the first.
+ for _, arg := range fn.Type.Params.List[1:] {
+ if isPkgDot(arg.Type, "context", "Context") {
+ f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function")
+ break // only flag one
+ }
+ }
+ return true
+ })
+}
+
+// containsComments returns whether the interval [start, end) contains any
+// comments without "// MATCH " prefix.
+func (f *file) containsComments(start, end token.Pos) bool {
+ for _, cgroup := range f.f.Comments {
+ comments := cgroup.List
+ if comments[0].Slash >= end {
+ // All comments starting with this group are after end pos.
+ return false
+ }
+ if comments[len(comments)-1].Slash < start {
+ // Comments group ends before start pos.
+ continue
+ }
+ for _, c := range comments {
+ if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// receiverType returns the named type of the method receiver, sans "*",
+// or "invalid-type" if fn.Recv is ill formed.
+func receiverType(fn *ast.FuncDecl) string {
+ switch e := fn.Recv.List[0].Type.(type) {
+ case *ast.Ident:
+ return e.Name
+ case *ast.StarExpr:
+ if id, ok := e.X.(*ast.Ident); ok {
+ return id.Name
+ }
+ }
+ // The parser accepts much more than just the legal forms.
+ return "invalid-type"
+}
+
+func (f *file) walk(fn func(ast.Node) bool) {
+ ast.Walk(walker(fn), f.f)
+}
+
+func (f *file) render(x interface{}) string {
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, f.fset, x); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+func (f *file) debugRender(x interface{}) string {
+ var buf bytes.Buffer
+ if err := ast.Fprint(&buf, f.fset, x, nil); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+// walker adapts a function to satisfy the ast.Visitor interface.
+// The function returns whether the walk should proceed into the node's children.
+type walker func(ast.Node) bool
+
+func (w walker) Visit(node ast.Node) ast.Visitor {
+ if w(node) {
+ return w
+ }
+ return nil
+}
+
+func isIdent(expr ast.Expr, ident string) bool {
+ id, ok := expr.(*ast.Ident)
+ return ok && id.Name == ident
+}
+
+// isBlank returns whether id is the blank identifier "_".
+// If id == nil, the answer is false.
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" }
+
+func isPkgDot(expr ast.Expr, pkg, name string) bool {
+ sel, ok := expr.(*ast.SelectorExpr)
+ return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
+}
+
+func isOne(expr ast.Expr) bool {
+ lit, ok := expr.(*ast.BasicLit)
+ return ok && lit.Kind == token.INT && lit.Value == "1"
+}
+
+func isCgoExported(f *ast.FuncDecl) bool {
+ if f.Recv != nil || f.Doc == nil {
+ return false
+ }
+
+ cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name)))
+ for _, c := range f.Doc.List {
+ if cgoExport.MatchString(c.Text) {
+ return true
+ }
+ }
+ return false
+}
+
+var basicTypeKinds = map[types.BasicKind]string{
+ types.UntypedBool: "bool",
+ types.UntypedInt: "int",
+ types.UntypedRune: "rune",
+ types.UntypedFloat: "float64",
+ types.UntypedComplex: "complex128",
+ types.UntypedString: "string",
+}
+
+// isUntypedConst reports whether expr is an untyped constant,
+// and indicates what its default type is.
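+// For example, the expression 1 yields ("int", true), while
+// int64(1) yields ("", false).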
+func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) {
+ // Re-evaluate expr outside of its context to see if it's untyped.
+ // (An expr evaluated within, for example, an assignment context will get the type of the LHS.)
+ exprStr := f.render(expr)
+ tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr)
+ if err != nil {
+ return "", false
+ }
+ if b, ok := tv.Type.(*types.Basic); ok {
+ if dt, ok := basicTypeKinds[b.Kind()]; ok {
+ return dt, true
+ }
+ }
+
+ return "", false
+}
+
+// firstLineOf renders the given node and returns its first line.
+// It will also match the indentation of another node.
+func (f *file) firstLineOf(node, match ast.Node) string {
+ line := f.render(node)
+ if i := strings.Index(line, "\n"); i >= 0 {
+ line = line[:i]
+ }
+ return f.indentOf(match) + line
+}
+
+func (f *file) indentOf(node ast.Node) string {
+ line := srcLine(f.src, f.fset.Position(node.Pos()))
+ for i, r := range line {
+ switch r {
+ case ' ', '\t':
+ default:
+ return line[:i]
+ }
+ }
+ return line // unusual or empty line
+}
+
+func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) {
+ line := srcLine(f.src, f.fset.Position(node.Pos()))
+ line = strings.TrimSuffix(line, "\n")
+ rx := regexp.MustCompile(pattern)
+ return rx.FindStringSubmatch(line)
+}
+
+// imports returns true if the current file imports the specified package path.
+func (f *file) imports(importPath string) bool {
+ all := astutil.Imports(f.fset, f.f)
+ for _, p := range all {
+ for _, i := range p {
+ uq, err := strconv.Unquote(i.Path.Value)
+ if err == nil && importPath == uq {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// srcLine returns the complete line at p, including the terminating newline.
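+// For example, with src = "a\nbc\n" and p.Offset pointing at 'b',
+// srcLine returns "bc\n".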
+func srcLine(src []byte, p token.Position) string {
+ // Run to end of line in both directions if not at line start/end.
+ lo, hi := p.Offset, p.Offset+1
+ for lo > 0 && src[lo-1] != '\n' {
+ lo--
+ }
+ for hi < len(src) && src[hi-1] != '\n' {
+ hi++
+ }
+ return string(src[lo:hi])
+}
diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/tools/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/tools/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vendor/golang.org/x/tools/cmd/goimports/doc.go
new file mode 100644
index 000000000..7033e4d4c
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/doc.go
@@ -0,0 +1,43 @@
+/*
+
+Command goimports updates your Go import lines,
+adding missing ones and removing unreferenced ones.
+
+ $ go get golang.org/x/tools/cmd/goimports
+
+In addition to fixing imports, goimports also formats
+your code in the same style as gofmt so it can be used
+as a replacement for your editor's gofmt-on-save hook.
+
+For emacs, make sure you have the latest go-mode.el:
+ https://github.com/dominikh/go-mode.el
+Then in your .emacs file:
+ (setq gofmt-command "goimports")
+ (add-hook 'before-save-hook 'gofmt-before-save)
+
+For vim, set "gofmt_command" to "goimports":
+ https://golang.org/change/39c724dd7f252
+ https://golang.org/wiki/IDEsAndTextEditorPlugins
+ etc
+
+For GoSublime, follow the steps described here:
+ http://michaelwhatcott.com/gosublime-goimports/
+
+For other editors, you probably know what to do.
+
+To exclude directories in your $GOPATH from being scanned for Go
+files, goimports respects a configuration file at
+$GOPATH/src/.goimportsignore which may contain blank lines, comment
+lines (beginning with '#'), or lines naming a directory relative to
+the configuration file to ignore when scanning. No globbing or regex
+patterns are allowed. Use the "-v" verbose flag to verify it's
+working and see what goimports is doing.
+
+File bugs or feature requests at:
+
+ https://golang.org/issues/new?title=x/tools/cmd/goimports:+
+
+Happy hacking!
+
+*/
+package main // import "golang.org/x/tools/cmd/goimports"
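The doc comment above covers editor integration; the import-fixing step itself is exposed by the vendored golang.org/x/tools/imports package that this command wraps (see goimports.go below). A minimal sketch of calling that API directly; the demo.go filename and source string are illustrative only:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	// Source that is missing its "fmt" import.
	src := []byte("package demo\n\nfunc Hello() { fmt.Println(\"hi\") }\n")
	// Process adds the missing import and formats the result as gofmt
	// would. The filename only guides import resolution; no file is read.
	out, err := imports.Process("demo.go", src, &imports.Options{
		Comments:  true,
		TabIndent: true,
		TabWidth:  8,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}

Setting imports.LocalPrefix before the call corresponds to the -local flag that goimports registers in its init function below.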
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
new file mode 100644
index 000000000..0ce85c9c3
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
@@ -0,0 +1,369 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/scanner"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+
+ "golang.org/x/tools/imports"
+)
+
+var (
+ // main operation modes
+	list = flag.Bool("l", false, "list files whose formatting differs from goimports'")
+ write = flag.Bool("w", false, "write result to (source) file instead of stdout")
+ doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
+ srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. When operating on a single file, dir may instead be the complete file name.")
+ verbose bool // verbose logging
+
+ cpuProfile = flag.String("cpuprofile", "", "CPU profile output")
+ memProfile = flag.String("memprofile", "", "memory profile output")
+ memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate")
+
+ options = &imports.Options{
+ TabWidth: 8,
+ TabIndent: true,
+ Comments: true,
+ Fragment: true,
+ }
+ exitCode = 0
+)
+
+func init() {
+ flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
+ flag.StringVar(&imports.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list")
+}
+
+func report(err error) {
+ scanner.PrintError(os.Stderr, err)
+ exitCode = 2
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func isGoFile(f os.FileInfo) bool {
+ // ignore non-Go files
+ name := f.Name()
+ return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
+}
+
+// argumentType is which mode goimports was invoked as.
+type argumentType int
+
+const (
+ // fromStdin means the user is piping their source into goimports.
+ fromStdin argumentType = iota
+
+ // singleArg is the common case from editors, when goimports is run on
+ // a single file.
+ singleArg
+
+ // multipleArg is when the user ran "goimports file1.go file2.go"
+ // or ran goimports on a directory tree.
+ multipleArg
+)
+
+func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error {
+ opt := options
+ if argType == fromStdin {
+ nopt := *options
+ nopt.Fragment = true
+ opt = &nopt
+ }
+
+ if in == nil {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ in = f
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ target := filename
+ if *srcdir != "" {
+		// Determine whether the provided -srcdir is a directory or file
+ // and then use it to override the target.
+ //
+ // See https://github.com/dominikh/go-mode.el/issues/146
+ if isFile(*srcdir) {
+ if argType == multipleArg {
+ return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories")
+ }
+ target = *srcdir
+ } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) {
+ // For a file which doesn't exist on disk yet, but might shortly.
+ // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk.
+ // The goimports on-save hook writes the buffer to a temp file
+ // first and runs goimports before the actual save to newfile.go.
+ // The editor's buffer is named "newfile.go" so that is passed to goimports as:
+ // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go
+ // and then the editor reloads the result from the tmp file and writes
+ // it to newfile.go.
+ target = *srcdir
+ } else {
+ // Pretend that file is from *srcdir in order to decide
+ // visible imports correctly.
+ target = filepath.Join(*srcdir, filepath.Base(filename))
+ }
+ }
+
+ res, err := imports.Process(target, src, opt)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(src, res) {
+ // formatting has changed
+ if *list {
+ fmt.Fprintln(out, filename)
+ }
+ if *write {
+ if argType == fromStdin {
+ // filename is "<standard input>"
+ return errors.New("can't use -w on stdin")
+ }
+ err = ioutil.WriteFile(filename, res, 0)
+ if err != nil {
+ return err
+ }
+ }
+ if *doDiff {
+ if argType == fromStdin {
+ filename = "stdin.go" // because <standard input>.orig looks silly
+ }
+ data, err := diff(src, res, filename)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename))
+ out.Write(data)
+ }
+ }
+
+ if !*list && !*write && !*doDiff {
+ _, err = out.Write(res)
+ }
+
+ return err
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && isGoFile(f) {
+ err = processFile(path, nil, os.Stdout, multipleArg)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func main() {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ // call gofmtMain in a separate function
+ // so that it can use defer and have them
+ // run before the exit.
+ gofmtMain()
+ os.Exit(exitCode)
+}
+
+// parseFlags parses command line flags and returns the paths to process.
+// It's a var so that custom implementations can replace it in other files.
+var parseFlags = func() []string {
+ flag.BoolVar(&verbose, "v", false, "verbose logging")
+
+ flag.Parse()
+ return flag.Args()
+}
+
+func bufferedFileWriter(dest string) (w io.Writer, close func()) {
+ f, err := os.Create(dest)
+ if err != nil {
+ log.Fatal(err)
+ }
+ bw := bufio.NewWriter(f)
+ return bw, func() {
+ if err := bw.Flush(); err != nil {
+ log.Fatalf("error flushing %v: %v", dest, err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func gofmtMain() {
+ flag.Usage = usage
+ paths := parseFlags()
+
+ if *cpuProfile != "" {
+ bw, flush := bufferedFileWriter(*cpuProfile)
+ pprof.StartCPUProfile(bw)
+ defer flush()
+ defer pprof.StopCPUProfile()
+ }
+ // doTrace is a conditionally compiled wrapper around runtime/trace. It is
+ // used to allow goimports to compile under gccgo, which does not support
+ // runtime/trace. See https://golang.org/issue/15544.
+ defer doTrace()()
+ if *memProfileRate > 0 {
+ runtime.MemProfileRate = *memProfileRate
+ bw, flush := bufferedFileWriter(*memProfile)
+ defer func() {
+ runtime.GC() // materialize all statistics
+ if err := pprof.WriteHeapProfile(bw); err != nil {
+ log.Fatal(err)
+ }
+ flush()
+ }()
+ }
+
+ if verbose {
+ log.SetFlags(log.LstdFlags | log.Lmicroseconds)
+ imports.Debug = true
+ }
+ if options.TabWidth < 0 {
+ fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
+ exitCode = 2
+ return
+ }
+
+ if len(paths) == 0 {
+ if err := processFile("<standard input>", os.Stdin, os.Stdout, fromStdin); err != nil {
+ report(err)
+ }
+ return
+ }
+
+ argType := singleArg
+ if len(paths) > 1 {
+ argType = multipleArg
+ }
+
+ for _, path := range paths {
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, nil, os.Stdout, argType); err != nil {
+ report(err)
+ }
+ }
+ }
+}
+
+func writeTempFile(dir, prefix string, data []byte) (string, error) {
+ file, err := ioutil.TempFile(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ _, err = file.Write(data)
+ if err1 := file.Close(); err == nil {
+ err = err1
+ }
+ if err != nil {
+ os.Remove(file.Name())
+ return "", err
+ }
+ return file.Name(), nil
+}
+
+func diff(b1, b2 []byte, filename string) (data []byte, err error) {
+ f1, err := writeTempFile("", "gofmt", b1)
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1)
+
+ f2, err := writeTempFile("", "gofmt", b2)
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2)
+
+ cmd := "diff"
+ if runtime.GOOS == "plan9" {
+ cmd = "/bin/ape/diff"
+ }
+
+ data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ return replaceTempFilename(data, filename)
+ }
+ return
+}
+
+// replaceTempFilename replaces temporary filenames in diff with actual one.
+//
+// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500
+// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500
+// ...
+// ->
+// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500
+// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500
+// ...
+func replaceTempFilename(diff []byte, filename string) ([]byte, error) {
+ bs := bytes.SplitN(diff, []byte{'\n'}, 3)
+ if len(bs) < 3 {
+ return nil, fmt.Errorf("got unexpected diff for %s", filename)
+ }
+ // Preserve timestamps.
+ var t0, t1 []byte
+ if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 {
+ t0 = bs[0][i:]
+ }
+ if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 {
+ t1 = bs[1][i:]
+ }
+ // Always print filepath with slash separator.
+ f := filepath.ToSlash(filename)
+ bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0))
+ bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1))
+ return bytes.Join(bs, []byte{'\n'}), nil
+}
+
+// isFile reports whether name is a file.
+func isFile(name string) bool {
+ fi, err := os.Stat(name)
+ return err == nil && fi.Mode().IsRegular()
+}
+
+// isDir reports whether name is a directory.
+func isDir(name string) bool {
+ fi, err := os.Stat(name)
+ return err == nil && fi.IsDir()
+}
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go
new file mode 100644
index 000000000..21d867eaa
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gc
+
+package main
+
+import (
+ "flag"
+ "runtime/trace"
+)
+
+var traceProfile = flag.String("trace", "", "trace profile output")
+
+func doTrace() func() {
+ if *traceProfile != "" {
+ bw, flush := bufferedFileWriter(*traceProfile)
+ trace.Start(bw)
+ return func() {
+ flush()
+ trace.Stop()
+ }
+ }
+ return func() {}
+}
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go
new file mode 100644
index 000000000..f5531ceb3
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gc
+
+package main
+
+func doTrace() func() {
+ return func() {}
+}
diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go
new file mode 100644
index 000000000..b6c8120a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/cover/profile.go
@@ -0,0 +1,213 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cover provides support for parsing coverage profiles
+// generated by "go test -coverprofile=cover.out".
+package cover // import "golang.org/x/tools/cover"
+
+import (
+ "bufio"
+ "fmt"
+ "math"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Profile represents the profiling data for a specific file.
+type Profile struct {
+ FileName string
+ Mode string
+ Blocks []ProfileBlock
+}
+
+// ProfileBlock represents a single block of profiling data.
+type ProfileBlock struct {
+ StartLine, StartCol int
+ EndLine, EndCol int
+ NumStmt, Count int
+}
+
+type byFileName []*Profile
+
+func (p byFileName) Len() int { return len(p) }
+func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
+func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ParseProfiles parses profile data in the specified file and returns a
+// Profile for each source file described therein.
+func ParseProfiles(fileName string) ([]*Profile, error) {
+ pf, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer pf.Close()
+
+ files := make(map[string]*Profile)
+ buf := bufio.NewReader(pf)
+ // First line is "mode: foo", where foo is "set", "count", or "atomic".
+ // Rest of file is in the format
+ // encoding/base64/base64.go:34.44,37.40 3 1
+ // where the fields are: name.go:line.column,line.column numberOfStatements count
+ s := bufio.NewScanner(buf)
+ mode := ""
+ for s.Scan() {
+ line := s.Text()
+ if mode == "" {
+ const p = "mode: "
+ if !strings.HasPrefix(line, p) || line == p {
+ return nil, fmt.Errorf("bad mode line: %v", line)
+ }
+ mode = line[len(p):]
+ continue
+ }
+ m := lineRe.FindStringSubmatch(line)
+ if m == nil {
+ return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, lineRe)
+ }
+ fn := m[1]
+ p := files[fn]
+ if p == nil {
+ p = &Profile{
+ FileName: fn,
+ Mode: mode,
+ }
+ files[fn] = p
+ }
+ p.Blocks = append(p.Blocks, ProfileBlock{
+ StartLine: toInt(m[2]),
+ StartCol: toInt(m[3]),
+ EndLine: toInt(m[4]),
+ EndCol: toInt(m[5]),
+ NumStmt: toInt(m[6]),
+ Count: toInt(m[7]),
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ for _, p := range files {
+ sort.Sort(blocksByStart(p.Blocks))
+ // Merge samples from the same location.
+ j := 1
+ for i := 1; i < len(p.Blocks); i++ {
+ b := p.Blocks[i]
+ last := p.Blocks[j-1]
+ if b.StartLine == last.StartLine &&
+ b.StartCol == last.StartCol &&
+ b.EndLine == last.EndLine &&
+ b.EndCol == last.EndCol {
+ if b.NumStmt != last.NumStmt {
+ return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
+ }
+ if mode == "set" {
+ p.Blocks[j-1].Count |= b.Count
+ } else {
+ p.Blocks[j-1].Count += b.Count
+ }
+ continue
+ }
+ p.Blocks[j] = b
+ j++
+ }
+ p.Blocks = p.Blocks[:j]
+ }
+ // Generate a sorted slice.
+ profiles := make([]*Profile, 0, len(files))
+ for _, profile := range files {
+ profiles = append(profiles, profile)
+ }
+ sort.Sort(byFileName(profiles))
+ return profiles, nil
+}
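ParseProfiles is the package's main entry point; here is a short sketch of summarizing a profile with it, where cover.out is a hypothetical file produced by `go test -coverprofile=cover.out`:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/cover"
)

func main() {
	profiles, err := cover.ParseProfiles("cover.out")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range profiles {
		// A statement counts as covered when its block executed at least once.
		var covered, total int
		for _, b := range p.Blocks {
			total += b.NumStmt
			if b.Count > 0 {
				covered += b.NumStmt
			}
		}
		if total == 0 {
			continue
		}
		fmt.Printf("%s: %.1f%% of %d statements covered\n",
			p.FileName, 100*float64(covered)/float64(total), total)
	}
}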
+
+type blocksByStart []ProfileBlock
+
+func (b blocksByStart) Len() int { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+ bi, bj := b[i], b[j]
+ return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+var lineRe = regexp.MustCompile(`^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$`)
+
+func toInt(s string) int {
+ i, err := strconv.Atoi(s)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+// Boundary represents the position in a source file of the beginning or end of a
+// block as reported by the coverage profile. In HTML mode, it will correspond to
+// the opening or closing of a <span> tag and will be used to colorize the source.
+type Boundary struct {
+ Offset int // Location as a byte offset in the source file.
+ Start bool // Is this the start of a block?
+ Count int // Event count from the cover profile.
+ Norm float64 // Count normalized to [0..1].
+}
+
+// Boundaries returns a Profile as a set of Boundary objects within the provided src.
+func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
+ // Find maximum count.
+ max := 0
+ for _, b := range p.Blocks {
+ if b.Count > max {
+ max = b.Count
+ }
+ }
+ // Divisor for normalization.
+ divisor := math.Log(float64(max))
+
+ // boundary returns a Boundary, populating the Norm field with a normalized Count.
+ boundary := func(offset int, start bool, count int) Boundary {
+ b := Boundary{Offset: offset, Start: start, Count: count}
+ if !start || count == 0 {
+ return b
+ }
+ if max <= 1 {
+			b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
+ } else if count > 0 {
+ b.Norm = math.Log(float64(count)) / divisor
+ }
+ return b
+ }
+
+ line, col := 1, 2 // TODO: Why is this 2?
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
+ b := p.Blocks[bi]
+ if b.StartLine == line && b.StartCol == col {
+ boundaries = append(boundaries, boundary(si, true, b.Count))
+ }
+ if b.EndLine == line && b.EndCol == col || line > b.EndLine {
+ boundaries = append(boundaries, boundary(si, false, 0))
+ bi++
+ continue // Don't advance through src; maybe the next block starts here.
+ }
+ if src[si] == '\n' {
+ line++
+ col = 0
+ }
+ col++
+ si++
+ }
+ sort.Sort(boundariesByPos(boundaries))
+ return
+}
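A sketch of driving Boundaries by hand. The single block below mirrors what a count-mode profile would record for this function body (starting just after the opening brace and ending just after the closing one); the numbers are fabricated for illustration:

package main

import (
	"fmt"

	"golang.org/x/tools/cover"
)

func main() {
	src := []byte("package main\n\nfunc main() {\n\tprintln(\"hi\")\n}\n")
	p := &cover.Profile{
		FileName: "main.go",
		Mode:     "count",
		Blocks: []cover.ProfileBlock{
			{StartLine: 3, StartCol: 14, EndLine: 5, EndCol: 2, NumStmt: 1, Count: 4},
		},
	}
	// Each boundary is where an HTML renderer would open or close a <span>.
	for _, b := range p.Boundaries(src) {
		fmt.Printf("offset=%d start=%v count=%d norm=%.2f\n",
			b.Offset, b.Start, b.Count, b.Norm)
	}
}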
+
+type boundariesByPos []Boundary
+
+func (b boundariesByPos) Len() int { return len(b) }
+func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b boundariesByPos) Less(i, j int) bool {
+ if b[i].Offset == b[j].Offset {
+ return !b[i].Start && b[j].Start
+ }
+ return b[i].Offset < b[j].Offset
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
new file mode 100644
index 000000000..6b7052b89
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -0,0 +1,627 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+// This file defines utilities for working with source positions.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "sort"
+)
+
+// PathEnclosingInterval returns the node that encloses the source
+// interval [start, end), and all its ancestors up to the AST root.
+//
+// The definition of "enclosing" used by this function considers
+// additional whitespace abutting a node to be enclosed by it.
+// In this example:
+//
+// z := x + y // add them
+// <-A->
+// <----B----->
+//
+// the ast.BinaryExpr(+) node is considered to enclose interval B
+// even though its [Pos()..End()) is actually only interval A.
+// This behaviour makes user interfaces more tolerant of imperfect
+// input.
+//
+// This function treats tokens as nodes, though they are not included
+// in the result. e.g. PathEnclosingInterval("+") returns the
+// enclosing ast.BinaryExpr("x + y").
+//
+// If start==end, the 1-char interval following start is used instead.
+//
+// The 'exact' result is true if the interval contains only path[0]
+// and perhaps some adjacent whitespace. It is false if the interval
+// overlaps multiple children of path[0], or if it contains only
+// interior whitespace of path[0].
+// In this example:
+//
+// z := x + y // add them
+// <--C--> <---E-->
+// ^
+// D
+//
+// intervals C, D and E are inexact. C is contained by the
+// z-assignment statement, because it spans three of its children (:=,
+// x, +). So too is the 1-char interval D, because it contains only
+// interior whitespace of the assignment. E is considered interior
+// whitespace of the BlockStmt containing the assignment.
+//
+// Precondition: [start, end) both lie within the same file as root.
+// TODO(adonovan): return (nil, false) in this case and remove precond.
+// Requires FileSet; see loader.tokenFileContainsPos.
+//
+// Postcondition: path is never nil; it always contains at least 'root'.
+//
+func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
+ // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
+
+ // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
+ var visit func(node ast.Node) bool
+ visit = func(node ast.Node) bool {
+ path = append(path, node)
+
+ nodePos := node.Pos()
+ nodeEnd := node.End()
+
+ // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
+
+ // Intersect [start, end) with interval of node.
+ if start < nodePos {
+ start = nodePos
+ }
+ if end > nodeEnd {
+ end = nodeEnd
+ }
+
+ // Find sole child that contains [start, end).
+ children := childrenOf(node)
+ l := len(children)
+ for i, child := range children {
+ // [childPos, childEnd) is unaugmented interval of child.
+ childPos := child.Pos()
+ childEnd := child.End()
+
+ // [augPos, augEnd) is whitespace-augmented interval of child.
+ augPos := childPos
+ augEnd := childEnd
+ if i > 0 {
+ augPos = children[i-1].End() // start of preceding whitespace
+ }
+ if i < l-1 {
+ nextChildPos := children[i+1].Pos()
+ // Does [start, end) lie between child and next child?
+ if start >= augEnd && end <= nextChildPos {
+ return false // inexact match
+ }
+ augEnd = nextChildPos // end of following whitespace
+ }
+
+ // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
+ // i, augPos, augEnd, start, end) // debugging
+
+ // Does augmented child strictly contain [start, end)?
+ if augPos <= start && end <= augEnd {
+ _, isToken := child.(tokenNode)
+ return isToken || visit(child)
+ }
+
+ // Does [start, end) overlap multiple children?
+ // i.e. left-augmented child contains start
+ // but LR-augmented child does not contain end.
+ if start < childEnd && end > augEnd {
+ break
+ }
+ }
+
+ // No single child contained [start, end),
+ // so node is the result. Is it exact?
+
+ // (It's tempting to put this condition before the
+ // child loop, but it gives the wrong result in the
+ // case where a node (e.g. ExprStmt) and its sole
+ // child have equal intervals.)
+ if start == nodePos && end == nodeEnd {
+ return true // exact match
+ }
+
+ return false // inexact: overlaps multiple children
+ }
+
+ if start > end {
+ start, end = end, start
+ }
+
+ if start < root.End() && end > root.Pos() {
+ if start == end {
+ end = start + 1 // empty interval => interval of size 1
+ }
+ exact = visit(root)
+
+ // Reverse the path:
+ for i, l := 0, len(path); i < l/2; i++ {
+ path[i], path[l-1-i] = path[l-1-i], path[i]
+ }
+ } else {
+ // Selection lies within whitespace preceding the
+ // first (or following the last) declaration in the file.
+ // The result nonetheless always includes the ast.File.
+ path = append(path, root)
+ }
+
+ return
+}
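A minimal usage sketch, assuming a fixed source string whose + operator sits at byte offset 35 (counted by hand). It prints the enclosing path from the innermost node out to the *ast.File, using NodeDescription from later in this file:

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = "package p\n\nfunc f() int { return 1 + 2 }\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pos := fset.File(f.Pos()).Pos(35) // position of the "+" token
	path, exact := astutil.PathEnclosingInterval(f, pos, pos+1)
	for _, n := range path {
		fmt.Printf("%T: %s\n", n, astutil.NodeDescription(n))
	}
	fmt.Println("exact:", exact)
}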
+
+// tokenNode is a dummy implementation of ast.Node for a single token.
+// They are used transiently by PathEnclosingInterval but never escape
+// this package.
+//
+type tokenNode struct {
+ pos token.Pos
+ end token.Pos
+}
+
+func (n tokenNode) Pos() token.Pos {
+ return n.pos
+}
+
+func (n tokenNode) End() token.Pos {
+ return n.end
+}
+
+func tok(pos token.Pos, len int) ast.Node {
+ return tokenNode{pos, pos + token.Pos(len)}
+}
+
+// childrenOf returns the direct non-nil children of ast.Node n.
+// It may include fake ast.Node implementations for bare tokens.
+// It is not safe to call (e.g.) ast.Walk on such nodes.
+//
+func childrenOf(n ast.Node) []ast.Node {
+ var children []ast.Node
+
+ // First add nodes for all true subtrees.
+ ast.Inspect(n, func(node ast.Node) bool {
+ if node == n { // push n
+ return true // recur
+ }
+ if node != nil { // push child
+ children = append(children, node)
+ }
+ return false // no recursion
+ })
+
+ // Then add fake Nodes for bare tokens.
+ switch n := n.(type) {
+ case *ast.ArrayType:
+ children = append(children,
+ tok(n.Lbrack, len("[")),
+ tok(n.Elt.End(), len("]")))
+
+ case *ast.AssignStmt:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.BasicLit:
+ children = append(children,
+ tok(n.ValuePos, len(n.Value)))
+
+ case *ast.BinaryExpr:
+ children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+ case *ast.BlockStmt:
+ children = append(children,
+ tok(n.Lbrace, len("{")),
+ tok(n.Rbrace, len("}")))
+
+ case *ast.BranchStmt:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.CallExpr:
+ children = append(children,
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+ if n.Ellipsis != 0 {
+ children = append(children, tok(n.Ellipsis, len("...")))
+ }
+
+ case *ast.CaseClause:
+ if n.List == nil {
+ children = append(children,
+ tok(n.Case, len("default")))
+ } else {
+ children = append(children,
+ tok(n.Case, len("case")))
+ }
+ children = append(children, tok(n.Colon, len(":")))
+
+ case *ast.ChanType:
+ switch n.Dir {
+ case ast.RECV:
+ children = append(children, tok(n.Begin, len("<-chan")))
+ case ast.SEND:
+ children = append(children, tok(n.Begin, len("chan<-")))
+ case ast.RECV | ast.SEND:
+ children = append(children, tok(n.Begin, len("chan")))
+ }
+
+ case *ast.CommClause:
+ if n.Comm == nil {
+ children = append(children,
+ tok(n.Case, len("default")))
+ } else {
+ children = append(children,
+ tok(n.Case, len("case")))
+ }
+ children = append(children, tok(n.Colon, len(":")))
+
+ case *ast.Comment:
+ // nop
+
+ case *ast.CommentGroup:
+ // nop
+
+ case *ast.CompositeLit:
+ children = append(children,
+ tok(n.Lbrace, len("{")),
+			tok(n.Rbrace, len("}")))
+
+ case *ast.DeclStmt:
+ // nop
+
+ case *ast.DeferStmt:
+ children = append(children,
+ tok(n.Defer, len("defer")))
+
+ case *ast.Ellipsis:
+ children = append(children,
+ tok(n.Ellipsis, len("...")))
+
+ case *ast.EmptyStmt:
+ // nop
+
+ case *ast.ExprStmt:
+ // nop
+
+ case *ast.Field:
+ // TODO(adonovan): Field.{Doc,Comment,Tag}?
+
+ case *ast.FieldList:
+ children = append(children,
+ tok(n.Opening, len("(")),
+ tok(n.Closing, len(")")))
+
+ case *ast.File:
+ // TODO test: Doc
+ children = append(children,
+ tok(n.Package, len("package")))
+
+ case *ast.ForStmt:
+ children = append(children,
+ tok(n.For, len("for")))
+
+ case *ast.FuncDecl:
+ // TODO(adonovan): FuncDecl.Comment?
+
+ // Uniquely, FuncDecl breaks the invariant that
+ // preorder traversal yields tokens in lexical order:
+ // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
+ //
+ // As a workaround, we inline the case for FuncType
+ // here and order things correctly.
+ //
+ children = nil // discard ast.Walk(FuncDecl) info subtrees
+ children = append(children, tok(n.Type.Func, len("func")))
+ if n.Recv != nil {
+ children = append(children, n.Recv)
+ }
+ children = append(children, n.Name)
+ if n.Type.Params != nil {
+ children = append(children, n.Type.Params)
+ }
+ if n.Type.Results != nil {
+ children = append(children, n.Type.Results)
+ }
+ if n.Body != nil {
+ children = append(children, n.Body)
+ }
+
+ case *ast.FuncLit:
+ // nop
+
+ case *ast.FuncType:
+ if n.Func != 0 {
+ children = append(children,
+ tok(n.Func, len("func")))
+ }
+
+ case *ast.GenDecl:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+ if n.Lparen != 0 {
+ children = append(children,
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+ }
+
+ case *ast.GoStmt:
+ children = append(children,
+ tok(n.Go, len("go")))
+
+ case *ast.Ident:
+ children = append(children,
+ tok(n.NamePos, len(n.Name)))
+
+ case *ast.IfStmt:
+ children = append(children,
+ tok(n.If, len("if")))
+
+ case *ast.ImportSpec:
+ // TODO(adonovan): ImportSpec.{Doc,EndPos}?
+
+ case *ast.IncDecStmt:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.IndexExpr:
+ children = append(children,
+			tok(n.Lbrack, len("[")),
+			tok(n.Rbrack, len("]")))
+
+ case *ast.InterfaceType:
+ children = append(children,
+ tok(n.Interface, len("interface")))
+
+ case *ast.KeyValueExpr:
+ children = append(children,
+ tok(n.Colon, len(":")))
+
+ case *ast.LabeledStmt:
+ children = append(children,
+ tok(n.Colon, len(":")))
+
+ case *ast.MapType:
+ children = append(children,
+ tok(n.Map, len("map")))
+
+ case *ast.ParenExpr:
+ children = append(children,
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+
+ case *ast.RangeStmt:
+ children = append(children,
+ tok(n.For, len("for")),
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.ReturnStmt:
+ children = append(children,
+ tok(n.Return, len("return")))
+
+ case *ast.SelectStmt:
+ children = append(children,
+ tok(n.Select, len("select")))
+
+ case *ast.SelectorExpr:
+ // nop
+
+ case *ast.SendStmt:
+ children = append(children,
+ tok(n.Arrow, len("<-")))
+
+ case *ast.SliceExpr:
+ children = append(children,
+ tok(n.Lbrack, len("[")),
+ tok(n.Rbrack, len("]")))
+
+ case *ast.StarExpr:
+ children = append(children, tok(n.Star, len("*")))
+
+ case *ast.StructType:
+ children = append(children, tok(n.Struct, len("struct")))
+
+ case *ast.SwitchStmt:
+ children = append(children, tok(n.Switch, len("switch")))
+
+ case *ast.TypeAssertExpr:
+ children = append(children,
+ tok(n.Lparen-1, len(".")),
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+
+ case *ast.TypeSpec:
+ // TODO(adonovan): TypeSpec.{Doc,Comment}?
+
+ case *ast.TypeSwitchStmt:
+ children = append(children, tok(n.Switch, len("switch")))
+
+ case *ast.UnaryExpr:
+ children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+ case *ast.ValueSpec:
+ // TODO(adonovan): ValueSpec.{Doc,Comment}?
+
+ case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
+ // nop
+ }
+
+ // TODO(adonovan): opt: merge the logic of ast.Inspect() into
+ // the switch above so we can make interleaved callbacks for
+ // both Nodes and Tokens in the right order and avoid the need
+ // to sort.
+ sort.Sort(byPos(children))
+
+ return children
+}
+
+type byPos []ast.Node
+
+func (sl byPos) Len() int {
+ return len(sl)
+}
+func (sl byPos) Less(i, j int) bool {
+ return sl[i].Pos() < sl[j].Pos()
+}
+func (sl byPos) Swap(i, j int) {
+ sl[i], sl[j] = sl[j], sl[i]
+}
+
+// NodeDescription returns a description of the concrete type of n suitable
+// for a user interface.
+//
+// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
+// StarExpr) we could be much more specific given the path to the AST
+// root. Perhaps we should do that.
+//
+func NodeDescription(n ast.Node) string {
+ switch n := n.(type) {
+ case *ast.ArrayType:
+ return "array type"
+ case *ast.AssignStmt:
+ return "assignment"
+ case *ast.BadDecl:
+ return "bad declaration"
+ case *ast.BadExpr:
+ return "bad expression"
+ case *ast.BadStmt:
+ return "bad statement"
+ case *ast.BasicLit:
+ return "basic literal"
+ case *ast.BinaryExpr:
+ return fmt.Sprintf("binary %s operation", n.Op)
+ case *ast.BlockStmt:
+ return "block"
+ case *ast.BranchStmt:
+ switch n.Tok {
+ case token.BREAK:
+ return "break statement"
+ case token.CONTINUE:
+ return "continue statement"
+ case token.GOTO:
+ return "goto statement"
+ case token.FALLTHROUGH:
+ return "fall-through statement"
+ }
+ case *ast.CallExpr:
+ if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
+ return "function call (or conversion)"
+ }
+ return "function call"
+ case *ast.CaseClause:
+ return "case clause"
+ case *ast.ChanType:
+ return "channel type"
+ case *ast.CommClause:
+ return "communication clause"
+ case *ast.Comment:
+ return "comment"
+ case *ast.CommentGroup:
+ return "comment group"
+ case *ast.CompositeLit:
+ return "composite literal"
+ case *ast.DeclStmt:
+ return NodeDescription(n.Decl) + " statement"
+ case *ast.DeferStmt:
+ return "defer statement"
+ case *ast.Ellipsis:
+ return "ellipsis"
+ case *ast.EmptyStmt:
+ return "empty statement"
+ case *ast.ExprStmt:
+ return "expression statement"
+ case *ast.Field:
+ // Can be any of these:
+ // struct {x, y int} -- struct field(s)
+ // struct {T} -- anon struct field
+ // interface {I} -- interface embedding
+ // interface {f()} -- interface method
+ // func (A) func(B) C -- receiver, param(s), result(s)
+ return "field/method/parameter"
+ case *ast.FieldList:
+ return "field/method/parameter list"
+ case *ast.File:
+ return "source file"
+ case *ast.ForStmt:
+ return "for loop"
+ case *ast.FuncDecl:
+ return "function declaration"
+ case *ast.FuncLit:
+ return "function literal"
+ case *ast.FuncType:
+ return "function type"
+ case *ast.GenDecl:
+ switch n.Tok {
+ case token.IMPORT:
+ return "import declaration"
+ case token.CONST:
+ return "constant declaration"
+ case token.TYPE:
+ return "type declaration"
+ case token.VAR:
+ return "variable declaration"
+ }
+ case *ast.GoStmt:
+ return "go statement"
+ case *ast.Ident:
+ return "identifier"
+ case *ast.IfStmt:
+ return "if statement"
+ case *ast.ImportSpec:
+ return "import specification"
+ case *ast.IncDecStmt:
+ if n.Tok == token.INC {
+ return "increment statement"
+ }
+ return "decrement statement"
+ case *ast.IndexExpr:
+ return "index expression"
+ case *ast.InterfaceType:
+ return "interface type"
+ case *ast.KeyValueExpr:
+ return "key/value association"
+ case *ast.LabeledStmt:
+ return "statement label"
+ case *ast.MapType:
+ return "map type"
+ case *ast.Package:
+ return "package"
+ case *ast.ParenExpr:
+ return "parenthesized " + NodeDescription(n.X)
+ case *ast.RangeStmt:
+ return "range loop"
+ case *ast.ReturnStmt:
+ return "return statement"
+ case *ast.SelectStmt:
+ return "select statement"
+ case *ast.SelectorExpr:
+ return "selector"
+ case *ast.SendStmt:
+ return "channel send"
+ case *ast.SliceExpr:
+ return "slice expression"
+ case *ast.StarExpr:
+ return "*-operation" // load/store expr or pointer type
+ case *ast.StructType:
+ return "struct type"
+ case *ast.SwitchStmt:
+ return "switch statement"
+ case *ast.TypeAssertExpr:
+ return "type assertion"
+ case *ast.TypeSpec:
+ return "type specification"
+ case *ast.TypeSwitchStmt:
+ return "type switch"
+ case *ast.UnaryExpr:
+ return fmt.Sprintf("unary %s operation", n.Op)
+ case *ast.ValueSpec:
+ return "value specification"
+
+ }
+ panic(fmt.Sprintf("unexpected node type: %T", n))
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
new file mode 100644
index 000000000..3e4b19536
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -0,0 +1,481 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package astutil contains common utilities for working with the Go AST.
+package astutil // import "golang.org/x/tools/go/ast/astutil"
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strconv"
+ "strings"
+)
+
+// AddImport adds the import path to the file f, if absent.
+func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
+ return AddNamedImport(fset, f, "", path)
+}
+
+// AddNamedImport adds the import with the given name and path to the file f, if absent.
+// If name is not empty, it is used to rename the import.
+//
+// For example, calling
+// AddNamedImport(fset, f, "pathpkg", "path")
+// adds
+// import pathpkg "path"
+func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
+ if imports(f, name, path) {
+ return false
+ }
+
+ newImport := &ast.ImportSpec{
+ Path: &ast.BasicLit{
+ Kind: token.STRING,
+ Value: strconv.Quote(path),
+ },
+ }
+ if name != "" {
+ newImport.Name = &ast.Ident{Name: name}
+ }
+
+ // Find an import decl to add to.
+ // The goal is to find an existing import
+ // whose import path has the longest shared
+ // prefix with path.
+ var (
+ bestMatch = -1 // length of longest shared prefix
+ lastImport = -1 // index in f.Decls of the file's final import decl
+ impDecl *ast.GenDecl // import decl containing the best match
+ impIndex = -1 // spec index in impDecl containing the best match
+
+ isThirdPartyPath = isThirdParty(path)
+ )
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if ok && gen.Tok == token.IMPORT {
+ lastImport = i
+ // Do not add to import "C", to avoid disrupting the
+ // association with its doc comment, breaking cgo.
+ if declImports(gen, "C") {
+ continue
+ }
+
+ // Match an empty import decl if that's all that is available.
+ if len(gen.Specs) == 0 && bestMatch == -1 {
+ impDecl = gen
+ }
+
+ // Compute longest shared prefix with imports in this group and find best
+ // matched import spec.
+ // 1. Always prefer import spec with longest shared prefix.
+ // 2. While match length is 0,
+ // - for stdlib package: prefer first import spec.
+ // - for third party package: prefer first third party import spec.
+ // We cannot use last import spec as best match for third party package
+ // because grouped imports are usually placed last by goimports -local
+ // flag.
+ // See issue #19190.
+ seenAnyThirdParty := false
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ p := importPath(impspec)
+ n := matchLen(p, path)
+ if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
+ bestMatch = n
+ impDecl = gen
+ impIndex = j
+ }
+ seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
+ }
+ }
+ }
+
+ // If no import decl found, add one after the last import.
+ if impDecl == nil {
+ impDecl = &ast.GenDecl{
+ Tok: token.IMPORT,
+ }
+ if lastImport >= 0 {
+ impDecl.TokPos = f.Decls[lastImport].End()
+ } else {
+ // There are no existing imports.
+ // Our new import, preceded by a blank line, goes after the package declaration
+ // and after the comment, if any, that starts on the same line as the
+ // package declaration.
+ impDecl.TokPos = f.Package
+
+ file := fset.File(f.Package)
+ pkgLine := file.Line(f.Package)
+ for _, c := range f.Comments {
+ if file.Line(c.Pos()) > pkgLine {
+ break
+ }
+ // +2 for a blank line
+ impDecl.TokPos = c.End() + 2
+ }
+ }
+ f.Decls = append(f.Decls, nil)
+ copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
+ f.Decls[lastImport+1] = impDecl
+ }
+
+ // Insert new import at insertAt.
+ insertAt := 0
+ if impIndex >= 0 {
+ // insert after the found import
+ insertAt = impIndex + 1
+ }
+ impDecl.Specs = append(impDecl.Specs, nil)
+ copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
+ impDecl.Specs[insertAt] = newImport
+ pos := impDecl.Pos()
+ if insertAt > 0 {
+ // If there is a comment after an existing import, preserve the comment
+ // position by adding the new import after the comment.
+ if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
+ pos = spec.Comment.End()
+ } else {
+ // Assign same position as the previous import,
+ // so that the sorter sees it as being in the same block.
+ pos = impDecl.Specs[insertAt-1].Pos()
+ }
+ }
+ if newImport.Name != nil {
+ newImport.Name.NamePos = pos
+ }
+ newImport.Path.ValuePos = pos
+ newImport.EndPos = pos
+
+ // Clean up parens. impDecl contains at least one spec.
+ if len(impDecl.Specs) == 1 {
+ // Remove unneeded parens.
+ impDecl.Lparen = token.NoPos
+ } else if !impDecl.Lparen.IsValid() {
+ // impDecl needs parens added.
+ impDecl.Lparen = impDecl.Specs[0].Pos()
+ }
+
+ f.Imports = append(f.Imports, newImport)
+
+ if len(f.Decls) <= 1 {
+ return true
+ }
+
+ // Merge all the import declarations into the first one.
+ var first *ast.GenDecl
+ for i := 0; i < len(f.Decls); i++ {
+ decl := f.Decls[i]
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
+ continue
+ }
+ if first == nil {
+ first = gen
+ continue // Don't touch the first one.
+ }
+ // We now know there is more than one package in this import
+ // declaration. Ensure that it ends up parenthesized.
+ first.Lparen = first.Pos()
+ // Move the imports of the other import declaration to the first one.
+ for _, spec := range gen.Specs {
+ spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
+ first.Specs = append(first.Specs, spec)
+ }
+ f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
+ i--
+ }
+
+ return true
+}
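A sketch tying AddNamedImport together with UsesImport and DeleteImport from later in this file; the source string and the pathpkg rename are illustrative:

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = "package p\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n\nvar _ = os.Args\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Add a renamed import, as in the doc comment above.
	astutil.AddNamedImport(fset, f, "pathpkg", "path")
	// Drop "fmt" only if nothing in the file appears to use it.
	if !astutil.UsesImport(f, "fmt") {
		astutil.DeleteImport(fset, f, "fmt")
	}
	printer.Fprint(os.Stdout, fset, f)
}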
+
+func isThirdParty(importPath string) bool {
+ // Third party package import path usually contains "." (".com", ".org", ...)
+ // This logic is taken from golang.org/x/tools/imports package.
+ return strings.Contains(importPath, ".")
+}
+
+// DeleteImport deletes the import path from the file f, if present.
+// If there are duplicate import declarations, all matching ones are deleted.
+func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
+ return DeleteNamedImport(fset, f, "", path)
+}
+
+// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
+// If there are duplicate import declarations, all matching ones are deleted.
+func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
+ var delspecs []*ast.ImportSpec
+ var delcomments []*ast.CommentGroup
+
+ // Find the import nodes that import path, if any.
+ for i := 0; i < len(f.Decls); i++ {
+ decl := f.Decls[i]
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok || gen.Tok != token.IMPORT {
+ continue
+ }
+ for j := 0; j < len(gen.Specs); j++ {
+ spec := gen.Specs[j]
+ impspec := spec.(*ast.ImportSpec)
+ if importName(impspec) != name || importPath(impspec) != path {
+ continue
+ }
+
+ // We found an import spec that imports path.
+ // Delete it.
+ delspecs = append(delspecs, impspec)
+ deleted = true
+ copy(gen.Specs[j:], gen.Specs[j+1:])
+ gen.Specs = gen.Specs[:len(gen.Specs)-1]
+
+ // If this was the last import spec in this decl,
+ // delete the decl, too.
+ if len(gen.Specs) == 0 {
+ copy(f.Decls[i:], f.Decls[i+1:])
+ f.Decls = f.Decls[:len(f.Decls)-1]
+ i--
+ break
+ } else if len(gen.Specs) == 1 {
+ if impspec.Doc != nil {
+ delcomments = append(delcomments, impspec.Doc)
+ }
+ if impspec.Comment != nil {
+ delcomments = append(delcomments, impspec.Comment)
+ }
+ for _, cg := range f.Comments {
+ // Found comment on the same line as the import spec.
+ if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
+ delcomments = append(delcomments, cg)
+ break
+ }
+ }
+
+ spec := gen.Specs[0].(*ast.ImportSpec)
+
+ // Move the documentation right after the import decl.
+ if spec.Doc != nil {
+ for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
+ fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
+ }
+ }
+ for _, cg := range f.Comments {
+ if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
+ for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
+ fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
+ }
+ break
+ }
+ }
+ }
+ if j > 0 {
+ lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
+ lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
+ line := fset.Position(impspec.Path.ValuePos).Line
+
+ // We deleted an entry but now there may be
+ // a blank line-sized hole where the import was.
+ if line-lastLine > 1 {
+ // There was a blank line immediately preceding the deleted import,
+ // so there's no need to close the hole.
+ // Do nothing.
+ } else if line != fset.File(gen.Rparen).LineCount() {
+ // There was no blank line. Close the hole.
+ fset.File(gen.Rparen).MergeLine(line)
+ }
+ }
+ j--
+ }
+ }
+
+ // Delete imports from f.Imports.
+ for i := 0; i < len(f.Imports); i++ {
+ imp := f.Imports[i]
+ for j, del := range delspecs {
+ if imp == del {
+ copy(f.Imports[i:], f.Imports[i+1:])
+ f.Imports = f.Imports[:len(f.Imports)-1]
+ copy(delspecs[j:], delspecs[j+1:])
+ delspecs = delspecs[:len(delspecs)-1]
+ i--
+ break
+ }
+ }
+ }
+
+ // Delete comments from f.Comments.
+ for i := 0; i < len(f.Comments); i++ {
+ cg := f.Comments[i]
+ for j, del := range delcomments {
+ if cg == del {
+ copy(f.Comments[i:], f.Comments[i+1:])
+ f.Comments = f.Comments[:len(f.Comments)-1]
+ copy(delcomments[j:], delcomments[j+1:])
+ delcomments = delcomments[:len(delcomments)-1]
+ i--
+ break
+ }
+ }
+ }
+
+ if len(delspecs) > 0 {
+ panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
+ }
+
+ return
+}
+
+// RewriteImport rewrites any import of path oldPath to path newPath.
+func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
+ for _, imp := range f.Imports {
+ if importPath(imp) == oldPath {
+ rewrote = true
+ // record old End, because the default is to compute
+ // it using the length of imp.Path.Value.
+ imp.EndPos = imp.End()
+ imp.Path.Value = strconv.Quote(newPath)
+ }
+ }
+ return
+}
+
+// UsesImport reports whether a given import is used.
+func UsesImport(f *ast.File, path string) (used bool) {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return
+ }
+
+ name := spec.Name.String()
+ switch name {
+ case "<nil>":
+ // If the package name is not explicitly specified,
+ // make an educated guess. This is not guaranteed to be correct.
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 {
+ name = path
+ } else {
+ name = path[lastSlash+1:]
+ }
+ case "_", ".":
+ // Not sure if this import is used - err on the side of caution.
+ return true
+ }
+
+ ast.Walk(visitFn(func(n ast.Node) {
+ sel, ok := n.(*ast.SelectorExpr)
+ if ok && isTopName(sel.X, name) {
+ used = true
+ }
+ }), f)
+
+ return
+}
+
+type visitFn func(node ast.Node)
+
+func (fn visitFn) Visit(node ast.Node) ast.Visitor {
+ fn(node)
+ return fn
+}
+
+// imports reports whether f has an import with the specified name and path.
+func imports(f *ast.File, name, path string) bool {
+ for _, s := range f.Imports {
+ if importName(s) == name && importPath(s) == path {
+ return true
+ }
+ }
+ return false
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if importPath(s) == path {
+ return s
+ }
+ }
+ return nil
+}
+
+// importName returns the name of s,
+// or "" if the import is not named.
+func importName(s *ast.ImportSpec) string {
+ if s.Name == nil {
+ return ""
+ }
+ return s.Name.Name
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ return ""
+ }
+ return t
+}
+
+// declImports reports whether gen contains an import of path.
+func declImports(gen *ast.GenDecl, path string) bool {
+ if gen.Tok != token.IMPORT {
+ return false
+ }
+ for _, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if importPath(impspec) == path {
+ return true
+ }
+ }
+ return false
+}
+
+// matchLen returns the length of the longest path segment prefix shared by x and y.
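+// For example, matchLen("golang.org/x/tools", "golang.org/x/net") is 2,
+// since the two paths share the "golang.org/" and "x/" segments.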
+func matchLen(x, y string) int {
+ n := 0
+ for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
+ if x[i] == '/' {
+ n++
+ }
+ }
+ return n
+}
+
+// isTopName returns true if n is a top-level unresolved identifier with the given name.
+func isTopName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.Name == name && id.Obj == nil
+}
+
+// Imports returns the file imports grouped by paragraph.
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
+ var groups [][]*ast.ImportSpec
+
+ for _, decl := range f.Decls {
+ genDecl, ok := decl.(*ast.GenDecl)
+ if !ok || genDecl.Tok != token.IMPORT {
+ break
+ }
+
+ group := []*ast.ImportSpec{}
+
+ var lastLine int
+ for _, spec := range genDecl.Specs {
+ importSpec := spec.(*ast.ImportSpec)
+ pos := importSpec.Path.ValuePos
+ line := fset.Position(pos).Line
+ if lastLine > 0 && pos > 0 && line-lastLine > 1 {
+ groups = append(groups, group)
+ group = []*ast.ImportSpec{}
+ }
+ group = append(group, importSpec)
+ lastLine = line
+ }
+ groups = append(groups, group)
+ }
+
+ return groups
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
new file mode 100644
index 000000000..cf72ea990
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -0,0 +1,477 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+import (
+ "fmt"
+ "go/ast"
+ "reflect"
+ "sort"
+)
+
+// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
+// before and/or after the node's children, using a Cursor describing
+// the current node and providing operations on it.
+//
+// The return value of ApplyFunc controls the syntax tree traversal.
+// See Apply for details.
+type ApplyFunc func(*Cursor) bool
+
+// Apply traverses a syntax tree recursively, starting with root,
+// and calling pre and post for each node as described below.
+// Apply returns the syntax tree, possibly modified.
+//
+// If pre is not nil, it is called for each node before the node's
+// children are traversed (pre-order). If pre returns false, no
+// children are traversed, and post is not called for that node.
+//
+// If post is not nil, and a prior call of pre didn't return false,
+// post is called for each node after its children are traversed
+// (post-order). If post returns false, traversal is terminated and
+// Apply returns immediately.
+//
+// Only fields that refer to AST nodes are considered children;
+// i.e., token.Pos, Scopes, Objects, and fields of basic types
+// (strings, etc.) are ignored.
+//
+// Children are traversed in the order in which they appear in the
+// respective node's struct definition. A package's files are
+// traversed in the filenames' alphabetical order.
+//
+func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
+ parent := &struct{ ast.Node }{root}
+ defer func() {
+ if r := recover(); r != nil && r != abort {
+ panic(r)
+ }
+ result = parent.Node
+ }()
+ a := &application{pre: pre, post: post}
+ a.apply(parent, "Node", nil, root)
+ return
+}
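A minimal sketch of Apply with a pre callback that swaps one literal for another via Cursor.Replace; the source string is illustrative:

package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = "package p\n\nvar x = 1 + 2\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Rewrite the literal 1 to 41; Apply does not walk the replacement.
	astutil.Apply(f, func(c *astutil.Cursor) bool {
		if lit, ok := c.Node().(*ast.BasicLit); ok && lit.Value == "1" {
			c.Replace(&ast.BasicLit{Kind: token.INT, Value: "41"})
		}
		return true
	}, nil)
	printer.Fprint(os.Stdout, fset, f) // the var declaration now reads: var x = 41 + 2
}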
+
+var abort = new(int) // singleton, to signal termination of Apply
+
+// A Cursor describes a node encountered during Apply.
+// Information about the node and its parent is available
+// from the Node, Parent, Name, and Index methods.
+//
+// If p is a variable of type and value of the current parent node
+// c.Parent(), and f is the field identifier with name c.Name(),
+// the following invariants hold:
+//
+// p.f == c.Node() if c.Index() < 0
+// p.f[c.Index()] == c.Node() if c.Index() >= 0
+//
+// The methods Replace, Delete, InsertBefore, and InsertAfter
+// can be used to change the AST without disrupting Apply.
+type Cursor struct {
+ parent ast.Node
+ name string
+ iter *iterator // valid if non-nil
+ node ast.Node
+}
+
+// Node returns the current Node.
+func (c *Cursor) Node() ast.Node { return c.node }
+
+// Parent returns the parent of the current Node.
+func (c *Cursor) Parent() ast.Node { return c.parent }
+
+// Name returns the name of the parent Node field that contains the current Node.
+// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
+// the filename for the current Node.
+func (c *Cursor) Name() string { return c.name }
+
+// Index reports the index >= 0 of the current Node in the slice of Nodes that
+// contains it, or a value < 0 if the current Node is not part of a slice.
+// The index of the current node changes if InsertBefore is called while
+// processing the current node.
+func (c *Cursor) Index() int {
+ if c.iter != nil {
+ return c.iter.index
+ }
+ return -1
+}
+
+// field returns the current node's parent field value.
+func (c *Cursor) field() reflect.Value {
+ return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
+}
+
+// Replace replaces the current Node with n.
+// The replacement node is not walked by Apply.
+func (c *Cursor) Replace(n ast.Node) {
+ if _, ok := c.node.(*ast.File); ok {
+ file, ok := n.(*ast.File)
+ if !ok {
+ panic("attempt to replace *ast.File with non-*ast.File")
+ }
+ c.parent.(*ast.Package).Files[c.name] = file
+ return
+ }
+
+ v := c.field()
+ if i := c.Index(); i >= 0 {
+ v = v.Index(i)
+ }
+ v.Set(reflect.ValueOf(n))
+}
+
+// Delete deletes the current Node from its containing slice.
+// If the current Node is not part of a slice, Delete panics.
+// As a special case, if the current node is a package file,
+// Delete removes it from the package's Files map.
+func (c *Cursor) Delete() {
+ if _, ok := c.node.(*ast.File); ok {
+ delete(c.parent.(*ast.Package).Files, c.name)
+ return
+ }
+
+ i := c.Index()
+ if i < 0 {
+ panic("Delete node not contained in slice")
+ }
+ v := c.field()
+ l := v.Len()
+ reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
+ v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
+ v.SetLen(l - 1)
+ c.iter.step--
+}
+
+// InsertAfter inserts n after the current Node in its containing slice.
+// If the current Node is not part of a slice, InsertAfter panics.
+// Apply does not walk n.
+func (c *Cursor) InsertAfter(n ast.Node) {
+ i := c.Index()
+ if i < 0 {
+ panic("InsertAfter node not contained in slice")
+ }
+ v := c.field()
+ v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+ l := v.Len()
+ reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
+ v.Index(i + 1).Set(reflect.ValueOf(n))
+ c.iter.step++
+}
+
+// InsertBefore inserts n before the current Node in its containing slice.
+// If the current Node is not part of a slice, InsertBefore panics.
+// Apply will not walk n.
+func (c *Cursor) InsertBefore(n ast.Node) {
+ i := c.Index()
+ if i < 0 {
+ panic("InsertBefore node not contained in slice")
+ }
+ v := c.field()
+ v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+ l := v.Len()
+ reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
+ v.Index(i).Set(reflect.ValueOf(n))
+ c.iter.index++
+}
+
+// application carries all the shared data so we can pass it around cheaply.
+type application struct {
+ pre, post ApplyFunc
+ cursor Cursor
+ iter iterator
+}
+
+func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
+ // convert typed nil into untyped nil
+ if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
+ n = nil
+ }
+
+ // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
+ saved := a.cursor
+ a.cursor.parent = parent
+ a.cursor.name = name
+ a.cursor.iter = iter
+ a.cursor.node = n
+
+ if a.pre != nil && !a.pre(&a.cursor) {
+ a.cursor = saved
+ return
+ }
+
+ // walk children
+ // (the order of the cases matches the order of the corresponding node types in go/ast)
+ switch n := n.(type) {
+ case nil:
+ // nothing to do
+
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ if n != nil {
+ a.applyList(n, "List")
+ }
+
+ case *ast.Field:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.applyList(n, "Names")
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Tag", nil, n.Tag)
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.FieldList:
+ a.applyList(n, "List")
+
+ // Expressions
+ case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Ellipsis:
+ a.apply(n, "Elt", nil, n.Elt)
+
+ case *ast.FuncLit:
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.CompositeLit:
+ a.apply(n, "Type", nil, n.Type)
+ a.applyList(n, "Elts")
+
+ case *ast.ParenExpr:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.SelectorExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Sel", nil, n.Sel)
+
+ case *ast.IndexExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Index", nil, n.Index)
+
+ case *ast.SliceExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Low", nil, n.Low)
+ a.apply(n, "High", nil, n.High)
+ a.apply(n, "Max", nil, n.Max)
+
+ case *ast.TypeAssertExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Type", nil, n.Type)
+
+ case *ast.CallExpr:
+ a.apply(n, "Fun", nil, n.Fun)
+ a.applyList(n, "Args")
+
+ case *ast.StarExpr:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.UnaryExpr:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.BinaryExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Y", nil, n.Y)
+
+ case *ast.KeyValueExpr:
+ a.apply(n, "Key", nil, n.Key)
+ a.apply(n, "Value", nil, n.Value)
+
+ // Types
+ case *ast.ArrayType:
+ a.apply(n, "Len", nil, n.Len)
+ a.apply(n, "Elt", nil, n.Elt)
+
+ case *ast.StructType:
+ a.apply(n, "Fields", nil, n.Fields)
+
+ case *ast.FuncType:
+ a.apply(n, "Params", nil, n.Params)
+ a.apply(n, "Results", nil, n.Results)
+
+ case *ast.InterfaceType:
+ a.apply(n, "Methods", nil, n.Methods)
+
+ case *ast.MapType:
+ a.apply(n, "Key", nil, n.Key)
+ a.apply(n, "Value", nil, n.Value)
+
+ case *ast.ChanType:
+ a.apply(n, "Value", nil, n.Value)
+
+ // Statements
+ case *ast.BadStmt:
+ // nothing to do
+
+ case *ast.DeclStmt:
+ a.apply(n, "Decl", nil, n.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ a.apply(n, "Label", nil, n.Label)
+ a.apply(n, "Stmt", nil, n.Stmt)
+
+ case *ast.ExprStmt:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.SendStmt:
+ a.apply(n, "Chan", nil, n.Chan)
+ a.apply(n, "Value", nil, n.Value)
+
+ case *ast.IncDecStmt:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.AssignStmt:
+ a.applyList(n, "Lhs")
+ a.applyList(n, "Rhs")
+
+ case *ast.GoStmt:
+ a.apply(n, "Call", nil, n.Call)
+
+ case *ast.DeferStmt:
+ a.apply(n, "Call", nil, n.Call)
+
+ case *ast.ReturnStmt:
+ a.applyList(n, "Results")
+
+ case *ast.BranchStmt:
+ a.apply(n, "Label", nil, n.Label)
+
+ case *ast.BlockStmt:
+ a.applyList(n, "List")
+
+ case *ast.IfStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Cond", nil, n.Cond)
+ a.apply(n, "Body", nil, n.Body)
+ a.apply(n, "Else", nil, n.Else)
+
+ case *ast.CaseClause:
+ a.applyList(n, "List")
+ a.applyList(n, "Body")
+
+ case *ast.SwitchStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Tag", nil, n.Tag)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.TypeSwitchStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Assign", nil, n.Assign)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.CommClause:
+ a.apply(n, "Comm", nil, n.Comm)
+ a.applyList(n, "Body")
+
+ case *ast.SelectStmt:
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.ForStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Cond", nil, n.Cond)
+ a.apply(n, "Post", nil, n.Post)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.RangeStmt:
+ a.apply(n, "Key", nil, n.Key)
+ a.apply(n, "Value", nil, n.Value)
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Body", nil, n.Body)
+
+ // Declarations
+ case *ast.ImportSpec:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Name", nil, n.Name)
+ a.apply(n, "Path", nil, n.Path)
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.ValueSpec:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.applyList(n, "Names")
+ a.apply(n, "Type", nil, n.Type)
+ a.applyList(n, "Values")
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.TypeSpec:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Name", nil, n.Name)
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.GenDecl:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.applyList(n, "Specs")
+
+ case *ast.FuncDecl:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Recv", nil, n.Recv)
+ a.apply(n, "Name", nil, n.Name)
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Body", nil, n.Body)
+
+ // Files and packages
+ case *ast.File:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Name", nil, n.Name)
+ a.applyList(n, "Decls")
+ // Don't walk n.Comments; they have either been walked already if
+ // they are Doc comments, or they can be easily walked explicitly.
+
+ case *ast.Package:
+ // collect and sort names for reproducible behavior
+ var names []string
+ for name := range n.Files {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ a.apply(n, name, nil, n.Files[name])
+ }
+
+ default:
+ panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+ }
+
+ if a.post != nil && !a.post(&a.cursor) {
+ panic(abort)
+ }
+
+ a.cursor = saved
+}
+
+// An iterator controls iteration over a slice of nodes.
+type iterator struct {
+ index, step int
+}
+
+func (a *application) applyList(parent ast.Node, name string) {
+ // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
+ saved := a.iter
+ a.iter.index = 0
+ for {
+ // must reload parent.name each time, since cursor modifications might change it
+ v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
+ if a.iter.index >= v.Len() {
+ break
+ }
+
+ // element x may be nil in a bad AST - be cautious
+ var x ast.Node
+ if e := v.Index(a.iter.index); e.IsValid() {
+ x = e.Interface().(ast.Node)
+ }
+
+ a.iter.step = 1
+ a.apply(parent, name, &a.iter, x)
+ a.iter.index += a.iter.step
+ }
+ a.iter = saved
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go
new file mode 100644
index 000000000..763062982
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go
@@ -0,0 +1,14 @@
+package astutil
+
+import "go/ast"
+
+// Unparen returns e with any enclosing parentheses stripped.
+func Unparen(e ast.Expr) ast.Expr {
+ for {
+ p, ok := e.(*ast.ParenExpr)
+ if !ok {
+ return e
+ }
+ e = p.X
+ }
+}
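+
+// Editor's sketch (illustrative, not part of the upstream file): peeling
+// redundant parentheses from a parsed expression:
+//
+//	expr, _ := parser.ParseExpr("((x + 1))") // go/parser
+//	inner := astutil.Unparen(expr)           // the *ast.BinaryExpr for x + 1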
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
new file mode 100644
index 000000000..ddbdd3f08
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -0,0 +1,182 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package inspector provides helper functions for traversal over the
+// syntax trees of a package, including node filtering by type, and
+// materialization of the traversal stack.
+//
+// During construction, the inspector does a complete traversal and
+// builds a list of push/pop events and their node type. Subsequent
+// method calls that request a traversal scan this list, rather than walk
+// the AST, and perform type filtering using efficient bit sets.
+//
+// Experiments suggest the inspector's traversals are about 2.5x faster
+// than ast.Inspect, but it may take around 5 traversals for this
+// benefit to amortize the inspector's construction cost.
+// If efficiency is the primary concern, do not use Inspector for
+// one-off traversals.
+package inspector
+
+// There are four orthogonal features in a traversal:
+// 1 type filtering
+// 2 pruning
+// 3 postorder calls to f
+// 4 stack
+// Rather than offer all of them in the API,
+// only a few combinations are exposed:
+// - Preorder is the fastest and has fewest features,
+// but is the most commonly needed traversal.
+// - Nodes and WithStack both provide pruning and postorder calls,
+// even though few clients need it, because supporting two versions
+// is not justified.
+// More combinations could be supported by expressing them as
+// wrappers around a more generic traversal, but this was measured
+// and found to degrade performance significantly (30%).
+
+import (
+ "go/ast"
+)
+
+// An Inspector provides methods for inspecting
+// (traversing) the syntax trees of a package.
+type Inspector struct {
+ events []event
+}
+
+// New returns an Inspector for the specified syntax trees.
+func New(files []*ast.File) *Inspector {
+ return &Inspector{traverse(files)}
+}
+
+// An event represents a push or a pop
+// of an ast.Node during a traversal.
+type event struct {
+ node ast.Node
+ typ uint64 // typeOf(node)
+ index int // 1 + index of corresponding pop event, or 0 if this is a pop
+}
+
+// Preorder visits all the nodes of the files supplied to New in
+// depth-first order. It calls f(n) for each node n before it visits
+// n's children.
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
+ // Because it avoids postorder calls to f, and the pruning
+ // check, Preorder is almost twice as fast as Nodes. The two
+ // features seem to contribute similar slowdowns (~1.4x each).
+
+ mask := maskOf(types)
+ for i := 0; i < len(in.events); {
+ ev := in.events[i]
+ if ev.typ&mask != 0 {
+ if ev.index > 0 {
+ f(ev.node)
+ }
+ }
+ i++
+ }
+}
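+
+// Editor's sketch (illustrative, not part of the upstream file): visiting
+// every call expression in a set of parsed files:
+//
+//	in := inspector.New(files) // files []*ast.File, parsed elsewhere
+//	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
+//		call := n.(*ast.CallExpr)
+//		_ = call // examine the call
+//	})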
+
+// Nodes visits the nodes of the files supplied to New in depth-first
+// order. It calls f(n, true) for each node n before it visits n's
+// children. If f returns true, Nodes invokes f recursively for each
+// of the non-nil children of the node, followed by a call of
+// f(n, false).
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) {
+ mask := maskOf(types)
+ for i := 0; i < len(in.events); {
+ ev := in.events[i]
+ if ev.typ&mask != 0 {
+ if ev.index > 0 {
+ // push
+ if !f(ev.node, true) {
+ i = ev.index // jump to corresponding pop + 1
+ continue
+ }
+ } else {
+ // pop
+ f(ev.node, false)
+ }
+ }
+ i++
+ }
+}
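+
+// Editor's sketch (illustrative, not part of the upstream file): using the
+// boolean result for pruning, here to skip the bodies of function literals:
+//
+//	in.Nodes([]ast.Node{(*ast.FuncLit)(nil)}, func(n ast.Node, push bool) bool {
+//		if push {
+//			return false // prune: do not descend into the literal
+//		}
+//		return true // the result is ignored for pop events
+//	})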
+
+// WithStack visits nodes in a similar manner to Nodes, but it
+// supplies each call to f an additional argument, the current
+// traversal stack. The stack's first element is the outermost node,
+// an *ast.File; its last is the innermost, n.
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) {
+ mask := maskOf(types)
+ var stack []ast.Node
+ for i := 0; i < len(in.events); {
+ ev := in.events[i]
+ if ev.index > 0 {
+ // push
+ stack = append(stack, ev.node)
+ if ev.typ&mask != 0 {
+ if !f(ev.node, true, stack) {
+ i = ev.index
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ }
+ } else {
+ // pop
+ if ev.typ&mask != 0 {
+ f(ev.node, false, stack)
+ }
+ stack = stack[:len(stack)-1]
+ }
+ i++
+ }
+}
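+
+// Editor's sketch (illustrative, not part of the upstream file): the stack
+// makes it easy to relate a node to its enclosing declarations:
+//
+//	in.WithStack([]ast.Node{(*ast.ReturnStmt)(nil)},
+//		func(n ast.Node, push bool, stack []ast.Node) bool {
+//			if push {
+//				_ = stack[0] // the enclosing *ast.File
+//			}
+//			return true
+//		})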
+
+// traverse builds the table of events representing a traversal.
+func traverse(files []*ast.File) []event {
+ // Preallocate approximate number of events
+ // based on source file extent.
+ // This makes traverse faster by 4x (!).
+ var extent int
+ for _, f := range files {
+ extent += int(f.End() - f.Pos())
+ }
+ // This estimate is based on the net/http package.
+ events := make([]event, 0, extent*33/100)
+
+ var stack []event
+ for _, f := range files {
+ ast.Inspect(f, func(n ast.Node) bool {
+ if n != nil {
+ // push
+ ev := event{
+ node: n,
+ typ: typeOf(n),
+ index: len(events), // push event temporarily holds own index
+ }
+ stack = append(stack, ev)
+ events = append(events, ev)
+ } else {
+ // pop
+ ev := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ events[ev.index].index = len(events) + 1 // make push refer to pop
+
+ ev.index = 0 // turn ev into a pop event
+ events = append(events, ev)
+ }
+ return true
+ })
+ }
+
+ return events
+}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
new file mode 100644
index 000000000..d61301b13
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -0,0 +1,216 @@
+package inspector
+
+// This file defines func typeOf(ast.Node) uint64.
+//
+// The initial map-based implementation was too slow;
+// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
+
+import "go/ast"
+
+const (
+ nArrayType = iota
+ nAssignStmt
+ nBadDecl
+ nBadExpr
+ nBadStmt
+ nBasicLit
+ nBinaryExpr
+ nBlockStmt
+ nBranchStmt
+ nCallExpr
+ nCaseClause
+ nChanType
+ nCommClause
+ nComment
+ nCommentGroup
+ nCompositeLit
+ nDeclStmt
+ nDeferStmt
+ nEllipsis
+ nEmptyStmt
+ nExprStmt
+ nField
+ nFieldList
+ nFile
+ nForStmt
+ nFuncDecl
+ nFuncLit
+ nFuncType
+ nGenDecl
+ nGoStmt
+ nIdent
+ nIfStmt
+ nImportSpec
+ nIncDecStmt
+ nIndexExpr
+ nInterfaceType
+ nKeyValueExpr
+ nLabeledStmt
+ nMapType
+ nPackage
+ nParenExpr
+ nRangeStmt
+ nReturnStmt
+ nSelectStmt
+ nSelectorExpr
+ nSendStmt
+ nSliceExpr
+ nStarExpr
+ nStructType
+ nSwitchStmt
+ nTypeAssertExpr
+ nTypeSpec
+ nTypeSwitchStmt
+ nUnaryExpr
+ nValueSpec
+)
+
+// typeOf returns a distinct single-bit value that represents the type of n.
+//
+// Various implementations were benchmarked with BenchmarkNewInspector
+// (second column measured with GOGC=off):
+// - type switch                                 4.9-5.5ms   2.1ms
+// - binary search over a sorted list of types   5.5-5.9ms   2.5ms
+// - linear scan, frequency-ordered list         5.9-6.1ms   2.7ms
+// - linear scan, unordered list                 6.4ms       2.7ms
+// - hash table                                  6.5ms       3.1ms
+// A perfect hash seemed like overkill.
+//
+// The compiler's switch statement is the clear winner
+// as it produces a binary tree in code,
+// with constant conditions and good branch prediction.
+// (Sadly it is the most verbose in source code.)
+// Binary search suffered from poor branch prediction.
+//
+func typeOf(n ast.Node) uint64 {
+ // Fast path: nearly half of all nodes are identifiers.
+ if _, ok := n.(*ast.Ident); ok {
+ return 1 << nIdent
+ }
+
+ // These cases include all nodes encountered by ast.Inspect.
+ switch n.(type) {
+ case *ast.ArrayType:
+ return 1 << nArrayType
+ case *ast.AssignStmt:
+ return 1 << nAssignStmt
+ case *ast.BadDecl:
+ return 1 << nBadDecl
+ case *ast.BadExpr:
+ return 1 << nBadExpr
+ case *ast.BadStmt:
+ return 1 << nBadStmt
+ case *ast.BasicLit:
+ return 1 << nBasicLit
+ case *ast.BinaryExpr:
+ return 1 << nBinaryExpr
+ case *ast.BlockStmt:
+ return 1 << nBlockStmt
+ case *ast.BranchStmt:
+ return 1 << nBranchStmt
+ case *ast.CallExpr:
+ return 1 << nCallExpr
+ case *ast.CaseClause:
+ return 1 << nCaseClause
+ case *ast.ChanType:
+ return 1 << nChanType
+ case *ast.CommClause:
+ return 1 << nCommClause
+ case *ast.Comment:
+ return 1 << nComment
+ case *ast.CommentGroup:
+ return 1 << nCommentGroup
+ case *ast.CompositeLit:
+ return 1 << nCompositeLit
+ case *ast.DeclStmt:
+ return 1 << nDeclStmt
+ case *ast.DeferStmt:
+ return 1 << nDeferStmt
+ case *ast.Ellipsis:
+ return 1 << nEllipsis
+ case *ast.EmptyStmt:
+ return 1 << nEmptyStmt
+ case *ast.ExprStmt:
+ return 1 << nExprStmt
+ case *ast.Field:
+ return 1 << nField
+ case *ast.FieldList:
+ return 1 << nFieldList
+ case *ast.File:
+ return 1 << nFile
+ case *ast.ForStmt:
+ return 1 << nForStmt
+ case *ast.FuncDecl:
+ return 1 << nFuncDecl
+ case *ast.FuncLit:
+ return 1 << nFuncLit
+ case *ast.FuncType:
+ return 1 << nFuncType
+ case *ast.GenDecl:
+ return 1 << nGenDecl
+ case *ast.GoStmt:
+ return 1 << nGoStmt
+ case *ast.Ident:
+ return 1 << nIdent
+ case *ast.IfStmt:
+ return 1 << nIfStmt
+ case *ast.ImportSpec:
+ return 1 << nImportSpec
+ case *ast.IncDecStmt:
+ return 1 << nIncDecStmt
+ case *ast.IndexExpr:
+ return 1 << nIndexExpr
+ case *ast.InterfaceType:
+ return 1 << nInterfaceType
+ case *ast.KeyValueExpr:
+ return 1 << nKeyValueExpr
+ case *ast.LabeledStmt:
+ return 1 << nLabeledStmt
+ case *ast.MapType:
+ return 1 << nMapType
+ case *ast.Package:
+ return 1 << nPackage
+ case *ast.ParenExpr:
+ return 1 << nParenExpr
+ case *ast.RangeStmt:
+ return 1 << nRangeStmt
+ case *ast.ReturnStmt:
+ return 1 << nReturnStmt
+ case *ast.SelectStmt:
+ return 1 << nSelectStmt
+ case *ast.SelectorExpr:
+ return 1 << nSelectorExpr
+ case *ast.SendStmt:
+ return 1 << nSendStmt
+ case *ast.SliceExpr:
+ return 1 << nSliceExpr
+ case *ast.StarExpr:
+ return 1 << nStarExpr
+ case *ast.StructType:
+ return 1 << nStructType
+ case *ast.SwitchStmt:
+ return 1 << nSwitchStmt
+ case *ast.TypeAssertExpr:
+ return 1 << nTypeAssertExpr
+ case *ast.TypeSpec:
+ return 1 << nTypeSpec
+ case *ast.TypeSwitchStmt:
+ return 1 << nTypeSwitchStmt
+ case *ast.UnaryExpr:
+ return 1 << nUnaryExpr
+ case *ast.ValueSpec:
+ return 1 << nValueSpec
+ }
+ return 0
+}
+
+func maskOf(nodes []ast.Node) uint64 {
+ if nodes == nil {
+ return 1<<64 - 1 // match all node types
+ }
+ var mask uint64
+ for _, n := range nodes {
+ mask |= typeOf(n)
+ }
+ return mask
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go
new file mode 100644
index 000000000..c0cb03e7b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/allpackages.go
@@ -0,0 +1,198 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package buildutil provides utilities related to the go/build
+// package in the standard library.
+//
+// All I/O is done via the build.Context file system interface, which must
+// be concurrency-safe.
+package buildutil // import "golang.org/x/tools/go/buildutil"
+
+import (
+ "go/build"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// AllPackages returns the package path of each Go package in any source
+// directory of the specified build context (e.g. $GOROOT or an element
+// of $GOPATH). Errors are ignored. The results are sorted.
+// All package paths are canonical, and thus may contain "/vendor/".
+//
+// The result may include import paths for directories that contain no
+// *.go files, such as "archive" (in $GOROOT/src).
+//
+// All I/O is done via the build.Context file system interface,
+// which must be concurrency-safe.
+//
+func AllPackages(ctxt *build.Context) []string {
+ var list []string
+ ForEachPackage(ctxt, func(pkg string, _ error) {
+ list = append(list, pkg)
+ })
+ sort.Strings(list)
+ return list
+}
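+
+// Editor's sketch (illustrative, not part of the upstream file): listing all
+// packages visible in the default build context:
+//
+//	for _, pkg := range buildutil.AllPackages(&build.Default) {
+//		fmt.Println(pkg)
+//	}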
+
+// ForEachPackage calls the found function with the package path of
+// each Go package it finds in any source directory of the specified
+// build context (e.g. $GOROOT or an element of $GOPATH).
+// All package paths are canonical, and thus may contain "/vendor/".
+//
+// If the package directory exists but could not be read, the second
+// argument to the found function provides the error.
+//
+// All I/O is done via the build.Context file system interface,
+// which must be concurrency-safe.
+//
+func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
+ ch := make(chan item)
+
+ var wg sync.WaitGroup
+ for _, root := range ctxt.SrcDirs() {
+ root := root
+ wg.Add(1)
+ go func() {
+ allPackages(ctxt, root, ch)
+ wg.Done()
+ }()
+ }
+ go func() {
+ wg.Wait()
+ close(ch)
+ }()
+
+ // All calls to found occur in the caller's goroutine.
+ for i := range ch {
+ found(i.importPath, i.err)
+ }
+}
+
+type item struct {
+ importPath string
+ err error // (optional)
+}
+
+// We use a process-wide counting semaphore to limit
+// the number of parallel calls to ReadDir.
+var ioLimit = make(chan bool, 20)
+
+func allPackages(ctxt *build.Context, root string, ch chan<- item) {
+ root = filepath.Clean(root) + string(os.PathSeparator)
+
+ var wg sync.WaitGroup
+
+ var walkDir func(dir string)
+ walkDir = func(dir string) {
+ // Avoid .foo, _foo, and testdata directory trees.
+ base := filepath.Base(dir)
+ if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
+ return
+ }
+
+ pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
+
+ // Prune search if we encounter any of these import paths.
+ switch pkg {
+ case "builtin":
+ return
+ }
+
+ ioLimit <- true
+ files, err := ReadDir(ctxt, dir)
+ <-ioLimit
+ if pkg != "" || err != nil {
+ ch <- item{pkg, err}
+ }
+ for _, fi := range files {
+ fi := fi
+ if fi.IsDir() {
+ wg.Add(1)
+ go func() {
+ walkDir(filepath.Join(dir, fi.Name()))
+ wg.Done()
+ }()
+ }
+ }
+ }
+
+ walkDir(root)
+ wg.Wait()
+}
+
+// ExpandPatterns returns the set of packages matched by patterns,
+// which may have the following forms:
+//
+// golang.org/x/tools/cmd/guru # a single package
+// golang.org/x/tools/... # all packages beneath dir
+// ... # the entire workspace.
+//
+// Order is significant: a pattern preceded by '-' removes matching
+// packages from the set. For example, these patterns match all encoding
+// packages except encoding/xml:
+//
+// encoding/... -encoding/xml
+//
+// A trailing slash in a pattern is ignored. (Path components of Go
+// package names are separated by slash, not the platform's path separator.)
+//
+func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
+ // TODO(adonovan): support other features of 'go list':
+ // - "std"/"cmd"/"all" meta-packages
+ // - "..." not at the end of a pattern
+ // - relative patterns using "./" or "../" prefix
+
+ pkgs := make(map[string]bool)
+ doPkg := func(pkg string, neg bool) {
+ if neg {
+ delete(pkgs, pkg)
+ } else {
+ pkgs[pkg] = true
+ }
+ }
+
+ // Scan entire workspace if wildcards are present.
+ // TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
+ var all []string
+ for _, arg := range patterns {
+ if strings.HasSuffix(arg, "...") {
+ all = AllPackages(ctxt)
+ break
+ }
+ }
+
+ for _, arg := range patterns {
+ if arg == "" {
+ continue
+ }
+
+ neg := arg[0] == '-'
+ if neg {
+ arg = arg[1:]
+ }
+
+ if arg == "..." {
+ // ... matches all packages
+ for _, pkg := range all {
+ doPkg(pkg, neg)
+ }
+ } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
+ // dir/... matches all packages beneath dir
+ for _, pkg := range all {
+ if strings.HasPrefix(pkg, dir) &&
+ (len(pkg) == len(dir) || pkg[len(dir)] == '/') {
+ doPkg(pkg, neg)
+ }
+ }
+ } else {
+ // single package
+ doPkg(strings.TrimSuffix(arg, "/"), neg)
+ }
+ }
+
+ return pkgs
+}
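+
+// Editor's sketch (illustrative, not part of the upstream file): expanding
+// the pattern set from the doc comment above:
+//
+//	pkgs := buildutil.ExpandPatterns(&build.Default,
+//		[]string{"encoding/...", "-encoding/xml"})
+//	// pkgs["encoding/json"] == true; "encoding/xml" is absent.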
diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go
new file mode 100644
index 000000000..8b7f06673
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go
@@ -0,0 +1,109 @@
+package buildutil
+
+import (
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+)
+
+// FakeContext returns a build.Context for the fake file tree specified
+// by pkgs, which maps package import paths to a mapping from file base
+// names to contents.
+//
+// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
+// the necessary file access methods to read from memory instead of the
+// real file system.
+//
+// Unlike a real file tree, the fake one has only two levels---packages
+// and files---so ReadDir("/go/src/") returns all packages under
+// /go/src/ including, for instance, "math" and "math/big".
+// ReadDir("/go/src/math/big") would return all the files in the
+// "math/big" package.
+//
+func FakeContext(pkgs map[string]map[string]string) *build.Context {
+ clean := func(filename string) string {
+ f := path.Clean(filepath.ToSlash(filename))
+ // Removing "/go/src" while respecting segment
+ // boundaries has this unfortunate corner case:
+ if f == "/go/src" {
+ return ""
+ }
+ return strings.TrimPrefix(f, "/go/src/")
+ }
+
+ ctxt := build.Default // copy
+ ctxt.GOROOT = "/go"
+ ctxt.GOPATH = ""
+ ctxt.Compiler = "gc"
+ ctxt.IsDir = func(dir string) bool {
+ dir = clean(dir)
+ if dir == "" {
+ return true // needed by (*build.Context).SrcDirs
+ }
+ return pkgs[dir] != nil
+ }
+ ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
+ dir = clean(dir)
+ var fis []os.FileInfo
+ if dir == "" {
+ // enumerate packages
+ for importPath := range pkgs {
+ fis = append(fis, fakeDirInfo(importPath))
+ }
+ } else {
+ // enumerate files of package
+ for basename := range pkgs[dir] {
+ fis = append(fis, fakeFileInfo(basename))
+ }
+ }
+ sort.Sort(byName(fis))
+ return fis, nil
+ }
+ ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
+ filename = clean(filename)
+ dir, base := path.Split(filename)
+ content, ok := pkgs[path.Clean(dir)][base]
+ if !ok {
+ return nil, fmt.Errorf("file not found: %s", filename)
+ }
+ return ioutil.NopCloser(strings.NewReader(content)), nil
+ }
+ ctxt.IsAbsPath = func(path string) bool {
+ path = filepath.ToSlash(path)
+ // Don't rely on the default (filepath.IsAbs) since on
+ // Windows, it reports virtual paths as non-absolute.
+ return strings.HasPrefix(path, "/")
+ }
+ return &ctxt
+}
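+
+// Editor's sketch (illustrative, not part of the upstream file): a two-level
+// in-memory tree mapping import paths to file basenames to contents:
+//
+//	ctxt := buildutil.FakeContext(map[string]map[string]string{
+//		"a": {"a.go": `package a; import _ "b"`},
+//		"b": {"b.go": `package b`},
+//	})
+//	// ctxt.IsDir("/go/src/a") == true; OpenFile reads from the map.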
+
+type byName []os.FileInfo
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
+
+type fakeFileInfo string
+
+func (fi fakeFileInfo) Name() string { return string(fi) }
+func (fakeFileInfo) Sys() interface{} { return nil }
+func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
+func (fakeFileInfo) IsDir() bool { return false }
+func (fakeFileInfo) Size() int64 { return 0 }
+func (fakeFileInfo) Mode() os.FileMode { return 0644 }
+
+type fakeDirInfo string
+
+func (fd fakeDirInfo) Name() string { return string(fd) }
+func (fakeDirInfo) Sys() interface{} { return nil }
+func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
+func (fakeDirInfo) IsDir() bool { return true }
+func (fakeDirInfo) Size() int64 { return 0 }
+func (fakeDirInfo) Mode() os.FileMode { return 0755 }
diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go
new file mode 100644
index 000000000..3f71c4fef
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/overlay.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// OverlayContext overlays a build.Context with additional files from
+// a map. Files in the map take precedence over other files.
+//
+// In addition to plain string comparison, two file names are
+// considered equal if their base names match and their directory
+// components point at the same directory on the file system. That is,
+// symbolic links are followed for directories, but not files.
+//
+// A common use case for OverlayContext is to allow editors to pass in
+// a set of unsaved, modified files.
+//
+// Currently, only the Context.OpenFile function will respect the
+// overlay. This may change in the future.
+func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
+ // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
+
+ rc := func(data []byte) (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(data)), nil
+ }
+
+ copy := *orig // make a copy
+ ctxt := &copy
+ ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
+ // Fast path: names match exactly.
+ if content, ok := overlay[path]; ok {
+ return rc(content)
+ }
+
+ // Slow path: check for same file under a different
+ // alias, perhaps due to a symbolic link.
+ for filename, content := range overlay {
+ if sameFile(path, filename) {
+ return rc(content)
+ }
+ }
+
+ return OpenFile(orig, path)
+ }
+ return ctxt
+}
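+
+// Editor's sketch (illustrative, not part of the upstream file): overlaying
+// a single unsaved file over the real file system (the path is hypothetical):
+//
+//	ctxt := buildutil.OverlayContext(&build.Default, map[string][]byte{
+//		"/home/user/src/p/p.go": []byte("package p\n"),
+//	})
+//	rc, err := buildutil.OpenFile(ctxt, "/home/user/src/p/p.go") // served from the overlay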
+
+// ParseOverlayArchive parses an archive containing Go files and their
+// contents. The result is intended to be used with OverlayContext.
+//
+//
+//
+// The archive consists of a series of files. Each file consists of a
+// name, a decimal file size and the file contents, separated by
+// newlines. No newline follows the file contents.
+func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
+ overlay := make(map[string][]byte)
+ r := bufio.NewReader(archive)
+ for {
+ // Read file name.
+ filename, err := r.ReadString('\n')
+ if err != nil {
+ if err == io.EOF {
+ break // OK
+ }
+ return nil, fmt.Errorf("reading archive file name: %v", err)
+ }
+ filename = filepath.Clean(strings.TrimSpace(filename))
+
+ // Read file size.
+ sz, err := r.ReadString('\n')
+ if err != nil {
+ return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
+ }
+ sz = strings.TrimSpace(sz)
+ size, err := strconv.ParseUint(sz, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
+ }
+
+ // Read file content.
+ content := make([]byte, size)
+ if _, err := io.ReadFull(r, content); err != nil {
+ return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
+ }
+ overlay[filename] = content
+ }
+
+ return overlay, nil
+}
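+
+// Editor's sketch (illustrative, not part of the upstream file): an archive
+// holding one file whose contents are exactly 10 bytes ("package p\n"):
+//
+//	const archive = "/go/src/p/p.go\n10\npackage p\n"
+//	overlay, err := buildutil.ParseOverlayArchive(strings.NewReader(archive))
+//	// overlay["/go/src/p/p.go"] == []byte("package p\n")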
diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go
new file mode 100644
index 000000000..486606f37
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/tags.go
@@ -0,0 +1,75 @@
+package buildutil
+
+// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
+
+import "fmt"
+
+const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
+ "For more information about build tags, see the description of " +
+ "build constraints in the documentation for the go/build package"
+
+// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
+// a flag value in the same manner as go build's -tags flag and
+// populates a []string slice.
+//
+// See $GOROOT/src/go/build/doc.go for description of build tags.
+// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
+//
+// Example:
+// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
+type TagsFlag []string
+
+func (v *TagsFlag) Set(s string) error {
+ var err error
+ *v, err = splitQuotedFields(s)
+ if *v == nil {
+ *v = []string{}
+ }
+ return err
+}
+
+func (v *TagsFlag) Get() interface{} { return *v }
+
+func splitQuotedFields(s string) ([]string, error) {
+ // Split fields allowing '' or "" around elements.
+ // Quotes further inside the string do not count.
+ var f []string
+ for len(s) > 0 {
+ for len(s) > 0 && isSpaceByte(s[0]) {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ break
+ }
+ // Accept quoted string. No unescaping inside.
+ if s[0] == '"' || s[0] == '\'' {
+ quote := s[0]
+ s = s[1:]
+ i := 0
+ for i < len(s) && s[i] != quote {
+ i++
+ }
+ if i >= len(s) {
+ return nil, fmt.Errorf("unterminated %c string", quote)
+ }
+ f = append(f, s[:i])
+ s = s[i+1:]
+ continue
+ }
+ i := 0
+ for i < len(s) && !isSpaceByte(s[i]) {
+ i++
+ }
+ f = append(f, s[:i])
+ s = s[i:]
+ }
+ return f, nil
+}
+
+func (v *TagsFlag) String() string {
+ return "<tagsFlag>"
+}
+
+func isSpaceByte(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go
new file mode 100644
index 000000000..fc923d7a7
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/util.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
+// ParseFile behaves like parser.ParseFile,
+// but uses the build context's file system interface, if any.
+//
+// If file is not absolute (as defined by IsAbsPath), the (dir, file)
+// components are joined using JoinPath; dir must be absolute.
+//
+// The displayPath function, if provided, is used to transform the
+// filename that will be attached to the ASTs.
+//
+// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
+//
+func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
+ if !IsAbsPath(ctxt, file) {
+ file = JoinPath(ctxt, dir, file)
+ }
+ rd, err := OpenFile(ctxt, file)
+ if err != nil {
+ return nil, err
+ }
+ defer rd.Close() // ignore error
+ if displayPath != nil {
+ file = displayPath(file)
+ }
+ return parser.ParseFile(fset, file, rd, mode)
+}
+
+// ContainingPackage returns the package containing filename.
+//
+// If filename is not absolute, it is interpreted relative to working directory dir.
+// All I/O is via the build context's file system interface, if any.
+//
+// The '...Files []string' fields of the resulting build.Package are not
+// populated (build.FindOnly mode).
+//
+func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
+ if !IsAbsPath(ctxt, filename) {
+ filename = JoinPath(ctxt, dir, filename)
+ }
+
+ // We must not assume the file tree uses
+ // "/" always,
+ // `\` always,
+ // or os.PathSeparator (which varies by platform),
+ // but to make any progress, we are forced to assume that
+ // paths will not use `\` unless the PathSeparator
+ // is also `\`, thus we can rely on filepath.ToSlash for some sanity.
+
+ dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
+
+ // We assume that no source root (GOPATH[i] or GOROOT) contains any other.
+ for _, srcdir := range ctxt.SrcDirs() {
+ srcdirSlash := filepath.ToSlash(srcdir) + "/"
+ if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
+ return ctxt.Import(importPath, dir, build.FindOnly)
+ }
+ }
+
+ return nil, fmt.Errorf("can't find package containing %s", filename)
+}
+
+// -- Effective methods of file system interface -------------------------
+
+// (go/build.Context defines these as methods, but does not export them.)
+
+// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
+ if f := ctxt.HasSubdir; f != nil {
+ return f(root, dir)
+ }
+
+ // Try using paths we received.
+ if rel, ok = hasSubdir(root, dir); ok {
+ return
+ }
+
+ // Try expanding symlinks and comparing
+ // expanded against unexpanded and
+ // expanded against expanded.
+ rootSym, _ := filepath.EvalSymlinks(root)
+ dirSym, _ := filepath.EvalSymlinks(dir)
+
+ if rel, ok = hasSubdir(rootSym, dir); ok {
+ return
+ }
+ if rel, ok = hasSubdir(root, dirSym); ok {
+ return
+ }
+ return hasSubdir(rootSym, dirSym)
+}
+
+func hasSubdir(root, dir string) (rel string, ok bool) {
+ const sep = string(filepath.Separator)
+ root = filepath.Clean(root)
+ if !strings.HasSuffix(root, sep) {
+ root += sep
+ }
+
+ dir = filepath.Clean(dir)
+ if !strings.HasPrefix(dir, root) {
+ return "", false
+ }
+
+ return filepath.ToSlash(dir[len(root):]), true
+}
+
+// FileExists returns true if the specified file exists,
+// using the build context's file system interface.
+func FileExists(ctxt *build.Context, path string) bool {
+ if ctxt.OpenFile != nil {
+ r, err := ctxt.OpenFile(path)
+ if err != nil {
+ return false
+ }
+ r.Close() // ignore error
+ return true
+ }
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+// OpenFile behaves like os.Open,
+// but uses the build context's file system interface, if any.
+func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
+ if ctxt.OpenFile != nil {
+ return ctxt.OpenFile(path)
+ }
+ return os.Open(path)
+}
+
+// IsAbsPath behaves like filepath.IsAbs,
+// but uses the build context's file system interface, if any.
+func IsAbsPath(ctxt *build.Context, path string) bool {
+ if ctxt.IsAbsPath != nil {
+ return ctxt.IsAbsPath(path)
+ }
+ return filepath.IsAbs(path)
+}
+
+// JoinPath behaves like filepath.Join,
+// but uses the build context's file system interface, if any.
+func JoinPath(ctxt *build.Context, path ...string) string {
+ if ctxt.JoinPath != nil {
+ return ctxt.JoinPath(path...)
+ }
+ return filepath.Join(path...)
+}
+
+// IsDir behaves like os.Stat plus IsDir,
+// but uses the build context's file system interface, if any.
+func IsDir(ctxt *build.Context, path string) bool {
+ if ctxt.IsDir != nil {
+ return ctxt.IsDir(path)
+ }
+ fi, err := os.Stat(path)
+ return err == nil && fi.IsDir()
+}
+
+// ReadDir behaves like ioutil.ReadDir,
+// but uses the build context's file system interface, if any.
+func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
+ if ctxt.ReadDir != nil {
+ return ctxt.ReadDir(path)
+ }
+ return ioutil.ReadDir(path)
+}
+
+// SplitPathList behaves like filepath.SplitList,
+// but uses the build context's file system interface, if any.
+func SplitPathList(ctxt *build.Context, s string) []string {
+ if ctxt.SplitPathList != nil {
+ return ctxt.SplitPathList(s)
+ }
+ return filepath.SplitList(s)
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+//
+func sameFile(x, y string) bool {
+ if path.Clean(x) == path.Clean(y) {
+ return true
+ }
+ if filepath.Base(x) == filepath.Base(y) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
new file mode 100644
index 000000000..98b3987b9
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -0,0 +1,109 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gcexportdata provides functions for locating, reading, and
+// writing export data files containing type information produced by the
+// gc compiler. This package supports go1.7 export data format and all
+// later versions.
+//
+// Although it might seem convenient for this package to live alongside
+// go/types in the standard library, this would cause version skew
+// problems for developer tools that use it, since they must be able to
+// consume the outputs of the gc compiler both before and after a Go
+// update such as from Go 1.7 to Go 1.8. Because this package lives in
+// golang.org/x/tools, sites can update their version of this repo some
+// time before the Go 1.8 release and rebuild and redeploy their
+// developer tools, which will then be able to consume both Go 1.7 and
+// Go 1.8 export data files, so they will work before and after the
+// Go update. (See discussion at https://golang.org/issue/15651.)
+//
+package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "io/ioutil"
+
+ "golang.org/x/tools/go/internal/gcimporter"
+)
+
+// Find returns the name of an object (.o) or archive (.a) file
+// containing type information for the specified import path,
+// using the workspace layout conventions of go/build.
+// If no file was found, an empty filename is returned.
+//
+// A relative srcDir is interpreted relative to the current working directory.
+//
+// Find also returns the package's resolved (canonical) import path,
+// reflecting the effects of srcDir and vendoring on importPath.
+func Find(importPath, srcDir string) (filename, path string) {
+ return gcimporter.FindPkg(importPath, srcDir)
+}
+
+// NewReader returns a reader for the export data section of an object
+// (.o) or archive (.a) file read from r. The new reader may provide
+// additional trailing data beyond the end of the export data.
+func NewReader(r io.Reader) (io.Reader, error) {
+ buf := bufio.NewReader(r)
+ _, err := gcimporter.FindExportData(buf)
+ // If we ever switch to a zip-like archive format with the ToC
+ // at the end, we can return the correct portion of export data,
+ // but for now we must return the entire rest of the file.
+ return buf, err
+}
+
+// Read reads export data from in, decodes it, and returns type
+// information for the package.
+// The package name is specified by path.
+// File position information is added to fset.
+//
+// Read may inspect and add to the imports map to ensure that references
+// within the export data to other packages are consistent. The caller
+// must ensure that imports[path] does not exist, or exists but is
+// incomplete (see types.Package.Complete), and Read inserts the
+// resulting package into this map entry.
+//
+// On return, the state of the reader is undefined.
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
+ data, err := ioutil.ReadAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export data for %q: %v", path, err)
+ }
+
+ if bytes.HasPrefix(data, []byte("!<arch>")) {
+ return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
+ }
+
+ // The App Engine Go runtime v1.6 uses the old export data format.
+ // TODO(adonovan): delete once v1.7 has been around for a while.
+ if bytes.HasPrefix(data, []byte("package ")) {
+ return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
+ }
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 && data[0] == 'i' {
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
+ }
+
+ _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+ return pkg, err
+}
+
+// Write writes encoded type information for the specified package to out.
+// The FileSet provides file position information for named objects.
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ b, err := gcimporter.BExportData(fset, pkg)
+ if err != nil {
+ return err
+ }
+ _, err = out.Write(b)
+ return err
+}
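+
+// Editor's sketch (illustrative, not part of the upstream file): locating
+// and decoding export data for a package (error handling elided):
+//
+//	filename, path := gcexportdata.Find("fmt", "")
+//	f, _ := os.Open(filename)
+//	r, _ := gcexportdata.NewReader(f)
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	pkg, _ := gcexportdata.Read(r, fset, imports, path)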
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
new file mode 100644
index 000000000..efe221e7e
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcexportdata
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "os"
+)
+
+// NewImporter returns a new instance of the types.Importer interface
+// that reads type information from export data files written by gc.
+// The Importer also satisfies types.ImporterFrom.
+//
+// Export data files are located using "go build" workspace conventions
+// and the build.Default context.
+//
+// Use this importer instead of go/importer.For("gc", ...) to avoid the
+// version-skew problems described in the documentation of this package,
+// or to control the FileSet or access the imports map populated during
+// package loading.
+//
+func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
+ return importer{fset, imports}
+}
+
+type importer struct {
+ fset *token.FileSet
+ imports map[string]*types.Package
+}
+
+func (imp importer) Import(importPath string) (*types.Package, error) {
+ return imp.ImportFrom(importPath, "", 0)
+}
+
+func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
+ filename, path := Find(importPath, srcDir)
+ if filename == "" {
+ if importPath == "unsafe" {
+ // Even for unsafe, call Find first in case
+ // the package was vendored.
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %s", importPath)
+ }
+
+ if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
+ return pkg, nil // cache hit
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ r, err := NewReader(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return Read(r, imp.fset, imp.imports, path)
+}
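+
+// Editor's sketch (illustrative, not part of the upstream file): plugging
+// the importer into go/types type-checking:
+//
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
+//	pkg, err := conf.Check("p", fset, files, nil) // files []*ast.File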
diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go
new file mode 100644
index 000000000..0f652ea6f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go
@@ -0,0 +1,220 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgo
+
+// This file handles cgo preprocessing of files containing `import "C"`.
+//
+// DESIGN
+//
+// The approach taken is to run the cgo processor on the package's
+// CgoFiles and parse the output, faking the filenames of the
+// resulting ASTs so that the synthetic file containing the C types is
+// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
+// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
+// not the names of the actual temporary files.
+//
+// The advantage of this approach is its fidelity to 'go build'. The
+// downside is that the token.Position.Offset for each AST node is
+// incorrect, being an offset within the temporary file. Line numbers
+// should still be correct because of the //line comments.
+//
+// The logic of this file is mostly plundered from the 'go build'
+// tool, which also invokes the cgo preprocessor.
+//
+//
+// REJECTED ALTERNATIVE
+//
+// An alternative approach that we explored is to extend go/types'
+// Importer mechanism to provide the identity of the importing package
+// so that each time `import "C"` appears it resolves to a different
+// synthetic package containing just the objects needed in that case.
+// The loader would invoke cgo but parse only the cgo_types.go file
+// defining the package-level objects, discarding the other files
+// resulting from preprocessing.
+//
+// The benefit of this approach would have been that source-level
+// syntax information would correspond exactly to the original cgo
+// file, with no preprocessing involved, making source tools like
+// godoc, guru, and eg happy. However, the approach was rejected
+// due to the additional complexity it would impose on go/types. (It
+// made for a beautiful demo, though.)
+//
+// cgo files, despite their *.go extension, are not legal Go source
+// files per the specification since they may refer to unexported
+// members of package "C" such as C.int. Also, a function such as
+// C.getpwent has in effect two types, one matching its C type and one
+// which additionally returns (errno C.int). The cgo preprocessor
+// uses name mangling to distinguish these two functions in the
+// processed code, but go/types would need to duplicate this logic in
+// its handling of function calls, analogous to the treatment of map
+// lookups in which y=m[k] and y,ok=m[k] are both legal.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
+// the output and returns the resulting ASTs.
+//
+func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
+ tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
+ if err != nil {
+ return nil, err
+ }
+ defer os.RemoveAll(tmpdir)
+
+ pkgdir := bp.Dir
+ if DisplayPath != nil {
+ pkgdir = DisplayPath(pkgdir)
+ }
+
+ cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
+ if err != nil {
+ return nil, err
+ }
+ var files []*ast.File
+ for i := range cgoFiles {
+ rd, err := os.Open(cgoFiles[i])
+ if err != nil {
+ return nil, err
+ }
+ display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
+ f, err := parser.ParseFile(fset, display, rd, mode)
+ rd.Close()
+ if err != nil {
+ return nil, err
+ }
+ files = append(files, f)
+ }
+ return files, nil
+}
+
+var cgoRe = regexp.MustCompile(`[/\\:]`)
+
+// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
+// lists of files: the resulting processed files (in temporary
+// directory tmpdir) and the corresponding names of the unprocessed files.
+//
+// Run is adapted from (*builder).cgo in
+// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
+// Objective C, CGOPKGPATH, CGO_FLAGS.
+//
+// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
+// to the cgo preprocessor. This in turn will set the // line comments
+// referring to those files to use absolute paths. This is needed for
+// go/packages using the legacy go list support so it is able to find
+// the original files.
+func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
+ cgoCPPFLAGS, _, _, _ := cflags(bp, true)
+ _, cgoexeCFLAGS, _, _ := cflags(bp, false)
+
+ if len(bp.CgoPkgConfig) > 0 {
+ pcCFLAGS, err := pkgConfigFlags(bp)
+ if err != nil {
+ return nil, nil, err
+ }
+ cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
+ }
+
+ // Allows including _cgo_export.h from .[ch] files in the package.
+ cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
+
+ // _cgo_gotypes.go (displayed "C") contains the type definitions.
+ files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
+ displayFiles = append(displayFiles, "C")
+ for _, fn := range bp.CgoFiles {
+ // "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
+ f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
+ files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
+ displayFiles = append(displayFiles, fn)
+ }
+
+ var cgoflags []string
+ if bp.Goroot && bp.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-import_runtime_cgo=false")
+ }
+ if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-import_syscall=false")
+ }
+
+ var cgoFiles []string = bp.CgoFiles
+ if useabs {
+ cgoFiles = make([]string, len(bp.CgoFiles))
+ for i := range cgoFiles {
+ cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
+ }
+ }
+
+ args := stringList(
+ "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
+ cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
+ )
+ if false {
+ log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
+ }
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Dir = pkgdir
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
+ }
+
+ return files, displayFiles, nil
+}
+
+// -- unmodified from 'go build' ---------------------------------------
+
+// Return the flags to use when invoking the C or C++ compilers, or cgo.
+func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
+ var defaults string
+ if def {
+ defaults = "-g -O2"
+ }
+
+ cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
+ cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
+ cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
+ ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
+ return
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+func envList(key, def string) []string {
+ v := os.Getenv(key)
+ if v == "" {
+ v = def
+ }
+ return strings.Fields(v)
+}
+
+// stringList's arguments should be a sequence of string or []string values.
+// stringList flattens them into a single []string.
+func stringList(args ...interface{}) []string {
+ var x []string
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case []string:
+ x = append(x, arg...)
+ case string:
+ x = append(x, arg)
+ default:
+ panic("stringList: invalid argument")
+ }
+ }
+ return x
+}
diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
new file mode 100644
index 000000000..b5bb95a63
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
@@ -0,0 +1,39 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgo
+
+import (
+ "errors"
+ "fmt"
+ "go/build"
+ "os/exec"
+ "strings"
+)
+
+// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
+func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
+ cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
+ if len(out) > 0 {
+ s = fmt.Sprintf("%s: %s", s, out)
+ }
+ return nil, errors.New(s)
+ }
+ if len(out) > 0 {
+ flags = strings.Fields(string(out))
+ }
+ return
+}
+
+// pkgConfigFlags calls pkg-config if needed and returns the cflags
+// needed to build the package.
+func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
+ if len(p.CgoPkgConfig) == 0 {
+ return nil, nil
+ }
+ return pkgConfig("--cflags", p.CgoPkgConfig)
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
new file mode 100644
index 000000000..a807d0aaa
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
@@ -0,0 +1,852 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "math"
+ "math/big"
+ "sort"
+ "strings"
+)
+
+// If debugFormat is set, each integer and string value is preceded by a marker
+// and position information in the encoding. This mechanism permits an importer
+// to recognize immediately when it is out of sync. The importer recognizes this
+// mode automatically (i.e., it can import export data produced with debugging
+// support even if debugFormat is not set at the time of import). This mode will
+// lead to massively larger export data (by a factor of 2 to 3) and should only
+// be enabled during development and debugging.
+//
+// NOTE: This flag is the first flag to enable if importing dies because of
+// (suspected) format errors, and whenever a change is made to the format.
+const debugFormat = false // default: false
+
+// If trace is set, debugging output is printed to std out.
+const trace = false // default: false
+
+// Current export format version. Increase with each format change.
+// Note: The latest binary (non-indexed) export format is at version 6.
+// This exporter is still at level 4, but it doesn't matter since
+// the binary importer can handle older versions just fine.
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
+// 4: type name objects support type aliases, uses aliasTag
+// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 4
+
+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+type exporter struct {
+ fset *token.FileSet
+ out bytes.Buffer
+
+ // object -> index maps, indexed in order of serialization
+ strIndex map[string]int
+ pkgIndex map[*types.Package]int
+ typIndex map[types.Type]int
+
+ // position encoding
+ posInfoFormat bool
+ prevFile string
+ prevLine int
+
+ // debugging support
+ written int // bytes written
+ indent int // for trace
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+
+ p := exporter{
+ fset: fset,
+ strIndex: map[string]int{"": 0}, // empty string is mapped to 0
+ pkgIndex: make(map[*types.Package]int),
+ typIndex: make(map[types.Type]int),
+ posInfoFormat: true, // TODO(gri) might become a flag, eventually
+ }
+
+ // write version info
+ // The version string must start with "version %d" where %d is the version
+ // number. Additional debugging information may follow after a blank; that
+ // text is ignored by the importer.
+ p.rawStringln(fmt.Sprintf("version %d", exportVersion))
+ var debug string
+ if debugFormat {
+ debug = "debug"
+ }
+ p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
+ p.bool(trackAllTypes)
+ p.bool(p.posInfoFormat)
+
+ // --- generic export data ---
+
+ // populate type map with predeclared "known" types
+ for index, typ := range predeclared() {
+ p.typIndex[typ] = index
+ }
+ if len(p.typIndex) != len(predeclared()) {
+ return nil, internalError("duplicate entries in type map?")
+ }
+
+ // write package data
+ p.pkg(pkg, true)
+ if trace {
+ p.tracef("\n")
+ }
+
+ // write objects
+ objcount := 0
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if !ast.IsExported(name) {
+ continue
+ }
+ if trace {
+ p.tracef("\n")
+ }
+ p.obj(scope.Lookup(name))
+ objcount++
+ }
+
+ // indicate end of list
+ if trace {
+ p.tracef("\n")
+ }
+ p.tag(endTag)
+
+ // for self-verification only (redundant)
+ p.int(objcount)
+
+ if trace {
+ p.tracef("\n")
+ }
+
+ // --- end of export data ---
+
+ return p.out.Bytes(), nil
+}
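+
+// Illustrative header layout (editor's sketch, not normative): with
+// exportVersion == 4 and debugFormat == false, BExportData begins the
+// stream with
+//
+//	"version 4\n"  // rawStringln: version line
+//	"\n"           // rawStringln: empty debug string
+//	varint(0)      // p.bool(trackAllTypes) == false
+//	varint(1)      // p.bool(p.posInfoFormat) == true
+//
+// followed by the package data, the exported objects, endTag, and the
+// redundant object count.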
+
+func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
+ if pkg == nil {
+ panic(internalError("unexpected nil pkg"))
+ }
+
+ // if we saw the package before, write its index (>= 0)
+ if i, ok := p.pkgIndex[pkg]; ok {
+ p.index('P', i)
+ return
+ }
+
+ // otherwise, remember the package, write the package tag (< 0) and package data
+ if trace {
+ p.tracef("P%d = { ", len(p.pkgIndex))
+ defer p.tracef("} ")
+ }
+ p.pkgIndex[pkg] = len(p.pkgIndex)
+
+ p.tag(packageTag)
+ p.string(pkg.Name())
+ if emptypath {
+ p.string("")
+ } else {
+ p.string(pkg.Path())
+ }
+}
+
+func (p *exporter) obj(obj types.Object) {
+ switch obj := obj.(type) {
+ case *types.Const:
+ p.tag(constTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ p.typ(obj.Type())
+ p.value(obj.Val())
+
+ case *types.TypeName:
+ if obj.IsAlias() {
+ p.tag(aliasTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ } else {
+ p.tag(typeTag)
+ }
+ p.typ(obj.Type())
+
+ case *types.Var:
+ p.tag(varTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ p.typ(obj.Type())
+
+ case *types.Func:
+ p.tag(funcTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ sig := obj.Type().(*types.Signature)
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+
+ default:
+ panic(internalErrorf("unexpected object %v (%T)", obj, obj))
+ }
+}
+
+func (p *exporter) pos(obj types.Object) {
+ if !p.posInfoFormat {
+ return
+ }
+
+ file, line := p.fileLine(obj)
+ if file == p.prevFile {
+ // common case: write line delta
+ // delta == 0 means different file or no line change
+ delta := line - p.prevLine
+ p.int(delta)
+ if delta == 0 {
+ p.int(-1) // -1 means no file change
+ }
+ } else {
+ // different file
+ p.int(0)
+ // Encode filename as length of common prefix with previous
+ // filename, followed by (possibly empty) suffix. Filenames
+ // frequently share path prefixes, so this can save a lot
+ // of space and make export data size less dependent on file
+ // path length. The suffix is unlikely to be empty because
+ // file names tend to end in ".go".
+ n := commonPrefixLen(p.prevFile, file)
+ p.int(n) // n >= 0
+ p.string(file[n:]) // write suffix only
+ p.prevFile = file
+ p.int(line)
+ }
+ p.prevLine = line
+}
+
+func (p *exporter) fileLine(obj types.Object) (file string, line int) {
+ if p.fset != nil {
+ pos := p.fset.Position(obj.Pos())
+ file = pos.Filename
+ line = pos.Line
+ }
+ return
+}
+
+func commonPrefixLen(a, b string) int {
+ if len(a) > len(b) {
+ a, b = b, a
+ }
+ // len(a) <= len(b)
+ i := 0
+ for i < len(a) && a[i] == b[i] {
+ i++
+ }
+ return i
+}
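+
+// Worked example (editor's note): for the files "a/b/x.go" and
+// "a/b/y.go", commonPrefixLen returns 4 (the shared prefix "a/b/").
+// A position in "a/b/y.go" following one in "a/b/x.go" is therefore
+// encoded by pos as 0 (file change), 4 (prefix length), "y.go"
+// (suffix), and the absolute line number.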
+
+func (p *exporter) qualifiedName(obj types.Object) {
+ p.string(obj.Name())
+ p.pkg(obj.Pkg(), false)
+}
+
+func (p *exporter) typ(t types.Type) {
+ if t == nil {
+ panic(internalError("nil type"))
+ }
+
+ // Possible optimization: Anonymous pointer types *T where
+ // T is a named type are common. We could canonicalize all
+ // such types *T to a single type PT = *T. This would lead
+ // to at most one *T entry in typIndex, and all future *T's
+ // would be encoded as the respective index directly. Would
+ // save 1 byte (pointerTag) per *T and reduce the typIndex
+ // size (at the cost of a canonicalization map). We can do
+ // this later, without encoding format change.
+
+ // if we saw the type before, write its index (>= 0)
+ if i, ok := p.typIndex[t]; ok {
+ p.index('T', i)
+ return
+ }
+
+ // otherwise, remember the type, write the type tag (< 0) and type data
+ if trackAllTypes {
+ if trace {
+ p.tracef("T%d = {>\n", len(p.typIndex))
+ defer p.tracef("<\n} ")
+ }
+ p.typIndex[t] = len(p.typIndex)
+ }
+
+ switch t := t.(type) {
+ case *types.Named:
+ if !trackAllTypes {
+ // if we don't track all types, track named types now
+ p.typIndex[t] = len(p.typIndex)
+ }
+
+ p.tag(namedTag)
+ p.pos(t.Obj())
+ p.qualifiedName(t.Obj())
+ p.typ(t.Underlying())
+ if !types.IsInterface(t) {
+ p.assocMethods(t)
+ }
+
+ case *types.Array:
+ p.tag(arrayTag)
+ p.int64(t.Len())
+ p.typ(t.Elem())
+
+ case *types.Slice:
+ p.tag(sliceTag)
+ p.typ(t.Elem())
+
+ case *dddSlice:
+ p.tag(dddTag)
+ p.typ(t.elem)
+
+ case *types.Struct:
+ p.tag(structTag)
+ p.fieldList(t)
+
+ case *types.Pointer:
+ p.tag(pointerTag)
+ p.typ(t.Elem())
+
+ case *types.Signature:
+ p.tag(signatureTag)
+ p.paramList(t.Params(), t.Variadic())
+ p.paramList(t.Results(), false)
+
+ case *types.Interface:
+ p.tag(interfaceTag)
+ p.iface(t)
+
+ case *types.Map:
+ p.tag(mapTag)
+ p.typ(t.Key())
+ p.typ(t.Elem())
+
+ case *types.Chan:
+ p.tag(chanTag)
+ p.int(int(3 - t.Dir())) // hack
+ p.typ(t.Elem())
+
+ default:
+ panic(internalErrorf("unexpected type %T: %s", t, t))
+ }
+}
+
+func (p *exporter) assocMethods(named *types.Named) {
+ // Sort methods (for determinism).
+ var methods []*types.Func
+ for i := 0; i < named.NumMethods(); i++ {
+ methods = append(methods, named.Method(i))
+ }
+ sort.Sort(methodsByName(methods))
+
+ p.int(len(methods))
+
+ if trace && methods != nil {
+ p.tracef("associated methods {>\n")
+ }
+
+ for i, m := range methods {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+
+ p.pos(m)
+ name := m.Name()
+ p.string(name)
+ if !exported(name) {
+ p.pkg(m.Pkg(), false)
+ }
+
+ sig := m.Type().(*types.Signature)
+ p.paramList(types.NewTuple(sig.Recv()), false)
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+ p.int(0) // dummy value for go:nointerface pragma - ignored by importer
+ }
+
+ if trace && methods != nil {
+ p.tracef("<\n} ")
+ }
+}
+
+type methodsByName []*types.Func
+
+func (x methodsByName) Len() int { return len(x) }
+func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
+
+func (p *exporter) fieldList(t *types.Struct) {
+ if trace && t.NumFields() > 0 {
+ p.tracef("fields {>\n")
+ defer p.tracef("<\n} ")
+ }
+
+ p.int(t.NumFields())
+ for i := 0; i < t.NumFields(); i++ {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+ p.field(t.Field(i))
+ p.string(t.Tag(i))
+ }
+}
+
+func (p *exporter) field(f *types.Var) {
+ if !f.IsField() {
+ panic(internalError("field expected"))
+ }
+
+ p.pos(f)
+ p.fieldName(f)
+ p.typ(f.Type())
+}
+
+func (p *exporter) iface(t *types.Interface) {
+ // TODO(gri): enable importer to load embedded interfaces,
+ // then emit Embeddeds and ExplicitMethods separately here.
+ p.int(0)
+
+ n := t.NumMethods()
+ if trace && n > 0 {
+ p.tracef("methods {>\n")
+ defer p.tracef("<\n} ")
+ }
+ p.int(n)
+ for i := 0; i < n; i++ {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+ p.method(t.Method(i))
+ }
+}
+
+func (p *exporter) method(m *types.Func) {
+ sig := m.Type().(*types.Signature)
+ if sig.Recv() == nil {
+ panic(internalError("method expected"))
+ }
+
+ p.pos(m)
+ p.string(m.Name())
+ if m.Name() != "_" && !ast.IsExported(m.Name()) {
+ p.pkg(m.Pkg(), false)
+ }
+
+ // interface method; no need to encode receiver.
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+}
+
+func (p *exporter) fieldName(f *types.Var) {
+ name := f.Name()
+
+ if f.Anonymous() {
+ // anonymous field - we distinguish between 3 cases:
+ // 1) field name matches base type name and is exported
+ // 2) field name matches base type name and is not exported
+ // 3) field name doesn't match base type name (alias name)
+ bname := basetypeName(f.Type())
+ if name == bname {
+ if ast.IsExported(name) {
+ name = "" // 1) we don't need to know the field name or package
+ } else {
+ name = "?" // 2) use unexported name "?" to force package export
+ }
+ } else {
+ // 3) indicate alias and export name as is
+ // (this requires an extra "@" but this is a rare case)
+ p.string("@")
+ }
+ }
+
+ p.string(name)
+ if name != "" && !ast.IsExported(name) {
+ p.pkg(f.Pkg(), false)
+ }
+}
+
+func basetypeName(typ types.Type) string {
+ switch typ := deref(typ).(type) {
+ case *types.Basic:
+ return typ.Name()
+ case *types.Named:
+ return typ.Obj().Name()
+ default:
+ return "" // unnamed type
+ }
+}
+
+func (p *exporter) paramList(params *types.Tuple, variadic bool) {
+ // use negative length to indicate unnamed parameters
+ // (look at the first parameter only since either all
+ // names are present or all are absent)
+ n := params.Len()
+ if n > 0 && params.At(0).Name() == "" {
+ n = -n
+ }
+ p.int(n)
+ for i := 0; i < params.Len(); i++ {
+ q := params.At(i)
+ t := q.Type()
+ if variadic && i == params.Len()-1 {
+ t = &dddSlice{t.(*types.Slice).Elem()}
+ }
+ p.typ(t)
+ if n > 0 {
+ name := q.Name()
+ p.string(name)
+ if name != "_" {
+ p.pkg(q.Pkg(), false)
+ }
+ }
+ p.string("") // no compiler-specific info
+ }
+}
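+
+// Encoding note (editor's sketch): for func(x, y int) the tuple length
+// is written as +2 and each parameter contributes its type, name, and
+// an empty compiler-info string; for func(int, int) the length is
+// written as -2 and the parameter names (and their packages) are
+// omitted.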
+
+func (p *exporter) value(x constant.Value) {
+ if trace {
+ p.tracef("= ")
+ }
+
+ switch x.Kind() {
+ case constant.Bool:
+ tag := falseTag
+ if constant.BoolVal(x) {
+ tag = trueTag
+ }
+ p.tag(tag)
+
+ case constant.Int:
+ if v, exact := constant.Int64Val(x); exact {
+ // common case: x fits into an int64 - use compact encoding
+ p.tag(int64Tag)
+ p.int64(v)
+ return
+ }
+ // uncommon case: large x - use float encoding
+ // (powers of 2 will be encoded efficiently with exponent)
+ p.tag(floatTag)
+ p.float(constant.ToFloat(x))
+
+ case constant.Float:
+ p.tag(floatTag)
+ p.float(x)
+
+ case constant.Complex:
+ p.tag(complexTag)
+ p.float(constant.Real(x))
+ p.float(constant.Imag(x))
+
+ case constant.String:
+ p.tag(stringTag)
+ p.string(constant.StringVal(x))
+
+ case constant.Unknown:
+ // package contains type errors
+ p.tag(unknownTag)
+
+ default:
+ panic(internalErrorf("unexpected value %v (%T)", x, x))
+ }
+}
+
+func (p *exporter) float(x constant.Value) {
+ if x.Kind() != constant.Float {
+ panic(internalErrorf("unexpected constant %v, want float", x))
+ }
+ // extract sign (there is no -0)
+ sign := constant.Sign(x)
+ if sign == 0 {
+ // x == 0
+ p.int(0)
+ return
+ }
+ // x != 0
+
+ var f big.Float
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ r := valueToRat(num)
+ f.SetRat(r.Quo(r, valueToRat(denom)))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ f.SetFloat64(math.MaxFloat64) // FIXME
+ }
+
+ // extract exponent such that 0.5 <= m < 1.0
+ var m big.Float
+ exp := f.MantExp(&m)
+
+ // extract mantissa as *big.Int
+ // - set exponent large enough so mant satisfies mant.IsInt()
+ // - get *big.Int from mant
+ m.SetMantExp(&m, int(m.MinPrec()))
+ mant, acc := m.Int(nil)
+ if acc != big.Exact {
+ panic(internalError("internal error"))
+ }
+
+ p.int(sign)
+ p.int(exp)
+ p.string(string(mant.Bytes()))
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+	// Convert the little-endian bytes returned by constant.Bytes
+	// to the big-endian form that big.Int.SetBytes expects.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+func (p *exporter) bool(b bool) bool {
+ if trace {
+ p.tracef("[")
+ defer p.tracef("= %v] ", b)
+ }
+
+ x := 0
+ if b {
+ x = 1
+ }
+ p.int(x)
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Low-level encoders
+
+func (p *exporter) index(marker byte, index int) {
+ if index < 0 {
+ panic(internalError("invalid index < 0"))
+ }
+ if debugFormat {
+ p.marker('t')
+ }
+ if trace {
+ p.tracef("%c%d ", marker, index)
+ }
+ p.rawInt64(int64(index))
+}
+
+func (p *exporter) tag(tag int) {
+ if tag >= 0 {
+ panic(internalError("invalid tag >= 0"))
+ }
+ if debugFormat {
+ p.marker('t')
+ }
+ if trace {
+ p.tracef("%s ", tagString[-tag])
+ }
+ p.rawInt64(int64(tag))
+}
+
+func (p *exporter) int(x int) {
+ p.int64(int64(x))
+}
+
+func (p *exporter) int64(x int64) {
+ if debugFormat {
+ p.marker('i')
+ }
+ if trace {
+ p.tracef("%d ", x)
+ }
+ p.rawInt64(x)
+}
+
+func (p *exporter) string(s string) {
+ if debugFormat {
+ p.marker('s')
+ }
+ if trace {
+ p.tracef("%q ", s)
+ }
+ // if we saw the string before, write its index (>= 0)
+ // (the empty string is mapped to 0)
+ if i, ok := p.strIndex[s]; ok {
+ p.rawInt64(int64(i))
+ return
+ }
+ // otherwise, remember string and write its negative length and bytes
+ p.strIndex[s] = len(p.strIndex)
+ p.rawInt64(-int64(len(s)))
+ for i := 0; i < len(s); i++ {
+ p.rawByte(s[i])
+ }
+}
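+
+// Interning example (editor's note): the first occurrence of "foo" is
+// written as -3 followed by the bytes 'f' 'o' 'o' and is assigned the
+// next free index; every later occurrence of "foo" is written as that
+// single non-negative index. The empty string is pre-assigned index 0.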
+
+// marker emits a marker byte and position information which makes
+// it easy for a reader to detect if it is "out of sync". Used for
+// debugFormat format only.
+func (p *exporter) marker(m byte) {
+ p.rawByte(m)
+ // Enable this for help tracking down the location
+ // of an incorrect marker when running in debugFormat.
+ if false && trace {
+ p.tracef("#%d ", p.written)
+ }
+ p.rawInt64(int64(p.written))
+}
+
+// rawInt64 should only be used by low-level encoders.
+func (p *exporter) rawInt64(x int64) {
+ var tmp [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(tmp[:], x)
+ for i := 0; i < n; i++ {
+ p.rawByte(tmp[i])
+ }
+}
+
+// rawStringln should only be used to emit the initial version string.
+func (p *exporter) rawStringln(s string) {
+ for i := 0; i < len(s); i++ {
+ p.rawByte(s[i])
+ }
+ p.rawByte('\n')
+}
+
+// rawByte is the bottleneck interface to write to p.out.
+// rawByte escapes b as follows (any encoding that
+// hides '$' would do):
+//
+// '$' => '|' 'S'
+// '|' => '|' '|'
+//
+// Necessary so other tools can find the end of the
+// export data by searching for "$$".
+// rawByte should only be used by low-level encoders.
+func (p *exporter) rawByte(b byte) {
+ switch b {
+ case '$':
+ // write '$' as '|' 'S'
+ b = 'S'
+ fallthrough
+ case '|':
+ // write '|' as '|' '|'
+ p.out.WriteByte('|')
+ p.written++
+ }
+ p.out.WriteByte(b)
+ p.written++
+}
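+
+// Escaping example (editor's sketch): the byte sequence "x$|y" is
+// emitted as 'x' '|' 'S' '|' '|' 'y', so a literal "$$" can never
+// appear inside the export data and tools can safely scan for "$$"
+// to find its end.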
+
+// tracef is like fmt.Printf but it rewrites the format string
+// to take care of indentation.
+func (p *exporter) tracef(format string, args ...interface{}) {
+ if strings.ContainsAny(format, "<>\n") {
+ var buf bytes.Buffer
+ for i := 0; i < len(format); i++ {
+ // no need to deal with runes
+ ch := format[i]
+ switch ch {
+ case '>':
+ p.indent++
+ continue
+ case '<':
+ p.indent--
+ continue
+ }
+ buf.WriteByte(ch)
+ if ch == '\n' {
+ for j := p.indent; j > 0; j-- {
+ buf.WriteString(". ")
+ }
+ }
+ }
+ format = buf.String()
+ }
+ fmt.Printf(format, args...)
+}
+
+// Debugging support.
+// (tagString is only used when tracing is enabled)
+var tagString = [...]string{
+ // Packages
+ -packageTag: "package",
+
+ // Types
+ -namedTag: "named type",
+ -arrayTag: "array",
+ -sliceTag: "slice",
+ -dddTag: "ddd",
+ -structTag: "struct",
+ -pointerTag: "pointer",
+ -signatureTag: "signature",
+ -interfaceTag: "interface",
+ -mapTag: "map",
+ -chanTag: "chan",
+
+ // Values
+ -falseTag: "false",
+ -trueTag: "true",
+ -int64Tag: "int64",
+ -floatTag: "float",
+ -fractionTag: "fraction",
+ -complexTag: "complex",
+ -stringTag: "string",
+ -unknownTag: "unknown",
+
+ // Type aliases
+ -aliasTag: "alias",
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
new file mode 100644
index 000000000..e3c310782
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
@@ -0,0 +1,1036 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+type importer struct {
+ imports map[string]*types.Package
+ data []byte
+ importpath string
+ buf []byte // for reading strings
+ version int // export format version
+
+ // object lists
+ strList []string // in order of appearance
+ pathList []string // in order of appearance
+ pkgList []*types.Package // in order of appearance
+ typList []types.Type // in order of appearance
+ interfaceList []*types.Interface // for delayed completion only
+ trackAllTypes bool
+
+ // position encoding
+ posInfoFormat bool
+ prevFile string
+ prevLine int
+ fake fakeFileSet
+
+ // debugging support
+ debugFormat bool
+ read int // bytes read
+}
+
+// BImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ // catch panics and return them as errors
+ const currentVersion = 6
+ version := -1 // unknown version
+ defer func() {
+ if e := recover(); e != nil {
+ // Return a (possibly nil or incomplete) package unchanged (see #16088).
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+
+ p := importer{
+ imports: imports,
+ data: data,
+ importpath: path,
+ version: version,
+ strList: []string{""}, // empty string is mapped to 0
+ pathList: []string{""}, // empty string is mapped to 0
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*token.File),
+ },
+ }
+
+ // read version info
+ var versionstr string
+ if b := p.rawByte(); b == 'c' || b == 'd' {
+ // Go1.7 encoding; first byte encodes low-level
+ // encoding format (compact vs debug).
+ // For backward-compatibility only (avoid problems with
+ // old installed packages). Newly compiled packages use
+ // the extensible format string.
+ // TODO(gri) Remove this support eventually; after Go1.8.
+ if b == 'd' {
+ p.debugFormat = true
+ }
+ p.trackAllTypes = p.rawByte() == 'a'
+ p.posInfoFormat = p.int() != 0
+ versionstr = p.string()
+ if versionstr == "v1" {
+ version = 0
+ }
+ } else {
+ // Go1.8 extensible encoding
+ // read version string and extract version number (ignore anything after the version number)
+ versionstr = p.rawStringln(b)
+ if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
+ if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
+ version = v
+ }
+ }
+ }
+ p.version = version
+
+ // read version specific flags - extend as necessary
+ switch p.version {
+ // case currentVersion:
+ // ...
+ // fallthrough
+ case currentVersion, 5, 4, 3, 2, 1:
+ p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
+ p.trackAllTypes = p.int() != 0
+ p.posInfoFormat = p.int() != 0
+ case 0:
+ // Go1.7 encoding format - nothing to do here
+ default:
+ errorf("unknown bexport format version %d (%q)", p.version, versionstr)
+ }
+
+ // --- generic export data ---
+
+ // populate typList with predeclared "known" types
+ p.typList = append(p.typList, predeclared()...)
+
+ // read package data
+ pkg = p.pkg()
+
+ // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
+ objcount := 0
+ for {
+ tag := p.tagOrIndex()
+ if tag == endTag {
+ break
+ }
+ p.obj(tag)
+ objcount++
+ }
+
+ // self-verification
+ if count := p.int(); count != objcount {
+ errorf("got %d objects; want %d", objcount, count)
+ }
+
+ // ignore compiler-specific import data
+
+ // complete interfaces
+ // TODO(gri) re-investigate if we still need to do this in a delayed fashion
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), p.pkgList[1:]...)
+ sort.Sort(byPath(list))
+ pkg.SetImports(list)
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+
+ return p.read, pkg, nil
+}
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+func (p *importer) pkg() *types.Package {
+ // if the package was seen before, i is its index (>= 0)
+ i := p.tagOrIndex()
+ if i >= 0 {
+ return p.pkgList[i]
+ }
+
+ // otherwise, i is the package tag (< 0)
+ if i != packageTag {
+ errorf("unexpected package tag %d version %d", i, p.version)
+ }
+
+ // read package data
+ name := p.string()
+ var path string
+ if p.version >= 5 {
+ path = p.path()
+ } else {
+ path = p.string()
+ }
+ if p.version >= 6 {
+ p.int() // package height; unused by go/types
+ }
+
+ // we should never see an empty package name
+ if name == "" {
+ errorf("empty package name in import")
+ }
+
+ // an empty path denotes the package we are currently importing;
+ // it must be the first package we see
+ if (path == "") != (len(p.pkgList) == 0) {
+ errorf("package path %q for pkg index %d", path, len(p.pkgList))
+ }
+
+ // if the package was imported before, use that one; otherwise create a new one
+ if path == "" {
+ path = p.importpath
+ }
+ pkg := p.imports[path]
+ if pkg == nil {
+ pkg = types.NewPackage(path, name)
+ p.imports[path] = pkg
+ } else if pkg.Name() != name {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
+ }
+ p.pkgList = append(p.pkgList, pkg)
+
+ return pkg
+}
+
+// objTag returns the tag value for each object kind.
+func objTag(obj types.Object) int {
+ switch obj.(type) {
+ case *types.Const:
+ return constTag
+ case *types.TypeName:
+ return typeTag
+ case *types.Var:
+ return varTag
+ case *types.Func:
+ return funcTag
+ default:
+ errorf("unexpected object: %v (%T)", obj, obj) // panics
+ panic("unreachable")
+ }
+}
+
+func sameObj(a, b types.Object) bool {
+ // Because unnamed types are not canonicalized, we cannot simply compare types for
+ // (pointer) identity.
+ // Ideally we'd check equality of constant values as well, but this is good enough.
+ return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
+}
+
+func (p *importer) declare(obj types.Object) {
+ pkg := obj.Pkg()
+ if alt := pkg.Scope().Insert(obj); alt != nil {
+ // This can only trigger if we import a (non-type) object a second time.
+		// Excluding type aliases, this cannot happen because a) we only import a package
+ // once; and b) we ignore compiler-specific export data which may contain
+ // functions whose inlined function bodies refer to other functions that
+ // were already imported.
+ // However, type aliases require reexporting the original type, so we need
+ // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
+ // method importer.obj, switch case importing functions).
+ // TODO(gri) review/update this comment once the gc compiler handles type aliases.
+ if !sameObj(obj, alt) {
+ errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
+ }
+ }
+}
+
+func (p *importer) obj(tag int) {
+ switch tag {
+ case constTag:
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ typ := p.typ(nil, nil)
+ val := p.value()
+ p.declare(types.NewConst(pos, pkg, name, typ, val))
+
+ case aliasTag:
+ // TODO(gri) verify type alias hookup is correct
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ typ := p.typ(nil, nil)
+ p.declare(types.NewTypeName(pos, pkg, name, typ))
+
+ case typeTag:
+ p.typ(nil, nil)
+
+ case varTag:
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ typ := p.typ(nil, nil)
+ p.declare(types.NewVar(pos, pkg, name, typ))
+
+ case funcTag:
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ sig := types.NewSignature(nil, params, result, isddd)
+ p.declare(types.NewFunc(pos, pkg, name, sig))
+
+ default:
+ errorf("unexpected object tag %d", tag)
+ }
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+func (p *importer) pos() token.Pos {
+ if !p.posInfoFormat {
+ return token.NoPos
+ }
+
+ file := p.prevFile
+ line := p.prevLine
+ delta := p.int()
+ line += delta
+ if p.version >= 5 {
+ if delta == deltaNewFile {
+ if n := p.int(); n >= 0 {
+ // file changed
+ file = p.path()
+ line = n
+ }
+ }
+ } else {
+ if delta == 0 {
+ if n := p.int(); n >= 0 {
+ // file changed
+ file = p.prevFile[:n] + p.string()
+ line = p.int()
+ }
+ }
+ }
+ p.prevFile = file
+ p.prevLine = line
+
+ return p.fake.pos(file, line)
+}
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line int) token.Pos {
+ // Since we don't know the set of needed file positions, we
+ // reserve maxlines positions per file.
+ const maxlines = 64 * 1024
+ f := s.files[file]
+ if f == nil {
+ f = s.fset.AddFile(file, -1, maxlines)
+ s.files[file] = f
+ // Allocate the fake linebreak indices on first use.
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme?
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ f.SetLines(fakeLines)
+ }
+
+ if line > maxlines {
+ line = 1
+ }
+
+ // Treat the file as if it contained only newlines
+ // and column=1: use the line number as the offset.
+ return f.Pos(line - 1)
+}
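+
+// Editor's note: the synthesized file behaves as if it consisted of
+// maxlines newline characters, so s.pos("f.go", 10) (a hypothetical
+// call) returns the position at offset 9, i.e. line 10, column 1, of
+// the fake "f.go".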
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func (p *importer) qualifiedName() (pkg *types.Package, name string) {
+ name = p.string()
+ pkg = p.pkg()
+ return
+}
+
+func (p *importer) record(t types.Type) {
+ p.typList = append(p.typList, t)
+}
+
+// A dddSlice is a types.Type representing ...T parameters.
+// It only appears for parameter types and does not escape
+// the importer.
+type dddSlice struct {
+ elem types.Type
+}
+
+func (t *dddSlice) Underlying() types.Type { return t }
+func (t *dddSlice) String() string { return "..." + t.elem.String() }
+
+// parent is the package which declared the type; parent == nil means
+// the package currently imported. The parent package is needed for
+// exported struct fields and interface methods which don't contain
+// explicit package information in the export data.
+//
+// A non-nil tname is used as the "owner" of the result type; i.e.,
+// the result type is the underlying type of tname. tname is used
+// to give interface methods a named receiver type where possible.
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
+ // if the type was seen before, i is its index (>= 0)
+ i := p.tagOrIndex()
+ if i >= 0 {
+ return p.typList[i]
+ }
+
+ // otherwise, i is the type tag (< 0)
+ switch i {
+ case namedTag:
+ // read type object
+ pos := p.pos()
+ parent, name := p.qualifiedName()
+ scope := parent.Scope()
+ obj := scope.Lookup(name)
+
+ // if the object doesn't exist yet, create and insert it
+ if obj == nil {
+ obj = types.NewTypeName(pos, parent, name, nil)
+ scope.Insert(obj)
+ }
+
+ if _, ok := obj.(*types.TypeName); !ok {
+ errorf("pkg = %s, name = %s => %s", parent, name, obj)
+ }
+
+ // associate new named type with obj if it doesn't exist yet
+ t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
+
+ // but record the existing type, if any
+ tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
+ p.record(tname)
+
+ // read underlying type
+ t0.SetUnderlying(p.typ(parent, t0))
+
+ // interfaces don't have associated methods
+ if types.IsInterface(t0) {
+ return tname
+ }
+
+ // read associated methods
+ for i := p.int(); i > 0; i-- {
+ // TODO(gri) replace this with something closer to fieldName
+ pos := p.pos()
+ name := p.string()
+ if !exported(name) {
+ p.pkg()
+ }
+
+ recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ p.int() // go:nointerface pragma - discarded
+
+ sig := types.NewSignature(recv.At(0), params, result, isddd)
+ t0.AddMethod(types.NewFunc(pos, parent, name, sig))
+ }
+
+ return tname
+
+ case arrayTag:
+ t := new(types.Array)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ n := p.int64()
+ *t = *types.NewArray(p.typ(parent, nil), n)
+ return t
+
+ case sliceTag:
+ t := new(types.Slice)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ *t = *types.NewSlice(p.typ(parent, nil))
+ return t
+
+ case dddTag:
+ t := new(dddSlice)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ t.elem = p.typ(parent, nil)
+ return t
+
+ case structTag:
+ t := new(types.Struct)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ *t = *types.NewStruct(p.fieldList(parent))
+ return t
+
+ case pointerTag:
+ t := new(types.Pointer)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ *t = *types.NewPointer(p.typ(parent, nil))
+ return t
+
+ case signatureTag:
+ t := new(types.Signature)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ *t = *types.NewSignature(nil, params, result, isddd)
+ return t
+
+ case interfaceTag:
+ // Create a dummy entry in the type list. This is safe because we
+ // cannot expect the interface type to appear in a cycle, as any
+ // such cycle must contain a named type which would have been
+ // first defined earlier.
+ // TODO(gri) Is this still true now that we have type aliases?
+ // See issue #23225.
+ n := len(p.typList)
+ if p.trackAllTypes {
+ p.record(nil)
+ }
+
+ var embeddeds []types.Type
+ for n := p.int(); n > 0; n-- {
+ p.pos()
+ embeddeds = append(embeddeds, p.typ(parent, nil))
+ }
+
+ t := newInterface(p.methodList(parent, tname), embeddeds)
+ p.interfaceList = append(p.interfaceList, t)
+ if p.trackAllTypes {
+ p.typList[n] = t
+ }
+ return t
+
+ case mapTag:
+ t := new(types.Map)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ key := p.typ(parent, nil)
+ val := p.typ(parent, nil)
+ *t = *types.NewMap(key, val)
+ return t
+
+ case chanTag:
+ t := new(types.Chan)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ dir := chanDir(p.int())
+ val := p.typ(parent, nil)
+ *t = *types.NewChan(dir, val)
+ return t
+
+ default:
+ errorf("unexpected type tag %d", i) // panics
+ panic("unreachable")
+ }
+}
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
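+
+// Round-trip note (editor's sketch): the exporter writes 3 - t.Dir(),
+// mapping go/types' SendRecv (0), SendOnly (1), and RecvOnly (2) to
+// gc's Cboth (3), Csend (2), and Crecv (1); chanDir inverts that
+// mapping on import.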
+
+func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
+ if n := p.int(); n > 0 {
+ fields = make([]*types.Var, n)
+ tags = make([]string, n)
+ for i := range fields {
+ fields[i], tags[i] = p.field(parent)
+ }
+ }
+ return
+}
+
+func (p *importer) field(parent *types.Package) (*types.Var, string) {
+ pos := p.pos()
+ pkg, name, alias := p.fieldName(parent)
+ typ := p.typ(parent, nil)
+ tag := p.string()
+
+ anonymous := false
+ if name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ switch typ := deref(typ).(type) {
+ case *types.Basic: // basic types are named types
+			pkg = nil // objects defined in Universe scope have no package
+ name = typ.Name()
+ case *types.Named:
+ name = typ.Obj().Name()
+ default:
+ errorf("named base type expected")
+ }
+ anonymous = true
+ } else if alias {
+ // anonymous field: we have an explicit name because it's an alias
+ anonymous = true
+ }
+
+ return types.NewField(pos, pkg, name, typ, anonymous), tag
+}
+
+func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
+ if n := p.int(); n > 0 {
+ methods = make([]*types.Func, n)
+ for i := range methods {
+ methods[i] = p.method(parent, baseType)
+ }
+ }
+ return
+}
+
+func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
+ pos := p.pos()
+ pkg, name, _ := p.fieldName(parent)
+ // If we don't have a baseType, use a nil receiver.
+ // A receiver using the actual interface type (which
+ // we don't know yet) will be filled in when we call
+ // types.Interface.Complete.
+ var recv *types.Var
+ if baseType != nil {
+ recv = types.NewVar(token.NoPos, parent, "", baseType)
+ }
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ sig := types.NewSignature(recv, params, result, isddd)
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
+ name = p.string()
+ pkg = parent
+ if pkg == nil {
+ // use the imported package instead
+ pkg = p.pkgList[0]
+ }
+ if p.version == 0 && name == "_" {
+ // version 0 didn't export a package for _ fields
+ return
+ }
+ switch name {
+ case "":
+ // 1) field name matches base type name and is exported: nothing to do
+ case "?":
+ // 2) field name matches base type name and is not exported: need package
+ name = ""
+ pkg = p.pkg()
+ case "@":
+ // 3) field name doesn't match type name (alias)
+ name = p.string()
+ alias = true
+ fallthrough
+ default:
+ if !exported(name) {
+ pkg = p.pkg()
+ }
+ }
+ return
+}
+
+func (p *importer) paramList() (*types.Tuple, bool) {
+ n := p.int()
+ if n == 0 {
+ return nil, false
+ }
+ // negative length indicates unnamed parameters
+ named := true
+ if n < 0 {
+ n = -n
+ named = false
+ }
+ // n > 0
+ params := make([]*types.Var, n)
+ isddd := false
+ for i := range params {
+ params[i], isddd = p.param(named)
+ }
+ return types.NewTuple(params...), isddd
+}
+
+func (p *importer) param(named bool) (*types.Var, bool) {
+ t := p.typ(nil, nil)
+ td, isddd := t.(*dddSlice)
+ if isddd {
+ t = types.NewSlice(td.elem)
+ }
+
+ var pkg *types.Package
+ var name string
+ if named {
+ name = p.string()
+ if name == "" {
+ errorf("expected named parameter")
+ }
+ if name != "_" {
+ pkg = p.pkg()
+ }
+ if i := strings.Index(name, "·"); i > 0 {
+ name = name[:i] // cut off gc-specific parameter numbering
+ }
+ }
+
+ // read and discard compiler-specific info
+ p.string()
+
+ return types.NewVar(token.NoPos, pkg, name, t), isddd
+}
+
+func exported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+func (p *importer) value() constant.Value {
+ switch tag := p.tagOrIndex(); tag {
+ case falseTag:
+ return constant.MakeBool(false)
+ case trueTag:
+ return constant.MakeBool(true)
+ case int64Tag:
+ return constant.MakeInt64(p.int64())
+ case floatTag:
+ return p.float()
+ case complexTag:
+ re := p.float()
+ im := p.float()
+ return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+ case stringTag:
+ return constant.MakeString(p.string())
+ case unknownTag:
+ return constant.MakeUnknown()
+ default:
+ errorf("unexpected value tag %d", tag) // panics
+ panic("unreachable")
+ }
+}
+
+func (p *importer) float() constant.Value {
+ sign := p.int()
+ if sign == 0 {
+ return constant.MakeInt64(0)
+ }
+
+ exp := p.int()
+ mant := []byte(p.string()) // big endian
+
+ // remove leading 0's if any
+ for len(mant) > 0 && mant[0] == 0 {
+ mant = mant[1:]
+ }
+
+ // convert to little endian
+ // TODO(gri) go/constant should have a more direct conversion function
+ // (e.g., once it supports a big.Float based implementation)
+ for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
+ mant[i], mant[j] = mant[j], mant[i]
+ }
+
+ // adjust exponent (constant.MakeFromBytes creates an integer value,
+ // but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
+ exp -= len(mant) << 3
+ if len(mant) > 0 {
+ for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
+ exp++
+ }
+ }
+
+ x := constant.MakeFromBytes(mant)
+ switch {
+ case exp < 0:
+ d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+ x = constant.BinaryOp(x, token.QUO, d)
+ case exp > 0:
+ x = constant.Shift(x, token.SHL, uint(exp))
+ }
+
+ if sign < 0 {
+ x = constant.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
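+
+// Worked example (editor's note): the exporter encodes 6.25 as
+// sign = 1, exp = 3, mant = [0x19], since 6.25 == 0.78125 * 2^3 and
+// 0.78125 == 25/32 (mantissa 25 == 0x19). Here exp first becomes
+// 3 - 8 = -5, then rises to -2 while the top mantissa bit is
+// normalized, yielding 25 / 2^2 == 6.25.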
+
+// ----------------------------------------------------------------------------
+// Low-level decoders
+
+func (p *importer) tagOrIndex() int {
+ if p.debugFormat {
+ p.marker('t')
+ }
+
+ return int(p.rawInt64())
+}
+
+func (p *importer) int() int {
+ x := p.int64()
+ if int64(int(x)) != x {
+ errorf("exported integer too large")
+ }
+ return int(x)
+}
+
+func (p *importer) int64() int64 {
+ if p.debugFormat {
+ p.marker('i')
+ }
+
+ return p.rawInt64()
+}
+
+func (p *importer) path() string {
+ if p.debugFormat {
+ p.marker('p')
+ }
+ // if the path was seen before, i is its index (>= 0)
+ // (the empty string is at index 0)
+ i := p.rawInt64()
+ if i >= 0 {
+ return p.pathList[i]
+ }
+ // otherwise, i is the negative path length (< 0)
+ a := make([]string, -i)
+ for n := range a {
+ a[n] = p.string()
+ }
+ s := strings.Join(a, "/")
+ p.pathList = append(p.pathList, s)
+ return s
+}
+
+func (p *importer) string() string {
+ if p.debugFormat {
+ p.marker('s')
+ }
+ // if the string was seen before, i is its index (>= 0)
+ // (the empty string is at index 0)
+ i := p.rawInt64()
+ if i >= 0 {
+ return p.strList[i]
+ }
+ // otherwise, i is the negative string length (< 0)
+ if n := int(-i); n <= cap(p.buf) {
+ p.buf = p.buf[:n]
+ } else {
+ p.buf = make([]byte, n)
+ }
+ for i := range p.buf {
+ p.buf[i] = p.rawByte()
+ }
+ s := string(p.buf)
+ p.strList = append(p.strList, s)
+ return s
+}
+
+func (p *importer) marker(want byte) {
+ if got := p.rawByte(); got != want {
+ errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
+ }
+
+ pos := p.read
+ if n := int(p.rawInt64()); n != pos {
+ errorf("incorrect position: got %d; want %d", n, pos)
+ }
+}
+
+// rawInt64 should only be used by low-level decoders.
+func (p *importer) rawInt64() int64 {
+ i, err := binary.ReadVarint(p)
+ if err != nil {
+ errorf("read error: %v", err)
+ }
+ return i
+}
+
+// rawStringln should only be used to read the initial version string.
+func (p *importer) rawStringln(b byte) string {
+ p.buf = p.buf[:0]
+ for b != '\n' {
+ p.buf = append(p.buf, b)
+ b = p.rawByte()
+ }
+ return string(p.buf)
+}
+
+// needed for binary.ReadVarint in rawInt64
+func (p *importer) ReadByte() (byte, error) {
+ return p.rawByte(), nil
+}
+
+// rawByte is the bottleneck interface for reading p.data.
+// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
+// rawByte should only be used by low-level decoders.
+func (p *importer) rawByte() byte {
+ b := p.data[0]
+ r := 1
+ if b == '|' {
+ b = p.data[1]
+ r = 2
+ switch b {
+ case 'S':
+ b = '$'
+ case '|':
+ // nothing to do
+ default:
+ errorf("unexpected escape sequence in export data")
+ }
+ }
+ p.data = p.data[r:]
+ p.read += r
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+
+ // Types
+ namedTag
+ arrayTag
+ sliceTag
+ dddTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag // not used by gc
+ complexTag
+ stringTag
+ nilTag // only used by gc (appears in exported inlined function bodies)
+ unknownTag // not used by gc (only appears in packages with errors)
+
+ // Type aliases
+ aliasTag
+)
+
+var predecl []types.Type // initialized lazily
+
+func predeclared() []types.Type {
+ if predecl == nil {
+ // initialize lazily to be sure that all
+ // elements have been initialized before
+ predecl = []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+ }
+ }
+ return predecl
+}
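+
+// Editor's note: BExportData seeds its type-index map with this same
+// slice, so a predeclared type such as types.Typ[types.Bool] is
+// encoded and decoded as the same small index (0 for Bool) on both
+// sides; the two lists must stay in exactly this order.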
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
new file mode 100644
index 000000000..f33dc5613
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
@@ -0,0 +1,93 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
+
+// This file implements FindExportData.
+
+package gcimporter
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ size, err = strconv.Atoi(s)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = fmt.Errorf("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the string before the export data, either "$$" or "$$B".
+//
+func FindExportData(r *bufio.Reader) (hdr string, err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ if string(line) == "!<arch>\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, _, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = fmt.Errorf("not a Go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line starting with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+ hdr = string(line)
+
+ return
+}
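+
+// Usage sketch (editor's illustration; error handling elided and the
+// archive name hypothetical):
+//
+//	f, _ := os.Open("fmt.a")
+//	r := bufio.NewReader(f)
+//	hdr, err := FindExportData(r) // hdr is "$$\n" or "$$B\n"
+//	// r is now positioned at the first byte of the export data.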
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
new file mode 100644
index 000000000..9cf186605
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
@@ -0,0 +1,1078 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
+// but it also contains the original source-based importer code for Go1.6.
+// Once we stop supporting 1.6, we can remove that code.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+package gcimporter // import "golang.org/x/tools/go/internal/gcimporter"
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "text/scanner"
+)
+
+// debugging/development support
+const debug = false
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+ if path == "" {
+ return
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ id = path // make sure we have an id to print in error message
+ return
+ }
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// ImportData imports a package by reading the gc-generated export data,
+// adds the corresponding package object to the packages map indexed by id,
+// and returns the object.
+//
+// The packages map must contain all packages already imported. The data
+// reader position must be the beginning of the export data section. The
+// filename is only used in error messages.
+//
+// If packages[id] contains the completely imported package, that package
+// can be used directly, and there is no need to call this function (but
+// there is also no harm in doing so, aside from the extra time spent).
+//
+func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
+ // support for parser error handling
+ defer func() {
+ switch r := recover().(type) {
+ case nil:
+ // nothing to do
+ case importError:
+ err = r
+ default:
+ panic(r) // internal error
+ }
+ }()
+
+ var p parser
+ p.init(filename, id, data, packages)
+ pkg = p.parseExport()
+
+ return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+//
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var filename, id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ var hdr string
+ buf := bufio.NewReader(rc)
+ if hdr, err = FindExportData(buf); err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$\n":
+ // Work-around if we don't have a filename; happens only if lookup != nil.
+ // Either way, the filename is only needed for importer error messages, so
+ // this is fine.
+ if filename == "" {
+ filename = path
+ }
+ return ImportData(packages, filename, id, buf)
+
+ case "$$B\n":
+ var data []byte
+ data, err = ioutil.ReadAll(buf)
+ if err != nil {
+ break
+ }
+
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 && data[0] == 'i' {
+ _, pkg, err = IImportData(fset, packages, data[1:], id)
+ } else {
+ _, pkg, err = BImportData(fset, packages, data, id)
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Parser
+
+// TODO(gri) Imported objects don't have position information.
+// Ideally use the debug table line info; alternatively
+// create some fake position (or the position of the
+// import). That way error messages referring to imported
+// objects can print meaningful information.
+
+// parser parses the exports inside a gc compiler-produced
+// object/archive file and populates its scope with the results.
+type parser struct {
+ scanner scanner.Scanner
+ tok rune // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ id string // package id of imported package
+ sharedPkgs map[string]*types.Package // package id -> package object (across importer)
+ localPkgs map[string]*types.Package // package id -> package object (just this package)
+}
+
+func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
+ p.scanner.Init(src)
+ p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+ p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ p.scanner.Whitespace = 1<<'\t' | 1<<' '
+ p.scanner.Filename = filename // for good error messages
+ p.next()
+ p.id = id
+ p.sharedPkgs = packages
+ if debug {
+ // check consistency of packages map
+ for _, pkg := range packages {
+ if pkg.Name() == "" {
+ fmt.Printf("no package name for %s\n", pkg.Path())
+ }
+ }
+ }
+}
+
+func (p *parser) next() {
+ p.tok = p.scanner.Scan()
+ switch p.tok {
+ case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
+ p.lit = p.scanner.TokenText()
+ default:
+ p.lit = ""
+ }
+ if debug {
+ fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
+ }
+}
+
+func declTypeName(pkg *types.Package, name string) *types.TypeName {
+ scope := pkg.Scope()
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*types.TypeName)
+ }
+ obj := types.NewTypeName(token.NoPos, pkg, name, nil)
+ // a named type may be referred to before the underlying type
+ // is known - set it up
+ types.NewNamed(obj, nil, nil)
+ scope.Insert(obj)
+ return obj
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// Internal errors are boxed as importErrors.
+type importError struct {
+ pos scanner.Position
+ err error
+}
+
+func (e importError) Error() string {
+ return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+func (p *parser) error(err interface{}) {
+ if s, ok := err.(string); ok {
+ err = errors.New(s)
+ }
+ // panic with a runtime.Error if err is not an error
+ panic(importError{p.scanner.Pos(), err.(error)})
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+ p.error(fmt.Sprintf(format, args...))
+}
+
+func (p *parser) expect(tok rune) string {
+ lit := p.lit
+ if p.tok != tok {
+ p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+ }
+ p.next()
+ return lit
+}
+
+func (p *parser) expectSpecial(tok string) {
+ sep := 'x' // not white space
+ i := 0
+ for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ i++
+ }
+ if i < len(tok) {
+ p.errorf("expected %q, got %q", tok, tok[0:i])
+ }
+}
+
+func (p *parser) expectKeyword(keyword string) {
+ lit := p.expect(scanner.Ident)
+ if lit != keyword {
+ p.errorf("expected keyword %s, got %q", keyword, lit)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Qualified and unqualified names
+
+// PackageId = string_lit .
+//
+func (p *parser) parsePackageId() string {
+ id, err := strconv.Unquote(p.expect(scanner.String))
+ if err != nil {
+ p.error(err)
+ }
+ // id == "" stands for the imported package id
+ // (only known at time of package installation)
+ if id == "" {
+ id = p.id
+ }
+ return id
+}
+
+// PackageName = ident .
+//
+func (p *parser) parsePackageName() string {
+ return p.expect(scanner.Ident)
+}
+
+// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
+func (p *parser) parseDotIdent() string {
+ ident := ""
+ if p.tok != scanner.Int {
+ sep := 'x' // not white space
+ for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
+ ident += p.lit
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ }
+ }
+ if ident == "" {
+ p.expect(scanner.Ident) // use expect() for error handling
+ }
+ return ident
+}
+
+// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
+//
+func (p *parser) parseQualifiedName() (id, name string) {
+ p.expect('@')
+ id = p.parsePackageId()
+ p.expect('.')
+ // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
+ if p.tok == '?' {
+ p.next()
+ } else {
+ name = p.parseDotIdent()
+ }
+ return
+}
+
+// getPkg returns the package for a given id. If the package is
+// not found, create the package and add it to the p.localPkgs
+// and p.sharedPkgs maps. name is the (expected) name of the
+// package. If name == "", the package name is expected to be
+// set later via an import clause in the export data.
+//
+// id identifies a package, usually by a canonical package path like
+// "encoding/json" but possibly by a non-canonical import path like
+// "./json".
+//
+func (p *parser) getPkg(id, name string) *types.Package {
+ // package unsafe is not in the packages maps - handle explicitly
+ if id == "unsafe" {
+ return types.Unsafe
+ }
+
+ pkg := p.localPkgs[id]
+ if pkg == nil {
+ // first import of id from this package
+ pkg = p.sharedPkgs[id]
+ if pkg == nil {
+ // first import of id by this importer;
+ // add (possibly unnamed) pkg to shared packages
+ pkg = types.NewPackage(id, name)
+ p.sharedPkgs[id] = pkg
+ }
+ // add (possibly unnamed) pkg to local packages
+ if p.localPkgs == nil {
+ p.localPkgs = make(map[string]*types.Package)
+ }
+ p.localPkgs[id] = pkg
+ } else if name != "" {
+ // package exists already and we have an expected package name;
+ // make sure names match or set package name if necessary
+ if pname := pkg.Name(); pname == "" {
+ pkg.SetName(name)
+ } else if pname != name {
+ p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
+ }
+ }
+ return pkg
+}
+
+// parseExportedName is like parseQualifiedName, but
+// the package id is resolved to an imported *types.Package.
+//
+func (p *parser) parseExportedName() (pkg *types.Package, name string) {
+ id, name := p.parseQualifiedName()
+ pkg = p.getPkg(id, "")
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// BasicType = identifier .
+//
+func (p *parser) parseBasicType() types.Type {
+ id := p.expect(scanner.Ident)
+ obj := types.Universe.Lookup(id)
+ if obj, ok := obj.(*types.TypeName); ok {
+ return obj.Type()
+ }
+ p.errorf("not a basic type: %s", id)
+ return nil
+}
+
+// ArrayType = "[" int_lit "]" Type .
+//
+func (p *parser) parseArrayType(parent *types.Package) types.Type {
+ // "[" already consumed and lookahead known not to be "]"
+ lit := p.expect(scanner.Int)
+ p.expect(']')
+ elem := p.parseType(parent)
+ n, err := strconv.ParseInt(lit, 10, 64)
+ if err != nil {
+ p.error(err)
+ }
+ return types.NewArray(elem, n)
+}
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *parser) parseMapType(parent *types.Package) types.Type {
+ p.expectKeyword("map")
+ p.expect('[')
+ key := p.parseType(parent)
+ p.expect(']')
+ elem := p.parseType(parent)
+ return types.NewMap(key, elem)
+}
+
+// Name = identifier | "?" | QualifiedName .
+//
+// For unqualified and anonymous names, the returned package is the parent
+// package unless parent == nil, in which case the returned package is the
+// package being imported. (The parent package is not nil if the name
+// is an unqualified struct field or interface method name belonging to a
+// type declared in another package.)
+//
+// For qualified names, the returned package is nil (and not created if
+// it doesn't exist yet) unless materializePkg is set (which creates an
+// unnamed package with valid package path). In the latter case, a
+// subsequent import clause is expected to provide a name for the package.
+//
+func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
+ pkg = parent
+ if pkg == nil {
+ pkg = p.sharedPkgs[p.id]
+ }
+ switch p.tok {
+ case scanner.Ident:
+ name = p.lit
+ p.next()
+ case '?':
+ // anonymous
+ p.next()
+ case '@':
+ // exported name prefixed with package path
+ pkg = nil
+ var id string
+ id, name = p.parseQualifiedName()
+ if materializePkg {
+ pkg = p.getPkg(id, "")
+ }
+ default:
+ p.error("name expected")
+ }
+ return
+}
+
+func deref(typ types.Type) types.Type {
+ if p, _ := typ.(*types.Pointer); p != nil {
+ return p.Elem()
+ }
+ return typ
+}
+
+// Field = Name Type [ string_lit ] .
+//
+func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
+ pkg, name := p.parseName(parent, true)
+
+ if name == "_" {
+ // Blank fields should be package-qualified because they
+ // are unexported identifiers, but gc does not qualify them.
+ // Assuming that the ident belongs to the current package
+ // causes types to change during re-exporting, leading
+ // to spurious "can't assign A to B" errors from go/types.
+ // As a workaround, pretend all blank fields belong
+ // to the same unique dummy package.
+ const blankpkg = "<_>"
+ pkg = p.getPkg(blankpkg, blankpkg)
+ }
+
+ typ := p.parseType(parent)
+ anonymous := false
+ if name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ switch typ := deref(typ).(type) {
+ case *types.Basic: // basic types are named types
+ pkg = nil // objects defined in Universe scope have no package
+ name = typ.Name()
+ case *types.Named:
+ name = typ.Obj().Name()
+ default:
+ p.errorf("anonymous field expected")
+ }
+ anonymous = true
+ }
+ tag := ""
+ if p.tok == scanner.String {
+ s := p.expect(scanner.String)
+ var err error
+ tag, err = strconv.Unquote(s)
+ if err != nil {
+ p.errorf("invalid struct tag %s: %s", s, err)
+ }
+ }
+ return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
+}
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList = Field { ";" Field } .
+//
+func (p *parser) parseStructType(parent *types.Package) types.Type {
+ var fields []*types.Var
+ var tags []string
+
+ p.expectKeyword("struct")
+ p.expect('{')
+ for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+ if i > 0 {
+ p.expect(';')
+ }
+ fld, tag := p.parseField(parent)
+ if tag != "" && tags == nil {
+ tags = make([]string, i)
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+ fields = append(fields, fld)
+ }
+ p.expect('}')
+
+ return types.NewStruct(fields, tags)
+}
+
+// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
+//
+func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
+ _, name := p.parseName(nil, false)
+ // remove gc-specific parameter numbering
+ if i := strings.Index(name, "·"); i >= 0 {
+ name = name[:i]
+ }
+ if p.tok == '.' {
+ p.expectSpecial("...")
+ isVariadic = true
+ }
+ typ := p.parseType(nil)
+ if isVariadic {
+ typ = types.NewSlice(typ)
+ }
+ // ignore argument tag (e.g. "noescape")
+ if p.tok == scanner.String {
+ p.next()
+ }
+ // TODO(gri) should we provide a package?
+ par = types.NewVar(token.NoPos, nil, name, typ)
+ return
+}
+
+// Parameters = "(" [ ParameterList ] ")" .
+// ParameterList = { Parameter "," } Parameter .
+//
+func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
+ p.expect('(')
+ for p.tok != ')' && p.tok != scanner.EOF {
+ if len(list) > 0 {
+ p.expect(',')
+ }
+ par, variadic := p.parseParameter()
+ list = append(list, par)
+ if variadic {
+ if isVariadic {
+ p.error("... not on final argument")
+ }
+ isVariadic = true
+ }
+ }
+ p.expect(')')
+
+ return
+}
+
+// Signature = Parameters [ Result ] .
+// Result = Type | Parameters .
+//
+func (p *parser) parseSignature(recv *types.Var) *types.Signature {
+ params, isVariadic := p.parseParameters()
+
+ // optional result type
+ var results []*types.Var
+ if p.tok == '(' {
+ var variadic bool
+ results, variadic = p.parseParameters()
+ if variadic {
+ p.error("... not permitted on result type")
+ }
+ }
+
+ return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
+}
+
+// InterfaceType = "interface" "{" [ MethodList ] "}" .
+// MethodList = Method { ";" Method } .
+// Method = Name Signature .
+//
+// The methods of embedded interfaces are always "inlined"
+// by the compiler and thus embedded interfaces are never
+// visible in the export data.
+//
+func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
+ var methods []*types.Func
+
+ p.expectKeyword("interface")
+ p.expect('{')
+ for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+ if i > 0 {
+ p.expect(';')
+ }
+ pkg, name := p.parseName(parent, true)
+ sig := p.parseSignature(nil)
+ methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
+ }
+ p.expect('}')
+
+ // Complete requires the type's embedded interfaces to be fully defined,
+ // but we do not define any
+ return types.NewInterface(methods, nil).Complete()
+}
+
+// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
+//
+func (p *parser) parseChanType(parent *types.Package) types.Type {
+ dir := types.SendRecv
+ if p.tok == scanner.Ident {
+ p.expectKeyword("chan")
+ if p.tok == '<' {
+ p.expectSpecial("<-")
+ dir = types.SendOnly
+ }
+ } else {
+ p.expectSpecial("<-")
+ p.expectKeyword("chan")
+ dir = types.RecvOnly
+ }
+ elem := p.parseType(parent)
+ return types.NewChan(dir, elem)
+}
+
+// Type =
+// BasicType | TypeName | ArrayType | SliceType | StructType |
+// PointerType | FuncType | InterfaceType | MapType | ChanType |
+// "(" Type ")" .
+//
+// BasicType = ident .
+// TypeName = ExportedName .
+// SliceType = "[" "]" Type .
+// PointerType = "*" Type .
+// FuncType = "func" Signature .
+//
+func (p *parser) parseType(parent *types.Package) types.Type {
+ switch p.tok {
+ case scanner.Ident:
+ switch p.lit {
+ default:
+ return p.parseBasicType()
+ case "struct":
+ return p.parseStructType(parent)
+ case "func":
+ // FuncType
+ p.next()
+ return p.parseSignature(nil)
+ case "interface":
+ return p.parseInterfaceType(parent)
+ case "map":
+ return p.parseMapType(parent)
+ case "chan":
+ return p.parseChanType(parent)
+ }
+ case '@':
+ // TypeName
+ pkg, name := p.parseExportedName()
+ return declTypeName(pkg, name).Type()
+ case '[':
+ p.next() // look ahead
+ if p.tok == ']' {
+ // SliceType
+ p.next()
+ return types.NewSlice(p.parseType(parent))
+ }
+ return p.parseArrayType(parent)
+ case '*':
+ // PointerType
+ p.next()
+ return types.NewPointer(p.parseType(parent))
+ case '<':
+ return p.parseChanType(parent)
+ case '(':
+ // "(" Type ")"
+ p.next()
+ typ := p.parseType(parent)
+ p.expect(')')
+ return typ
+ }
+ p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// ImportDecl = "import" PackageName PackageId .
+//
+func (p *parser) parseImportDecl() {
+ p.expectKeyword("import")
+ name := p.parsePackageName()
+ p.getPkg(p.parsePackageId(), name)
+}
+
+// int_lit = [ "+" | "-" ] { "0" ... "9" } .
+//
+func (p *parser) parseInt() string {
+ s := ""
+ switch p.tok {
+ case '-':
+ s = "-"
+ p.next()
+ case '+':
+ p.next()
+ }
+ return s + p.expect(scanner.Int)
+}
+
+// number = int_lit [ "p" int_lit ] .
+//
+func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
+ // mantissa
+ mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
+ if mant == nil {
+ panic("invalid mantissa")
+ }
+
+ if p.lit == "p" {
+ // exponent (base 2)
+ p.next()
+ exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
+ if err != nil {
+ p.error(err)
+ }
+ if exp < 0 {
+ denom := constant.MakeInt64(1)
+ denom = constant.Shift(denom, token.SHL, uint(-exp))
+ typ = types.Typ[types.UntypedFloat]
+ val = constant.BinaryOp(mant, token.QUO, denom)
+ return
+ }
+ if exp > 0 {
+ mant = constant.Shift(mant, token.SHL, uint(exp))
+ }
+ typ = types.Typ[types.UntypedFloat]
+ val = mant
+ return
+ }
+
+ typ = types.Typ[types.UntypedInt]
+ val = mant
+ return
+}
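+
+// For example, the literal 25p-2 denotes mantissa 25 scaled by 2**-2,
+// i.e. the untyped float constant 6.25, while a bare 42 is returned as
+// an untyped integer constant (illustrative).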
+
+// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
+// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
+// bool_lit = "true" | "false" .
+// complex_lit = "(" float_lit "+" float_lit "i" ")" .
+// rune_lit = "(" int_lit "+" int_lit ")" .
+// string_lit = `"` { unicode_char } `"` .
+//
+func (p *parser) parseConstDecl() {
+ p.expectKeyword("const")
+ pkg, name := p.parseExportedName()
+
+ var typ0 types.Type
+ if p.tok != '=' {
+ // constant types are never structured - no need for parent type
+ typ0 = p.parseType(nil)
+ }
+
+ p.expect('=')
+ var typ types.Type
+ var val constant.Value
+ switch p.tok {
+ case scanner.Ident:
+ // bool_lit
+ if p.lit != "true" && p.lit != "false" {
+ p.error("expected true or false")
+ }
+ typ = types.Typ[types.UntypedBool]
+ val = constant.MakeBool(p.lit == "true")
+ p.next()
+
+ case '-', scanner.Int:
+ // int_lit
+ typ, val = p.parseNumber()
+
+ case '(':
+ // complex_lit or rune_lit
+ p.next()
+ if p.tok == scanner.Char {
+ p.next()
+ p.expect('+')
+ typ = types.Typ[types.UntypedRune]
+ _, val = p.parseNumber()
+ p.expect(')')
+ break
+ }
+ _, re := p.parseNumber()
+ p.expect('+')
+ _, im := p.parseNumber()
+ p.expectKeyword("i")
+ p.expect(')')
+ typ = types.Typ[types.UntypedComplex]
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ case scanner.Char:
+ // rune_lit
+ typ = types.Typ[types.UntypedRune]
+ val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
+ p.next()
+
+ case scanner.String:
+ // string_lit
+ typ = types.Typ[types.UntypedString]
+ val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
+ p.next()
+
+ default:
+ p.errorf("expected literal got %s", scanner.TokenString(p.tok))
+ }
+
+ if typ0 == nil {
+ typ0 = typ
+ }
+
+ pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
+}
+
+// TypeDecl = "type" ExportedName Type .
+//
+func (p *parser) parseTypeDecl() {
+ p.expectKeyword("type")
+ pkg, name := p.parseExportedName()
+ obj := declTypeName(pkg, name)
+
+ // The type object may have been imported before and thus already
+ // have a type associated with it. We still need to parse the type
+ // structure, but throw it away if the object already has a type.
+ // This ensures that all imports refer to the same type object for
+ // a given type declaration.
+ typ := p.parseType(pkg)
+
+ if name := obj.Type().(*types.Named); name.Underlying() == nil {
+ name.SetUnderlying(typ)
+ }
+}
+
+// VarDecl = "var" ExportedName Type .
+//
+func (p *parser) parseVarDecl() {
+ p.expectKeyword("var")
+ pkg, name := p.parseExportedName()
+ typ := p.parseType(pkg)
+ pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
+}
+
+// Func = Signature [ Body ] .
+// Body = "{" ... "}" .
+//
+func (p *parser) parseFunc(recv *types.Var) *types.Signature {
+ sig := p.parseSignature(recv)
+ if p.tok == '{' {
+ p.next()
+ for i := 1; i > 0; p.next() {
+ switch p.tok {
+ case '{':
+ i++
+ case '}':
+ i--
+ }
+ }
+ }
+ return sig
+}
+
+// MethodDecl = "func" Receiver Name Func .
+// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
+//
+func (p *parser) parseMethodDecl() {
+ // "func" already consumed
+ p.expect('(')
+ recv, _ := p.parseParameter() // receiver
+ p.expect(')')
+
+ // determine receiver base type object
+ base := deref(recv.Type()).(*types.Named)
+
+ // parse method name, signature, and possibly inlined body
+ _, name := p.parseName(nil, false)
+ sig := p.parseFunc(recv)
+
+ // methods always belong to the same package as the base type object
+ pkg := base.Obj().Pkg()
+
+ // add method to type unless type was imported before
+ // and method exists already
+ // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
+ base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
+}
+
+// FuncDecl = "func" ExportedName Func .
+//
+func (p *parser) parseFuncDecl() {
+ // "func" already consumed
+ pkg, name := p.parseExportedName()
+ typ := p.parseFunc(nil)
+ pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
+}
+
+// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+//
+func (p *parser) parseDecl() {
+ if p.tok == scanner.Ident {
+ switch p.lit {
+ case "import":
+ p.parseImportDecl()
+ case "const":
+ p.parseConstDecl()
+ case "type":
+ p.parseTypeDecl()
+ case "var":
+ p.parseVarDecl()
+ case "func":
+ p.next() // look ahead
+ if p.tok == '(' {
+ p.parseMethodDecl()
+ } else {
+ p.parseFuncDecl()
+ }
+ }
+ }
+ p.expect('\n')
+}
+
+// ----------------------------------------------------------------------------
+// Export
+
+// Export = "PackageClause { Decl } "$$" .
+// PackageClause = "package" PackageName [ "safe" ] "\n" .
+//
+func (p *parser) parseExport() *types.Package {
+ p.expectKeyword("package")
+ name := p.parsePackageName()
+ if p.tok == scanner.Ident && p.lit == "safe" {
+ // package was compiled with -u option - ignore
+ p.next()
+ }
+ p.expect('\n')
+
+ pkg := p.getPkg(p.id, name)
+
+ for p.tok != '$' && p.tok != scanner.EOF {
+ p.parseDecl()
+ }
+
+ if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
+ // don't call next()/expect() since reading past the
+ // export data may cause scanner errors (e.g. NUL chars)
+ p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
+ }
+
+ if n := p.scanner.ErrorCount; n != 0 {
+ p.errorf("expected no scanner errors, got %d", n)
+ }
+
+ // Record all locally referenced packages as imports.
+ var imports []*types.Package
+ for id, pkg2 := range p.localPkgs {
+ if pkg2.Name() == "" {
+ p.errorf("%s package has no name", id)
+ }
+ if id == p.id {
+ continue // avoid self-edge
+ }
+ imports = append(imports, pkg2)
+ }
+ sort.Sort(byPath(imports))
+ pkg.SetImports(imports)
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+
+ return pkg
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
new file mode 100644
index 000000000..be671c79b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
@@ -0,0 +1,723 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+// +build go1.11
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "sort"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 0: Go1.11 encoding
+const iexportVersion = 0
+
+// IExportData returns the binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+
+ p := iexporter{
+ out: bytes.NewBuffer(nil),
+ fset: fset,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ typIndex: map[types.Type]uint64{},
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if ast.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex, pkg)
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ hdr.WriteByte('i')
+ hdr.uint64(iexportVersion)
+ hdr.uint64(uint64(p.strings.Len()))
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(p.out, &hdr)
+ io.Copy(p.out, &p.strings)
+ io.Copy(p.out, &p.data0)
+
+ return p.out.Bytes(), nil
+}
+
+// writeIndex writes out the main object index, which is also read by
+// non-compiler tools and therefore includes a complete description
+// (i.e., name and height) of every referenced package.
+func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]types.Object{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ pkgObjs[localpkg] = nil
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].Name() < objs[j].Name()
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].Path() < pkgs[j].Path()
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(pkg.Path())
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.Name())
+ w.uint64(index[obj])
+ }
+ }
+}
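+
+// The resulting index layout is, schematically (counts and offsets as
+// uvarints, strings as string-section offsets):
+//
+//	nPkgs { pkgPath pkgName height=0 nObjs { objName declOffset } }
+//
+// with packages sorted by path and objects sorted by name, so the
+// output is deterministic (illustrative summary of the code above).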
+
+type iexporter struct {
+ fset *token.FileSet
+ out *bytes.Buffer
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ typIndex map[types.Type]uint64
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
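+
+// For example, interning "foo" into an empty string section appends
+// the length-prefixed bytes 3 'f' 'o' 'o' and returns offset 0; any
+// later stringOff("foo") returns the same offset without writing
+// (illustrative).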
+
+// pushDecl adds obj to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ assert(obj.Pkg() != types.Unsafe)
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark obj as present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ w := p.newWriter()
+ w.setPkg(obj.Pkg(), false)
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag('V')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ w.tag('F')
+ w.pos(obj.Pos())
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag('C')
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ if obj.IsAlias() {
+ w.tag('A')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ w.tag('T')
+ w.pos(obj.Pos())
+
+ underlying := obj.Type().Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ t := obj.Type()
+ if types.IsInterface(t) {
+ break
+ }
+
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+
+ // When file is the same as the last position (common case),
+ // we can save a few bytes by delta encoding just the line
+ // number.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile and
+ // prevLine as fields of exportWriter.
+
+ if file == w.prevFile {
+ delta := line - w.prevLine
+ w.int64(delta)
+ if delta == deltaNewFile {
+ w.int64(-1)
+ }
+ } else {
+ w.int64(deltaNewFile)
+ w.int64(line) // line >= 0
+ w.string(file)
+ w.prevFile = file
+ }
+ w.prevLine = line
+}
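+
+// For example, encoding a.go:10, then a.go:12, then b.go:1 in a fresh
+// writer emits (deltaNewFile, 10, "a.go"), then the bare line delta 2,
+// then (deltaNewFile, 1, "b.go") (illustrative, assuming no genuine
+// line delta equals deltaNewFile).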
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(pkg.Path())
+}
+
+func (w *exportWriter) qualifiedIdent(obj types.Object) {
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+
+ w.string(obj.Name())
+ w.pkg(obj.Pkg())
+}
+
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
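+
+// Offsets below predeclReserved thus denote predeclared types directly
+// (see the typIndex initialization in IExportData); any other offset
+// points at byte offset-predeclReserved of the type data section
+// (illustrative summary).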
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+ switch t := t.(type) {
+ case *types.Named:
+ w.startType(definedType)
+ w.qualifiedIdent(t.Obj())
+
+ case *types.Pointer:
+ w.startType(pointerType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Slice:
+ w.startType(sliceType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Array:
+ w.startType(arrayType)
+ w.uint64(uint64(t.Len()))
+ w.typ(t.Elem(), pkg)
+
+ case *types.Chan:
+ w.startType(chanType)
+ // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+ var dir uint64
+ switch t.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ w.uint64(dir)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Map:
+ w.startType(mapType)
+ w.typ(t.Key(), pkg)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Signature:
+ w.startType(signatureType)
+ w.setPkg(pkg, true)
+ w.signature(t)
+
+ case *types.Struct:
+ w.startType(structType)
+ w.setPkg(pkg, true)
+
+ n := t.NumFields()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ w.pos(f.Pos())
+ w.string(f.Name())
+ w.typ(f.Type(), pkg)
+ w.bool(f.Embedded())
+ w.string(t.Tag(i)) // field tag
+ }
+
+ case *types.Interface:
+ w.startType(interfaceType)
+ w.setPkg(pkg, true)
+
+ n := t.NumEmbeddeds()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ f := t.Embedded(i)
+ w.pos(f.Obj().Pos())
+ w.typ(f.Obj().Type(), f.Obj().Pkg())
+ }
+
+ n = t.NumExplicitMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+ }
+}
+
+func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
+ if write {
+ w.pkg(pkg)
+ }
+
+ w.currPkg = pkg
+}
+
+func (w *exportWriter) signature(sig *types.Signature) {
+ w.paramList(sig.Params())
+ w.paramList(sig.Results())
+ if sig.Params().Len() > 0 {
+ w.bool(sig.Variadic())
+ }
+}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
+func (w *exportWriter) value(typ types.Type, v constant.Value) {
+ w.typ(typ, nil)
+
+ switch v.Kind() {
+ case constant.Bool:
+ w.bool(constant.BoolVal(v))
+ case constant.Int:
+ var i big.Int
+ if i64, exact := constant.Int64Val(v); exact {
+ i.SetInt64(i64)
+ } else if ui64, exact := constant.Uint64Val(v); exact {
+ i.SetUint64(ui64)
+ } else {
+ i.SetString(v.ExactString(), 10)
+ }
+ w.mpint(&i, typ)
+ case constant.Float:
+ f := constantToFloat(v)
+ w.mpfloat(f, typ)
+ case constant.Complex:
+ w.mpfloat(constantToFloat(constant.Real(v)), typ)
+ w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+ case constant.String:
+ w.string(constant.StringVal(v))
+ case constant.Unknown:
+ // package contains type errors
+ default:
+ panic(internalErrorf("unexpected value %v (%T)", v, v))
+ }
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+ assert(x.Kind() == constant.Float)
+ // Use the same floating-point precision (512) as cmd/compile
+ // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+ const mpprec = 512
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value is not representable as a fraction; fall back to
+ // parsing its exact string form.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ assert(ok)
+ }
+ return &f
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2, while bytes 255, 254, and 253 indicate that a
+// 1-, 2-, or 3-byte big-endian string follows.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except that small values use zig-zag encoding and
+// the bottom bit of the length prefix byte for large values is
+// reserved as a sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+ basic, ok := typ.Underlying().(*types.Basic)
+ if !ok {
+ panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+ }
+
+ signed, maxBytes := intSize(basic)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ panic(internalErrorf("leading zeros"))
+ }
+ if uint(len(b)) > maxBytes {
+ panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
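+
+// Worked example for typ = int32 (signed, maxBytes = 4, maxSmall =
+// 248): the value 3 is zig-zag encoded as the single byte 6 and -3 as
+// the single byte 5, while 1000 needs two bytes and is written as the
+// prefix byte 252 (256 - 2*2, sign bit clear) followed by the
+// big-endian bytes 0x03 0xe8 (illustrative).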
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+ if f.IsInf() {
+ panic("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
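+
+// For example, 6.25 decomposes as mant = 0.78125, exp = 3; scaling by
+// MinPrec() == 5 yields the integer mantissa 25 with exponent -2, so
+// the value is written as mpint(25) followed by int64(-2), i.e.
+// 25 * 2**-2 (illustrative).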
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
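+
+// For example, uint64(300) is written as the uvarint bytes 0xac 0x02,
+// and int64(-3) is zig-zag encoded to 5 and written as the single byte
+// 0x05, per the standard encoding/binary varint scheme (illustrative).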
+
+func assert(cond bool) {
+ if !cond {
+ panic("internal error: assertion failed")
+ }
+}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty returns true if q contains no objects.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends obj to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
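+
+// For example, with len(ring) == 16, head == 3, and tail == 19 the
+// ring is full; the grow path above copies ring[3:] followed by
+// ring[:3] into a 32-element ring and resets head, tail to 0, 16
+// (illustrative).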
+
+// popHead pops a node from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
new file mode 100644
index 000000000..3cb7ae5b9
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
@@ -0,0 +1,606 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "sort"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+)
+
+// IImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ const currentVersion = 0
+ version := -1
+ defer func() {
+ if e := recover(); e != nil {
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ version = int(r.uint64())
+ switch version {
+ case currentVersion:
+ default:
+ errorf("unknown iexport format version %d", version)
+ }
+
+ sLen := int64(r.uint64())
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ r.Seek(sLen+dLen, io.SeekCurrent)
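+
+ // At this point r has consumed the version and the two section
+ // lengths; data is laid out as: uvarint version, uvarint sLen,
+ // uvarint dLen, sLen bytes of string data, dLen bytes of declaration
+ // data, then the main index, which r is now positioned at.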
+
+ p := iimporter{
+ ipath: path,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*token.File),
+ },
+ }
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ pkgList := make([]*types.Package, r.uint64())
+ for i := range pkgList {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ pkg := imports[pkgPath]
+ if pkg == nil {
+ pkg = types.NewPackage(pkgPath, pkgName)
+ imports[pkgPath] = pkg
+ } else if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+
+ p.pkgCache[pkgPathOff] = pkg
+
+ nameIndex := make(map[string]uint64)
+ for nSyms := r.uint64(); nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ p.pkgIndex[pkg] = nameIndex
+ pkgList[i] = pkg
+ }
+ var localpkg *types.Package
+ for _, pkg := range pkgList {
+ if pkg.Path() == path {
+ localpkg = pkg
+ }
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[localpkg]))
+ for name := range p.pkgIndex[localpkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(localpkg, name)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ localpkg.SetImports(list)
+
+ // package was imported completely and without errors
+ localpkg.MarkComplete()
+
+ consumed, _ := r.Seek(0, io.SeekCurrent)
+ return int(consumed), localpkg, nil
+}
+
+type iimporter struct {
+ ipath string
+
+ stringData []byte
+ stringCache map[uint64]string
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if base == nil || !isInterface(t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
+
+ case 'C':
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case 'F':
+ sig := r.signature(nil)
+
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case 'T':
+ // Types can be recursive. We need to setup a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+ r.declare(obj)
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+ msig := r.signature(recv)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case 'V':
+ typ := r.typ()
+
+ r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ val = r.mpint(b)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(b *types.Basic) constant.Value {
+ signed, maxBytes := intSize(b)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ return constant.MakeInt64(v)
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+
+ buf := make([]byte, v)
+ io.ReadFull(&r.declReader, buf)
+
+ // convert to little endian
+ // TODO(gri) go/constant should have a more direct conversion function
+ // (e.g., once it supports a big.Float based implementation)
+ for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
+ buf[i], buf[j] = buf[j], buf[i]
+ }
+
+ x := constant.MakeFromBytes(buf)
+ if signed && n&1 != 0 {
+ x = constant.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
+
+func (r *importReader) mpfloat(b *types.Basic) constant.Value {
+ x := r.mpint(b)
+ if constant.Sign(x) == 0 {
+ return x
+ }
+
+ exp := r.int64()
+ switch {
+ case exp > 0:
+ x = constant.Shift(x, token.SHL, uint(exp))
+ case exp < 0:
+ d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+ x = constant.BinaryOp(x, token.QUO, d)
+ }
+ return x
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 {
+ return token.NoPos
+ }
+
+ return r.p.fake.pos(r.prevFile, int(r.prevLine))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := t.(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) types.Type {
+ switch k := r.kind(); k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+
+ msig := r.signature(recv)
+ methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+
+ typ := newInterface(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Var) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types.NewSignature(recv, params, results, variadic)
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
new file mode 100644
index 000000000..463f25227
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ named := make([]*types.Named, len(embeddeds))
+ for i, e := range embeddeds {
+ var ok bool
+ named[i], ok = e.(*types.Named)
+ if !ok {
+ panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
+ }
+ }
+ return types.NewInterface(methods, named)
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
new file mode 100644
index 000000000..ab28b95cb
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ return types.NewInterfaceType(methods, embeddeds)
+}
diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
new file mode 100644
index 000000000..fdc7da056
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagesdriver fetches type sizes for go/packages and go/analysis.
+package packagesdriver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/types"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+)
+
+var debug = false
+
+// GetSizes returns the sizes used by the underlying driver with the given parameters.
+func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
+ // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ // We did not find the driver, so use "go list".
+ tool = "off"
+ }
+ }
+
+ if tool == "off" {
+ return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
+ }
+
+ req, err := json.Marshal(struct {
+ Command string `json:"command"`
+ Env []string `json:"env"`
+ BuildFlags []string `json:"build_flags"`
+ }{
+ Command: "sizes",
+ Env: env,
+ BuildFlags: buildFlags,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ cmd := exec.CommandContext(ctx, tool)
+ cmd.Dir = dir
+ cmd.Env = env
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = new(bytes.Buffer)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ var response struct {
+ // Sizes, if not nil, is the types.Sizes to use when type checking.
+ Sizes *types.StdSizes
+ }
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return response.Sizes, nil
+}
+
+func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
+ args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
+ args = append(args, buildFlags...)
+ args = append(args, "--", "unsafe")
+ stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
+ if err != nil {
+ return nil, err
+ }
+ fields := strings.Fields(stdout.String())
+ if len(fields) < 2 {
+ return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
+ }
+ goarch := fields[0]
+ compiler := fields[1]
+ return types.SizesFor(compiler, goarch), nil
+}
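+
+// For a typical amd64 build the go list invocation above prints
+// "amd64 gc", so GetSizesGolist returns types.SizesFor("gc", "amd64")
+// (illustrative).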
+
+// InvokeGo returns the stdout of a go command invocation.
+func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
+ if debug {
+ defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
+ }
+ stdout := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(ctx, "go", args...)
+ // On darwin the cwd gets resolved to the real path, which breaks anything that
+ // expects the working directory to keep the original path, including the
+ // go command when dealing with modules.
+ // The Go stdlib has a special feature where if the cwd and the PWD are the
+ // same node then it trusts the PWD, so by setting it in the env for the child
+ // process we fix up all the paths returned by the go command.
+ cmd.Env = append(append([]string{}, env...), "PWD="+dir)
+ cmd.Dir = dir
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ if err := cmd.Run(); err != nil {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - executable not found
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ if !usesExportData {
+ return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
+ }
+ }
+
+ // As of writing, go list -export prints some non-fatal compilation
+ // errors to stderr, even with -e set. We would prefer that it put
+ // them in the Package.Error JSON (see https://golang.org/issue/26319).
+ // In the meantime, there's nowhere good to put them, but they can
+ // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
+ // is set.
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
+ }
+
+ // debugging
+ if false {
+ fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
+ }
+
+ return stdout, nil
+}
+
+func cmdDebugStr(envlist []string, args ...string) string {
+ env := make(map[string]string)
+ for _, kv := range envlist {
+ // Split on the first '=' only: values such as GOFLAGS=-mod=vendor
+ // themselves contain '='.
+ split := strings.SplitN(kv, "=", 2)
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
+}
diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go
new file mode 100644
index 000000000..9b51c9ecd
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/loader/doc.go
@@ -0,0 +1,205 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package loader loads a complete Go program from source code, parsing
+// and type-checking the initial packages plus their transitive closure
+// of dependencies. The ASTs and the derived facts are retained for
+// later use.
+//
+// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
+//
+// The package defines two primary types: Config, which specifies a
+// set of initial packages to load and various other options; and
+// Program, which is the result of successfully loading the packages
+// specified by a configuration.
+//
+// The configuration can be set directly, but *Config provides various
+// convenience methods to simplify the common cases, each of which can
+// be called any number of times. Finally, these are followed by a
+// call to Load() to actually load and type-check the program.
+//
+// var conf loader.Config
+//
+// // Use the command-line arguments to specify
+// // a set of initial packages to load from source.
+// // See FromArgsUsage for help.
+// rest, err := conf.FromArgs(os.Args[1:], wantTests)
+//
+// // Parse the specified files and create an ad hoc package with path "foo".
+// // All files must have the same 'package' declaration.
+// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
+//
+// // Create an ad hoc package with path "foo" from
+// // the specified already-parsed files.
+// // All ASTs must have the same 'package' declaration.
+// conf.CreateFromFiles("foo", parsedFiles)
+//
+// // Add "runtime" to the set of packages to be loaded.
+// conf.Import("runtime")
+//
+// // Adds "fmt" and "fmt_test" to the set of packages
+// // to be loaded. "fmt" will include *_test.go files.
+// conf.ImportWithTests("fmt")
+//
+// // Finally, load all the packages specified by the configuration.
+// prog, err := conf.Load()
+//
+// See examples_test.go for examples of API usage.
+//
+//
+// CONCEPTS AND TERMINOLOGY
+//
+// The WORKSPACE is the set of packages accessible to the loader. The
+// workspace is defined by Config.Build, a *build.Context. The
+// default context treats subdirectories of $GOROOT and $GOPATH as
+// packages, but this behavior may be overridden.
+//
+// An AD HOC package is one specified as a set of source files on the
+// command line. In the simplest case, it may consist of a single file
+// such as $GOROOT/src/net/http/triv.go.
+//
+// EXTERNAL TEST packages are those composed of a set of *_test.go
+// files all with the same 'package foo_test' declaration, all in the
+// same directory. (go/build.Package calls these files XTestGoFiles.)
+//
+// An IMPORTABLE package is one that can be referred to by some import
+// spec. Every importable package is uniquely identified by its
+// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
+// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
+// typically denotes a subdirectory of the workspace.
+//
+// An import declaration uses an IMPORT PATH to refer to a package.
+// Most import declarations use the package path as the import path.
+//
+// Due to VENDORING (https://golang.org/s/go15vendor), the
+// interpretation of an import path may depend on the directory in which
+// it appears. To resolve an import path to a package path, go/build
+// must search the enclosing directories for a subdirectory named
+// "vendor".
+//
+// Ad hoc packages and external test packages are NON-IMPORTABLE. The
+// path of an ad hoc package is inferred from the package
+// declarations of its files and is therefore not a unique package key.
+// For example, Config.CreatePkgs may specify two initial ad hoc
+// packages, both with path "main".
+//
+// An AUGMENTED package is an importable package P plus all the
+// *_test.go files with the same 'package foo' declaration as P.
+// (go/build.Package calls these files TestGoFiles.)
+//
+// The INITIAL packages are those specified in the configuration. A
+// DEPENDENCY is a package loaded to satisfy an import in an initial
+// package or another dependency.
+//
+package loader
+
+// IMPLEMENTATION NOTES
+//
+// 'go test', in-package test files, and import cycles
+// ---------------------------------------------------
+//
+// An external test package may depend upon members of the augmented
+// package that are not in the unaugmented package, such as functions
+// that expose internals. (See bufio/export_test.go for an example.)
+// So, the loader must ensure that for each external test package
+// it loads, it also augments the corresponding non-test package.
+//
+// The import graph over n unaugmented packages must be acyclic; the
+// import graph over n-1 unaugmented packages plus one augmented
+// package must also be acyclic. ('go test' relies on this.) But the
+// import graph over n augmented packages may contain cycles.
+//
+// First, all the (unaugmented) non-test packages and their
+// dependencies are imported in the usual way; the loader reports an
+// error if it detects an import cycle.
+//
+// Then, each package P for which testing is desired is augmented by
+// the list P' of its in-package test files, by calling
+// (*types.Checker).Files. This arrangement ensures that P' may
+// reference definitions within P, but P may not reference definitions
+// within P'. Furthermore, P' may import any other package, including
+// ones that depend upon P, without an import cycle error.
+//
+// Consider two packages A and B, both of which have lists of
+// in-package test files we'll call A' and B', and which have the
+// following import graph edges:
+// B imports A
+// B' imports A
+// A' imports B
+// This last edge would be expected to create an error were it not
+// for the special type-checking discipline above.
+// Cycles of size greater than two are possible. For example:
+// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
+// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
+// regexp/exec_test.go (package regexp) imports "compress/bzip2"
+//
+//
+// Concurrency
+// -----------
+//
+// Let us define the import dependency graph as follows. Each node is a
+// list of files passed to (Checker).Files at once. Many of these lists
+// are the production code of an importable Go package, so those nodes
+// are labelled by the package's path. The remaining nodes are
+// ad hoc packages and lists of in-package *_test.go files that augment
+// an importable package; those nodes have no label.
+//
+// The edges of the graph represent import statements appearing within a
+// file. An edge connects a node (a list of files) to the node it
+// imports, which is importable and thus always labelled.
+//
+// Loading is controlled by this dependency graph.
+//
+// To reduce I/O latency, we start loading a package's dependencies
+// asynchronously as soon as we've parsed its files and enumerated its
+// imports (scanImports). This performs a preorder traversal of the
+// import dependency graph.
+//
+// To exploit hardware parallelism, we type-check unrelated packages in
+// parallel, where "unrelated" means not ordered by the partial order of
+// the import dependency graph.
+//
+// We use a concurrency-safe non-blocking cache (importer.imported) to
+// record the results of type-checking, whether success or failure. An
+// entry is created in this cache by startLoad the first time the
+// package is imported. The first goroutine to request an entry becomes
+// responsible for completing the task and broadcasting completion to
+// subsequent requestors, which block until then.
+//
+// Type checking occurs in (parallel) postorder: we cannot type-check a
+// set of files until we have loaded and type-checked all of their
+// immediate dependencies (and thus all of their transitive
+// dependencies). If the input were guaranteed free of import cycles,
+// this would be trivial: we could simply wait for completion of the
+// dependencies and then invoke the typechecker.
+//
+// But as we saw in the 'go test' section above, some cycles in the
+// import graph over packages are actually legal, so long as the
+// cycle-forming edge originates in the in-package test files that
+// augment the package. This explains why the nodes of the import
+// dependency graph are not packages, but lists of files: the unlabelled
+// nodes avoid the cycles. Consider packages A and B where B imports A
+// and A's in-package tests AT import B. The naively constructed import
+// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
+// the graph over lists of files is AT --> B --> A, where AT is an
+// unlabelled node.
+//
+// Awaiting completion of the dependencies in a cyclic graph would
+// deadlock, so we must materialize the import dependency graph (as
+// importer.graph) and check whether each import edge forms a cycle. If
+// x imports y, and the graph already contains a path from y to x, then
+// there is an import cycle, in which case the processing of x must not
+// wait for the completion of processing of y.
+//
+// When the type-checker makes a callback (doImport) to the loader for a
+// given import edge, there are two possible cases. In the normal case,
+// the dependency has already been completely type-checked; doImport
+// does a cache lookup and returns it. In the cyclic case, the entry in
+// the cache is still necessarily incomplete, indicating a cycle. We
+// perform the cycle check again to obtain the error message, and return
+// the error.
+//
+// The result of using concurrency is about a 2.5x speedup for stdlib_test.
+
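+// The duplicate-suppressing cache described above can be sketched as
+// follows. This is a simplified, hypothetical illustration (the cache,
+// entry, and compute names are not the loader's actual types), not the
+// loader's implementation:
+//
+//	type entry struct {
+//		ready chan struct{} // closed when value is set
+//		value interface{}
+//	}
+//
+//	func (c *cache) get(key string) interface{} {
+//		c.mu.Lock()
+//		e, ok := c.entries[key]
+//		if !ok {
+//			// First requestor: do the work and broadcast readiness.
+//			e = &entry{ready: make(chan struct{})}
+//			c.entries[key] = e
+//			c.mu.Unlock()
+//			e.value = compute(key)
+//			close(e.ready)
+//		} else {
+//			// Subsequent requestors block until the entry is ready.
+//			c.mu.Unlock()
+//			<-e.ready
+//		}
+//		return e.value
+//	}
+//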
+// TODO(adonovan): overhaul the package documentation.
diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go
new file mode 100644
index 000000000..de34b809c
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/loader/loader.go
@@ -0,0 +1,1078 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/internal/cgo"
+)
+
+var ignoreVendor build.ImportMode
+
+const trace = false // show timing info for type-checking
+
+// Config specifies the configuration for loading a whole program from
+// Go source code.
+// The zero value for Config is a ready-to-use default configuration.
+type Config struct {
+ // Fset is the file set for the parser to use when loading the
+ // program. If nil, it may be lazily initialized by any
+ // method of Config.
+ Fset *token.FileSet
+
+ // ParserMode specifies the mode to be used by the parser when
+ // loading source packages.
+ ParserMode parser.Mode
+
+ // TypeChecker contains options relating to the type checker.
+ //
+ // The supplied IgnoreFuncBodies is not used; the effective
+ // value comes from the TypeCheckFuncBodies func below.
+ // The supplied Import function is not used either.
+ TypeChecker types.Config
+
+ // TypeCheckFuncBodies is a predicate over package paths.
+ // A package for which the predicate is false will
+ // have its package-level declarations type checked, but not
+ // its function bodies; this can be used to quickly load
+ // dependencies from source. If nil, all func bodies are type
+ // checked.
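+ //
+ // For example, a hedged sketch (the "myapp/" path prefix is purely
+ // illustrative, and the client is assumed to import "strings"):
+ // type-check function bodies only in first-party code:
+ //
+ //	conf.TypeCheckFuncBodies = func(path string) bool {
+ //		return strings.HasPrefix(path, "myapp/")
+ //	}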
+ TypeCheckFuncBodies func(path string) bool
+
+ // If Build is non-nil, it is used to locate source packages.
+ // Otherwise &build.Default is used.
+ //
+ // By default, cgo is invoked to preprocess Go files that
+ // import the fake package "C". This behaviour can be
+ // disabled by setting CGO_ENABLED=0 in the environment prior
+ // to startup, or by setting Build.CgoEnabled=false.
+ Build *build.Context
+
+ // The current directory, used for resolving relative package
+ // references such as "./go/loader". If empty, os.Getwd will be
+ // used instead.
+ Cwd string
+
+ // If DisplayPath is non-nil, it is used to transform each
+ // file name obtained from Build.Import(). This can be used
+ // to prevent a virtualized build.Config's file names from
+ // leaking into the user interface.
+ DisplayPath func(path string) string
+
+ // If AllowErrors is true, Load will return a Program even
+ // if some of its packages contained I/O, parser or type
+ // errors; such errors are accessible via PackageInfo.Errors. If
+ // false, Load will fail if any package had an error.
+ AllowErrors bool
+
+ // CreatePkgs specifies a list of non-importable initial
+ // packages to create. The resulting packages will appear in
+ // the corresponding elements of the Program.Created slice.
+ CreatePkgs []PkgSpec
+
+ // ImportPkgs specifies a set of initial packages to load.
+ // The map keys are package paths.
+ //
+ // The map value indicates whether to load tests. If true, Load
+ // will add and type-check two lists of files to the package:
+ // non-test files followed by in-package *_test.go files. In
+ // addition, it will append the external test package (if any)
+ // to Program.Created.
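+ //
+ // For example, the following is equivalent to calling
+ // ImportWithTests("fmt") followed by Import("errors"):
+ //
+ //	conf.ImportPkgs = map[string]bool{"fmt": true, "errors": false}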
+ ImportPkgs map[string]bool
+
+ // FindPackage is called during Load to create the build.Package
+ // for a given import path from a given directory.
+ // If FindPackage is nil, (*build.Context).Import is used.
+ // A client may use this hook to adapt to a proprietary build
+ // system that does not follow the "go build" layout
+ // conventions, for example.
+ //
+ // It must be safe to call concurrently from multiple goroutines.
+ FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error)
+
+ // AfterTypeCheck is called immediately after a list of files
+ // has been type-checked and appended to info.Files.
+ //
+ // This optional hook function is the earliest opportunity for
+ // the client to observe the output of the type checker,
+ // which may be useful to reduce analysis latency when loading
+ // a large program.
+ //
+ // The function is permitted to modify info.Info, for instance
+ // to clear data structures that are no longer needed, which can
+ // dramatically reduce peak memory consumption.
+ //
+ // The function may be called twice for the same PackageInfo:
+ // once for the files of the package and again for the
+ // in-package test files.
+ //
+ // It must be safe to call concurrently from multiple goroutines.
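+ //
+ // For example, a hedged sketch that discards expression-level type
+ // information once each list of files has been observed (whether
+ // this is safe depends on what the client reads later):
+ //
+ //	conf.AfterTypeCheck = func(info *loader.PackageInfo, files []*ast.File) {
+ //		info.Types = nil
+ //		info.Selections = nil
+ //	}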
+ AfterTypeCheck func(info *PackageInfo, files []*ast.File)
+}
+
+// A PkgSpec specifies a non-importable package to be created by Load.
+// Files are processed first, but typically only one of Files and
+// Filenames is provided. The path needn't be globally unique.
+//
+// For vendoring purposes, the package's directory is the one that
+// contains the first file.
+type PkgSpec struct {
+ Path string // package path ("" => use package declaration)
+ Files []*ast.File // ASTs of already-parsed files
+ Filenames []string // names of files to be parsed
+}
+
+// A Program is a Go program loaded from source as specified by a Config.
+type Program struct {
+ Fset *token.FileSet // the file set for this program
+
+ // Created[i] contains the initial package whose ASTs or
+ // filenames were supplied by Config.CreatePkgs[i], followed by
+ // the external test package, if any, of each package in
+ // Config.ImportPkgs ordered by ImportPath.
+ //
+ // NOTE: these files must not import "C". Cgo preprocessing is
+ // only performed on imported packages, not ad hoc packages.
+ //
+ // TODO(adonovan): we need to copy and adapt the logic of
+ // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make
+ // Config.Import and Config.Create methods return the same kind
+ // of entity, essentially a build.Package.
+ // Perhaps we can even reuse that type directly.
+ Created []*PackageInfo
+
+ // Imported contains the initially imported packages,
+ // as specified by Config.ImportPkgs.
+ Imported map[string]*PackageInfo
+
+ // AllPackages contains the PackageInfo of every package
+ // encountered by Load: all initial packages and all
+ // dependencies, including incomplete ones.
+ AllPackages map[*types.Package]*PackageInfo
+
+ // importMap is the canonical mapping of package paths to
+ // packages. It contains all Imported initial packages, but not
+ // Created ones, and all imported dependencies.
+ importMap map[string]*types.Package
+}
+
+// PackageInfo holds the ASTs and facts derived by the type-checker
+// for a single package.
+//
+// Not mutated once exposed via the API.
+//
+type PackageInfo struct {
+ Pkg *types.Package
+ Importable bool // true if 'import "Pkg.Path()"' would resolve to this
+ TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors
+ Files []*ast.File // syntax trees for the package's files
+ Errors []error // non-nil if the package had errors
+ types.Info // type-checker deductions.
+ dir string // package directory
+
+ checker *types.Checker // transient type-checker state
+ errorFunc func(error)
+}
+
+func (info *PackageInfo) String() string { return info.Pkg.Path() }
+
+func (info *PackageInfo) appendError(err error) {
+ if info.errorFunc != nil {
+ info.errorFunc(err)
+ } else {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ info.Errors = append(info.Errors, err)
+}
+
+func (conf *Config) fset() *token.FileSet {
+ if conf.Fset == nil {
+ conf.Fset = token.NewFileSet()
+ }
+ return conf.Fset
+}
+
+// ParseFile is a convenience function (intended for testing) that invokes
+// the parser using the Config's FileSet, which is initialized if nil.
+//
+// src specifies the parser input as a string, []byte, or io.Reader, and
+// filename is its apparent name. If src is nil, the contents of
+// filename are read from the file system.
+//
+func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
+ // TODO(adonovan): use conf.build() etc like parseFiles does.
+ return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
+}
+
+// FromArgsUsage is a partial usage message that applications calling
+// FromArgs may wish to include in their -help output.
+const FromArgsUsage = `
+<args> is a list of arguments denoting a set of initial packages.
+It may take one of two forms:
+
+1. A list of *.go source files.
+
+ All of the specified files are loaded, parsed and type-checked
+ as a single package. All the files must belong to the same directory.
+
+2. A list of import paths, each denoting a package.
+
+ The package's directory is found relative to the $GOROOT and
+ $GOPATH using similar logic to 'go build', and the *.go files in
+ that directory are loaded, parsed and type-checked as a single
+ package.
+
+ In addition, all *_test.go files in the directory are then loaded
+ and parsed. Those files whose package declaration equals that of
+ the non-*_test.go files are included in the primary package. Test
+ files whose package declaration ends with "_test" are type-checked
+ as another package, the 'external' test package, so that a single
+ import path may denote two packages. (Whether this behaviour is
+ enabled is tool-specific, and may depend on additional flags.)
+
+A '--' argument terminates the list of packages.
+`
+
+// FromArgs interprets args as a set of initial packages to load from
+// source and updates the configuration. It returns the list of
+// unconsumed arguments.
+//
+// It is intended for use in command-line interfaces that require a
+// set of initial packages to be specified; see FromArgsUsage message
+// for details.
+//
+// Only superficial errors are reported at this stage; errors dependent
+// on I/O are detected during Load.
+//
+func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
+ var rest []string
+ for i, arg := range args {
+ if arg == "--" {
+ rest = args[i+1:]
+ args = args[:i]
+ break // consume "--" and return the remaining args
+ }
+ }
+
+ if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
+ // Assume args is a list of *.go files
+ // denoting a single ad hoc package.
+ for _, arg := range args {
+ if !strings.HasSuffix(arg, ".go") {
+ return nil, fmt.Errorf("named files must be .go files: %s", arg)
+ }
+ }
+ conf.CreateFromFilenames("", args...)
+ } else {
+ // Assume args are directories each denoting a
+ // package and (perhaps) an external test, iff xtest.
+ for _, arg := range args {
+ if xtest {
+ conf.ImportWithTests(arg)
+ } else {
+ conf.Import(arg)
+ }
+ }
+ }
+
+ return rest, nil
+}
+
+// CreateFromFilenames is a convenience function that adds
+// a conf.CreatePkgs entry to create a package of the specified *.go
+// files.
+//
+func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
+ conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
+}
+
+// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
+// entry to create a package of the specified path and parsed files.
+//
+func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
+ conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
+}
+
+// ImportWithTests is a convenience function that adds path to
+// ImportPkgs, the set of initial source packages located relative to
+// $GOPATH. The package will be augmented by any *_test.go files in
+// its directory that contain a "package x" (not "package x_test")
+// declaration.
+//
+// In addition, if any *_test.go files contain a "package x_test"
+// declaration, an additional package comprising just those files will
+// be added to CreatePkgs.
+//
+func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
+
+// Import is a convenience function that adds path to ImportPkgs, the
+// set of initial packages that will be imported from source.
+//
+func (conf *Config) Import(path string) { conf.addImport(path, false) }
+
+func (conf *Config) addImport(path string, tests bool) {
+ if path == "C" {
+ return // ignore; not a real package
+ }
+ if conf.ImportPkgs == nil {
+ conf.ImportPkgs = make(map[string]bool)
+ }
+ conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests
+}
+
+// PathEnclosingInterval returns the PackageInfo and ast.Node that
+// contain source interval [start, end), and all the node's ancestors
+// up to the AST root. It searches all ast.Files of all packages in prog.
+// exact is defined as for astutil.PathEnclosingInterval.
+//
+// The zero value is returned if not found.
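+//
+// A hedged usage sketch (prog and pos are assumed to be in scope):
+//
+//	if info, path, exact := prog.PathEnclosingInterval(pos, pos); info != nil {
+//		fmt.Printf("%d enclosing nodes in %s (exact=%t)\n", len(path), info, exact)
+//	}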
+//
+func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
+ for _, info := range prog.AllPackages {
+ for _, f := range info.Files {
+ if f.Pos() == token.NoPos {
+ // This can happen if the parser saw
+ // too many errors and bailed out.
+ // (Use parser.AllErrors to prevent that.)
+ continue
+ }
+ if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) {
+ continue
+ }
+ if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
+ return info, path, exact
+ }
+ }
+ }
+ return nil, nil, false
+}
+
+// InitialPackages returns a new slice containing the set of initial
+// packages (Created + Imported) in unspecified order.
+//
+func (prog *Program) InitialPackages() []*PackageInfo {
+ infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
+ infos = append(infos, prog.Created...)
+ for _, info := range prog.Imported {
+ infos = append(infos, info)
+ }
+ return infos
+}
+
+// Package returns the ASTs and results of type checking for the
+// specified package.
+func (prog *Program) Package(path string) *PackageInfo {
+ if info, ok := prog.AllPackages[prog.importMap[path]]; ok {
+ return info
+ }
+ for _, info := range prog.Created {
+ if path == info.Pkg.Path() {
+ return info
+ }
+ }
+ return nil
+}
+
+// ---------- Implementation ----------
+
+// importer holds the working state of the algorithm.
+type importer struct {
+ conf *Config // the client configuration
+ start time.Time // for logging
+
+ progMu sync.Mutex // guards prog
+ prog *Program // the resulting program
+
+ // findpkg is a memoization of FindPackage.
+ findpkgMu sync.Mutex // guards findpkg
+ findpkg map[findpkgKey]*findpkgValue
+
+ importedMu sync.Mutex // guards imported
+ imported map[string]*importInfo // all imported packages (incl. failures) by import path
+
+ // import dependency graph: graph[x][y] => x imports y
+ //
+ // Since non-importable packages cannot be cyclic, we ignore
+ // their imports, thus we only need the subgraph over importable
+ // packages. Nodes are identified by their import paths.
+ graphMu sync.Mutex
+ graph map[string]map[string]bool
+}
+
+type findpkgKey struct {
+ importPath string
+ fromDir string
+ mode build.ImportMode
+}
+
+type findpkgValue struct {
+ ready chan struct{} // closed to broadcast readiness
+ bp *build.Package
+ err error
+}
+
+// importInfo tracks the success or failure of a single import.
+//
+// Upon completion, exactly one of info and err is non-nil:
+// info on successful creation of a package, err otherwise.
+// A successful package may still contain type errors.
+//
+type importInfo struct {
+ path string // import path
+ info *PackageInfo // results of typechecking (including errors)
+ complete chan struct{} // closed to broadcast that info is set.
+}
+
+// awaitCompletion blocks until ii is complete,
+// i.e. the info field is safe to inspect.
+func (ii *importInfo) awaitCompletion() {
+ <-ii.complete // wait for close
+}
+
+// Complete marks ii as complete.
+// Its info and err fields will not be subsequently updated.
+func (ii *importInfo) Complete(info *PackageInfo) {
+ if info == nil {
+ panic("info == nil")
+ }
+ ii.info = info
+ close(ii.complete)
+}
+
+type importError struct {
+ path string // import path
+ err error // reason for failure to create a package
+}
+
+// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
+// loading their dependency packages as needed.
+//
+// On success, Load returns a Program containing a PackageInfo for
+// each package. On failure, it returns an error.
+//
+// If AllowErrors is true, Load will return a Program even if some
+// packages contained I/O, parser or type errors, or if dependencies
+// were missing. (Such errors are accessible via PackageInfo.Errors.)
+// If false, Load will fail if any package had an error.
+//
+// It is an error if no packages were loaded.
+//
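+// A minimal end-to-end sketch (the chosen import path is illustrative,
+// and the client is assumed to import "fmt" and "log"):
+//
+//	var conf loader.Config
+//	conf.Import("fmt")
+//	prog, err := conf.Load()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(prog.Package("fmt").Pkg.Name())
+//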
+func (conf *Config) Load() (*Program, error) {
+ // Create a simple default error handler for parse/type errors.
+ if conf.TypeChecker.Error == nil {
+ conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
+ }
+
+ // Set default working directory for relative package references.
+ if conf.Cwd == "" {
+ var err error
+ conf.Cwd, err = os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Install default FindPackage hook using go/build logic.
+ if conf.FindPackage == nil {
+ conf.FindPackage = (*build.Context).Import
+ }
+
+ prog := &Program{
+ Fset: conf.fset(),
+ Imported: make(map[string]*PackageInfo),
+ importMap: make(map[string]*types.Package),
+ AllPackages: make(map[*types.Package]*PackageInfo),
+ }
+
+ imp := importer{
+ conf: conf,
+ prog: prog,
+ findpkg: make(map[findpkgKey]*findpkgValue),
+ imported: make(map[string]*importInfo),
+ start: time.Now(),
+ graph: make(map[string]map[string]bool),
+ }
+
+ // -- loading proper (concurrent phase) --------------------------------
+
+ var errpkgs []string // packages that contained errors
+
+ // Load the initially imported packages and their dependencies,
+ // in parallel.
+ // No vendor check on packages imported from the command line.
+ infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
+ for _, ie := range importErrors {
+ conf.TypeChecker.Error(ie.err) // failed to create package
+ errpkgs = append(errpkgs, ie.path)
+ }
+ for _, info := range infos {
+ prog.Imported[info.Pkg.Path()] = info
+ }
+
+ // Augment the designated initial packages by their tests.
+ // Dependencies are loaded in parallel.
+ var xtestPkgs []*build.Package
+ for importPath, augment := range conf.ImportPkgs {
+ if !augment {
+ continue
+ }
+
+ // No vendor check on packages imported from command line.
+ bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor)
+ if err != nil {
+ // Package not found, or can't even parse package declaration.
+ // Already reported by previous loop; ignore it.
+ continue
+ }
+
+ // Needs external test package?
+ if len(bp.XTestGoFiles) > 0 {
+ xtestPkgs = append(xtestPkgs, bp)
+ }
+
+ // Consult the cache using the canonical package path.
+ path := bp.ImportPath
+ imp.importedMu.Lock() // (unnecessary, we're sequential here)
+ ii, ok := imp.imported[path]
+ // Paranoid checks added due to issue #11012.
+ if !ok {
+ // Unreachable.
+ // The previous loop called importAll and thus
+ // startLoad for each path in ImportPkgs, which
+ // populates imp.imported[path] with a non-zero value.
+ panic(fmt.Sprintf("imported[%q] not found", path))
+ }
+ if ii == nil {
+ // Unreachable.
+ // The ii values in this loop are the same as in
+ // the previous loop, which enforced the invariant
+ // that at least one of ii.err and ii.info is non-nil.
+ panic(fmt.Sprintf("imported[%q] == nil", path))
+ }
+ if ii.info == nil {
+ // Unreachable.
+ // awaitCompletion has the postcondition
+ // ii.info != nil.
+ panic(fmt.Sprintf("imported[%q].info = nil", path))
+ }
+ info := ii.info
+ imp.importedMu.Unlock()
+
+ // Parse the in-package test files.
+ files, errs := imp.conf.parsePackageFiles(bp, 't')
+ for _, err := range errs {
+ info.appendError(err)
+ }
+
+ // The test files augmenting package P cannot be imported,
+ // but may import packages that import P,
+ // so we must disable the cycle check.
+ imp.addFiles(info, files, false)
+ }
+
+ createPkg := func(path, dir string, files []*ast.File, errs []error) {
+ info := imp.newPackageInfo(path, dir)
+ for _, err := range errs {
+ info.appendError(err)
+ }
+
+ // Ad hoc packages are non-importable,
+ // so no cycle check is needed.
+ // addFiles loads dependencies in parallel.
+ imp.addFiles(info, files, false)
+ prog.Created = append(prog.Created, info)
+ }
+
+ // Create packages specified by conf.CreatePkgs.
+ for _, cp := range conf.CreatePkgs {
+ files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode)
+ files = append(files, cp.Files...)
+
+ path := cp.Path
+ if path == "" {
+ if len(files) > 0 {
+ path = files[0].Name.Name
+ } else {
+ path = "(unnamed)"
+ }
+ }
+
+ dir := conf.Cwd
+ if len(files) > 0 && files[0].Pos().IsValid() {
+ dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name())
+ }
+ createPkg(path, dir, files, errs)
+ }
+
+ // Create external test packages.
+ sort.Sort(byImportPath(xtestPkgs))
+ for _, bp := range xtestPkgs {
+ files, errs := imp.conf.parsePackageFiles(bp, 'x')
+ createPkg(bp.ImportPath+"_test", bp.Dir, files, errs)
+ }
+
+ // -- finishing up (sequential) ----------------------------------------
+
+ if len(prog.Imported)+len(prog.Created) == 0 {
+ return nil, errors.New("no initial packages were loaded")
+ }
+
+ // Create infos for indirectly imported packages.
+ // e.g. incomplete packages without syntax, loaded from export data.
+ for _, obj := range prog.importMap {
+ info := prog.AllPackages[obj]
+ if info == nil {
+ prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true}
+ } else {
+ // finished
+ info.checker = nil
+ info.errorFunc = nil
+ }
+ }
+
+ if !conf.AllowErrors {
+ // Report errors in indirectly imported packages.
+ for _, info := range prog.AllPackages {
+ if len(info.Errors) > 0 {
+ errpkgs = append(errpkgs, info.Pkg.Path())
+ }
+ }
+ if errpkgs != nil {
+ var more string
+ if len(errpkgs) > 3 {
+ more = fmt.Sprintf(" and %d more", len(errpkgs)-3)
+ errpkgs = errpkgs[:3]
+ }
+ return nil, fmt.Errorf("couldn't load packages due to errors: %s%s",
+ strings.Join(errpkgs, ", "), more)
+ }
+ }
+
+ markErrorFreePackages(prog.AllPackages)
+
+ return prog, nil
+}
+
+type byImportPath []*build.Package
+
+func (b byImportPath) Len() int { return len(b) }
+func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath }
+func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+
+// markErrorFreePackages sets the TransitivelyErrorFree flag on all
+// applicable packages.
+func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
+ // Build the transpose of the import graph.
+ importedBy := make(map[*types.Package]map[*types.Package]bool)
+ for P := range allPackages {
+ for _, Q := range P.Imports() {
+ clients, ok := importedBy[Q]
+ if !ok {
+ clients = make(map[*types.Package]bool)
+ importedBy[Q] = clients
+ }
+ clients[P] = true
+ }
+ }
+
+ // Find all packages reachable from some error package.
+ reachable := make(map[*types.Package]bool)
+ var visit func(*types.Package)
+ visit = func(p *types.Package) {
+ if !reachable[p] {
+ reachable[p] = true
+ for q := range importedBy[p] {
+ visit(q)
+ }
+ }
+ }
+ for _, info := range allPackages {
+ if len(info.Errors) > 0 {
+ visit(info.Pkg)
+ }
+ }
+
+ // Mark the others as "transitively error-free".
+ for _, info := range allPackages {
+ if !reachable[info.Pkg] {
+ info.TransitivelyErrorFree = true
+ }
+ }
+}
+
+// build returns the effective build context.
+func (conf *Config) build() *build.Context {
+ if conf.Build != nil {
+ return conf.Build
+ }
+ return &build.Default
+}
+
+// parsePackageFiles enumerates the files belonging to package path,
+// then loads, parses and returns them, plus a list of I/O or parse
+// errors that were encountered.
+//
+// 'which' indicates which files to include:
+// 'g': include non-test *.go source files (GoFiles + processed CgoFiles)
+// 't': include in-package *_test.go source files (TestGoFiles)
+// 'x': include external *_test.go source files. (XTestGoFiles)
+//
+func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
+ if bp.ImportPath == "unsafe" {
+ return nil, nil
+ }
+ var filenames []string
+ switch which {
+ case 'g':
+ filenames = bp.GoFiles
+ case 't':
+ filenames = bp.TestGoFiles
+ case 'x':
+ filenames = bp.XTestGoFiles
+ default:
+ panic(which)
+ }
+
+ files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode)
+
+ // Preprocess CgoFiles and parse the outputs (sequentially).
+ if which == 'g' && bp.CgoFiles != nil {
+ cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ files = append(files, cgofiles...)
+ }
+ }
+
+ return files, errs
+}
+
+// doImport imports the package denoted by path.
+// It implements the types.Importer signature.
+//
+// It returns an error if a package could not be created
+// (e.g. go/build or parse error), but type errors are reported via
+// the types.Config.Error callback (the first of which is also saved
+// in the package's PackageInfo).
+//
+// Idempotent.
+//
+func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
+ if to == "C" {
+ // This should be unreachable, but ad hoc packages are
+ // not currently subject to cgo preprocessing.
+ // See https://golang.org/issue/11627.
+ return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`,
+ from.Pkg.Path())
+ }
+
+ bp, err := imp.findPackage(to, from.dir, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // The standard unsafe package is handled specially,
+ // and has no PackageInfo.
+ if bp.ImportPath == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // Look for the package in the cache using its canonical path.
+ path := bp.ImportPath
+ imp.importedMu.Lock()
+ ii := imp.imported[path]
+ imp.importedMu.Unlock()
+ if ii == nil {
+ panic("internal error: unexpected import: " + path)
+ }
+ if ii.info != nil {
+ return ii.info.Pkg, nil
+ }
+
+ // Import of incomplete package: this indicates a cycle.
+ fromPath := from.Pkg.Path()
+ if cycle := imp.findPath(path, fromPath); cycle != nil {
+ cycle = append([]string{fromPath}, cycle...)
+ return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
+ }
+
+ panic("internal error: import of incomplete (yet acyclic) package: " + fromPath)
+}
+
+// findPackage locates the package denoted by the importPath in the
+// specified directory.
+func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
+ // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7)
+ // to avoid holding the lock around FindPackage.
+ key := findpkgKey{importPath, fromDir, mode}
+ imp.findpkgMu.Lock()
+ v, ok := imp.findpkg[key]
+ if ok {
+ // cache hit
+ imp.findpkgMu.Unlock()
+
+ <-v.ready // wait for entry to become ready
+ } else {
+ // Cache miss: this goroutine becomes responsible for
+ // populating the map entry and broadcasting its readiness.
+ v = &findpkgValue{ready: make(chan struct{})}
+ imp.findpkg[key] = v
+ imp.findpkgMu.Unlock()
+
+ ioLimit <- true
+ v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode)
+ <-ioLimit
+
+ if _, ok := v.err.(*build.NoGoError); ok {
+ v.err = nil // empty directory is not an error
+ }
+
+ close(v.ready) // broadcast ready condition
+ }
+ return v.bp, v.err
+}
+
+// importAll loads, parses, and type-checks the specified packages in
+// parallel and returns their completed importInfos in unspecified order.
+//
+// fromPath is the package path of the importing package, if it is
+// importable, "" otherwise. It is used for cycle detection.
+//
+// fromDir is the directory containing the import declaration that
+// caused these imports.
+//
+func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) {
+ // TODO(adonovan): opt: do the loop in parallel once
+ // findPackage is non-blocking.
+ var pending []*importInfo
+ for importPath := range imports {
+ bp, err := imp.findPackage(importPath, fromDir, mode)
+ if err != nil {
+ errors = append(errors, importError{
+ path: importPath,
+ err: err,
+ })
+ continue
+ }
+ pending = append(pending, imp.startLoad(bp))
+ }
+
+ if fromPath != "" {
+ // We're loading a set of imports.
+ //
+ // We must record graph edges from the importing package
+ // to its dependencies, and check for cycles.
+ imp.graphMu.Lock()
+ deps, ok := imp.graph[fromPath]
+ if !ok {
+ deps = make(map[string]bool)
+ imp.graph[fromPath] = deps
+ }
+ for _, ii := range pending {
+ deps[ii.path] = true
+ }
+ imp.graphMu.Unlock()
+ }
+
+ for _, ii := range pending {
+ if fromPath != "" {
+ if cycle := imp.findPath(ii.path, fromPath); cycle != nil {
+ // Cycle-forming import: we must not await its
+ // completion since it would deadlock.
+ //
+ // We don't record the error in ii since
+ // the error is really associated with the
+ // cycle-forming edge, not the package itself.
+ // (Also it would complicate the
+ // invariants of importPath completion.)
+ if trace {
+ fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle)
+ }
+ continue
+ }
+ }
+ ii.awaitCompletion()
+ infos = append(infos, ii.info)
+ }
+
+ return infos, errors
+}
+
+// findPath returns an arbitrary path from 'from' to 'to' in the import
+// graph, or nil if there was none.
+func (imp *importer) findPath(from, to string) []string {
+ imp.graphMu.Lock()
+ defer imp.graphMu.Unlock()
+
+ seen := make(map[string]bool)
+ var search func(stack []string, importPath string) []string
+ search = func(stack []string, importPath string) []string {
+ if !seen[importPath] {
+ seen[importPath] = true
+ stack = append(stack, importPath)
+ if importPath == to {
+ return stack
+ }
+ for x := range imp.graph[importPath] {
+ if p := search(stack, x); p != nil {
+ return p
+ }
+ }
+ }
+ return nil
+ }
+ return search(make([]string, 0, 20), from)
+}
+
+// startLoad initiates the loading, parsing and type-checking of the
+// specified package and its dependencies, if it has not already begun.
+//
+// It returns an importInfo, not necessarily in a completed state. The
+// caller must call awaitCompletion() before accessing its info field.
+//
+// startLoad is concurrency-safe and idempotent.
+//
+func (imp *importer) startLoad(bp *build.Package) *importInfo {
+ path := bp.ImportPath
+ imp.importedMu.Lock()
+ ii, ok := imp.imported[path]
+ if !ok {
+ ii = &importInfo{path: path, complete: make(chan struct{})}
+ imp.imported[path] = ii
+ go func() {
+ info := imp.load(bp)
+ ii.Complete(info)
+ }()
+ }
+ imp.importedMu.Unlock()
+
+ return ii
+}
+
+// load implements package loading by parsing Go source files
+// located by go/build.
+func (imp *importer) load(bp *build.Package) *PackageInfo {
+ info := imp.newPackageInfo(bp.ImportPath, bp.Dir)
+ info.Importable = true
+ files, errs := imp.conf.parsePackageFiles(bp, 'g')
+ for _, err := range errs {
+ info.appendError(err)
+ }
+
+ imp.addFiles(info, files, true)
+
+ imp.progMu.Lock()
+ imp.prog.importMap[bp.ImportPath] = info.Pkg
+ imp.progMu.Unlock()
+
+ return info
+}
+
+// addFiles adds and type-checks the specified files to info, loading
+// their dependencies if needed. The order of files determines the
+// package initialization order. It may be called multiple times on the
+// same package. Errors are appended to the info.Errors field.
+//
+// cycleCheck determines whether the imports within files create
+// dependency edges that should be checked for potential cycles.
+//
+func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
+ // Ensure the dependencies are loaded, in parallel.
+ var fromPath string
+ if cycleCheck {
+ fromPath = info.Pkg.Path()
+ }
+ // TODO(adonovan): opt: make the caller do scanImports.
+ // Callers with a build.Package can skip it.
+ imp.importAll(fromPath, info.dir, scanImports(files), 0)
+
+ if trace {
+ fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n",
+ time.Since(imp.start), info.Pkg.Path(), len(files))
+ }
+
+ // Don't call checker.Files on Unsafe, even with zero files,
+ // because it would mutate the package, which is a global.
+ if info.Pkg == types.Unsafe {
+ if len(files) > 0 {
+ panic(`"unsafe" package contains unexpected files`)
+ }
+ } else {
+ // Ignore the returned (first) error since we
+ // already collect them all in the PackageInfo.
+ info.checker.Files(files)
+ info.Files = append(info.Files, files...)
+ }
+
+ if imp.conf.AfterTypeCheck != nil {
+ imp.conf.AfterTypeCheck(info, files)
+ }
+
+ if trace {
+ fmt.Fprintf(os.Stderr, "%s: stop %q\n",
+ time.Since(imp.start), info.Pkg.Path())
+ }
+}
+
+func (imp *importer) newPackageInfo(path, dir string) *PackageInfo {
+ var pkg *types.Package
+ if path == "unsafe" {
+ pkg = types.Unsafe
+ } else {
+ pkg = types.NewPackage(path, "")
+ }
+ info := &PackageInfo{
+ Pkg: pkg,
+ Info: types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ },
+ errorFunc: imp.conf.TypeChecker.Error,
+ dir: dir,
+ }
+
+ // Copy the types.Config so we can vary it across PackageInfos.
+ tc := imp.conf.TypeChecker
+ tc.IgnoreFuncBodies = false
+ if f := imp.conf.TypeCheckFuncBodies; f != nil {
+ tc.IgnoreFuncBodies = !f(path)
+ }
+ tc.Importer = closure{imp, info}
+ tc.Error = info.appendError // appendError wraps the user's Error function
+
+ info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
+ imp.progMu.Lock()
+ imp.prog.AllPackages[pkg] = info
+ imp.progMu.Unlock()
+ return info
+}
+
+type closure struct {
+ imp *importer
+ info *PackageInfo
+}
+
+func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) }
diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go
new file mode 100644
index 000000000..7f38dd740
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/loader/util.go
@@ -0,0 +1,124 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader
+
+import (
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "os"
+ "strconv"
+ "sync"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 10)
+
+// parseFiles parses the Go source files within directory dir and
+// returns the ASTs of the ones that could be at least partially parsed,
+// along with a list of I/O and parse errors encountered.
+//
+// I/O is done via ctxt, which may specify a virtual file system.
+// displayPath is used to transform the filenames attached to the ASTs.
+//
+func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
+ if displayPath == nil {
+ displayPath = func(path string) string { return path }
+ }
+ var wg sync.WaitGroup
+ n := len(files)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range files {
+ if !buildutil.IsAbsPath(ctxt, file) {
+ file = buildutil.JoinPath(ctxt, dir, file)
+ }
+ wg.Add(1)
+ go func(i int, file string) {
+ ioLimit <- true // wait
+ defer func() {
+ wg.Done()
+ <-ioLimit // signal
+ }()
+ var rd io.ReadCloser
+ var err error
+ if ctxt.OpenFile != nil {
+ rd, err = ctxt.OpenFile(file)
+ } else {
+ rd, err = os.Open(file)
+ }
+ if err != nil {
+ errors[i] = err // open failed
+ return
+ }
+
+ // ParseFile may return both an AST and an error.
+ parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
+ rd.Close()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// scanImports returns the set of all import paths from all
+// import specs in the specified files.
+func scanImports(files []*ast.File) map[string]bool {
+ imports := make(map[string]bool)
+ for _, f := range files {
+ for _, decl := range f.Decls {
+ if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.ImportSpec)
+
+ // NB: do not assume the program is well-formed!
+ path, err := strconv.Unquote(spec.Path.Value)
+ if err != nil {
+ continue // quietly ignore the error
+ }
+ if path == "C" {
+ continue // skip pseudopackage
+ }
+ imports[path] = true
+ }
+ }
+ }
+ }
+ return imports
+}
+
+// ---------- Internal helpers ----------
+
+// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
+func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
+ p := int(pos)
+ base := f.Base()
+ return base <= p && p < base+f.Size()
+}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 000000000..3799f8ed8
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,222 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The Load function takes as input a list of patterns and returns a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool,
+but all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
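+For example, a hedged sketch of a "file=" query (the file path is
+illustrative):
+
+	cfg := &packages.Config{Mode: packages.LoadFiles}
+	pkgs, err := packages.Load(cfg, "file=/home/user/go/src/fmt/print.go")
+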
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypeInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load contains only the packages matched
+by the patterns. Their dependencies can be found by walking the import
+graph using the Imports fields.
+
+The Load function can be configured by passing a pointer to a Config as
+the first argument. A nil Config is equivalent to the zero Config, which
+causes Load to run in LoadFiles mode, collecting minimal information.
+See the documentation for type Config for details.
+
+As noted earlier, the Config.Mode controls the amount of detail
+reported about the loaded packages; each mode returns all the data of
+the previous mode plus some additional data. See the documentation for
+type LoadMode for details.
+
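+For example, a hedged sketch that loads syntax and type information
+for all packages under the current directory (the client is assumed
+to import "fmt" and "log"):
+
+	cfg := &packages.Config{Mode: packages.LoadSyntax, Tests: true}
+	pkgs, err := packages.Load(cfg, "./...")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, pkg := range pkgs {
+		fmt.Println(pkg.ID, len(pkg.Syntax))
+	}
+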
+Most tools should pass their command-line arguments (after any flags)
+uninterpreted to the loader, so that the loader can interpret them
+according to the conventions of the underlying build system.
+See the Example function for typical usage.
+
+*/
+package packages // import "golang.org/x/tools/go/packages"
+
+/*
+
+Motivation and design considerations
+
+The new package's design solves problems addressed by two existing
+packages: go/build, which locates and describes packages, and
+golang.org/x/tools/go/loader, which loads, parses and type-checks them.
+The go/build.Package structure encodes too much of the 'go build' way
+of organizing projects, leaving us in need of a data type that describes a
+package of Go source code independent of the underlying build system.
+We wanted something that works equally well with go build and vgo, and
+also other build systems such as Bazel and Blaze, making it possible to
+construct analysis tools that work in all these environments.
+Tools such as errcheck and staticcheck were essentially unavailable to
+the Go community at Google, and some of Google's internal tools for Go
+are unavailable externally.
+This new package provides a uniform way to obtain package metadata by
+querying each of these build systems, optionally supporting their
+preferred command-line notations for packages, so that tools integrate
+neatly with users' build environments. The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+ in several times in sequence as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: The Overlay field in the Config allows providing alternate contents
+for Go source files, by providing a mapping from file path to contents.
+go/packages will pull in new imports added in overlay files when go/packages
+is run in LoadImports mode or greater.
+Overlay support for the go list driver isn't complete yet: if the file doesn't
+exist on disk, it will only be recognized in an overlay if it is a non-test file
+and the package would be reported even without the overlay.
+
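+For example, a hedged sketch (the path and contents are illustrative):
+
+	cfg.Overlay = map[string][]byte{
+		"/home/user/proj/extra.go": []byte("package proj\n\nconst Extra = 1\n"),
+	}
+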
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 000000000..22ff769ef
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "strings"
+)
+
+// driverRequest is the JSON request that findExternalDriver writes to the
+// external driver tool's standard input.
+type driverRequest struct {
+ Command string `json:"command"`
+ Mode LoadMode `json:"mode"`
+ Env []string `json:"env"`
+ BuildFlags []string `json:"build_flags"`
+ Tests bool `json:"tests"`
+ Overlay map[string][]byte `json:"overlay"`
+}
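+
+// As an illustrative, non-normative sketch, a request asking the driver to
+// load packages in LoadImports mode (bit value 31 under this snapshot's
+// LoadMode constants) is written to the driver's standard input roughly as:
+//
+//	{"command":"","mode":31,"env":["GOPATH=/home/user/go"],"build_flags":null,"tests":false,"overlay":null}
+//
+// (The env value shown is hypothetical.) The driver must reply on standard
+// output with a JSON-encoded driverResponse, defined in packages.go.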
+
+// findExternalDriver returns a driver function that invokes an external
+// tool to supply the build system package structure, or nil if no such
+// tool is found. If GOPACKAGESDRIVER is set in the environment,
+// findExternalDriver uses its value (the special value "off" disables the
+// mechanism); otherwise it searches for a binary named gopackagesdriver on
+// the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+	if tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, words ...string) (*driverResponse, error) {
+ req, err := json.Marshal(driverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, words...)
+ cmd.Dir = cfg.Dir
+ cmd.Env = cfg.Env
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = new(bytes.Buffer)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ var response driverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 000000000..132d28347
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,828 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/go/internal/packagesdriver"
+ "golang.org/x/tools/internal/gopathwalk"
+ "golang.org/x/tools/internal/semver"
+)
+
+// debug controls verbose logging.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+ error
+}
+
+// responseDeduper wraps a driverResponse, deduplicating its contents.
+type responseDeduper struct {
+ seenRoots map[string]bool
+ seenPackages map[string]*Package
+ dr *driverResponse
+}
+
+// init fills in r with a driverResponse.
+func (r *responseDeduper) init(dr *driverResponse) {
+ r.dr = dr
+ r.seenRoots = map[string]bool{}
+ r.seenPackages = map[string]*Package{}
+ for _, pkg := range dr.Packages {
+ r.seenPackages[pkg.ID] = pkg
+ }
+ for _, root := range dr.Roots {
+ r.seenRoots[root] = true
+ }
+}
+
+func (r *responseDeduper) addPackage(p *Package) {
+ if r.seenPackages[p.ID] != nil {
+ return
+ }
+ r.seenPackages[p.ID] = p
+ r.dr.Packages = append(r.dr.Packages, p)
+}
+
+func (r *responseDeduper) addRoot(id string) {
+ if r.seenRoots[id] {
+ return
+ }
+ r.seenRoots[id] = true
+ r.dr.Roots = append(r.dr.Roots, id)
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ var sizes types.Sizes
+ var sizeserr error
+ var sizeswg sync.WaitGroup
+ if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
+ sizeswg.Add(1)
+ go func() {
+ sizes, sizeserr = getSizes(cfg)
+ sizeswg.Done()
+ }()
+ }
+
+ // Determine files requested in contains patterns
+ var containFiles []string
+ var packagesNamed []string
+ restPatterns := make([]string, 0, len(patterns))
+ // Extract file= and other [querytype]= patterns. Report an error if querytype
+ // doesn't exist.
+extractQueries:
+ for _, pattern := range patterns {
+ eqidx := strings.Index(pattern, "=")
+ if eqidx < 0 {
+ restPatterns = append(restPatterns, pattern)
+ } else {
+ query, value := pattern[:eqidx], pattern[eqidx+len("="):]
+ switch query {
+ case "file":
+ containFiles = append(containFiles, value)
+ case "pattern":
+ restPatterns = append(restPatterns, value)
+ case "iamashamedtousethedisabledqueryname":
+ packagesNamed = append(packagesNamed, value)
+ case "": // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ default:
+				for _, r := range query {
+					if r < 'a' || r > 'z' { // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ continue extractQueries
+ }
+ }
+ // Reject all other patterns containing "="
+ return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
+ }
+ }
+ }
+
+ response := &responseDeduper{}
+ var err error
+
+	// See if we have any patterns to pass through to go list. An empty list
+	// of patterns also requires a go list call, since it is the equivalent
+	// of ".".
+ if len(restPatterns) > 0 || len(patterns) == 0 {
+ dr, err := golistDriver(cfg, restPatterns...)
+ if err != nil {
+ return nil, err
+ }
+ response.init(dr)
+ } else {
+ response.init(&driverResponse{})
+ }
+
+ sizeswg.Wait()
+ if sizeserr != nil {
+ return nil, sizeserr
+ }
+ // types.SizesFor always returns nil or a *types.StdSizes
+ response.dr.Sizes, _ = sizes.(*types.StdSizes)
+
+ var containsCandidates []string
+
+ if len(containFiles) != 0 {
+ if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(packagesNamed) != 0 {
+ if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil {
+ return nil, err
+ }
+ }
+
+ modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr)
+ if err != nil {
+ return nil, err
+ }
+ if len(containFiles) > 0 {
+ containsCandidates = append(containsCandidates, modifiedPkgs...)
+ containsCandidates = append(containsCandidates, needPkgs...)
+ }
+
+	if len(needPkgs) > 0 {
+		if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil {
+			return nil, err
+		}
+	}
+ // Check candidate packages for containFiles.
+ if len(containFiles) > 0 {
+ for _, id := range containsCandidates {
+ pkg := response.seenPackages[id]
+ for _, f := range containFiles {
+ for _, g := range pkg.GoFiles {
+ if sameFile(f, g) {
+ response.addRoot(id)
+ }
+ }
+ }
+ }
+ }
+
+ return response.dr, nil
+}
+
+func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
+ dr, err := driver(cfg, pkgs...)
+ if err != nil {
+ return err
+ }
+ for _, pkg := range dr.Packages {
+ response.addPackage(pkg)
+ }
+ return nil
+}
+
+func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
+ for _, query := range queries {
+ // TODO(matloob): Do only one query per directory.
+ fdir := filepath.Dir(query)
+ // Pass absolute path of directory to go list so that it knows to treat it as a directory,
+ // not a package path.
+ pattern, err := filepath.Abs(fdir)
+ if err != nil {
+ return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
+ }
+ dirResponse, err := driver(cfg, pattern)
+ if err != nil {
+ return err
+ }
+ isRoot := make(map[string]bool, len(dirResponse.Roots))
+ for _, root := range dirResponse.Roots {
+ isRoot[root] = true
+ }
+ for _, pkg := range dirResponse.Packages {
+ // Add any new packages to the main set
+ // We don't bother to filter packages that will be dropped by the changes of roots,
+ // that will happen anyway during graph construction outside this function.
+ // Over-reporting packages is not a problem.
+ response.addPackage(pkg)
+ // if the package was not a root one, it cannot have the file
+ if !isRoot[pkg.ID] {
+ continue
+ }
+ for _, pkgFile := range pkg.GoFiles {
+ if filepath.Base(query) == filepath.Base(pkgFile) {
+ response.addRoot(pkg.ID)
+ break
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// modCacheRegexp splits a path in a module cache into module, module version, and package.
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
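+
+// For example (illustrative), "github.com/user/mod@v1.2.3/pkg/sub" splits
+// into ("github.com/user/mod", "v1.2.3", "/pkg/sub").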
+
+func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
+ // calling `go env` isn't free; bail out if there's nothing to do.
+ if len(queries) == 0 {
+ return nil
+ }
+ // Determine which directories are relevant to scan.
+ roots, modRoot, err := roots(cfg)
+ if err != nil {
+ return err
+ }
+
+ // Scan the selected directories. Simple matches, from GOPATH/GOROOT
+ // or the local module, can simply be "go list"ed. Matches from the
+ // module cache need special treatment.
+ var matchesMu sync.Mutex
+ var simpleMatches, modCacheMatches []string
+ add := func(root gopathwalk.Root, dir string) {
+ // Walk calls this concurrently; protect the result slices.
+ matchesMu.Lock()
+ defer matchesMu.Unlock()
+
+ path := dir
+ if dir != root.Path {
+ path = dir[len(root.Path)+1:]
+ }
+ if pathMatchesQueries(path, queries) {
+ switch root.Type {
+ case gopathwalk.RootModuleCache:
+ modCacheMatches = append(modCacheMatches, path)
+ case gopathwalk.RootCurrentModule:
+ // We'd need to read go.mod to find the full
+ // import path. Relative's easier.
+ rel, err := filepath.Rel(cfg.Dir, dir)
+ if err != nil {
+ // This ought to be impossible, since
+ // we found dir in the current module.
+ panic(err)
+ }
+ simpleMatches = append(simpleMatches, "./"+rel)
+ case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
+ simpleMatches = append(simpleMatches, path)
+ }
+ }
+ }
+
+ startWalk := time.Now()
+ gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
+ if debug {
+ log.Printf("%v for walk", time.Since(startWalk))
+ }
+
+ // Weird special case: the top-level package in a module will be in
+ // whatever directory the user checked the repository out into. It's
+ // more reasonable for that to not match the package name. So, if there
+ // are any Go files in the mod root, query it just to be safe.
+ if modRoot != "" {
+ rel, err := filepath.Rel(cfg.Dir, modRoot)
+ if err != nil {
+ panic(err) // See above.
+ }
+
+		files, err := ioutil.ReadDir(modRoot)
+		if err != nil {
+			return err
+		}
+ for _, f := range files {
+ if strings.HasSuffix(f.Name(), ".go") {
+ simpleMatches = append(simpleMatches, rel)
+ break
+ }
+ }
+ }
+
+ addResponse := func(r *driverResponse) {
+ for _, pkg := range r.Packages {
+ response.addPackage(pkg)
+ for _, name := range queries {
+ if pkg.Name == name {
+ response.addRoot(pkg.ID)
+ break
+ }
+ }
+ }
+ }
+
+ if len(simpleMatches) != 0 {
+ resp, err := driver(cfg, simpleMatches...)
+ if err != nil {
+ return err
+ }
+ addResponse(resp)
+ }
+
+ // Module cache matches are tricky. We want to avoid downloading new
+ // versions of things, so we need to use the ones present in the cache.
+ // go list doesn't accept version specifiers, so we have to write out a
+ // temporary module, and do the list in that module.
+ if len(modCacheMatches) != 0 {
+ // Collect all the matches, deduplicating by major version
+ // and preferring the newest.
+ type modInfo struct {
+ mod string
+ major string
+ }
+ mods := make(map[modInfo]string)
+ var imports []string
+ for _, modPath := range modCacheMatches {
+ matches := modCacheRegexp.FindStringSubmatch(modPath)
+ mod, ver := filepath.ToSlash(matches[1]), matches[2]
+ importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
+
+ major := semver.Major(ver)
+ if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
+ mods[modInfo{mod, major}] = ver
+ }
+
+ imports = append(imports, importPath)
+ }
+
+ // Build the temporary module.
+ var gomod bytes.Buffer
+ gomod.WriteString("module modquery\nrequire (\n")
+ for mod, version := range mods {
+ gomod.WriteString("\t" + mod.mod + " " + version + "\n")
+ }
+ gomod.WriteString(")\n")
+
+ tmpCfg := *cfg
+
+ // We're only trying to look at stuff in the module cache, so
+ // disable the network. This should speed things up, and has
+ // prevented errors in at least one case, #28518.
+		tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...)
+
+ var err error
+ tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpCfg.Dir)
+
+ if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
+ return fmt.Errorf("writing go.mod for module cache query: %v", err)
+ }
+
+ // Run the query, using the import paths calculated from the matches above.
+ resp, err := driver(&tmpCfg, imports...)
+ if err != nil {
+ return fmt.Errorf("querying module cache matches: %v", err)
+ }
+ addResponse(resp)
+ }
+
+ return nil
+}
+
+func getSizes(cfg *Config) (types.Sizes, error) {
+ return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
+}
+
+// roots selects the appropriate paths to walk based on the passed-in configuration,
+// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
+func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
+ stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
+ if err != nil {
+ return nil, "", err
+ }
+
+ fields := strings.Split(stdout.String(), "\n")
+ if len(fields) != 4 || len(fields[3]) != 0 {
+ return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
+ }
+ goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
+ var modDir string
+ if gomod != "" {
+ modDir = filepath.Dir(gomod)
+ }
+
+ var roots []gopathwalk.Root
+	// Always add GOROOT.
+	roots = append(roots, gopathwalk.Root{Path: filepath.Join(goroot, "/src"), Type: gopathwalk.RootGOROOT})
+	// If modules are enabled, scan the module dir.
+	if modDir != "" {
+		roots = append(roots, gopathwalk.Root{Path: modDir, Type: gopathwalk.RootCurrentModule})
+	}
+	// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
+	for _, p := range gopath {
+		if modDir != "" {
+			roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "/pkg/mod"), Type: gopathwalk.RootModuleCache})
+		} else {
+			roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "/src"), Type: gopathwalk.RootGOPATH})
+		}
+	}
+ }
+
+ return roots, modDir, nil
+}
+
+// These functions were copied from goimports. See further documentation there.
+
+// pathMatchesQueries is adapted from pkgIsCandidate.
+// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
+func pathMatchesQueries(path string, queries []string) bool {
+ lastTwo := lastTwoComponents(path)
+ for _, query := range queries {
+ if strings.Contains(lastTwo, query) {
+ return true
+ }
+ if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
+ lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
+ if strings.Contains(lastTwo, query) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// lastTwoComponents returns at most the last two path components
+// of v, using either / or \ as the path separator.
+func lastTwoComponents(v string) string {
+ nslash := 0
+ for i := len(v) - 1; i >= 0; i-- {
+ if v[i] == '/' || v[i] == '\\' {
+ nslash++
+ if nslash == 2 {
+ return v[i:]
+ }
+ }
+ }
+ return v
+}
+
+func hasHyphenOrUpperASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if b == '-' || ('A' <= b && b <= 'Z') {
+ return true
+ }
+ }
+ return false
+}
+
+func lowerASCIIAndRemoveHyphen(s string) (ret string) {
+ buf := make([]byte, 0, len(s))
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case b == '-':
+ continue
+ case 'A' <= b && b <= 'Z':
+ buf = append(buf, b+('a'-'A'))
+ default:
+ buf = append(buf, b)
+ }
+ }
+ return string(buf)
+}
+
+// Fields must match go list;
+// see $GOROOT/src/cmd/go/internal/load/pkg.go.
+type jsonPackage struct {
+ ImportPath string
+ Dir string
+ Name string
+ Export string
+ GoFiles []string
+ CompiledGoFiles []string
+ CFiles []string
+ CgoFiles []string
+ CXXFiles []string
+ MFiles []string
+ HFiles []string
+ FFiles []string
+ SFiles []string
+ SwigFiles []string
+ SwigCXXFiles []string
+ SysoFiles []string
+ Imports []string
+ ImportMap map[string]string
+ Deps []string
+ TestGoFiles []string
+ TestImports []string
+ XTestGoFiles []string
+ XTestImports []string
+ ForTest string // q in a "p [q.test]" package, else ""
+ DepOnly bool
+
+ Error *jsonPackageError
+}
+
+type jsonPackageError struct {
+ ImportStack []string
+ Pos string
+ Err string
+}
+
+func otherFiles(p *jsonPackage) [][]string {
+ return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
+}
+
+// golistDriver uses the "go list" command to expand the pattern
+// words and return metadata for the specified packages.
+// cfg.Dir may be "" and cfg.Env may be nil, as per os/exec.Command.
+func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
+ // go list uses the following identifiers in ImportPath and Imports:
+ //
+ // "p" -- importable package or main (command)
+ // "q.test" -- q's test executable
+ // "p [q.test]" -- variant of p as built for q's test executable
+ // "q_test [q.test]" -- q's external test package
+ //
+ // The packages p that are built differently for a test q.test
+ // are q itself, plus any helpers used by the external test q_test,
+ // typically including "testing" and all its dependencies.
+
+ // Run "go list" for complete
+ // information on the specified packages.
+ buf, err := invokeGo(cfg, golistargs(cfg, words)...)
+ if err != nil {
+ return nil, err
+ }
+ seen := make(map[string]*jsonPackage)
+ // Decode the JSON and convert it to Package form.
+ var response driverResponse
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+
+ if p.ImportPath == "" {
+ // The documentation for go list says that “[e]rroneous packages will have
+ // a non-empty ImportPath”. If for some reason it comes back empty, we
+ // prefer to error out rather than silently discarding data or handing
+ // back a package without any way to refer to it.
+ if p.Error != nil {
+ return nil, Error{
+ Pos: p.Error.Pos,
+ Msg: p.Error.Err,
+ }
+ }
+ return nil, fmt.Errorf("package missing import path: %+v", p)
+ }
+
+ if old, found := seen[p.ImportPath]; found {
+ if !reflect.DeepEqual(p, old) {
+ return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
+ }
+ // skip the duplicate
+ continue
+ }
+ seen[p.ImportPath] = p
+
+ pkg := &Package{
+ Name: p.Name,
+ ID: p.ImportPath,
+ GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
+ CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
+ OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ }
+
+ // Work around https://golang.org/issue/28749:
+ // cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
+ // Filter out any elements of CompiledGoFiles that are also in OtherFiles.
+ // We have to keep this workaround in place until go1.12 is a distant memory.
+ if len(pkg.OtherFiles) > 0 {
+ other := make(map[string]bool, len(pkg.OtherFiles))
+ for _, f := range pkg.OtherFiles {
+ other[f] = true
+ }
+
+ out := pkg.CompiledGoFiles[:0]
+ for _, f := range pkg.CompiledGoFiles {
+ if other[f] {
+ continue
+ }
+ out = append(out, f)
+ }
+ pkg.CompiledGoFiles = out
+ }
+
+ // Extract the PkgPath from the package's ID.
+ if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+ pkg.PkgPath = pkg.ID[:i]
+ } else {
+ pkg.PkgPath = pkg.ID
+ }
+
+ if pkg.PkgPath == "unsafe" {
+ pkg.GoFiles = nil // ignore fake unsafe.go file
+ }
+
+ // Assume go list emits only absolute paths for Dir.
+ if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+ log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+ }
+
+ if p.Export != "" && !filepath.IsAbs(p.Export) {
+ pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+ } else {
+ pkg.ExportFile = p.Export
+ }
+
+ // imports
+ //
+ // Imports contains the IDs of all imported packages.
+ // ImportsMap records (path, ID) only where they differ.
+ ids := make(map[string]bool)
+ for _, id := range p.Imports {
+ ids[id] = true
+ }
+ pkg.Imports = make(map[string]*Package)
+ for path, id := range p.ImportMap {
+ pkg.Imports[path] = &Package{ID: id} // non-identity import
+ delete(ids, id)
+ }
+ for id := range ids {
+ if id == "C" {
+ continue
+ }
+
+ pkg.Imports[id] = &Package{ID: id} // identity import
+ }
+ if !p.DepOnly {
+ response.Roots = append(response.Roots, pkg.ID)
+ }
+
+		// Workaround for pre-go1.11 versions of go list.
+ // TODO(matloob): they should be handled by the fallback.
+ // Can we delete this?
+ if len(pkg.CompiledGoFiles) == 0 {
+ pkg.CompiledGoFiles = pkg.GoFiles
+ }
+
+ if p.Error != nil {
+ pkg.Errors = append(pkg.Errors, Error{
+ Pos: p.Error.Pos,
+ Msg: p.Error.Err,
+ })
+ }
+
+ response.Packages = append(response.Packages, pkg)
+ }
+
+ return &response, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+ for _, files := range fileses {
+ for _, file := range files {
+ if !filepath.IsAbs(file) {
+ file = filepath.Join(dir, file)
+ }
+ res = append(res, file)
+ }
+ }
+ return res
+}
+
+func golistargs(cfg *Config, words []string) []string {
+ const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+ fullargs := []string{
+ "list", "-e", "-json",
+ fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0),
+ // go list doesn't let you pass -test and -find together,
+ // probably because you'd just get the TestMain.
+ fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
+ }
+ fullargs = append(fullargs, cfg.BuildFlags...)
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ return fullargs
+}
+
+// invokeGo returns the stdout of a go command invocation.
+func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
+ stdout := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, "go", args...)
+ // On darwin the cwd gets resolved to the real path, which breaks anything that
+ // expects the working directory to keep the original path, including the
+ // go command when dealing with modules.
+ // The Go stdlib has a special feature where if the cwd and the PWD are the
+ // same node then it trusts the PWD, so by setting it in the env for the child
+ // process we fix up all the paths returned by the go command.
+ cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
+ cmd.Dir = cfg.Dir
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ if debug {
+ defer func(start time.Time) {
+ log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
+ }(time.Now())
+ }
+
+ if err := cmd.Run(); err != nil {
+ // Check for 'go' executable not being found.
+ if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
+ }
+
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
+ }
+
+ // Old go version?
+ if strings.Contains(stderr.String(), "flag provided but not defined") {
+ return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
+ }
+
+		// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
+		// the error in the Err section of stdout in case -e option is provided.
+		// This workaround is kept for compatibility with go versions that predate that fix.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ // The same is true if an ad-hoc package given to go list doesn't exist.
+ // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
+ // packages don't exist or a build fails.
+ if !usesExportData(cfg) && !containsGoFile(args) {
+ return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
+ }
+ }
+
+ // As of writing, go list -export prints some non-fatal compilation
+ // errors to stderr, even with -e set. We would prefer that it put
+ // them in the Package.Error JSON (see https://golang.org/issue/26319).
+ // In the meantime, there's nowhere good to put them, but they can
+ // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
+ // is set.
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
+ }
+
+ // debugging
+ if false {
+ fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout)
+ }
+
+ return stdout, nil
+}
+
+func containsGoFile(s []string) bool {
+ for _, f := range s {
+ if strings.HasSuffix(f, ".go") {
+ return true
+ }
+ }
+ return false
+}
+
+func cmdDebugStr(cmd *exec.Cmd, args ...string) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+		split := strings.SplitN(kv, "=", 2)
+		k, v := split[0], split[1]
+ env[k] = v
+ }
+ var quotedArgs []string
+ for _, arg := range args {
+ quotedArgs = append(quotedArgs, strconv.Quote(arg))
+ }
+
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " "))
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
new file mode 100644
index 000000000..71ffcd9d5
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -0,0 +1,104 @@
+package packages
+
+import (
+ "go/parser"
+ "go/token"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// processGolistOverlay provides rudimentary support for adding
+// files to packages when those files exist only in the overlay,
+// not on disk. The results can sometimes be incorrect.
+// TODO(matloob): Handle unsupported cases, including the following:
+// - test files
+// - adding test and non-test files to test variants of packages
+// - determining the correct package to add given a new import path
+// - creating packages that don't exist
+func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) {
+ havePkgs := make(map[string]string) // importPath -> non-test package ID
+ needPkgsSet := make(map[string]bool)
+ modifiedPkgsSet := make(map[string]bool)
+
+ for _, pkg := range response.Packages {
+ // This is an approximation of import path to id. This can be
+ // wrong for tests, vendored packages, and a number of other cases.
+ havePkgs[pkg.PkgPath] = pkg.ID
+ }
+
+outer:
+ for path, contents := range cfg.Overlay {
+ base := filepath.Base(path)
+ if strings.HasSuffix(path, "_test.go") {
+ // Overlays don't support adding new test files yet.
+ // TODO(matloob): support adding new test files.
+ continue
+ }
+ dir := filepath.Dir(path)
+ for _, pkg := range response.Packages {
+ var dirContains, fileExists bool
+ for _, f := range pkg.GoFiles {
+ if sameFile(filepath.Dir(f), dir) {
+ dirContains = true
+ }
+ if filepath.Base(f) == base {
+ fileExists = true
+ }
+ }
+ if dirContains {
+ if !fileExists {
+ pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles?
+ pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path)
+ modifiedPkgsSet[pkg.ID] = true
+ }
+ imports, err := extractImports(path, contents)
+ if err != nil {
+ // Let the parser or type checker report errors later.
+ continue outer
+ }
+ for _, imp := range imports {
+ _, found := pkg.Imports[imp]
+ if !found {
+ needPkgsSet[imp] = true
+ // TODO(matloob): Handle cases when the following block isn't correct.
+ // These include imports of test variants, imports of vendored packages, etc.
+ id, ok := havePkgs[imp]
+ if !ok {
+ id = imp
+ }
+ pkg.Imports[imp] = &Package{ID: id}
+ }
+ }
+ continue outer
+ }
+ }
+ }
+
+ needPkgs = make([]string, 0, len(needPkgsSet))
+ for pkg := range needPkgsSet {
+ needPkgs = append(needPkgs, pkg)
+ }
+ modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
+ for pkg := range modifiedPkgsSet {
+ modifiedPkgs = append(modifiedPkgs, pkg)
+ }
+ return modifiedPkgs, needPkgs, err
+}
+
+func extractImports(filename string, contents []byte) ([]string, error) {
+ f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
+ if err != nil {
+ return nil, err
+ }
+ var res []string
+ for _, imp := range f.Imports {
+ quotedPath := imp.Path.Value
+ path, err := strconv.Unquote(quotedPath)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, path)
+ }
+ return res, nil
+}
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
new file mode 100644
index 000000000..4639fcddd
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -0,0 +1,1059 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "golang.org/x/tools/go/gcexportdata"
+)
+
+// A LoadMode specifies the amount of detail to return when loading.
+// Higher-numbered modes cause Load to return more information,
+// but may be slower. Load may return more information than requested.
+type LoadMode int
+
+const (
+ // The following constants are used to specify which fields of the Package
+ // should be filled when loading is done. As a special case to provide
+ // backwards compatibility, a LoadMode of 0 is equivalent to LoadFiles.
+ // For all other LoadModes, the bits below specify which fields will be filled
+ // in the result packages.
+ // WARNING: This part of the go/packages API is EXPERIMENTAL. It might
+ // be changed or removed up until April 15 2019. After that date it will
+ // be frozen.
+ // TODO(matloob): Remove this comment on April 15.
+
+ // ID and Errors (if present) will always be filled.
+
+ // NeedName adds Name and PkgPath.
+ NeedName LoadMode = 1 << iota
+
+ // NeedFiles adds GoFiles and OtherFiles.
+ NeedFiles
+
+ // NeedCompiledGoFiles adds CompiledGoFiles.
+ NeedCompiledGoFiles
+
+ // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
+ // "placeholder" Packages with only the ID set.
+ NeedImports
+
+ // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports
+ // is not set NeedDeps has no effect.
+ NeedDeps
+
+	// NeedExportsFile adds ExportFile.
+ NeedExportsFile
+
+ // NeedTypes adds Types, Fset, and IllTyped.
+ NeedTypes
+
+ // NeedSyntax adds Syntax.
+ NeedSyntax
+
+ // NeedTypesInfo adds TypesInfo.
+ NeedTypesInfo
+
+ // NeedTypesSizes adds TypesSizes.
+ NeedTypesSizes
+)
+
+const (
+ // LoadFiles finds the packages and computes their source file lists.
+ // Package fields: ID, Name, Errors, GoFiles, CompiledGoFiles, and OtherFiles.
+ LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
+
+ // LoadImports adds import information for each package
+ // and its dependencies.
+ // Package fields added: Imports.
+ LoadImports = LoadFiles | NeedImports | NeedDeps
+
+ // LoadTypes adds type information for package-level
+ // declarations in the packages matching the patterns.
+ // Package fields added: Types, TypesSizes, Fset, and IllTyped.
+ // This mode uses type information provided by the build system when
+ // possible, and may fill in the ExportFile field.
+ LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
+
+ // LoadSyntax adds typed syntax trees for the packages matching the patterns.
+ // Package fields added: Syntax, and TypesInfo, for direct pattern matches only.
+ LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
+
+ // LoadAllSyntax adds typed syntax trees for the packages matching the patterns
+ // and all dependencies.
+ // Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo,
+ // for all packages in the import graph.
+ LoadAllSyntax = LoadSyntax
+)
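+
+// As an illustrative sketch, a caller that needs only package names, file
+// lists, and the import graph can combine the Need bits directly instead
+// of using one of the predefined aliases above:
+//
+//	cfg := &Config{Mode: NeedName | NeedFiles | NeedImports}
+//	pkgs, err := Load(cfg, "./...")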
+
+// A Config specifies details about how packages should be loaded.
+// The zero value is a valid configuration.
+// Calls to Load do not modify this struct.
+type Config struct {
+ // Mode controls the level of information returned for each package.
+ Mode LoadMode
+
+ // Context specifies the context for the load operation.
+ // If the context is cancelled, the loader may stop early
+ // and return an ErrCancelled error.
+ // If Context is nil, the load cannot be cancelled.
+ Context context.Context
+
+ // Dir is the directory in which to run the build system's query tool
+ // that provides information about the packages.
+ // If Dir is empty, the tool is run in the current directory.
+ Dir string
+
+ // Env is the environment to use when invoking the build system's query tool.
+ // If Env is nil, the current environment is used.
+ // As in os/exec's Cmd, only the last value in the slice for
+ // each environment key is used. To specify the setting of only
+ // a few variables, append to the current environment, as in:
+ //
+ // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+ //
+ Env []string
+
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // Fset provides source position information for syntax trees and types.
+	// If Fset is nil, Load will use a new fileset; the Config itself is
+	// never modified.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader will use parser.ParseFile.
+ //
+ // ParseFile should parse the source from src and use filename only for
+ // recording position information.
+ //
+ // An application may supply a custom implementation of ParseFile
+ // to change the effective file contents or the behavior of the parser,
+ // or to modify the syntax tree. For example, selectively eliminating
+ // unwanted function bodies can significantly accelerate type checking.
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // Overlay provides a mapping of absolute file paths to file contents.
+ // If the file with the given path already exists, the parser will use the
+ // alternative file contents provided by the map.
+ //
+	// Support for overlay files that do not already exist on disk is
+	// incomplete; see the package documentation above for details.
+ Overlay map[string][]byte
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
+
+// driverResponse contains the results for a driver query.
+type driverResponse struct {
+ // Sizes, if not nil, is the types.Sizes to use when type checking.
+ Sizes *types.StdSizes
+
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+	// The Imports fields, if populated, will be stubs that only have
+	// their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*Package
+}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// Config specifies loading options;
+// nil behaves the same as an empty Config.
+//
+// Load returns an error if any of the patterns was invalid
+// as defined by the underlying build system.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The PrintErrors function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+ l := newLoader(cfg)
+ response, err := defaultDriver(&l.Config, patterns...)
+ if err != nil {
+ return nil, err
+ }
+ l.sizes = response.Sizes
+ return l.refine(response.Roots, response.Packages...)
+}
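+
+// A typical call sequence, sketched here assuming the PrintErrors helper
+// defined in this package's visit.go:
+//
+//	pkgs, err := Load(&Config{Mode: LoadSyntax}, "fmt")
+//	if err != nil {
+//		log.Fatal(err) // pattern or driver failure
+//	}
+//	if PrintErrors(pkgs) > 0 {
+//		os.Exit(1) // per-package errors were printed above
+//	}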
+
+// defaultDriver is a driver that looks for an external driver binary, and if
+// it does not find it falls back to the built in go list driver.
+func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ driver := findExternalDriver(cfg)
+ if driver == nil {
+ driver = goListDriver
+ }
+ return driver(cfg, patterns...)
+}
+
+// A Package describes a loaded Go package.
+type Package struct {
+ // ID is a unique identifier for a package,
+ // in a syntax provided by the underlying build system.
+ //
+ // Because the syntax varies based on the build system,
+ // clients should treat IDs as opaque and not attempt to
+ // interpret them.
+ ID string
+
+ // Name is the package name as it appears in the package source code.
+ Name string
+
+ // PkgPath is the package path as used by the go/types package.
+ PkgPath string
+
+ // Errors contains any errors encountered querying the metadata
+ // of the package, or while parsing or type-checking its files.
+ Errors []Error
+
+ // GoFiles lists the absolute file paths of the package's Go source files.
+ GoFiles []string
+
+ // CompiledGoFiles lists the absolute file paths of the package's source
+ // files that were presented to the compiler.
+ // This may differ from GoFiles if files are processed before compilation.
+ CompiledGoFiles []string
+
+ // OtherFiles lists the absolute file paths of the package's non-Go source files,
+ // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
+ OtherFiles []string
+
+ // ExportFile is the absolute path to a file containing type
+ // information for the package as provided by the build system.
+ ExportFile string
+
+ // Imports maps import paths appearing in the package's Go source files
+ // to corresponding loaded Packages.
+ Imports map[string]*Package
+
+ // Types provides type information for the package.
+ // Modes LoadTypes and above set this field for packages matching the
+ // patterns; type information for dependencies may be missing or incomplete.
+ // Mode LoadAllSyntax sets this field for all packages, including dependencies.
+ Types *types.Package
+
+ // Fset provides position information for Types, TypesInfo, and Syntax.
+ // It is set only when Types is set.
+ Fset *token.FileSet
+
+ // IllTyped indicates whether the package or any dependency contains errors.
+ // It is set only when Types is set.
+ IllTyped bool
+
+ // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
+ //
+ // Mode LoadSyntax sets this field for packages matching the patterns.
+ // Mode LoadAllSyntax sets this field for all packages, including dependencies.
+ Syntax []*ast.File
+
+ // TypesInfo provides type information about the package's syntax trees.
+ // It is set only when Syntax is set.
+ TypesInfo *types.Info
+
+ // TypesSizes provides the effective size function for types in TypesInfo.
+ TypesSizes types.Sizes
+}
+
+// An Error describes a problem with a package's metadata, syntax, or types.
+type Error struct {
+ Pos string // "file:line:col" or "file:line" or "" or "-"
+ Msg string
+ Kind ErrorKind
+}
+
+// ErrorKind describes the source of the error, allowing the user to
+// differentiate between errors generated by the driver, the parser, or the
+// type-checker.
+type ErrorKind int
+
+const (
+ UnknownError ErrorKind = iota
+ ListError
+ ParseError
+ TypeError
+)
+
+func (err Error) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
+// flatPackage is the JSON form of Package.
+// It drops all the type and syntax fields, and transforms the Imports
+// into a map of import path to package ID.
+//
+// TODO(adonovan): identify this struct with Package, effectively
+// publishing the JSON protocol.
+type flatPackage struct {
+ ID string
+ Name string `json:",omitempty"`
+ PkgPath string `json:",omitempty"`
+ Errors []Error `json:",omitempty"`
+ GoFiles []string `json:",omitempty"`
+ CompiledGoFiles []string `json:",omitempty"`
+ OtherFiles []string `json:",omitempty"`
+ ExportFile string `json:",omitempty"`
+ Imports map[string]string `json:",omitempty"`
+}
+
+// MarshalJSON returns the Package in its JSON form.
+// For the most part, the structure fields are written out unmodified, and
+// the type and syntax fields are skipped.
+// The imports are written out as just a map of path to package id.
+// The errors are written using a custom type that tries to preserve the
+// structure of error types we know about.
+//
+// This method exists to enable support for additional build systems. It is
+// not intended for use by clients of the API and we may change the format.
+func (p *Package) MarshalJSON() ([]byte, error) {
+ flat := &flatPackage{
+ ID: p.ID,
+ Name: p.Name,
+ PkgPath: p.PkgPath,
+ Errors: p.Errors,
+ GoFiles: p.GoFiles,
+ CompiledGoFiles: p.CompiledGoFiles,
+ OtherFiles: p.OtherFiles,
+ ExportFile: p.ExportFile,
+ }
+ if len(p.Imports) > 0 {
+ flat.Imports = make(map[string]string, len(p.Imports))
+ for path, ipkg := range p.Imports {
+ flat.Imports[path] = ipkg.ID
+ }
+ }
+ return json.Marshal(flat)
+}
+
+// UnmarshalJSON reads in a Package from its JSON format.
+// See MarshalJSON for details about the format accepted.
+func (p *Package) UnmarshalJSON(b []byte) error {
+ flat := &flatPackage{}
+ if err := json.Unmarshal(b, &flat); err != nil {
+ return err
+ }
+ *p = Package{
+ ID: flat.ID,
+ Name: flat.Name,
+ PkgPath: flat.PkgPath,
+ Errors: flat.Errors,
+ GoFiles: flat.GoFiles,
+ CompiledGoFiles: flat.CompiledGoFiles,
+ OtherFiles: flat.OtherFiles,
+ ExportFile: flat.ExportFile,
+ }
+ if len(flat.Imports) > 0 {
+ p.Imports = make(map[string]*Package, len(flat.Imports))
+ for path, id := range flat.Imports {
+ p.Imports[path] = &Package{ID: id}
+ }
+ }
+ return nil
+}
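+
+// Round-trip sketch: marshalling a Package and unmarshalling it again
+// preserves ID, name, file lists, and the import-path-to-ID mapping, but
+// deliberately drops Types, Syntax, and TypesInfo:
+//
+//	b, _ := json.Marshal(pkg)  // uses MarshalJSON above
+//	var q Package
+//	_ = json.Unmarshal(b, &q)  // q.Imports values are ID-only stubs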
+
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase
+type loaderPackage struct {
+ *Package
+ importErrors map[string]error // maps each bad import to its error
+ loadOnce sync.Once
+ color uint8 // for cycle detection
+ needsrc bool // load from source (Mode >= LoadTypes)
+ needtypes bool // type information is either requested or depended on
+ initial bool // package was matched by a pattern
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+ pkgs map[string]*loaderPackage
+ Config
+ sizes types.Sizes
+ exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
+
+ // TODO(matloob): Add an implied mode here and use that instead of mode.
+ // Implied mode would contain all the fields we need the data for so we can
+ // get the actually requested fields. We'll zero them out before returning
+ // packages to the user. This will make it easier for us to get the conditions
+ // where we need certain modes right.
+}
+
+func newLoader(cfg *Config) *loader {
+ ld := &loader{}
+ if cfg != nil {
+ ld.Config = *cfg
+ }
+ if ld.Config.Mode == 0 {
+ ld.Config.Mode = LoadFiles // Preserve zero behavior of Mode for backwards compatibility.
+ }
+ if ld.Config.Env == nil {
+ ld.Config.Env = os.Environ()
+ }
+ if ld.Context == nil {
+ ld.Context = context.Background()
+ }
+ if ld.Dir == "" {
+ if dir, err := os.Getwd(); err == nil {
+ ld.Dir = dir
+ }
+ }
+
+ if ld.Mode&NeedTypes != 0 {
+ if ld.Fset == nil {
+ ld.Fset = token.NewFileSet()
+ }
+
+ // ParseFile is required even in LoadTypes mode
+ // because we load source if export data is missing.
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ var isrc interface{}
+ if src != nil {
+ isrc = src
+ }
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, isrc, mode)
+ }
+ }
+ }
+ return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+ rootMap := make(map[string]int, len(roots))
+ for i, root := range roots {
+ rootMap[root] = i
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // first pass, fixup and build the map and roots
+ var initial = make([]*loaderPackage, len(roots))
+ for _, pkg := range list {
+ rootIndex := -1
+ if i, found := rootMap[pkg.ID]; found {
+ rootIndex = i
+ }
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0,
+ needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 ||
+ len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+ pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if rootIndex >= 0 {
+ initial[rootIndex] = lpkg
+ lpkg.initial = true
+ }
+ }
+ for i, root := range roots {
+ if initial[i] == nil {
+ return nil, fmt.Errorf("root package %v is missing", root)
+ }
+ }
+
+ // Materialize the import graph.
+
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+ // and materializes the graph as Packages.Imports.
+ //
+ // Valid imports are saved in the Packages.Import map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does. These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(lpkg *loaderPackage) bool
+ var srcPkgs []*loaderPackage
+ visit = func(lpkg *loaderPackage) bool {
+ switch lpkg.color {
+ case black:
+ return lpkg.needsrc
+ case grey:
+ panic("internal error: grey node")
+ }
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+		stubs := lpkg.Imports // the driver response contains only ID stubs in Imports
+ lpkg.Imports = make(map[string]*Package, len(stubs))
+ for importPath, ipkg := range stubs {
+ var importErr error
+ imp := ld.pkgs[ipkg.ID]
+ if imp == nil {
+ // (includes package "C" when DisableCgo)
+ importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+ } else if imp.color == grey {
+ importErr = fmt.Errorf("import cycle: %s", stack)
+ }
+ if importErr != nil {
+ if lpkg.importErrors == nil {
+ lpkg.importErrors = make(map[string]error)
+ }
+ lpkg.importErrors[importPath] = importErr
+ continue
+ }
+
+ if visit(imp) {
+ lpkg.needsrc = true
+ }
+ lpkg.Imports[importPath] = imp.Package
+ }
+ if lpkg.needsrc {
+ srcPkgs = append(srcPkgs, lpkg)
+ }
+ if ld.Mode&NeedTypesSizes != 0 {
+ lpkg.TypesSizes = ld.sizes
+ }
+ stack = stack[:len(stack)-1] // pop
+ lpkg.color = black
+
+ return lpkg.needsrc
+ }
+
+ if ld.Mode&(NeedImports|NeedDeps) == 0 {
+ // We do this to drop the stub import packages that we are not even going to try to resolve.
+ for _, lpkg := range initial {
+ lpkg.Imports = nil
+ }
+ } else {
+ // For each initial package, create its import DAG.
+ for _, lpkg := range initial {
+ visit(lpkg)
+ }
+ }
+ if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right?
+ for _, lpkg := range srcPkgs {
+ // Complete type information is required for the
+ // immediate dependencies of each source package.
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ imp.needtypes = true
+ }
+ }
+ }
+ // Load type data if needed, starting at
+ // the initial packages (roots of the import DAG).
+ if ld.Mode&NeedTypes != 0 {
+ var wg sync.WaitGroup
+ for _, lpkg := range initial {
+ wg.Add(1)
+ go func(lpkg *loaderPackage) {
+ ld.loadRecursive(lpkg)
+ wg.Done()
+ }(lpkg)
+ }
+ wg.Wait()
+ }
+
+ result := make([]*Package, len(initial))
+ importPlaceholders := make(map[string]*Package)
+ for i, lpkg := range initial {
+ result[i] = lpkg.Package
+ }
+ for i := range ld.pkgs {
+ // Clear all unrequested fields, for extra de-Hyrum-ization.
+ if ld.Mode&NeedName == 0 {
+ ld.pkgs[i].Name = ""
+ ld.pkgs[i].PkgPath = ""
+ }
+ if ld.Mode&NeedFiles == 0 {
+ ld.pkgs[i].GoFiles = nil
+ ld.pkgs[i].OtherFiles = nil
+ }
+ if ld.Mode&NeedCompiledGoFiles == 0 {
+ ld.pkgs[i].CompiledGoFiles = nil
+ }
+ if ld.Mode&NeedImports == 0 {
+ ld.pkgs[i].Imports = nil
+ }
+ if ld.Mode&NeedExportsFile == 0 {
+ ld.pkgs[i].ExportFile = ""
+ }
+ if ld.Mode&NeedTypes == 0 {
+ ld.pkgs[i].Types = nil
+ ld.pkgs[i].Fset = nil
+ ld.pkgs[i].IllTyped = false
+ }
+ if ld.Mode&NeedSyntax == 0 {
+ ld.pkgs[i].Syntax = nil
+ }
+ if ld.Mode&NeedTypesInfo == 0 {
+ ld.pkgs[i].TypesInfo = nil
+ }
+ if ld.Mode&NeedTypesSizes == 0 {
+ ld.pkgs[i].TypesSizes = nil
+ }
+ if ld.Mode&NeedDeps == 0 {
+ for j, pkg := range ld.pkgs[i].Imports {
+ ph, ok := importPlaceholders[pkg.ID]
+ if !ok {
+ ph = &Package{ID: pkg.ID}
+ importPlaceholders[pkg.ID] = ph
+ }
+ ld.pkgs[i].Imports[j] = ph
+ }
+ }
+ }
+ return result, nil
+}
+
+// loadRecursive loads the specified package and its dependencies,
+// recursively, in parallel, in topological order.
+// It is atomic and idempotent.
+// Precondition: ld.Mode&NeedTypes.
+func (ld *loader) loadRecursive(lpkg *loaderPackage) {
+ lpkg.loadOnce.Do(func() {
+ // Load the direct dependencies, in parallel.
+ var wg sync.WaitGroup
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ wg.Add(1)
+ go func(imp *loaderPackage) {
+ ld.loadRecursive(imp)
+ wg.Done()
+ }(imp)
+ }
+ wg.Wait()
+
+ ld.loadPackage(lpkg)
+ })
+}
+
+// loadPackage loads the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode >= LoadTypes.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+ if lpkg.PkgPath == "unsafe" {
+ // Fill in the blanks to avoid surprises.
+ lpkg.Types = types.Unsafe
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = []*ast.File{}
+ lpkg.TypesInfo = new(types.Info)
+ lpkg.TypesSizes = ld.sizes
+ return
+ }
+
+ // Call NewPackage directly with explicit name.
+ // This avoids skew between golist and go/types when the files'
+ // package declarations are inconsistent.
+ lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
+ lpkg.Fset = ld.Fset
+
+ // Subtle: we populate all Types fields with an empty Package
+ // before loading export data so that export data processing
+ // never has to create a types.Package for an indirect dependency,
+ // which would then require that such created packages be explicitly
+ // inserted back into the Import graph as a final step after export data loading.
+ // The Diamond test exercises this case.
+ if !lpkg.needtypes {
+ return
+ }
+ if !lpkg.needsrc {
+ ld.loadFromExportData(lpkg)
+ return // not a source package, don't get syntax trees
+ }
+
+ appendError := func(err error) {
+ // Convert various error types into the one true Error.
+ var errs []Error
+ switch err := err.(type) {
+ case Error:
+ // from driver
+ errs = append(errs, err)
+
+ case *os.PathError:
+ // from parser
+ errs = append(errs, Error{
+ Pos: err.Path + ":1",
+ Msg: err.Err.Error(),
+ Kind: ParseError,
+ })
+
+ case scanner.ErrorList:
+ // from parser
+ for _, err := range err {
+ errs = append(errs, Error{
+ Pos: err.Pos.String(),
+ Msg: err.Msg,
+ Kind: ParseError,
+ })
+ }
+
+ case types.Error:
+ // from type checker
+ errs = append(errs, Error{
+ Pos: err.Fset.Position(err.Pos).String(),
+ Msg: err.Msg,
+ Kind: TypeError,
+ })
+
+ default:
+ // unexpected impoverished error from parser?
+ errs = append(errs, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError,
+ })
+
+ // If you see this error message, please file a bug.
+ log.Printf("internal error: error %q (%T) without position", err, err)
+ }
+
+ lpkg.Errors = append(lpkg.Errors, errs...)
+ }
+
+ files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
+ for _, err := range errs {
+ appendError(err)
+ }
+
+ lpkg.Syntax = files
+
+ lpkg.TypesInfo = &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ lpkg.TypesSizes = ld.sizes
+
+ importer := importerFunc(func(path string) (*types.Package, error) {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // The imports map is keyed by import path.
+ ipkg := lpkg.Imports[path]
+ if ipkg == nil {
+ if err := lpkg.importErrors[path]; err != nil {
+ return nil, err
+ }
+ // There was skew between the metadata and the
+ // import declarations, likely due to an edit
+ // race, or because the ParseFile feature was
+ // used to supply alternative file contents.
+ return nil, fmt.Errorf("no metadata for %s", path)
+ }
+
+ if ipkg.Types != nil && ipkg.Types.Complete() {
+ return ipkg.Types, nil
+ }
+ log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
+ panic("unreachable")
+ })
+
+ // type-check
+ tc := &types.Config{
+ Importer: importer,
+
+ // Type-check bodies of functions only in non-initial packages.
+ // Example: for import graph A->B->C and initial packages {A,C},
+ // we can ignore function bodies in B.
+ IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial,
+
+ Error: appendError,
+ Sizes: ld.sizes,
+ }
+ types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+
+ lpkg.importErrors = nil // no longer needed
+
+ // If !Cgo, the type-checker uses FakeImportC mode, so
+ // it doesn't invoke the importer for import "C",
+ // nor report an error for the import,
+ // or for any undefined C.f reference.
+ // We must detect this explicitly and correctly
+ // mark the package as IllTyped (by reporting an error).
+ // TODO(adonovan): if these errors are annoying,
+ // we could just set IllTyped quietly.
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+ appendError(err)
+ break outer
+ }
+ }
+ }
+ }
+
+ // Record accumulated errors.
+ illTyped := len(lpkg.Errors) > 0
+ if !illTyped {
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ illTyped = true
+ break
+ }
+ }
+ }
+ lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
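+
+// A minimal sketch of the adapter pattern used above:
+//
+//	imp := importerFunc(func(path string) (*types.Package, error) {
+//		return nil, fmt.Errorf("import %q not allowed", path)
+//	})
+//	var _ types.Importer = imp // importerFunc satisfies types.Importer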
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+//
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var wg sync.WaitGroup
+ n := len(filenames)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range filenames {
+ if ld.Config.Context.Err() != nil {
+ parsed[i] = nil
+ errors[i] = ld.Config.Context.Err()
+ continue
+ }
+ wg.Add(1)
+ go func(i int, filename string) {
+ ioLimit <- true // wait
+ // ParseFile may return both an AST and an error.
+ var src []byte
+ for f, contents := range ld.Config.Overlay {
+ if sameFile(f, filename) {
+ src = contents
+ }
+ }
+ var err error
+ if src == nil {
+ src, err = ioutil.ReadFile(filename)
+ }
+ if err != nil {
+ parsed[i], errors[i] = nil, err
+ } else {
+ parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src)
+ }
+ <-ioLimit // signal
+ wg.Done()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+//
+func sameFile(x, y string) bool {
+ if x == y {
+ // It could be the case that y doesn't exist.
+ // For instance, it may be an overlay file that
+ // hasn't been written to disk. To handle that case
+ // let x == y through. (We added the exact absolute path
+ // string to the CompiledGoFiles list, so the unwritten
+ // overlay case implies x==y.)
+ return true
+ }
+ if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
+
+// loadFromExportData returns type information for the specified
+// package, loading it from an export data file on the first request.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
+ if lpkg.PkgPath == "" {
+ log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+ }
+
+ // Because gcexportdata.Read has the potential to create or
+ // modify the types.Package for each node in the transitive
+ // closure of dependencies of lpkg, all exportdata operations
+ // must be sequential. (Finer-grained locking would require
+ // changes to the gcexportdata API.)
+ //
+ // The exportMu lock guards the Package.Pkg field and the
+ // types.Package it points to, for each Package in the graph.
+ //
+ // Not all accesses to Package.Pkg need to be protected by exportMu:
+ // graph ordering ensures that direct dependencies of source
+ // packages are fully loaded before the importer reads their Pkg field.
+ ld.exportMu.Lock()
+ defer ld.exportMu.Unlock()
+
+ if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
+ return tpkg, nil // cache hit
+ }
+
+ lpkg.IllTyped = true // fail safe
+
+ if lpkg.ExportFile == "" {
+ // Errors while building export data will have been printed to stderr.
+ return nil, fmt.Errorf("no export data file")
+ }
+ f, err := os.Open(lpkg.ExportFile)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ // Read gc export data.
+ //
+ // We don't currently support gccgo export data because all
+ // underlying workspaces use the gc toolchain. (Even build
+ // systems that support gccgo don't use it for workspace
+ // queries.)
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+
+ // Build the view.
+ //
+ // The gcexportdata machinery has no concept of package ID.
+ // It identifies packages by their PkgPath, which although not
+ // globally unique is unique within the scope of one invocation
+ // of the linker, type-checker, or gcexportdata.
+ //
+ // So, we must build a PkgPath-keyed view of the global
+ // (conceptually ID-keyed) cache of packages and pass it to
+ // gcexportdata. The view must contain every existing
+ // package that might possibly be mentioned by the
+ // current package---its transitive closure.
+ //
+ // In loadPackage, we unconditionally create a types.Package for
+ // each dependency so that export data loading does not
+ // create new ones.
+ //
+ // TODO(adonovan): it would be simpler and more efficient
+ // if the export data machinery invoked a callback to
+ // get-or-create a package instead of a map.
+ //
+ view := make(map[string]*types.Package) // view seen by gcexportdata
+ seen := make(map[*loaderPackage]bool) // all visited packages
+ var visit func(pkgs map[string]*Package)
+ visit = func(pkgs map[string]*Package) {
+ for _, p := range pkgs {
+ lpkg := ld.pkgs[p.ID]
+ if !seen[lpkg] {
+ seen[lpkg] = true
+ view[lpkg.PkgPath] = lpkg.Types
+ visit(lpkg.Imports)
+ }
+ }
+ }
+ visit(lpkg.Imports)
+
+ viewLen := len(view) + 1 // adding the self package
+ // Parse the export data.
+ // (May modify incomplete packages in view but not create new ones.)
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+ if viewLen != len(view) {
+ log.Fatalf("Unexpected package creation during export data loading")
+ }
+
+ lpkg.Types = tpkg
+ lpkg.IllTyped = false
+
+ return tpkg, nil
+}
+
+func usesExportData(cfg *Config) bool {
+ return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0
+}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 000000000..b13cb081f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,55 @@
+package packages
+
+import (
+ "fmt"
+ "os"
+ "sort"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package)
+ visit = func(pkg *Package) {
+ if !seen[pkg] {
+ seen[pkg] = true
+
+ if pre == nil || pre(pkg) {
+ paths := make([]string, 0, len(pkg.Imports))
+ for path := range pkg.Imports {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths) // Imports is a map; sort for a deterministic visit order
+ for _, path := range paths {
+ visit(pkg.Imports[path])
+ }
+ }
+
+ if post != nil {
+ post(pkg)
+ }
+ }
+ }
+ for _, pkg := range pkgs {
+ visit(pkg)
+ }
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+ var n int
+ Visit(pkgs, nil, func(pkg *Package) {
+ for _, err := range pkg.Errors {
+ fmt.Fprintln(os.Stderr, err)
+ n++
+ }
+ })
+ return n
+}
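+
+// A minimal usage sketch (the patterns and mode bits below are
+// assumptions, not requirements of this API):
+//
+//	cfg := &packages.Config{Mode: packages.NeedSyntax | packages.NeedTypes}
+//	pkgs, err := packages.Load(cfg, "fmt", "net/http")
+//	if err != nil {
+//		log.Fatal(err) // the load itself failed
+//	}
+//	if packages.PrintErrors(pkgs) > 0 {
+//		os.Exit(1) // per-package errors were printed, dependencies first
+//	}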
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
new file mode 100644
index 000000000..38f596daf
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ var obj types.Object
+ switch fun := astutil.Unparen(call.Fun).(type) {
+ case *ast.Ident:
+ obj = info.Uses[fun] // type, var, builtin, or declared func
+ case *ast.SelectorExpr:
+ if sel, ok := info.Selections[fun]; ok {
+ obj = sel.Obj() // method or field
+ } else {
+ obj = info.Uses[fun.Sel] // qualified identifier?
+ }
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return nil // T(x) is a conversion, not a call
+ }
+ return obj
+}
+
+// StaticCallee returns the target (function or method) of a static
+// function call, if any. It returns nil for calls to builtins.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+ if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
+ return f
+ }
+ return nil
+}
+
+func interfaceMethod(f *types.Func) bool {
+ recv := f.Type().(*types.Signature).Recv()
+ return recv != nil && types.IsInterface(recv.Type())
+}
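+
+// A minimal sketch of typical use from an analysis pass, assuming the
+// info (*types.Info) and call (*ast.CallExpr) values come from context:
+//
+//	if fn := typeutil.StaticCallee(info, call); fn != nil {
+//		fmt.Println("static call to", fn.FullName())
+//	}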
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
new file mode 100644
index 000000000..9c441dba9
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+//
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
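+
+// For example, if package P imports Q (and nothing else), then
+// Dependencies(P) yields Q's dependencies, then Q, then P, so a loop
+// over the result visits every package after all of its imports:
+//
+//	for _, p := range typeutil.Dependencies(pkg) {
+//		fmt.Println(p.Path())
+//	}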
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
new file mode 100644
index 000000000..c7f754500
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -0,0 +1,313 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as Map,
+// a mapping from types.Type to interface{} values.
+package typeutil // import "golang.org/x/tools/go/types/typeutil"
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "reflect"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary interface{} values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Not thread-safe.
+//
+type Map struct {
+ hasher Hasher // shared by many Maps
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value interface{}
+}
+
+// SetHasher sets the hasher used by Map.
+//
+// All Hashers are functionally equivalent but contain internal state
+// used to cache the results of hashing previously seen types.
+//
+// A single Hasher created by MakeHasher() may be shared among many
+// Maps. This is recommended if the instances have many keys in
+// common, as it will amortize the cost of hash computation.
+//
+// A Hasher may grow without bound as new types are seen. Even when a
+// type is deleted from the map, the Hasher never shrinks, since other
+// types in the map may reference the deleted type indirectly.
+//
+// Hashers are not thread-safe, and even read-only operations such as
+// Map.At update the hasher's memo, so a full Mutex lock (not a
+// read-lock) is required around all Map operations if a shared
+// hasher is accessed from multiple threads.
+//
+// If SetHasher is not called, the Map will create a private hasher at
+// the first call to Insert.
+//
+func (m *Map) SetHasher(hasher Hasher) {
+ m.hasher = hasher
+}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+//
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && types.Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+//
+func (m *Map) At(key types.Type) interface{} {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[m.hasher.Hash(key)] {
+ if e.key != nil && types.Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
+ if m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if types.Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ if m.hasher.memo == nil {
+ m.hasher = MakeHasher()
+ }
+ hash := m.hasher.Hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+//
+func (m *Map) Iterate(f func(key types.Type, value interface{})) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ interface{}) {
+ keys = append(keys, key)
+ })
+ return keys
+}
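+
+// A minimal usage sketch:
+//
+//	var m typeutil.Map // the zero value is ready to use
+//	m.Set(types.Typ[types.Int], "an int")
+//	m.Set(types.NewSlice(types.Typ[types.Byte]), "a byte slice")
+//	fmt.Println(m.At(types.Typ[types.Int])) // "an int"
+//	fmt.Println(m.Len())                    // 2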
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value interface{}) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+//
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+//
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// A Hasher maps each type to its hash value.
+// For efficiency, a hasher uses memoization; thus its memory
+// footprint grows monotonically over time.
+// Hashers are not thread-safe.
+// Hashers have reference semantics.
+// Call MakeHasher to create a Hasher.
+type Hasher struct {
+ memo map[types.Type]uint32
+}
+
+// MakeHasher returns a new Hasher instance.
+func MakeHasher() Hasher {
+ return Hasher{make(map[types.Type]uint32)}
+}
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ hash, ok := h.memo[t]
+ if !ok {
+ hash = h.hashFor(t)
+ h.memo[t] = hash
+ }
+ return hash
+}
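+
+// Sharing one Hasher amortizes hashing across Maps with keys in common
+// (a sketch; a shared Hasher needs external locking, as noted above):
+//
+//	h := typeutil.MakeHasher()
+//	var m1, m2 typeutil.Map
+//	m1.SetHasher(h)
+//	m2.SetHasher(h)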
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hashFor computes the hash of t.
+func (h Hasher) hashFor(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.Hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.Hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.Hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Interface:
+ var hash uint32 = 9103
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // See go/types.identicalMethods for rationale.
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+ }
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+
+ case *types.Named:
+ // Not safe with a copying GC; objects may move.
+ return uint32(reflect.ValueOf(t.Obj()).Pointer())
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+ panic(t)
+}
+
+func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ var hash uint32 = 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 3 * h.Hash(tuple.At(i).Type())
+ }
+ return hash
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 000000000..32084610f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+ "go/types"
+ "sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+ mu sync.Mutex
+ named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+ others map[types.Type]*types.MethodSet // all other types
+}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+//
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+ if cache == nil {
+ return types.NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := T.(type) {
+ case *types.Named:
+ return cache.lookupNamed(T).value
+
+ case *types.Pointer:
+ if N, ok := T.Elem().(*types.Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = types.NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[types.Type]*types.MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
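+
+// A minimal usage sketch, assuming a types.Type value T from context:
+//
+//	var cache typeutil.MethodSetCache // the zero value is ready to use
+//	mset := cache.MethodSet(T)        // computed once, then served from cache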
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = types.NewMethodSet(named)
+ msets.pointer = types.NewMethodSet(types.NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
new file mode 100644
index 000000000..9849c24ce
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -0,0 +1,52 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import "go/types"
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+//
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+ isPointerToConcrete := func(T types.Type) bool {
+ ptr, ok := T.(*types.Pointer)
+ return ok && !types.IsInterface(ptr.Elem())
+ }
+
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if types.IsInterface(T) || isPointerToConcrete(T) {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ // T is some other concrete type.
+ // Report methods of T and *T, preferring those of T.
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+ }
+ return result
+}
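+
+// For example, given `type T struct{}` with methods `func (T) V()` and
+// `func (*T) P()`, types.NewMethodSet on T contains only V, while
+// IntuitiveMethodSet reports both (tT below stands for the types.Type of T):
+//
+//	for _, sel := range typeutil.IntuitiveMethodSet(tT, nil) {
+//		fmt.Println(sel.Obj().Name()) // "P", then "V"
+//	}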
diff --git a/vendor/golang.org/x/tools/go/vcs/discovery.go b/vendor/golang.org/x/tools/go/vcs/discovery.go
new file mode 100644
index 000000000..f431dc1c5
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/vcs/discovery.go
@@ -0,0 +1,76 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error which is printed by go get, so the user can find why the package
+// wasn't downloaded if the encoding is not supported. Note that, in
+// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
+// greater than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+ switch strings.ToLower(charset) {
+ case "ascii":
+ return input, nil
+ default:
+ return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+ }
+}
+
+// parseMetaGoImports returns meta imports from the HTML in r.
+// Parsing ends at the end of the <head> section or the beginning of the <body>.
+func parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {
+ d := xml.NewDecoder(r)
+ d.CharsetReader = charsetReader
+ d.Strict = false
+ var t xml.Token
+ for {
+ t, err = d.Token()
+ if err != nil {
+ if err == io.EOF || len(imports) > 0 {
+ err = nil
+ }
+ return
+ }
+ if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+ return
+ }
+ if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+ return
+ }
+ e, ok := t.(xml.StartElement)
+ if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+ continue
+ }
+ if attrValue(e.Attr, "name") != "go-import" {
+ continue
+ }
+ if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
+ imports = append(imports, metaImport{
+ Prefix: f[0],
+ VCS: f[1],
+ RepoRoot: f[2],
+ })
+ }
+ }
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// `name', or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+ for _, a := range attrs {
+ if strings.EqualFold(a.Name.Local, name) {
+ return a.Value
+ }
+ }
+ return ""
+}
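+
+// For example, an HTML page whose <head> contains
+//
+//	<meta name="go-import" content="example.org/pkg git https://git.example.org/pkg">
+//
+// yields one metaImport{Prefix: "example.org/pkg", VCS: "git",
+// RepoRoot: "https://git.example.org/pkg"} (the host name is illustrative).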
diff --git a/vendor/golang.org/x/tools/go/vcs/env.go b/vendor/golang.org/x/tools/go/vcs/env.go
new file mode 100644
index 000000000..e846f5b3b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/vcs/env.go
@@ -0,0 +1,39 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "os"
+ "strings"
+)
+
+// envForDir returns a copy of the environment
+// suitable for running in the given directory.
+// The environment is the current process's environment
+// but with an updated $PWD, so that an os.Getwd in the
+// child will be faster.
+func envForDir(dir string) []string {
+ env := os.Environ()
+ // Internally we only use rooted paths, so dir is rooted.
+ // Even if dir is not rooted, no harm done.
+ return mergeEnvLists([]string{"PWD=" + dir}, env)
+}
+
+// mergeEnvLists merges the two environment lists such that
+// variables with the same name in "in" replace those in "out".
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+ for _, inkv := range in {
+ k := strings.SplitAfterN(inkv, "=", 2)[0]
+ for i, outkv := range out {
+ if strings.HasPrefix(outkv, k) {
+ out[i] = inkv
+ continue NextVar
+ }
+ }
+ out = append(out, inkv)
+ }
+ return out
+}
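+
+// For example,
+//
+//	mergeEnvLists([]string{"PWD=/tmp/x"}, []string{"HOME=/home/u", "PWD=/old"})
+//
+// returns []string{"HOME=/home/u", "PWD=/tmp/x"}: the PWD entry from
+// "in" replaces the one already present in "out".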
diff --git a/vendor/golang.org/x/tools/go/vcs/http.go b/vendor/golang.org/x/tools/go/vcs/http.go
new file mode 100644
index 000000000..96188185c
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/vcs/http.go
@@ -0,0 +1,80 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+// httpClient is the default HTTP client, but a variable so it can be
+// changed by tests, without modifying http.DefaultClient.
+var httpClient = http.DefaultClient
+
+// httpGET returns the data from an HTTP GET request for the given URL.
+func httpGET(url string) ([]byte, error) {
+ resp, err := httpClient.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("%s: %s", url, resp.Status)
+ }
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", url, err)
+ }
+ return b, nil
+}
+
+// httpsOrHTTP returns the body of either the importPath's
+// https resource or, if unavailable, the http resource.
+func httpsOrHTTP(importPath string) (urlStr string, body io.ReadCloser, err error) {
+ fetch := func(scheme string) (urlStr string, res *http.Response, err error) {
+ u, err := url.Parse(scheme + "://" + importPath)
+ if err != nil {
+ return "", nil, err
+ }
+ u.RawQuery = "go-get=1"
+ urlStr = u.String()
+ if Verbose {
+ log.Printf("Fetching %s", urlStr)
+ }
+ res, err = httpClient.Get(urlStr)
+ return
+ }
+ closeBody := func(res *http.Response) {
+ if res != nil {
+ res.Body.Close()
+ }
+ }
+ urlStr, res, err := fetch("https")
+ if err != nil || res.StatusCode != 200 {
+ if Verbose {
+ if err != nil {
+ log.Printf("https fetch failed.")
+ } else {
+ log.Printf("ignoring https fetch with status code %d", res.StatusCode)
+ }
+ }
+ closeBody(res)
+ urlStr, res, err = fetch("http")
+ }
+ if err != nil {
+ closeBody(res)
+ return "", nil, err
+ }
+ // Note: accepting a non-200 OK here, so people can serve a
+ // meta import in their http 404 page.
+ if Verbose {
+ log.Printf("Parsing meta tags from %s (status code %d)", urlStr, res.StatusCode)
+ }
+ return urlStr, res.Body, nil
+}
diff --git a/vendor/golang.org/x/tools/go/vcs/vcs.go b/vendor/golang.org/x/tools/go/vcs/vcs.go
new file mode 100644
index 000000000..6e58ac749
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/vcs/vcs.go
@@ -0,0 +1,759 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vcs exposes functions for resolving import paths
+// and using version control systems, which can be used to
+// implement behavior similar to the standard "go get" command.
+//
+// This package is a copy of internal code in package cmd/go/internal/get,
+// modified to make the identifiers exported. It's provided here
+// for developers who want to write tools with similar semantics.
+// It needs to be manually kept in sync with upstream when changes are
+// made to cmd/go/internal/get; see https://golang.org/issue/11490.
+//
+package vcs // import "golang.org/x/tools/go/vcs"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Verbose enables verbose operation logging.
+var Verbose bool
+
+// ShowCmd controls whether VCS commands are printed.
+var ShowCmd bool
+
+// A Cmd describes how to use a version control system
+// like Mercurial, Git, or Subversion.
+type Cmd struct {
+ Name string
+ Cmd string // name of binary to invoke command
+
+ CreateCmd string // command to download a fresh copy of a repository
+ DownloadCmd string // command to download updates into an existing repository
+
+ TagCmd []TagCmd // commands to list tags
+ TagLookupCmd []TagCmd // commands to lookup tags before running tagSyncCmd
+ TagSyncCmd string // command to sync to specific tag
+ TagSyncDefault string // command to sync to default tag
+
+ LogCmd string // command to list repository changelogs in an XML format
+
+ Scheme []string
+ PingCmd string
+}
+
+// A TagCmd describes a command to list available tags
+// that can be passed to Cmd.TagSyncCmd.
+type TagCmd struct {
+ Cmd string // command to list tags
+ Pattern string // regexp to extract tags from list
+}
+
+// vcsList lists the known version control systems
+var vcsList = []*Cmd{
+ vcsHg,
+ vcsGit,
+ vcsSvn,
+ vcsBzr,
+}
+
+// ByCmd returns the version control system for the given
+// command name (hg, git, svn, bzr).
+func ByCmd(cmd string) *Cmd {
+ for _, vcs := range vcsList {
+ if vcs.Cmd == cmd {
+ return vcs
+ }
+ }
+ return nil
+}
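+
+// A minimal sketch:
+//
+//	if git := vcs.ByCmd("git"); git != nil {
+//		fmt.Println(git.Name) // "Git"
+//	}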
+
+// vcsHg describes how to use Mercurial.
+var vcsHg = &Cmd{
+ Name: "Mercurial",
+ Cmd: "hg",
+
+ CreateCmd: "clone -U {repo} {dir}",
+ DownloadCmd: "pull",
+
+ // We allow both tag and branch names as 'tags'
+ // for selecting a version. This lets people have
+ // a go.release.r60 branch and a go1 branch
+ // and make changes in both, without constantly
+ // editing .hgtags.
+ TagCmd: []TagCmd{
+ {"tags", `^(\S+)`},
+ {"branches", `^(\S+)`},
+ },
+ TagSyncCmd: "update -r {tag}",
+ TagSyncDefault: "update default",
+
+ LogCmd: "log --encoding=utf-8 --limit={limit} --template={template}",
+
+ Scheme: []string{"https", "http", "ssh"},
+ PingCmd: "identify {scheme}://{repo}",
+}
+
+// vcsGit describes how to use Git.
+var vcsGit = &Cmd{
+ Name: "Git",
+ Cmd: "git",
+
+ CreateCmd: "clone {repo} {dir}",
+ DownloadCmd: "pull --ff-only",
+
+ TagCmd: []TagCmd{
+ // tags/xxx matches a git tag named xxx
+ // origin/xxx matches a git branch named xxx on the default remote repository
+ {"show-ref", `(?:tags|origin)/(\S+)$`},
+ },
+ TagLookupCmd: []TagCmd{
+ {"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`},
+ },
+ TagSyncCmd: "checkout {tag}",
+ TagSyncDefault: "checkout master",
+
+ Scheme: []string{"git", "https", "http", "git+ssh"},
+ PingCmd: "ls-remote {scheme}://{repo}",
+}
+
+// vcsBzr describes how to use Bazaar.
+var vcsBzr = &Cmd{
+ Name: "Bazaar",
+ Cmd: "bzr",
+
+ CreateCmd: "branch {repo} {dir}",
+
+ // Without --overwrite bzr will not pull tags that changed.
+ // Replace by --overwrite-tags after http://pad.lv/681792 goes in.
+ DownloadCmd: "pull --overwrite",
+
+ TagCmd: []TagCmd{{"tags", `^(\S+)`}},
+ TagSyncCmd: "update -r {tag}",
+ TagSyncDefault: "update -r revno:-1",
+
+ Scheme: []string{"https", "http", "bzr", "bzr+ssh"},
+ PingCmd: "info {scheme}://{repo}",
+}
+
+// vcsSvn describes how to use Subversion.
+var vcsSvn = &Cmd{
+ Name: "Subversion",
+ Cmd: "svn",
+
+ CreateCmd: "checkout {repo} {dir}",
+ DownloadCmd: "update",
+
+ // There is no tag command in subversion.
+ // The branch information is all in the path names.
+
+ LogCmd: "log --xml --limit={limit}",
+
+ Scheme: []string{"https", "http", "svn", "svn+ssh"},
+ PingCmd: "info {scheme}://{repo}",
+}
+
+func (v *Cmd) String() string {
+ return v.Name
+}
+
+// run runs the command line cmd in the given directory.
+// keyval is a list of key, value pairs. run expands
+// instances of {key} in cmd into value, but only after
+// splitting cmd into individual arguments.
+// If an error occurs, run prints the command line and the
+// command's combined stdout+stderr to standard error.
+// Otherwise run discards the command's output.
+func (v *Cmd) run(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, true)
+ return err
+}
+
+// runVerboseOnly is like run but only generates error output to standard error in verbose mode.
+func (v *Cmd) runVerboseOnly(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, false)
+ return err
+}
+
+// runOutput is like run but returns the output of the command.
+func (v *Cmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) {
+ return v.run1(dir, cmd, keyval, true)
+}
+
+// run1 is the generalized implementation of run and runOutput.
+func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) {
+ m := make(map[string]string)
+ for i := 0; i < len(keyval); i += 2 {
+ m[keyval[i]] = keyval[i+1]
+ }
+ args := strings.Fields(cmdline)
+ for i, arg := range args {
+ args[i] = expand(m, arg)
+ }
+
+ _, err := exec.LookPath(v.Cmd)
+ if err != nil {
+ fmt.Fprintf(os.Stderr,
+ "go: missing %s command. See http://golang.org/s/gogetcmd\n",
+ v.Name)
+ return nil, err
+ }
+
+ cmd := exec.Command(v.Cmd, args...)
+ cmd.Dir = dir
+ cmd.Env = envForDir(cmd.Dir)
+ if ShowCmd {
+ fmt.Printf("cd %s\n", dir)
+ fmt.Printf("%s %s\n", v.Cmd, strings.Join(args, " "))
+ }
+ var buf bytes.Buffer
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ err = cmd.Run()
+ out := buf.Bytes()
+ if err != nil {
+ if verbose || Verbose {
+ fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " "))
+ os.Stderr.Write(out)
+ }
+ return nil, err
+ }
+ return out, nil
+}
+
+// Ping pings the repo to determine if the scheme used is valid.
+// This repo must be pingable with this scheme and VCS.
+func (v *Cmd) Ping(scheme, repo string) error {
+ return v.runVerboseOnly(".", v.PingCmd, "scheme", scheme, "repo", repo)
+}
+
+// Create creates a new copy of repo in dir.
+// The parent of dir must exist; dir must not.
+func (v *Cmd) Create(dir, repo string) error {
+ return v.run(".", v.CreateCmd, "dir", dir, "repo", repo)
+}
+
+// CreateAtRev creates a new copy of repo in dir at revision rev.
+// The parent of dir must exist; dir must not.
+// rev must be a valid revision in repo.
+func (v *Cmd) CreateAtRev(dir, repo, rev string) error {
+ if err := v.Create(dir, repo); err != nil {
+ return err
+ }
+ return v.run(dir, v.TagSyncCmd, "tag", rev)
+}
+
+// Download downloads any new changes for the repo in dir.
+// dir must be a valid VCS repo compatible with v.
+func (v *Cmd) Download(dir string) error {
+ return v.run(dir, v.DownloadCmd)
+}
+
+// Tags returns the list of available tags for the repo in dir.
+// dir must be a valid VCS repo compatible with v.
+func (v *Cmd) Tags(dir string) ([]string, error) {
+ var tags []string
+ for _, tc := range v.TagCmd {
+ out, err := v.runOutput(dir, tc.Cmd)
+ if err != nil {
+ return nil, err
+ }
+ re := regexp.MustCompile(`(?m-s)` + tc.Pattern)
+ for _, m := range re.FindAllStringSubmatch(string(out), -1) {
+ tags = append(tags, m[1])
+ }
+ }
+ return tags, nil
+}
+
+// TagSync syncs the repo in dir to the named tag, which is either a
+// tag returned by Tags or the empty string (the default tag).
+// dir must be a valid VCS repo compatible with v and the tag must exist.
+func (v *Cmd) TagSync(dir, tag string) error {
+ if v.TagSyncCmd == "" {
+ return nil
+ }
+ if tag != "" {
+ for _, tc := range v.TagLookupCmd {
+ out, err := v.runOutput(dir, tc.Cmd, "tag", tag)
+ if err != nil {
+ return err
+ }
+ re := regexp.MustCompile(`(?m-s)` + tc.Pattern)
+ m := re.FindStringSubmatch(string(out))
+ if len(m) > 1 {
+ tag = m[1]
+ break
+ }
+ }
+ }
+ if tag == "" && v.TagSyncDefault != "" {
+ return v.run(dir, v.TagSyncDefault)
+ }
+ return v.run(dir, v.TagSyncCmd, "tag", tag)
+}
+
+// Log logs the changes for the repo in dir.
+// dir must be a valid VCS repo compatible with v.
+func (v *Cmd) Log(dir, logTemplate string) ([]byte, error) {
+ if err := v.Download(dir); err != nil {
+ return []byte{}, err
+ }
+
+ const N = 50 // how many revisions to grab
+ return v.runOutput(dir, v.LogCmd, "limit", strconv.Itoa(N), "template", logTemplate)
+}
+
+// LogAtRev logs the change for repo in dir at the rev revision.
+// dir must be a valid VCS repo compatible with v.
+// rev must be a valid revision for the repo in dir.
+func (v *Cmd) LogAtRev(dir, rev, logTemplate string) ([]byte, error) {
+ if err := v.Download(dir); err != nil {
+ return []byte{}, err
+ }
+
+ // Append revision flag to LogCmd.
+ logAtRevCmd := v.LogCmd + " --rev=" + rev
+ return v.runOutput(dir, logAtRevCmd, "limit", strconv.Itoa(1), "template", logTemplate)
+}
+
+// A vcsPath describes how to convert an import path into a
+// version control system and repository name.
+type vcsPath struct {
+ prefix string // prefix this description applies to
+ re string // pattern for import path
+ repo string // repository to use (expand with match of re)
+ vcs string // version control system to use (expand with match of re)
+ check func(match map[string]string) error // additional checks
+ ping bool // ping for scheme to use to download repo
+
+ regexp *regexp.Regexp // cached compiled form of re
+}
+
+// FromDir inspects dir and its parents to determine the
+// version control system and code repository to use.
+// On return, root is the import path
+// corresponding to the root of the repository.
+func FromDir(dir, srcRoot string) (vcs *Cmd, root string, err error) {
+ // Clean and double-check that dir is in (a subdirectory of) srcRoot.
+ dir = filepath.Clean(dir)
+ srcRoot = filepath.Clean(srcRoot)
+ if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
+ return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
+ }
+
+ var vcsRet *Cmd
+ var rootRet string
+
+ origDir := dir
+ for len(dir) > len(srcRoot) {
+ for _, vcs := range vcsList {
+ if _, err := os.Stat(filepath.Join(dir, "."+vcs.Cmd)); err == nil {
+ root := filepath.ToSlash(dir[len(srcRoot)+1:])
+ // Record first VCS we find, but keep looking,
+ // to detect mistakes like one kind of VCS inside another.
+ if vcsRet == nil {
+ vcsRet = vcs
+ rootRet = root
+ continue
+ }
+ // Allow .git inside .git, which can arise due to submodules.
+ if vcsRet == vcs && vcs.Cmd == "git" {
+ continue
+ }
+ // Otherwise, we have one VCS inside a different VCS.
+ return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s",
+ filepath.Join(srcRoot, rootRet), vcsRet.Cmd, filepath.Join(srcRoot, root), vcs.Cmd)
+ }
+ }
+
+ // Move to parent.
+ ndir := filepath.Dir(dir)
+ if len(ndir) >= len(dir) {
+ // Shouldn't happen, but just in case, stop.
+ break
+ }
+ dir = ndir
+ }
+
+ if vcsRet != nil {
+ return vcsRet, rootRet, nil
+ }
+
+ return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir)
+}
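+
+// A minimal sketch (the paths below are illustrative):
+//
+//	cmd, root, err := vcs.FromDir("/gopath/src/example.org/repo/sub", "/gopath/src")
+//	// on success, cmd identifies the VCS found on disk (e.g. Git)
+//	// and root == "example.org/repo"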
+
+// RepoRoot represents a version control system, a repo, and a root of
+// where to put it on disk.
+type RepoRoot struct {
+ VCS *Cmd
+
+ // Repo is the repository URL, including scheme.
+ Repo string
+
+ // Root is the import path corresponding to the root of the
+ // repository.
+ Root string
+}
+
+// RepoRootForImportPath analyzes importPath to determine the
+// version control system and code repository to use.
+func RepoRootForImportPath(importPath string, verbose bool) (*RepoRoot, error) {
+ rr, err := RepoRootForImportPathStatic(importPath, "")
+ if err == errUnknownSite {
+ rr, err = RepoRootForImportDynamic(importPath, verbose)
+
+ // RepoRootForImportDynamic returns error detail
+ // that is irrelevant if the user didn't intend to use a
+ // dynamic import in the first place.
+ // Squelch it.
+ if err != nil {
+ if Verbose {
+ log.Printf("import %q: %v", importPath, err)
+ }
+ err = fmt.Errorf("unrecognized import path %q", importPath)
+ }
+ }
+
+ if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") {
+ // Do not allow wildcards in the repo root.
+ rr = nil
+ err = fmt.Errorf("cannot expand ... in %q", importPath)
+ }
+ return rr, err
+}
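+
+// A minimal sketch (the import path is illustrative):
+//
+//	rr, err := vcs.RepoRootForImportPath("github.com/user/repo/sub", false)
+//	if err == nil {
+//		fmt.Println(rr.Root, rr.Repo)
+//		// "github.com/user/repo" "https://github.com/user/repo"
+//	}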
+
+var errUnknownSite = errors.New("dynamic lookup required to find mapping")
+
+// RepoRootForImportPathStatic attempts to map importPath to a
+// RepoRoot using the commonly-used VCS hosting sites in vcsPaths
+// (github.com/user/dir), or from a fully-qualified importPath already
+// containing its VCS type (foo.com/repo.git/dir)
+//
+// If scheme is non-empty, that scheme is forced.
+func RepoRootForImportPathStatic(importPath, scheme string) (*RepoRoot, error) {
+ if strings.Contains(importPath, "://") {
+ return nil, fmt.Errorf("invalid import path %q", importPath)
+ }
+ for _, srv := range vcsPaths {
+ if !strings.HasPrefix(importPath, srv.prefix) {
+ continue
+ }
+ m := srv.regexp.FindStringSubmatch(importPath)
+ if m == nil {
+ if srv.prefix != "" {
+ return nil, fmt.Errorf("invalid %s import path %q", srv.prefix, importPath)
+ }
+ continue
+ }
+
+ // Build map of named subexpression matches for expand.
+ match := map[string]string{
+ "prefix": srv.prefix,
+ "import": importPath,
+ }
+ for i, name := range srv.regexp.SubexpNames() {
+ if name != "" && match[name] == "" {
+ match[name] = m[i]
+ }
+ }
+ if srv.vcs != "" {
+ match["vcs"] = expand(match, srv.vcs)
+ }
+ if srv.repo != "" {
+ match["repo"] = expand(match, srv.repo)
+ }
+ if srv.check != nil {
+ if err := srv.check(match); err != nil {
+ return nil, err
+ }
+ }
+ vcs := ByCmd(match["vcs"])
+ if vcs == nil {
+ return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
+ }
+ if srv.ping {
+ if scheme != "" {
+ match["repo"] = scheme + "://" + match["repo"]
+ } else {
+ for _, scheme := range vcs.Scheme {
+ if vcs.Ping(scheme, match["repo"]) == nil {
+ match["repo"] = scheme + "://" + match["repo"]
+ break
+ }
+ }
+ }
+ }
+ rr := &RepoRoot{
+ VCS: vcs,
+ Repo: match["repo"],
+ Root: match["root"],
+ }
+ return rr, nil
+ }
+ return nil, errUnknownSite
+}
+
+// RepoRootForImportDynamic finds a *RepoRoot for a custom domain that's not
+// statically known by RepoRootForImportPathStatic.
+//
+// This handles custom import paths like "name.tld/pkg/foo" or just "name.tld".
+func RepoRootForImportDynamic(importPath string, verbose bool) (*RepoRoot, error) {
+ slash := strings.Index(importPath, "/")
+ if slash < 0 {
+ slash = len(importPath)
+ }
+ host := importPath[:slash]
+ if !strings.Contains(host, ".") {
+ return nil, errors.New("import path doesn't contain a hostname")
+ }
+ urlStr, body, err := httpsOrHTTP(importPath)
+ if err != nil {
+ return nil, fmt.Errorf("http/https fetch: %v", err)
+ }
+ defer body.Close()
+ imports, err := parseMetaGoImports(body)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", importPath, err)
+ }
+ metaImport, err := matchGoImport(imports, importPath)
+ if err != nil {
+ if err != errNoMatch {
+ return nil, fmt.Errorf("parse %s: %v", urlStr, err)
+ }
+ return nil, fmt.Errorf("parse %s: no go-import meta tags", urlStr)
+ }
+ if verbose {
+ log.Printf("get %q: found meta tag %#v at %s", importPath, metaImport, urlStr)
+ }
+ // If the import was "uni.edu/bob/project", which said the
+ // prefix was "uni.edu" and the RepoRoot was "evilroot.com",
+ // make sure we don't trust Bob and check out evilroot.com to
+ // "uni.edu" yet (possibly overwriting/preempting another
+ // non-evil student). Instead, first verify the root and see
+ // if it matches Bob's claim.
+ if metaImport.Prefix != importPath {
+ if verbose {
+ log.Printf("get %q: verifying non-authoritative meta tag", importPath)
+ }
+ urlStr0 := urlStr
+ urlStr, body, err = httpsOrHTTP(metaImport.Prefix)
+ if err != nil {
+ return nil, fmt.Errorf("fetch %s: %v", urlStr, err)
+ }
+ imports, err := parseMetaGoImports(body)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", importPath, err)
+ }
+ if len(imports) == 0 {
+ return nil, fmt.Errorf("fetch %s: no go-import meta tag", urlStr)
+ }
+ metaImport2, err := matchGoImport(imports, importPath)
+ if err != nil || metaImport != metaImport2 {
+ return nil, fmt.Errorf("%s and %s disagree about go-import for %s", urlStr0, urlStr, metaImport.Prefix)
+ }
+ }
+
+ if err := validateRepoRoot(metaImport.RepoRoot); err != nil {
+ return nil, fmt.Errorf("%s: invalid repo root %q: %v", urlStr, metaImport.RepoRoot, err)
+ }
+ rr := &RepoRoot{
+ VCS: ByCmd(metaImport.VCS),
+ Repo: metaImport.RepoRoot,
+ Root: metaImport.Prefix,
+ }
+ if rr.VCS == nil {
+ return nil, fmt.Errorf("%s: unknown vcs %q", urlStr, metaImport.VCS)
+ }
+ return rr, nil
+}
+
+// validateRepoRoot returns an error if repoRoot does not seem to be
+// a valid URL with scheme.
+func validateRepoRoot(repoRoot string) error {
+ url, err := url.Parse(repoRoot)
+ if err != nil {
+ return err
+ }
+ if url.Scheme == "" {
+ return errors.New("no scheme")
+ }
+ return nil
+}
+
+// metaImport represents the parsed <meta name="go-import"
+// content="prefix vcs reporoot" /> tags from HTML files.
+type metaImport struct {
+ Prefix, VCS, RepoRoot string
+}
+
+// errNoMatch is returned from matchGoImport when there's no applicable match.
+var errNoMatch = errors.New("no import match")
+
+// pathPrefix reports whether sub is a prefix of s,
+// only considering entire path components.
+func pathPrefix(s, sub string) bool {
+ // strings.HasPrefix is necessary but not sufficient.
+ if !strings.HasPrefix(s, sub) {
+ return false
+ }
+ // The remainder after the prefix must either be empty or start with a slash.
+ rem := s[len(sub):]
+ return rem == "" || rem[0] == '/'
+}
+
+// matchGoImport returns the metaImport from imports matching importPath.
+// An error is returned if there are multiple matches.
+// errNoMatch is returned if none match.
+func matchGoImport(imports []metaImport, importPath string) (_ metaImport, err error) {
+ match := -1
+ for i, im := range imports {
+ if !pathPrefix(importPath, im.Prefix) {
+ continue
+ }
+
+ if match != -1 {
+ err = fmt.Errorf("multiple meta tags match import path %q", importPath)
+ return
+ }
+ match = i
+ }
+ if match == -1 {
+ err = errNoMatch
+ return
+ }
+ return imports[match], nil
+}
+
+// expand rewrites s to replace {k} with match[k] for each key k in match.
+func expand(match map[string]string, s string) string {
+ for k, v := range match {
+ s = strings.Replace(s, "{"+k+"}", v, -1)
+ }
+ return s
+}
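+
+// For example, expand(map[string]string{"repo": "example.org/x"},
+// "https://{repo}") returns "https://example.org/x".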
+
+// vcsPaths lists the known vcs paths.
+var vcsPaths = []*vcsPath{
+ // GitHub
+ {
+ prefix: "github.com/",
+ re: `^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[\p{L}0-9_.\-]+)*$`,
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
+ // Bitbucket
+ {
+ prefix: "bitbucket.org/",
+ re: `^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`,
+ repo: "https://{root}",
+ check: bitbucketVCS,
+ },
+
+ // Launchpad
+ {
+ prefix: "launchpad.net/",
+ re: `^(?P<root>launchpad\.net/((?P<project>[A-Za-z0-9_.\-]+)(?P<series>/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`,
+ vcs: "bzr",
+ repo: "https://{root}",
+ check: launchpadVCS,
+ },
+
+ // Git at OpenStack
+ {
+ prefix: "git.openstack.org",
+ re: `^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/[A-Za-z0-9_.\-]+)*$`,
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
+ // General syntax for any server.
+ {
+ re: `^(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/]*?)\.(?P<vcs>bzr|git|hg|svn))(/[A-Za-z0-9_.\-]+)*$`,
+ ping: true,
+ },
+}
+
+func init() {
+ // fill in cached regexps.
+ // Doing this eagerly discovers invalid regexp syntax
+ // without having to run a command that needs that regexp.
+ for _, srv := range vcsPaths {
+ srv.regexp = regexp.MustCompile(srv.re)
+ }
+}
+
+// noVCSSuffix checks that the repository name does not
+// end in .foo for any version control system foo.
+// The usual culprit is ".git".
+func noVCSSuffix(match map[string]string) error {
+ repo := match["repo"]
+ for _, vcs := range vcsList {
+ if strings.HasSuffix(repo, "."+vcs.Cmd) {
+ return fmt.Errorf("invalid version control suffix in %s path", match["prefix"])
+ }
+ }
+ return nil
+}
+
+// bitbucketVCS determines the version control system for a
+// Bitbucket repository, by using the Bitbucket API.
+func bitbucketVCS(match map[string]string) error {
+ if err := noVCSSuffix(match); err != nil {
+ return err
+ }
+
+ var resp struct {
+ SCM string `json:"scm"`
+ }
+ url := expand(match, "https://api.bitbucket.org/2.0/repositories/{bitname}?fields=scm")
+ data, err := httpGET(url)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &resp); err != nil {
+ return fmt.Errorf("decoding %s: %v", url, err)
+ }
+
+ if ByCmd(resp.SCM) != nil {
+ match["vcs"] = resp.SCM
+ if resp.SCM == "git" {
+ match["repo"] += ".git"
+ }
+ return nil
+ }
+
+ return fmt.Errorf("unable to detect version control system for bitbucket.org/ path")
+}
+
+// launchpadVCS solves the ambiguity for "launchpad.net/project/foo". In this case,
+// "foo" could be a series name registered in Launchpad with its own branch,
+// and it could also be the name of a directory within the main project
+// branch one level up.
+func launchpadVCS(match map[string]string) error {
+ if match["project"] == "" || match["series"] == "" {
+ return nil
+ }
+ _, err := httpGET(expand(match, "https://code.launchpad.net/{project}{series}/.bzr/branch-format"))
+ if err != nil {
+ match["root"] = expand(match, "launchpad.net/{project}")
+ match["repo"] = expand(match, "https://{root}")
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/tools/imports/fix.go b/vendor/golang.org/x/tools/imports/fix.go
new file mode 100644
index 000000000..777d28ccd
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/fix.go
@@ -0,0 +1,1259 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/internal/gopathwalk"
+)
+
+// Debug controls verbose logging.
+var Debug = false
+
+// LocalPrefix is a comma-separated string of import path prefixes, which, if
+// set, instructs Process to sort the import paths with the given prefixes
+// into another group after 3rd-party packages.
+var LocalPrefix string
+
+func localPrefixes() []string {
+ if LocalPrefix != "" {
+ return strings.Split(LocalPrefix, ",")
+ }
+ return nil
+}
+
+// importToGroup is a list of functions which map from an import path to
+// a group number.
+var importToGroup = []func(importPath string) (num int, ok bool){
+ func(importPath string) (num int, ok bool) {
+ for _, p := range localPrefixes() {
+ if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath {
+ return 3, true
+ }
+ }
+ return
+ },
+ func(importPath string) (num int, ok bool) {
+ if strings.HasPrefix(importPath, "appengine") {
+ return 2, true
+ }
+ return
+ },
+ func(importPath string) (num int, ok bool) {
+ if strings.Contains(importPath, ".") {
+ return 1, true
+ }
+ return
+ },
+}
+
+func importGroup(importPath string) int {
+ for _, fn := range importToGroup {
+ if n, ok := fn(importPath); ok {
+ return n
+ }
+ }
+ return 0
+}
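+
+// For example, with LocalPrefix unset: importGroup("os") == 0 (stdlib),
+// importGroup("github.com/user/repo") == 1 (3rd-party), and
+// importGroup("appengine/datastore") == 2, giving the familiar grouping
+// of import blocks.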
+
+// An importInfo represents a single import statement.
+type importInfo struct {
+ importPath string // import path, e.g. "crypto/rand".
+ name string // import name, e.g. "crand", or "" if none.
+}
+
+// A packageInfo represents what's known about a package.
+type packageInfo struct {
+ name string // real package name, if known.
+ exports map[string]bool // known exports.
+}
+
+// parseOtherFiles parses all the Go files in srcDir except filename, including
+// test files if filename looks like a test.
+func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File {
+ // This could use go/packages but it doesn't buy much, and it fails
+ // with https://golang.org/issue/26296 in LoadFiles mode in some cases.
+ considerTests := strings.HasSuffix(filename, "_test.go")
+
+ fileBase := filepath.Base(filename)
+ packageFileInfos, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return nil
+ }
+
+ var files []*ast.File
+ for _, fi := range packageFileInfos {
+ if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") {
+ continue
+ }
+ if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") {
+ continue
+ }
+
+ f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
+ if err != nil {
+ continue
+ }
+
+ files = append(files, f)
+ }
+
+ return files
+}
+
+// addGlobals records the names of package-level value declarations in the
+// provided map. (Only the first name in each spec is recorded.)
+func addGlobals(f *ast.File, globals map[string]bool) {
+ for _, decl := range f.Decls {
+ genDecl, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+
+ for _, spec := range genDecl.Specs {
+ valueSpec, ok := spec.(*ast.ValueSpec)
+ if !ok {
+ continue
+ }
+ globals[valueSpec.Names[0].Name] = true
+ }
+ }
+}
+
+// collectReferences builds a map of selector expressions, from
+// left hand side (X) to a set of right hand sides (Sel).
+func collectReferences(f *ast.File) references {
+ refs := references{}
+
+ var visitor visitFn
+ visitor = func(node ast.Node) ast.Visitor {
+ if node == nil {
+ return visitor
+ }
+ switch v := node.(type) {
+ case *ast.SelectorExpr:
+ xident, ok := v.X.(*ast.Ident)
+ if !ok {
+ break
+ }
+ if xident.Obj != nil {
+ // If the parser can resolve it, it's not a package ref.
+ break
+ }
+ if !ast.IsExported(v.Sel.Name) {
+ // Whatever this is, it's not exported from a package.
+ break
+ }
+ pkgName := xident.Name
+ r := refs[pkgName]
+ if r == nil {
+ r = make(map[string]bool)
+ refs[pkgName] = r
+ }
+ r[v.Sel.Name] = true
+ }
+ return visitor
+ }
+ ast.Walk(visitor, f)
+ return refs
+}
+
+// collectImports returns all the imports in f. Imports named "_" or "."
+// and the "C" pseudo-import are ignored.
+func collectImports(f *ast.File) []*importInfo {
+ var imports []*importInfo
+ for _, imp := range f.Imports {
+ var name string
+ if imp.Name != nil {
+ name = imp.Name.Name
+ }
+ if imp.Path.Value == `"C"` || name == "_" || name == "." {
+ continue
+ }
+ path := strings.Trim(imp.Path.Value, `"`)
+ imports = append(imports, &importInfo{
+ name: name,
+ importPath: path,
+ })
+ }
+ return imports
+}
+
+// findMissingImport searches pass's candidates for an import that provides
+// pkg, containing all of syms.
+func (p *pass) findMissingImport(pkg string, syms map[string]bool) *importInfo {
+ for _, candidate := range p.candidates {
+ pkgInfo, ok := p.knownPackages[candidate.importPath]
+ if !ok {
+ continue
+ }
+ if p.importIdentifier(candidate) != pkg {
+ continue
+ }
+
+ allFound := true
+ for right := range syms {
+ if !pkgInfo.exports[right] {
+ allFound = false
+ break
+ }
+ }
+
+ if allFound {
+ return candidate
+ }
+ }
+ return nil
+}
+
+// references is a set of references found in a Go file. The first map key is the
+// left hand side of a selector expression, the second key is the right hand
+// side, and the value should always be true.
+type references map[string]map[string]bool
+
+// A pass contains all the inputs and state necessary to fix a file's imports.
+// It can be modified in some ways during use; see comments below.
+type pass struct {
+ // Inputs. These must be set before a call to load, and not modified after.
+ fset *token.FileSet // fset used to parse f and its siblings.
+ f *ast.File // the file being fixed.
+ srcDir string // the directory containing f.
+ fixEnv *fixEnv // the environment to use for go commands, etc.
+ loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
+ otherFiles []*ast.File // sibling files.
+
+ // Intermediate state, generated by load.
+ existingImports map[string]*importInfo
+ allRefs references
+ missingRefs references
+
+ // Inputs to fix. These can be augmented between successive fix calls.
+ lastTry bool // indicates that this is the last call and fix should clean up as best it can.
+ candidates []*importInfo // candidate imports in priority order.
+ knownPackages map[string]*packageInfo // information about all known packages.
+}
+
+// loadPackageNames saves the package names for everything referenced by imports.
+func (p *pass) loadPackageNames(imports []*importInfo) error {
+ var unknown []string
+ for _, imp := range imports {
+ if _, ok := p.knownPackages[imp.importPath]; ok {
+ continue
+ }
+ unknown = append(unknown, imp.importPath)
+ }
+
+ names, err := p.fixEnv.getResolver().loadPackageNames(unknown, p.srcDir)
+ if err != nil {
+ return err
+ }
+
+ for path, name := range names {
+ p.knownPackages[path] = &packageInfo{
+ name: name,
+ exports: map[string]bool{},
+ }
+ }
+ return nil
+}
+
+// importIdentifier returns the identifier that imp will introduce. It will
+// guess if the package name has not been loaded, e.g. because the source
+// is not available.
+func (p *pass) importIdentifier(imp *importInfo) string {
+ if imp.name != "" {
+ return imp.name
+ }
+ known := p.knownPackages[imp.importPath]
+ if known != nil && known.name != "" {
+ return known.name
+ }
+ return importPathToAssumedName(imp.importPath)
+}
+
+// load reads in everything necessary to run a pass, and reports whether the
+// file already has all the imports it needs. It fills in p.missingRefs with the
+// file's missing symbols, if any; if there are none, it removes unused imports.
+func (p *pass) load() bool {
+ p.knownPackages = map[string]*packageInfo{}
+ p.missingRefs = references{}
+ p.existingImports = map[string]*importInfo{}
+
+ // Load basic information about the file in question.
+ p.allRefs = collectReferences(p.f)
+
+ // Load stuff from other files in the same package:
+ // global variables so we know they don't need resolving, and imports
+ // that we might want to mimic.
+ globals := map[string]bool{}
+ for _, otherFile := range p.otherFiles {
+ // Don't load globals from files that are in the same directory
+ // but a different package. Using them to suggest imports is OK.
+ if p.f.Name.Name == otherFile.Name.Name {
+ addGlobals(otherFile, globals)
+ }
+ p.candidates = append(p.candidates, collectImports(otherFile)...)
+ }
+
+ // Resolve all the import paths we've seen to package names, and store
+ // f's imports by the identifier they introduce.
+ imports := collectImports(p.f)
+ if p.loadRealPackageNames {
+ err := p.loadPackageNames(append(imports, p.candidates...))
+ if err != nil {
+ if Debug {
+ log.Printf("loading package names: %v", err)
+ }
+ return false
+ }
+ }
+ for _, imp := range imports {
+ p.existingImports[p.importIdentifier(imp)] = imp
+ }
+
+ // Find missing references.
+ for left, rights := range p.allRefs {
+ if globals[left] {
+ continue
+ }
+ _, ok := p.existingImports[left]
+ if !ok {
+ p.missingRefs[left] = rights
+ continue
+ }
+ }
+ if len(p.missingRefs) != 0 {
+ return false
+ }
+
+ return p.fix()
+}
+
+// fix attempts to satisfy missing imports using p.candidates. If it finds
+// everything, or if p.lastTry is true, it adds the imports it found,
+// removes anything unused, and returns true.
+func (p *pass) fix() bool {
+ // Find missing imports.
+ var selected []*importInfo
+ for left, rights := range p.missingRefs {
+ if imp := p.findMissingImport(left, rights); imp != nil {
+ selected = append(selected, imp)
+ }
+ }
+
+ if !p.lastTry && len(selected) != len(p.missingRefs) {
+ return false
+ }
+
+ // Found everything, or giving up. Add the new imports and remove any unused.
+ for _, imp := range p.existingImports {
+ // We deliberately ignore globals here, because we can't be sure
+ // they're in the same package. People do things like put multiple
+ // main packages in the same directory, and we don't want to
+ // remove imports if they happen to have the same name as a var in
+ // a different package.
+ if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok {
+ astutil.DeleteNamedImport(p.fset, p.f, imp.name, imp.importPath)
+ }
+ }
+
+ for _, imp := range selected {
+ astutil.AddNamedImport(p.fset, p.f, imp.name, imp.importPath)
+ }
+
+ if p.loadRealPackageNames {
+ for _, imp := range p.f.Imports {
+ if imp.Name != nil {
+ continue
+ }
+ path := strings.Trim(imp.Path.Value, `"`)
+ ident := p.importIdentifier(&importInfo{importPath: path})
+ if ident != importPathToAssumedName(path) {
+ imp.Name = &ast.Ident{Name: ident, NamePos: imp.Pos()}
+ }
+ }
+ }
+
+ return true
+}
+
+// assumeSiblingImportsValid assumes that siblings' use of packages is valid,
+// adding the exports they use.
+func (p *pass) assumeSiblingImportsValid() {
+ for _, f := range p.otherFiles {
+ refs := collectReferences(f)
+ imports := collectImports(f)
+ importsByName := map[string]*importInfo{}
+ for _, imp := range imports {
+ importsByName[p.importIdentifier(imp)] = imp
+ }
+ for left, rights := range refs {
+ if imp, ok := importsByName[left]; ok {
+ if _, ok := stdlib[imp.importPath]; ok {
+ // We have the stdlib in memory; no need to guess.
+ rights = stdlib[imp.importPath]
+ }
+ p.addCandidate(imp, &packageInfo{
+ // no name; we already know it.
+ exports: rights,
+ })
+ }
+ }
+ }
+}
+
+// addCandidate adds a candidate import to p, and merges in the information
+// in pkg.
+func (p *pass) addCandidate(imp *importInfo, pkg *packageInfo) {
+ p.candidates = append(p.candidates, imp)
+ if existing, ok := p.knownPackages[imp.importPath]; ok {
+ if existing.name == "" {
+ existing.name = pkg.name
+ }
+ for export := range pkg.exports {
+ existing.exports[export] = true
+ }
+ } else {
+ p.knownPackages[imp.importPath] = pkg
+ }
+}
+
+// fixImports adds and removes imports from f so that all its references are
+// satisfied and there are no unused imports.
+//
+// This is declared as a variable rather than a function so goimports can
+// easily be extended by adding a file with an init function.
+var fixImports = fixImportsDefault
+
+func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *fixEnv) error {
+ abs, err := filepath.Abs(filename)
+ if err != nil {
+ return err
+ }
+ srcDir := filepath.Dir(abs)
+ if Debug {
+ log.Printf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
+ }
+
+ // First pass: looking only at f, and using the naive algorithm to
+ // derive package names from import paths, see if the file is already
+ // complete. We can't add any imports yet, because we don't know
+ // if missing references are actually package vars.
+ p := &pass{fset: fset, f: f, srcDir: srcDir}
+ if p.load() {
+ return nil
+ }
+
+ otherFiles := parseOtherFiles(fset, srcDir, filename)
+
+ // Second pass: add information from other files in the same package,
+ // like their package vars and imports.
+ p.otherFiles = otherFiles
+ if p.load() {
+ return nil
+ }
+
+ // Now we can try adding imports from the stdlib.
+ p.assumeSiblingImportsValid()
+ addStdlibCandidates(p, p.missingRefs)
+ if p.fix() {
+ return nil
+ }
+
+ // Third pass: get real package names where we had previously used
+ // the naive algorithm. This is the first step that will use the
+ // environment, so we provide it here for the first time.
+ p = &pass{fset: fset, f: f, srcDir: srcDir, fixEnv: env}
+ p.loadRealPackageNames = true
+ p.otherFiles = otherFiles
+ if p.load() {
+ return nil
+ }
+
+ addStdlibCandidates(p, p.missingRefs)
+ p.assumeSiblingImportsValid()
+ if p.fix() {
+ return nil
+ }
+
+ // Go look for candidates in $GOPATH, etc. We don't necessarily load
+ // the real exports of sibling imports, so keep assuming their contents.
+ if err := addExternalCandidates(p, p.missingRefs, filename); err != nil {
+ return err
+ }
+
+ p.lastTry = true
+ p.fix()
+ return nil
+}
+
+// fixEnv contains environment variables and settings that affect the use of
+// the go command, the go/build package, etc.
+type fixEnv struct {
+ // If non-empty, these will be used instead of the
+ // process-wide values.
+ GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS string
+ WorkingDir string
+
+ // If true, use go/packages regardless of the environment.
+ ForceGoPackages bool
+
+ resolver resolver
+}
+
+func (e *fixEnv) env() []string {
+ env := os.Environ()
+ add := func(k, v string) {
+ if v != "" {
+ env = append(env, k+"="+v)
+ }
+ }
+ add("GOPATH", e.GOPATH)
+ add("GOROOT", e.GOROOT)
+ add("GO111MODULE", e.GO111MODULE)
+ add("GOPROXY", e.GOPROXY)
+ add("GOFLAGS", e.GOFLAGS)
+ if e.WorkingDir != "" {
+ add("PWD", e.WorkingDir)
+ }
+ return env
+}
+
+func (e *fixEnv) getResolver() resolver {
+ if e.resolver != nil {
+ return e.resolver
+ }
+ if e.ForceGoPackages {
+ return &goPackagesResolver{env: e}
+ }
+
+ out, err := e.invokeGo("env", "GOMOD")
+ if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
+ return &gopathResolver{env: e}
+ }
+ return &moduleResolver{env: e}
+}
+
+func (e *fixEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config {
+ return &packages.Config{
+ Mode: mode,
+ Dir: e.WorkingDir,
+ Env: e.env(),
+ }
+}
+
+func (e *fixEnv) buildContext() *build.Context {
+ ctx := build.Default
+ ctx.GOROOT = e.GOROOT
+ ctx.GOPATH = e.GOPATH
+ return &ctx
+}
+
+func (e *fixEnv) invokeGo(args ...string) (*bytes.Buffer, error) {
+ cmd := exec.Command("go", args...)
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ cmd.Env = e.env()
+ cmd.Dir = e.WorkingDir
+
+ if Debug {
+ defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+ }
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr)
+ }
+ return stdout, nil
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.Split(kv, "=")
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
+}
+
+func addStdlibCandidates(pass *pass, refs references) {
+ add := func(pkg string) {
+ pass.addCandidate(
+ &importInfo{importPath: pkg},
+ &packageInfo{name: path.Base(pkg), exports: stdlib[pkg]})
+ }
+ for left := range refs {
+ if left == "rand" {
+ // Make sure we try crypto/rand before math/rand.
+ add("crypto/rand")
+ add("math/rand")
+ continue
+ }
+ for importPath := range stdlib {
+ if path.Base(importPath) == left {
+ add(importPath)
+ }
+ }
+ }
+}
+
+// A resolver does the build-system-specific parts of goimports.
+type resolver interface {
+ // loadPackageNames loads the package names in importPaths.
+ loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
+ // scan finds (at least) the packages satisfying refs. The returned slice is unordered.
+ scan(refs references) ([]*pkg, error)
+}
+
+// goPackagesResolver implements resolver for GOPATH and module workspaces
+// using go/packages.
+type goPackagesResolver struct {
+ env *fixEnv
+}
+
+func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+ cfg := r.env.newPackagesConfig(packages.LoadFiles)
+ pkgs, err := packages.Load(cfg, importPaths...)
+ if err != nil {
+ return nil, err
+ }
+ names := map[string]string{}
+ for _, pkg := range pkgs {
+ names[VendorlessPath(pkg.PkgPath)] = pkg.Name
+ }
+ // We may not have found all the packages. Guess the rest.
+ for _, path := range importPaths {
+ if _, ok := names[path]; ok {
+ continue
+ }
+ names[path] = importPathToAssumedName(path)
+ }
+ return names, nil
+}
+
+func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
+ var loadQueries []string
+ for pkgName := range refs {
+ loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName)
+ }
+ sort.Strings(loadQueries)
+ cfg := r.env.newPackagesConfig(packages.LoadFiles)
+ goPackages, err := packages.Load(cfg, loadQueries...)
+ if err != nil {
+ return nil, err
+ }
+
+ var scan []*pkg
+ for _, goPackage := range goPackages {
+ scan = append(scan, &pkg{
+ dir: filepath.Dir(goPackage.CompiledGoFiles[0]),
+ importPathShort: VendorlessPath(goPackage.PkgPath),
+ goPackage: goPackage,
+ })
+ }
+ return scan, nil
+}
+
+func addExternalCandidates(pass *pass, refs references, filename string) error {
+ dirScan, err := pass.fixEnv.getResolver().scan(refs)
+ if err != nil {
+ return err
+ }
+
+ // Search for imports matching potential package references.
+ type result struct {
+ imp *importInfo
+ pkg *packageInfo
+ }
+ results := make(chan result, len(refs))
+
+ ctx, cancel := context.WithCancel(context.TODO())
+ var wg sync.WaitGroup
+ defer func() {
+ cancel()
+ wg.Wait()
+ }()
+ var (
+ firstErr error
+ firstErrOnce sync.Once
+ )
+ for pkgName, symbols := range refs {
+ wg.Add(1)
+ go func(pkgName string, symbols map[string]bool) {
+ defer wg.Done()
+
+ found, err := findImport(ctx, pass.fixEnv, dirScan, pkgName, symbols, filename)
+
+ if err != nil {
+ firstErrOnce.Do(func() {
+ firstErr = err
+ cancel()
+ })
+ return
+ }
+
+ if found == nil {
+ return // No matching package.
+ }
+
+ imp := &importInfo{
+ importPath: found.importPathShort,
+ }
+
+ pkg := &packageInfo{
+ name: pkgName,
+ exports: symbols,
+ }
+ results <- result{imp, pkg}
+ }(pkgName, symbols)
+ }
+ go func() {
+ wg.Wait()
+ close(results)
+ }()
+
+ for result := range results {
+ pass.addCandidate(result.imp, result.pkg)
+ }
+ return firstErr
+}
+
+// notIdentifier reports whether ch is an invalid identifier character.
+func notIdentifier(ch rune) bool {
+ return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' ||
+ '0' <= ch && ch <= '9' ||
+ ch == '_' ||
+ ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch)))
+}
+
+// importPathToAssumedName returns the assumed package name of an import path.
+// It does this using only string parsing of the import path.
+// It picks the last element of the path that does not look like a major
+// version, and then picks the valid identifier off the start of that element.
+// It is used to determine if a local rename should be added to an import for
+// clarity.
+// This function could be moved to a standard package and exported if we want
+// for use in other tools.
+func importPathToAssumedName(importPath string) string {
+ base := path.Base(importPath)
+ if strings.HasPrefix(base, "v") {
+ if _, err := strconv.Atoi(base[1:]); err == nil {
+ dir := path.Dir(importPath)
+ if dir != "." {
+ base = path.Base(dir)
+ }
+ }
+ }
+ base = strings.TrimPrefix(base, "go-")
+ if i := strings.IndexFunc(base, notIdentifier); i >= 0 {
+ base = base[:i]
+ }
+ return base
+}
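+// For example (illustrative paths):
+//
+//	importPathToAssumedName("gopkg.in/yaml.v2")          // "yaml": stops at the '.'
+//	importPathToAssumedName("github.com/user/go-foo")    // "foo": "go-" prefix stripped
+//	importPathToAssumedName("github.com/user/go-foo/v2") // "foo": major version skipped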
+
+// gopathResolver implements resolver for GOPATH workspaces.
+type gopathResolver struct {
+ env *fixEnv
+}
+
+func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+ names := map[string]string{}
+ for _, path := range importPaths {
+ names[path] = importPathToName(r.env, path, srcDir)
+ }
+ return names, nil
+}
+
+// importPathToName finds the actual package name, as declared in its .go
+// files. If there's a problem, it returns "".
+func importPathToName(env *fixEnv, importPath, srcDir string) (packageName string) {
+ // Fast path for standard library without going to disk.
+ if _, ok := stdlib[importPath]; ok {
+ return path.Base(importPath) // stdlib packages always match their paths.
+ }
+
+ buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly)
+ if err != nil {
+ return ""
+ }
+ pkgName, err := packageDirToName(buildPkg.Dir)
+ if err != nil {
+ return ""
+ }
+ return pkgName
+}
+
+// packageDirToName is a faster version of build.Import if
+// the only thing desired is the package name. It parses only the
+// package clauses of the .go files in dir, trusting that the files
+// in the directory are consistent.
+func packageDirToName(dir string) (packageName string, err error) {
+ d, err := os.Open(dir)
+ if err != nil {
+ return "", err
+ }
+ names, err := d.Readdirnames(-1)
+ d.Close()
+ if err != nil {
+ return "", err
+ }
+ sort.Strings(names) // to have predictable behavior
+ var lastErr error
+ var nfile int
+ for _, name := range names {
+ if !strings.HasSuffix(name, ".go") {
+ continue
+ }
+ if strings.HasSuffix(name, "_test.go") {
+ continue
+ }
+ nfile++
+ fullFile := filepath.Join(dir, name)
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ pkgName := f.Name.Name
+ if pkgName == "documentation" {
+ // Special case from go/build.ImportDir, not
+ // handled by ctx.MatchFile.
+ continue
+ }
+ if pkgName == "main" {
+ // Also skip package main, assuming it's a +build ignore generator or example.
+ // Since you can't import a package main anyway, there's no harm here.
+ continue
+ }
+ return pkgName, nil
+ }
+ if lastErr != nil {
+ return "", lastErr
+ }
+ return "", fmt.Errorf("no importable package found in %d Go files", nfile)
+}
+
+type pkg struct {
+ goPackage *packages.Package
+ dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http")
+ importPathShort string // vendorless import path ("net/http", "a/b")
+}
+
+type pkgDistance struct {
+ pkg *pkg
+ distance int // relative distance to target
+}
+
+// byDistanceOrImportPathShortLength sorts by relative distance, breaking ties
+// on the short import path length and then the import string itself.
+type byDistanceOrImportPathShortLength []pkgDistance
+
+func (s byDistanceOrImportPathShortLength) Len() int { return len(s) }
+func (s byDistanceOrImportPathShortLength) Less(i, j int) bool {
+ di, dj := s[i].distance, s[j].distance
+ if di == -1 {
+ return false
+ }
+ if dj == -1 {
+ return true
+ }
+ if di != dj {
+ return di < dj
+ }
+
+ vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort
+ if len(vi) != len(vj) {
+ return len(vi) < len(vj)
+ }
+ return vi < vj
+}
+func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func distance(basepath, targetpath string) int {
+ p, err := filepath.Rel(basepath, targetpath)
+ if err != nil {
+ return -1
+ }
+ if p == "." {
+ return 0
+ }
+ return strings.Count(p, string(filepath.Separator)) + 1
+}
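+// For example, with '/' as the path separator:
+//
+//	distance("/a/b", "/a/b")     // 0: same directory
+//	distance("/a/b", "/a/b/c/d") // 2: "c/d" has one separator, plus one
+//	distance("/a/b", "/a/x")     // 2: "../x"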
+
+func (r *gopathResolver) scan(_ references) ([]*pkg, error) {
+ dupCheck := make(map[string]bool)
+ var result []*pkg
+
+ var mu sync.Mutex
+
+ add := func(root gopathwalk.Root, dir string) {
+ mu.Lock()
+ defer mu.Unlock()
+
+ if _, dup := dupCheck[dir]; dup {
+ return
+ }
+ dupCheck[dir] = true
+ importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):])
+ result = append(result, &pkg{
+ importPathShort: VendorlessPath(importpath),
+ dir: dir,
+ })
+ }
+ gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: Debug, ModulesEnabled: false})
+ return result, nil
+}
+
+// VendorlessPath returns the devendorized version of the import path ipath.
+// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b".
+func VendorlessPath(ipath string) string {
+ // Devendorize for use in import statement.
+ if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 {
+ return ipath[i+len("/vendor/"):]
+ }
+ if strings.HasPrefix(ipath, "vendor/") {
+ return ipath[len("vendor/"):]
+ }
+ return ipath
+}
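+// A leading vendor directory is handled the same way: VendorlessPath("vendor/a/b")
+// also returns "a/b", and a path with no vendor component is returned unchanged.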
+
+// loadExports returns the set of exported symbols in the package at dir.
+// It returns an error if the package cannot be loaded or if the package name
+// in dir does not match expectPackage.
+func loadExports(ctx context.Context, env *fixEnv, expectPackage string, pkg *pkg) (map[string]bool, error) {
+ if Debug {
+ log.Printf("loading exports in dir %s (seeking package %s)", pkg.dir, expectPackage)
+ }
+ if pkg.goPackage != nil {
+ exports := map[string]bool{}
+ fset := token.NewFileSet()
+ for _, fname := range pkg.goPackage.CompiledGoFiles {
+ f, err := parser.ParseFile(fset, fname, nil, 0)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", fname, err)
+ }
+ for name := range f.Scope.Objects {
+ if ast.IsExported(name) {
+ exports[name] = true
+ }
+ }
+ }
+ return exports, nil
+ }
+
+ exports := make(map[string]bool)
+
+ // Look for non-test, buildable .go files which could provide exports.
+ all, err := ioutil.ReadDir(pkg.dir)
+ if err != nil {
+ return nil, err
+ }
+ var files []os.FileInfo
+ for _, fi := range all {
+ name := fi.Name()
+ if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
+ continue
+ }
+ match, err := env.buildContext().MatchFile(pkg.dir, fi.Name())
+ if err != nil || !match {
+ continue
+ }
+ files = append(files, fi)
+ }
+
+ if len(files) == 0 {
+ return nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", pkg.dir)
+ }
+
+ fset := token.NewFileSet()
+ for _, fi := range files {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ fullFile := filepath.Join(pkg.dir, fi.Name())
+ f, err := parser.ParseFile(fset, fullFile, nil, 0)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", fullFile, err)
+ }
+ pkgName := f.Name.Name
+ if pkgName == "documentation" {
+ // Special case from go/build.ImportDir, not
+ // handled by MatchFile above.
+ continue
+ }
+ if pkgName != expectPackage {
+ return nil, fmt.Errorf("scan of dir %v is not expected package %v (actually %v)", pkg.dir, expectPackage, pkgName)
+ }
+ for name := range f.Scope.Objects {
+ if ast.IsExported(name) {
+ exports[name] = true
+ }
+ }
+ }
+
+ if Debug {
+ exportList := make([]string, 0, len(exports))
+ for k := range exports {
+ exportList = append(exportList, k)
+ }
+ sort.Strings(exportList)
+ log.Printf("loaded exports in dir %v (package %v): %v", pkg.dir, expectPackage, strings.Join(exportList, ", "))
+ }
+ return exports, nil
+}
+
+// findImport searches for a package with the given symbols.
+// If no package is found, findImport returns (nil, nil).
+func findImport(ctx context.Context, env *fixEnv, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
+ pkgDir, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ pkgDir = filepath.Dir(pkgDir)
+
+ // Find candidate packages, looking only at their directory names first.
+ var candidates []pkgDistance
+ for _, pkg := range dirScan {
+ if pkg.dir != pkgDir && pkgIsCandidate(filename, pkgName, pkg) {
+ candidates = append(candidates, pkgDistance{
+ pkg: pkg,
+ distance: distance(pkgDir, pkg.dir),
+ })
+ }
+ }
+
+ // Sort the candidates by their import package length,
+ // assuming that shorter package names are better than long
+ // ones. Note that this sorts by the de-vendored name, so
+ // there's no "penalty" for vendoring.
+ sort.Sort(byDistanceOrImportPathShortLength(candidates))
+ if Debug {
+ for i, c := range candidates {
+ log.Printf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
+ }
+ }
+
+ // Collect exports for packages with matching names.
+
+ rescv := make([]chan *pkg, len(candidates))
+ for i := range candidates {
+ rescv[i] = make(chan *pkg, 1)
+ }
+ const maxConcurrentPackageImport = 4
+ loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
+
+ ctx, cancel := context.WithCancel(ctx)
+ var wg sync.WaitGroup
+ defer func() {
+ cancel()
+ wg.Wait()
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i, c := range candidates {
+ select {
+ case loadExportsSem <- struct{}{}:
+ case <-ctx.Done():
+ return
+ }
+
+ wg.Add(1)
+ go func(c pkgDistance, resc chan<- *pkg) {
+ defer func() {
+ <-loadExportsSem
+ wg.Done()
+ }()
+
+ exports, err := loadExports(ctx, env, pkgName, c.pkg)
+ if err != nil {
+ if Debug {
+ log.Printf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
+ }
+ resc <- nil
+ return
+ }
+
+ // If it doesn't have the right
+ // symbols, send nil to mean no match.
+ for symbol := range symbols {
+ if !exports[symbol] {
+ resc <- nil
+ return
+ }
+ }
+ resc <- c.pkg
+ }(c, rescv[i])
+ }
+ }()
+
+ for _, resc := range rescv {
+ pkg := <-resc
+ if pkg == nil {
+ continue
+ }
+ return pkg, nil
+ }
+ return nil, nil
+}
+
+// pkgIsCandidate reports whether pkg is a candidate for satisfying a
+// reference to pkgIdent in the file named by filename.
+//
+// This check is purely lexical and is meant to be as fast as possible
+// because it's run over all $GOPATH directories to filter out poor
+// candidates in order to limit the CPU and I/O later parsing the
+// exports in candidate packages.
+//
+// filename is the file being formatted.
+// pkgIdent is the package being searched for, like "client" (if
+// searching for "client.New")
+func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
+ // Check "internal" and "vendor" visibility:
+ if !canUse(filename, pkg.dir) {
+ return false
+ }
+
+ // Speed optimization to minimize disk I/O:
+ // the last two components on disk must contain the
+ // package name somewhere.
+ //
+ // This permits mismatch naming like directory
+ // "go-foo" being package "foo", or "pkg.v3" being "pkg",
+ // or directory "google.golang.org/api/cloudbilling/v1"
+ // being package "cloudbilling", but doesn't
+ // permit a directory "foo" to be package
+ // "bar", which is strongly discouraged
+ // anyway. There's no reason goimports needs
+ // to be slow just to accommodate that.
+ lastTwo := lastTwoComponents(pkg.importPathShort)
+ if strings.Contains(lastTwo, pkgIdent) {
+ return true
+ }
+ if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
+ lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
+ if strings.Contains(lastTwo, pkgIdent) {
+ return true
+ }
+ }
+
+ return false
+}
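+// For example, the identifier "bar" is a candidate for an import path ending
+// in ".../foo/go-bar" (its last two components contain "bar"), while a path
+// ending in ".../foo/baz" is rejected without any disk I/O.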
+
+func hasHyphenOrUpperASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if b == '-' || ('A' <= b && b <= 'Z') {
+ return true
+ }
+ }
+ return false
+}
+
+func lowerASCIIAndRemoveHyphen(s string) (ret string) {
+ buf := make([]byte, 0, len(s))
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case b == '-':
+ continue
+ case 'A' <= b && b <= 'Z':
+ buf = append(buf, b+('a'-'A'))
+ default:
+ buf = append(buf, b)
+ }
+ }
+ return string(buf)
+}
+
+// canUse reports whether the package in dir is usable from filename,
+// respecting the Go "internal" and "vendor" visibility rules.
+func canUse(filename, dir string) bool {
+ // Fast path check, before any allocations. If it doesn't contain vendor
+ // or internal, it's not tricky:
+ // Note that this can false-negative on directories like "notinternal",
+ // but we check it correctly below. This is just a fast path.
+ if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") {
+ return true
+ }
+
+ dirSlash := filepath.ToSlash(dir)
+ if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") {
+ return true
+ }
+ // A vendor or internal directory is only visible from children of its parent.
+ // That means the path from the current directory to the target directory
+ // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal
+ // or bar/vendor or bar/internal.
+ // After stripping all the leading ../, the only okay place to see vendor or internal
+ // is at the very beginning of the path.
+ absfile, err := filepath.Abs(filename)
+ if err != nil {
+ return false
+ }
+ absdir, err := filepath.Abs(dir)
+ if err != nil {
+ return false
+ }
+ rel, err := filepath.Rel(absfile, absdir)
+ if err != nil {
+ return false
+ }
+ relSlash := filepath.ToSlash(rel)
+ if i := strings.LastIndex(relSlash, "../"); i >= 0 {
+ relSlash = relSlash[i+len("../"):]
+ }
+ return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
+}
+
+// lastTwoComponents returns at most the last two path components
+// of v, using either / or \ as the path separator.
+func lastTwoComponents(v string) string {
+ nslash := 0
+ for i := len(v) - 1; i >= 0; i-- {
+ if v[i] == '/' || v[i] == '\\' {
+ nslash++
+ if nslash == 2 {
+ return v[i:]
+ }
+ }
+ }
+ return v
+}
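+// For example, lastTwoComponents("a/b/c") returns "/b/c" (the leading
+// separator is kept), and lastTwoComponents("a") returns "a" unchanged.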
+
+type visitFn func(node ast.Node) ast.Visitor
+
+func (fn visitFn) Visit(node ast.Node) ast.Visitor {
+ return fn(node)
+}
diff --git a/vendor/golang.org/x/tools/imports/imports.go b/vendor/golang.org/x/tools/imports/imports.go
new file mode 100644
index 000000000..07101cb80
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/imports.go
@@ -0,0 +1,315 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkstdlib.go
+
+// Package imports implements a Go pretty-printer (like package "go/format")
+// that also adds or removes import statements as necessary.
+package imports // import "golang.org/x/tools/imports"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/format"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// Options specifies options for processing files.
+type Options struct {
+ Fragment bool // Accept fragment of a source file (no package statement)
+ AllErrors bool // Report all errors (not just the first 10 on different lines)
+
+ Comments bool // Print comments (true if nil *Options provided)
+ TabIndent bool // Use tabs for indent (true if nil *Options provided)
+ TabWidth int // Tab width (8 if nil *Options provided)
+
+ FormatOnly bool // Disable the insertion and deletion of imports
+}
+
+// Process formats and adjusts imports for the provided file.
+// If opt is nil the defaults are used.
+//
+// Note that filename's directory influences which imports can be chosen,
+// so it is important that filename be accurate.
+// To process data ``as if'' it were in filename, pass the data as a non-nil src.
+func Process(filename string, src []byte, opt *Options) ([]byte, error) {
+ env := &fixEnv{GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT}
+ return process(filename, src, opt, env)
+}
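+// A minimal usage sketch (the file name and source below are hypothetical):
+//
+//	src := []byte("package main\n\nfunc main() { fmt.Println(\"hi\") }\n")
+//	out, err := Process("example.go", src, nil)
+//	// On success, out holds the gofmt-formatted source with an
+//	// `import "fmt"` declaration added.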
+
+func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, error) {
+ if opt == nil {
+ opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
+ }
+ if src == nil {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ src = b
+ }
+
+ fileSet := token.NewFileSet()
+ file, adjust, err := parse(fileSet, filename, src, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ if !opt.FormatOnly {
+ if err := fixImports(fileSet, file, filename, env); err != nil {
+ return nil, err
+ }
+ }
+
+ sortImports(fileSet, file)
+ imps := astutil.Imports(fileSet, file)
+ var spacesBefore []string // import paths we need spaces before
+ for _, impSection := range imps {
+ // Within each block of contiguous imports, see if any
+ // import lines are in different group numbers. If so,
+ // we'll need to put a space between them so it's
+ // compatible with gofmt.
+ lastGroup := -1
+ for _, importSpec := range impSection {
+ importPath, _ := strconv.Unquote(importSpec.Path.Value)
+ groupNum := importGroup(importPath)
+ if groupNum != lastGroup && lastGroup != -1 {
+ spacesBefore = append(spacesBefore, importPath)
+ }
+ lastGroup = groupNum
+ }
+
+ }
+
+ printerMode := printer.UseSpaces
+ if opt.TabIndent {
+ printerMode |= printer.TabIndent
+ }
+ printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
+
+ var buf bytes.Buffer
+ err = printConfig.Fprint(&buf, fileSet, file)
+ if err != nil {
+ return nil, err
+ }
+ out := buf.Bytes()
+ if adjust != nil {
+ out = adjust(src, out)
+ }
+ if len(spacesBefore) > 0 {
+ out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ out, err = format.Source(out)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// parse parses src, which was read from filename,
+// as a Go source file or statement list.
+func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
+ parserMode := parser.Mode(0)
+ if opt.Comments {
+ parserMode |= parser.ParseComments
+ }
+ if opt.AllErrors {
+ parserMode |= parser.AllErrors
+ }
+
+ // Try as whole source file.
+ file, err := parser.ParseFile(fset, filename, src, parserMode)
+ if err == nil {
+ return file, nil, nil
+ }
+ // If the error is that the source file didn't begin with a
+ // package line and we accept fragmented input, fall through to
+ // try as a source fragment. Stop and return on any other error.
+ if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
+ return nil, nil, err
+ }
+
+ // If this is a declaration list, make it a source file
+ // by inserting a package clause.
+ // Insert using a ;, not a newline, so that parse errors are on
+ // the correct line.
+ const prefix = "package main;"
+ psrc := append([]byte(prefix), src...)
+ file, err = parser.ParseFile(fset, filename, psrc, parserMode)
+ if err == nil {
+ // Gofmt will turn the ; into a \n.
+ // Do that ourselves now and update the file contents,
+ // so that positions and line numbers are correct going forward.
+ psrc[len(prefix)-1] = '\n'
+ fset.File(file.Package).SetLinesForContent(psrc)
+
+ // If a main function exists, we will assume this is a main
+ // package and leave the file.
+ if containsMainFunc(file) {
+ return file, nil, nil
+ }
+
+ adjust := func(orig, src []byte) []byte {
+ // Remove the package clause.
+ src = src[len(prefix):]
+ return matchSpace(orig, src)
+ }
+ return file, adjust, nil
+ }
+ // If the error is that the source file didn't begin with a
+ // declaration, fall through to try as a statement list.
+ // Stop and return on any other error.
+ if !strings.Contains(err.Error(), "expected declaration") {
+ return nil, nil, err
+ }
+
+ // If this is a statement list, make it a source file
+ // by inserting a package clause and turning the list
+ // into a function body. This handles expressions too.
+ // Insert using a ;, not a newline, so that the line numbers
+ // in fsrc match the ones in src.
+ fsrc := append(append([]byte("package p; func _() {"), src...), '}')
+ file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
+ if err == nil {
+ adjust := func(orig, src []byte) []byte {
+ // Remove the wrapping.
+ // Gofmt has turned the ; into a \n\n.
+ src = src[len("package p\n\nfunc _() {"):]
+ src = src[:len(src)-len("}\n")]
+ // Gofmt has also indented the function body one level.
+ // Remove that indent.
+ src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
+ return matchSpace(orig, src)
+ }
+ return file, adjust, nil
+ }
+
+ // Failed, and out of options.
+ return nil, nil, err
+}
+
+// containsMainFunc reports whether the file contains a function declaration
+// with the signature 'func main()'.
+func containsMainFunc(file *ast.File) bool {
+ for _, decl := range file.Decls {
+ if f, ok := decl.(*ast.FuncDecl); ok {
+ if f.Name.Name != "main" {
+ continue
+ }
+
+ if len(f.Type.Params.List) != 0 {
+ continue
+ }
+
+ if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
+ continue
+ }
+
+ return true
+ }
+ }
+
+ return false
+}
+
+func cutSpace(b []byte) (before, middle, after []byte) {
+ i := 0
+ for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
+ i++
+ }
+ j := len(b)
+ for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
+ j--
+ }
+ if i <= j {
+ return b[:i], b[i:j], b[j:]
+ }
+ return nil, nil, b[j:]
+}
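+// For example, cutSpace([]byte(" \nhello \n")) returns before=" \n",
+// middle="hello", and after=" \n".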
+
+// matchSpace reformats src to use the same space context as orig.
+// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
+// 2) matchSpace copies the indentation of the first non-blank line in orig
+// to every non-blank line in src.
+// 3) matchSpace copies the trailing space from orig and uses it in place
+// of src's trailing space.
+func matchSpace(orig []byte, src []byte) []byte {
+ before, _, after := cutSpace(orig)
+ i := bytes.LastIndex(before, []byte{'\n'})
+ before, indent := before[:i+1], before[i+1:]
+
+ _, src, _ = cutSpace(src)
+
+ var b bytes.Buffer
+ b.Write(before)
+ for len(src) > 0 {
+ line := src
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, src = line[:i+1], line[i+1:]
+ } else {
+ src = nil
+ }
+ if len(line) > 0 && line[0] != '\n' { // not blank
+ b.Write(indent)
+ }
+ b.Write(line)
+ }
+ b.Write(after)
+ return b.Bytes()
+}
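+// For example, matchSpace([]byte("\n\tx := 1\n"), []byte("x := 1\ny := 2\n"))
+// returns "\n\tx := 1\n\ty := 2\n": the leading blank line and the tab indent
+// come from orig, and orig's trailing newline replaces src's trailing space.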
+
+var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
+
+func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
+ var out bytes.Buffer
+ in := bufio.NewReader(r)
+ inImports := false
+ done := false
+ for {
+ s, err := in.ReadString('\n')
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+
+ if !inImports && !done && strings.HasPrefix(s, "import") {
+ inImports = true
+ }
+ if inImports && (strings.HasPrefix(s, "var") ||
+ strings.HasPrefix(s, "func") ||
+ strings.HasPrefix(s, "const") ||
+ strings.HasPrefix(s, "type")) {
+ done = true
+ inImports = false
+ }
+ if inImports && len(breaks) > 0 {
+ if m := impLine.FindStringSubmatch(s); m != nil {
+ if m[1] == breaks[0] {
+ out.WriteByte('\n')
+ breaks = breaks[1:]
+ }
+ }
+ }
+
+ fmt.Fprint(&out, s)
+ }
+ return out.Bytes(), nil
+}
diff --git a/vendor/golang.org/x/tools/imports/mod.go b/vendor/golang.org/x/tools/imports/mod.go
new file mode 100644
index 000000000..018c43ce8
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/mod.go
@@ -0,0 +1,355 @@
+package imports
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/gopathwalk"
+ "golang.org/x/tools/internal/module"
+)
+
+// moduleResolver implements resolver for modules using the go command as little
+// as feasible.
+type moduleResolver struct {
+ env *fixEnv
+
+ initialized bool
+ main *moduleJSON
+ modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
+ modsByDir []*moduleJSON // ...or Dir.
+}
+
+type moduleJSON struct {
+ Path string // module path
+ Version string // module version
+ Versions []string // available module versions (with -versions)
+ Replace *moduleJSON // replaced by this module
+ Time *time.Time // time version was created
+ Update *moduleJSON // available update, if any (with -u)
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file for this module, if any
+ Error *moduleErrorJSON // error loading module
+}
+
+type moduleErrorJSON struct {
+ Err string // the error itself
+}
+
+func (r *moduleResolver) init() error {
+ if r.initialized {
+ return nil
+ }
+ stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
+ if err != nil {
+ return err
+ }
+ for dec := json.NewDecoder(stdout); dec.More(); {
+ mod := &moduleJSON{}
+ if err := dec.Decode(mod); err != nil {
+ return err
+ }
+ if mod.Dir == "" {
+ if Debug {
+ log.Printf("module %v has not been downloaded and will be ignored", mod.Path)
+ }
+ // Can't do anything with a module that's not downloaded.
+ continue
+ }
+ r.modsByModPath = append(r.modsByModPath, mod)
+ r.modsByDir = append(r.modsByDir, mod)
+ if mod.Main {
+ r.main = mod
+ }
+ }
+
+ sort.Slice(r.modsByModPath, func(i, j int) bool {
+ count := func(x int) int {
+ return strings.Count(r.modsByModPath[x].Path, "/")
+ }
+ return count(j) < count(i) // descending order
+ })
+ sort.Slice(r.modsByDir, func(i, j int) bool {
+ count := func(x int) int {
+ return strings.Count(r.modsByDir[x].Dir, "/")
+ }
+ return count(j) < count(i) // descending order
+ })
+
+ r.initialized = true
+ return nil
+}
+
+// findPackage returns the module and directory that contains the package at
+// the given import path, or returns nil, "" if no module is in scope.
+func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
+ for _, m := range r.modsByModPath {
+ if !strings.HasPrefix(importPath, m.Path) {
+ continue
+ }
+ pathInModule := importPath[len(m.Path):]
+ pkgDir := filepath.Join(m.Dir, pathInModule)
+ if dirIsNestedModule(pkgDir, m) {
+ continue
+ }
+
+ pkgFiles, err := ioutil.ReadDir(pkgDir)
+ if err != nil {
+ continue
+ }
+
+ // A module only contains a package if it has buildable go
+ // files in that directory. If not, it could be provided by an
+ // outer module. See #29736.
+ for _, fi := range pkgFiles {
+ if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
+ return m, pkgDir
+ }
+ }
+ }
+ return nil, ""
+}
+
+// findModuleByDir returns the module that contains dir, or nil if no such
+// module is in scope.
+func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
+ // This is quite tricky and may not be correct. dir could be:
+ // - a package in the main module.
+ // - a replace target underneath the main module's directory.
+ // - a nested module in the above.
+ // - a replace target somewhere totally random.
+ // - a nested module in the above.
+ // - in the mod cache.
+ // - in /vendor/ in -mod=vendor mode.
+ // - nested module? Dunno.
+ // Rumor has it that replace targets cannot contain other replace targets.
+ for _, m := range r.modsByDir {
+ if !strings.HasPrefix(dir, m.Dir) {
+ continue
+ }
+
+ if dirIsNestedModule(dir, m) {
+ continue
+ }
+
+ return m
+ }
+ return nil
+}
+
+// dirIsNestedModule reports whether dir is contained in a nested module
+// underneath mod, not actually in mod.
+func dirIsNestedModule(dir string, mod *moduleJSON) bool {
+ if !strings.HasPrefix(dir, mod.Dir) {
+ return false
+ }
+ mf := findModFile(dir)
+ if mf == "" {
+ return false
+ }
+ return filepath.Dir(mf) != mod.Dir
+}
+
+func findModFile(dir string) string {
+ for {
+ f := filepath.Join(dir, "go.mod")
+ info, err := os.Stat(f)
+ if err == nil && !info.IsDir() {
+ return f
+ }
+ d := filepath.Dir(dir)
+ if len(d) >= len(dir) {
+ return "" // reached top of file system, no go.mod
+ }
+ dir = d
+ }
+}
+
+func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+ if err := r.init(); err != nil {
+ return nil, err
+ }
+ names := map[string]string{}
+ for _, path := range importPaths {
+ _, packageDir := r.findPackage(path)
+ if packageDir == "" {
+ continue
+ }
+ name, err := packageDirToName(packageDir)
+ if err != nil {
+ continue
+ }
+ names[path] = name
+ }
+ return names, nil
+}
+
+func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
+ if err := r.init(); err != nil {
+ return nil, err
+ }
+
+ // Walk GOROOT, GOPATH/pkg/mod, and the main module.
+ roots := []gopathwalk.Root{
+ {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
+ }
+ if r.main != nil {
+ roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
+ }
+ for _, p := range filepath.SplitList(r.env.GOPATH) {
+ roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
+ }
+
+ // Walk replace targets, just in case they're not in any of the above.
+ for _, mod := range r.modsByModPath {
+ if mod.Replace != nil {
+ roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
+ }
+ }
+
+ var result []*pkg
+ dupCheck := make(map[string]bool)
+ var mu sync.Mutex
+
+ gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
+ mu.Lock()
+ defer mu.Unlock()
+
+ if _, dup := dupCheck[dir]; dup {
+ return
+ }
+
+ dupCheck[dir] = true
+
+ subdir := ""
+ if dir != root.Path {
+ subdir = dir[len(root.Path)+len("/"):]
+ }
+ importPath := filepath.ToSlash(subdir)
+ if strings.HasPrefix(importPath, "vendor/") {
+ // Ignore vendor dirs. If -mod=vendor is on, then things
+ // should mostly just work, but when it's not, vendor/
+ // is a mess. There's no easy way to tell if it's on.
+ // We can still find things in the mod cache and
+ // map them into /vendor when -mod=vendor is on.
+ return
+ }
+ switch root.Type {
+ case gopathwalk.RootCurrentModule:
+ importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
+ case gopathwalk.RootModuleCache:
+ matches := modCacheRegexp.FindStringSubmatch(subdir)
+ modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
+ if err != nil {
+ if Debug {
+ log.Printf("decoding module cache path %q: %v", subdir, err)
+ }
+ return
+ }
+ importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
+ case gopathwalk.RootGOROOT:
+ importPath = subdir
+ }
+
+ // Check if the directory is underneath a module that's in scope.
+ if mod := r.findModuleByDir(dir); mod != nil {
+ // It is. If dir is the target of a replace directive,
+ // our guessed import path is wrong. Use the real one.
+ if mod.Dir == dir {
+ importPath = mod.Path
+ } else {
+ dirInMod := dir[len(mod.Dir)+len("/"):]
+ importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
+ }
+ } else {
+ // The package is in an unknown module. Check that it's
+ // not obviously impossible to import.
+ var modFile string
+ switch root.Type {
+ case gopathwalk.RootModuleCache:
+ matches := modCacheRegexp.FindStringSubmatch(subdir)
+ modFile = filepath.Join(matches[1], "@", matches[2], "go.mod")
+ default:
+ modFile = findModFile(dir)
+ }
+
+ modBytes, err := ioutil.ReadFile(modFile)
+ if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) {
+ // The module's declared path does not match
+ // its expected path. It probably needs a
+ // replace directive we don't have.
+ return
+ }
+ }
+ // We may have discovered a package that has a different version
+ // in scope already. Canonicalize to that one if possible.
+ if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
+ dir = canonicalDir
+ }
+
+ result = append(result, &pkg{
+ importPathShort: VendorlessPath(importPath),
+ dir: dir,
+ })
+ }, gopathwalk.Options{Debug: Debug, ModulesEnabled: true})
+ return result, nil
+}
+
+// modCacheRegexp splits a path in a module cache into module, module version, and package.
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
+
+var (
+ slashSlash = []byte("//")
+ moduleStr = []byte("module")
+)
+
+// modulePath returns the module path from the gomod file text.
+// If it cannot find a module path, it returns an empty string.
+// It is tolerant of unrelated problems in the go.mod file.
+//
+// Copied from cmd/go/internal/modfile.
+func modulePath(mod []byte) string {
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+ return "" // missing module path
+}
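+// For example, given a hypothetical go.mod body of:
+//
+//	// example module
+//	module example.com/m
+//
+// modulePath returns "example.com/m". A quoted form such as
+// module "example.com/m" is unquoted before being returned.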
diff --git a/vendor/golang.org/x/tools/imports/sortimports.go b/vendor/golang.org/x/tools/imports/sortimports.go
new file mode 100644
index 000000000..f3dd56c7a
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/sortimports.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hacked up copy of go/ast/import.go
+
+package imports
+
+import (
+ "go/ast"
+ "go/token"
+ "sort"
+ "strconv"
+)
+
+// sortImports sorts runs of consecutive import lines in import blocks in f.
+// It also removes duplicate imports when it is possible to do so without data loss.
+func sortImports(fset *token.FileSet, f *ast.File) {
+ for i, d := range f.Decls {
+ d, ok := d.(*ast.GenDecl)
+ if !ok || d.Tok != token.IMPORT {
+ // Not an import declaration, so we're done.
+ // Imports are always first.
+ break
+ }
+
+ if len(d.Specs) == 0 {
+ // Empty import block, remove it.
+ f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
+ }
+
+ if !d.Lparen.IsValid() {
+ // Not a block: sorted by default.
+ continue
+ }
+
+ // Identify and sort runs of specs on successive lines.
+ i := 0
+ specs := d.Specs[:0]
+ for j, s := range d.Specs {
+ if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+ // j begins a new run. End this one.
+ specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
+ i = j
+ }
+ }
+ specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
+ d.Specs = specs
+
+ // Deduping can leave a blank line before the rparen; clean that up.
+ if len(d.Specs) > 0 {
+ lastSpec := d.Specs[len(d.Specs)-1]
+ lastLine := fset.Position(lastSpec.Pos()).Line
+ if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
+ fset.File(d.Rparen).MergeLine(rParenLine - 1)
+ }
+ }
+ }
+}
+
+func importPath(s ast.Spec) string {
+ t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+func importName(s ast.Spec) string {
+ n := s.(*ast.ImportSpec).Name
+ if n == nil {
+ return ""
+ }
+ return n.Name
+}
+
+func importComment(s ast.Spec) string {
+ c := s.(*ast.ImportSpec).Comment
+ if c == nil {
+ return ""
+ }
+ return c.Text()
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+func collapse(prev, next ast.Spec) bool {
+ if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+ return false
+ }
+ return prev.(*ast.ImportSpec).Comment == nil
+}
+
+type posSpan struct {
+ Start token.Pos
+ End token.Pos
+}
+
+func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
+ // Can't short-circuit here even if specs are already sorted,
+ // since they might yet need deduplication.
+ // A lone import, however, may be safely ignored.
+ if len(specs) <= 1 {
+ return specs
+ }
+
+ // Record positions for specs.
+ pos := make([]posSpan, len(specs))
+ for i, s := range specs {
+ pos[i] = posSpan{s.Pos(), s.End()}
+ }
+
+ // Identify comments in this range.
+ // Any comment from pos[0].Start to the final line counts.
+ lastLine := fset.Position(pos[len(pos)-1].End).Line
+ cstart := len(f.Comments)
+ cend := len(f.Comments)
+ for i, g := range f.Comments {
+ if g.Pos() < pos[0].Start {
+ continue
+ }
+ if i < cstart {
+ cstart = i
+ }
+ if fset.Position(g.End()).Line > lastLine {
+ cend = i
+ break
+ }
+ }
+ comments := f.Comments[cstart:cend]
+
+ // Assign each comment to the import spec preceding it.
+ importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
+ specIndex := 0
+ for _, g := range comments {
+ for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
+ specIndex++
+ }
+ s := specs[specIndex].(*ast.ImportSpec)
+ importComment[s] = append(importComment[s], g)
+ }
+
+ // Sort the import specs by import path.
+ // Remove duplicates, when possible without data loss.
+ // Reassign the import paths to have the same position sequence.
+ // Reassign each comment to abut the end of its spec.
+ // Sort the comments by new position.
+ sort.Sort(byImportSpec(specs))
+
+ // Dedup. Thanks to our sorting, we can just consider
+ // adjacent pairs of imports.
+ deduped := specs[:0]
+ for i, s := range specs {
+ if i == len(specs)-1 || !collapse(s, specs[i+1]) {
+ deduped = append(deduped, s)
+ } else {
+ p := s.Pos()
+ fset.File(p).MergeLine(fset.Position(p).Line)
+ }
+ }
+ specs = deduped
+
+ // Fix up comment positions
+ for i, s := range specs {
+ s := s.(*ast.ImportSpec)
+ if s.Name != nil {
+ s.Name.NamePos = pos[i].Start
+ }
+ s.Path.ValuePos = pos[i].Start
+ s.EndPos = pos[i].End
+ nextSpecPos := pos[i].End
+
+ for _, g := range importComment[s] {
+ for _, c := range g.List {
+ c.Slash = pos[i].End
+ nextSpecPos = c.End()
+ }
+ }
+ if i < len(specs)-1 {
+ pos[i+1].Start = nextSpecPos
+ pos[i+1].End = nextSpecPos
+ }
+ }
+
+ sort.Sort(byCommentPos(comments))
+
+ // Fixing up the comments can insert blank lines, because the import specs
+ // are on different lines. We remove those blank lines here by merging each
+ // import spec onto the first import spec's line.
+ firstSpecLine := fset.Position(specs[0].Pos()).Line
+ for _, s := range specs[1:] {
+ p := s.Pos()
+ line := fset.File(p).Line(p)
+ for previousLine := line - 1; previousLine >= firstSpecLine; {
+ fset.File(p).MergeLine(previousLine)
+ previousLine--
+ }
+ }
+ return specs
+}
+
+type byImportSpec []ast.Spec // slice of *ast.ImportSpec
+
+func (x byImportSpec) Len() int { return len(x) }
+func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportSpec) Less(i, j int) bool {
+ ipath := importPath(x[i])
+ jpath := importPath(x[j])
+
+ igroup := importGroup(ipath)
+ jgroup := importGroup(jpath)
+ if igroup != jgroup {
+ return igroup < jgroup
+ }
+
+ if ipath != jpath {
+ return ipath < jpath
+ }
+ iname := importName(x[i])
+ jname := importName(x[j])
+
+ if iname != jname {
+ return iname < jname
+ }
+ return importComment(x[i]) < importComment(x[j])
+}
+
+type byCommentPos []*ast.CommentGroup
+
+func (x byCommentPos) Len() int { return len(x) }
+func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
diff --git a/vendor/golang.org/x/tools/imports/zstdlib.go b/vendor/golang.org/x/tools/imports/zstdlib.go
new file mode 100644
index 000000000..d81b8c530
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/zstdlib.go
@@ -0,0 +1,10325 @@
+// Code generated by mkstdlib.go. DO NOT EDIT.
+
+package imports
+
+var stdlib = map[string]map[string]bool{
+ "archive/tar": map[string]bool{
+ "ErrFieldTooLong": true,
+ "ErrHeader": true,
+ "ErrWriteAfterClose": true,
+ "ErrWriteTooLong": true,
+ "FileInfoHeader": true,
+ "Format": true,
+ "FormatGNU": true,
+ "FormatPAX": true,
+ "FormatUSTAR": true,
+ "FormatUnknown": true,
+ "Header": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Reader": true,
+ "TypeBlock": true,
+ "TypeChar": true,
+ "TypeCont": true,
+ "TypeDir": true,
+ "TypeFifo": true,
+ "TypeGNULongLink": true,
+ "TypeGNULongName": true,
+ "TypeGNUSparse": true,
+ "TypeLink": true,
+ "TypeReg": true,
+ "TypeRegA": true,
+ "TypeSymlink": true,
+ "TypeXGlobalHeader": true,
+ "TypeXHeader": true,
+ "Writer": true,
+ },
+ "archive/zip": map[string]bool{
+ "Compressor": true,
+ "Decompressor": true,
+ "Deflate": true,
+ "ErrAlgorithm": true,
+ "ErrChecksum": true,
+ "ErrFormat": true,
+ "File": true,
+ "FileHeader": true,
+ "FileInfoHeader": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "OpenReader": true,
+ "ReadCloser": true,
+ "Reader": true,
+ "RegisterCompressor": true,
+ "RegisterDecompressor": true,
+ "Store": true,
+ "Writer": true,
+ },
+ "bufio": map[string]bool{
+ "ErrAdvanceTooFar": true,
+ "ErrBufferFull": true,
+ "ErrFinalToken": true,
+ "ErrInvalidUnreadByte": true,
+ "ErrInvalidUnreadRune": true,
+ "ErrNegativeAdvance": true,
+ "ErrNegativeCount": true,
+ "ErrTooLong": true,
+ "MaxScanTokenSize": true,
+ "NewReadWriter": true,
+ "NewReader": true,
+ "NewReaderSize": true,
+ "NewScanner": true,
+ "NewWriter": true,
+ "NewWriterSize": true,
+ "ReadWriter": true,
+ "Reader": true,
+ "ScanBytes": true,
+ "ScanLines": true,
+ "ScanRunes": true,
+ "ScanWords": true,
+ "Scanner": true,
+ "SplitFunc": true,
+ "Writer": true,
+ },
+ "bytes": map[string]bool{
+ "Buffer": true,
+ "Compare": true,
+ "Contains": true,
+ "ContainsAny": true,
+ "ContainsRune": true,
+ "Count": true,
+ "Equal": true,
+ "EqualFold": true,
+ "ErrTooLarge": true,
+ "Fields": true,
+ "FieldsFunc": true,
+ "HasPrefix": true,
+ "HasSuffix": true,
+ "Index": true,
+ "IndexAny": true,
+ "IndexByte": true,
+ "IndexFunc": true,
+ "IndexRune": true,
+ "Join": true,
+ "LastIndex": true,
+ "LastIndexAny": true,
+ "LastIndexByte": true,
+ "LastIndexFunc": true,
+ "Map": true,
+ "MinRead": true,
+ "NewBuffer": true,
+ "NewBufferString": true,
+ "NewReader": true,
+ "Reader": true,
+ "Repeat": true,
+ "Replace": true,
+ "ReplaceAll": true,
+ "Runes": true,
+ "Split": true,
+ "SplitAfter": true,
+ "SplitAfterN": true,
+ "SplitN": true,
+ "Title": true,
+ "ToLower": true,
+ "ToLowerSpecial": true,
+ "ToTitle": true,
+ "ToTitleSpecial": true,
+ "ToUpper": true,
+ "ToUpperSpecial": true,
+ "Trim": true,
+ "TrimFunc": true,
+ "TrimLeft": true,
+ "TrimLeftFunc": true,
+ "TrimPrefix": true,
+ "TrimRight": true,
+ "TrimRightFunc": true,
+ "TrimSpace": true,
+ "TrimSuffix": true,
+ },
+ "compress/bzip2": map[string]bool{
+ "NewReader": true,
+ "StructuralError": true,
+ },
+ "compress/flate": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "CorruptInputError": true,
+ "DefaultCompression": true,
+ "HuffmanOnly": true,
+ "InternalError": true,
+ "NewReader": true,
+ "NewReaderDict": true,
+ "NewWriter": true,
+ "NewWriterDict": true,
+ "NoCompression": true,
+ "ReadError": true,
+ "Reader": true,
+ "Resetter": true,
+ "WriteError": true,
+ "Writer": true,
+ },
+ "compress/gzip": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "DefaultCompression": true,
+ "ErrChecksum": true,
+ "ErrHeader": true,
+ "Header": true,
+ "HuffmanOnly": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "NewWriterLevel": true,
+ "NoCompression": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "compress/lzw": map[string]bool{
+ "LSB": true,
+ "MSB": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Order": true,
+ },
+ "compress/zlib": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "DefaultCompression": true,
+ "ErrChecksum": true,
+ "ErrDictionary": true,
+ "ErrHeader": true,
+ "HuffmanOnly": true,
+ "NewReader": true,
+ "NewReaderDict": true,
+ "NewWriter": true,
+ "NewWriterLevel": true,
+ "NewWriterLevelDict": true,
+ "NoCompression": true,
+ "Resetter": true,
+ "Writer": true,
+ },
+ "container/heap": map[string]bool{
+ "Fix": true,
+ "Init": true,
+ "Interface": true,
+ "Pop": true,
+ "Push": true,
+ "Remove": true,
+ },
+ "container/list": map[string]bool{
+ "Element": true,
+ "List": true,
+ "New": true,
+ },
+ "container/ring": map[string]bool{
+ "New": true,
+ "Ring": true,
+ },
+ "context": map[string]bool{
+ "Background": true,
+ "CancelFunc": true,
+ "Canceled": true,
+ "Context": true,
+ "DeadlineExceeded": true,
+ "TODO": true,
+ "WithCancel": true,
+ "WithDeadline": true,
+ "WithTimeout": true,
+ "WithValue": true,
+ },
+ "crypto": map[string]bool{
+ "BLAKE2b_256": true,
+ "BLAKE2b_384": true,
+ "BLAKE2b_512": true,
+ "BLAKE2s_256": true,
+ "Decrypter": true,
+ "DecrypterOpts": true,
+ "Hash": true,
+ "MD4": true,
+ "MD5": true,
+ "MD5SHA1": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "RIPEMD160": true,
+ "RegisterHash": true,
+ "SHA1": true,
+ "SHA224": true,
+ "SHA256": true,
+ "SHA384": true,
+ "SHA3_224": true,
+ "SHA3_256": true,
+ "SHA3_384": true,
+ "SHA3_512": true,
+ "SHA512": true,
+ "SHA512_224": true,
+ "SHA512_256": true,
+ "Signer": true,
+ "SignerOpts": true,
+ },
+ "crypto/aes": map[string]bool{
+ "BlockSize": true,
+ "KeySizeError": true,
+ "NewCipher": true,
+ },
+ "crypto/cipher": map[string]bool{
+ "AEAD": true,
+ "Block": true,
+ "BlockMode": true,
+ "NewCBCDecrypter": true,
+ "NewCBCEncrypter": true,
+ "NewCFBDecrypter": true,
+ "NewCFBEncrypter": true,
+ "NewCTR": true,
+ "NewGCM": true,
+ "NewGCMWithNonceSize": true,
+ "NewGCMWithTagSize": true,
+ "NewOFB": true,
+ "Stream": true,
+ "StreamReader": true,
+ "StreamWriter": true,
+ },
+ "crypto/des": map[string]bool{
+ "BlockSize": true,
+ "KeySizeError": true,
+ "NewCipher": true,
+ "NewTripleDESCipher": true,
+ },
+ "crypto/dsa": map[string]bool{
+ "ErrInvalidPublicKey": true,
+ "GenerateKey": true,
+ "GenerateParameters": true,
+ "L1024N160": true,
+ "L2048N224": true,
+ "L2048N256": true,
+ "L3072N256": true,
+ "ParameterSizes": true,
+ "Parameters": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "Sign": true,
+ "Verify": true,
+ },
+ "crypto/ecdsa": map[string]bool{
+ "GenerateKey": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "Sign": true,
+ "Verify": true,
+ },
+ "crypto/elliptic": map[string]bool{
+ "Curve": true,
+ "CurveParams": true,
+ "GenerateKey": true,
+ "Marshal": true,
+ "P224": true,
+ "P256": true,
+ "P384": true,
+ "P521": true,
+ "Unmarshal": true,
+ },
+ "crypto/hmac": map[string]bool{
+ "Equal": true,
+ "New": true,
+ },
+ "crypto/md5": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "Size": true,
+ "Sum": true,
+ },
+ "crypto/rand": map[string]bool{
+ "Int": true,
+ "Prime": true,
+ "Read": true,
+ "Reader": true,
+ },
+ "crypto/rc4": map[string]bool{
+ "Cipher": true,
+ "KeySizeError": true,
+ "NewCipher": true,
+ },
+ "crypto/rsa": map[string]bool{
+ "CRTValue": true,
+ "DecryptOAEP": true,
+ "DecryptPKCS1v15": true,
+ "DecryptPKCS1v15SessionKey": true,
+ "EncryptOAEP": true,
+ "EncryptPKCS1v15": true,
+ "ErrDecryption": true,
+ "ErrMessageTooLong": true,
+ "ErrVerification": true,
+ "GenerateKey": true,
+ "GenerateMultiPrimeKey": true,
+ "OAEPOptions": true,
+ "PKCS1v15DecryptOptions": true,
+ "PSSOptions": true,
+ "PSSSaltLengthAuto": true,
+ "PSSSaltLengthEqualsHash": true,
+ "PrecomputedValues": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "SignPKCS1v15": true,
+ "SignPSS": true,
+ "VerifyPKCS1v15": true,
+ "VerifyPSS": true,
+ },
+ "crypto/sha1": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "Size": true,
+ "Sum": true,
+ },
+ "crypto/sha256": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "New224": true,
+ "Size": true,
+ "Size224": true,
+ "Sum224": true,
+ "Sum256": true,
+ },
+ "crypto/sha512": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "New384": true,
+ "New512_224": true,
+ "New512_256": true,
+ "Size": true,
+ "Size224": true,
+ "Size256": true,
+ "Size384": true,
+ "Sum384": true,
+ "Sum512": true,
+ "Sum512_224": true,
+ "Sum512_256": true,
+ },
+ "crypto/subtle": map[string]bool{
+ "ConstantTimeByteEq": true,
+ "ConstantTimeCompare": true,
+ "ConstantTimeCopy": true,
+ "ConstantTimeEq": true,
+ "ConstantTimeLessOrEq": true,
+ "ConstantTimeSelect": true,
+ },
+ "crypto/tls": map[string]bool{
+ "Certificate": true,
+ "CertificateRequestInfo": true,
+ "Client": true,
+ "ClientAuthType": true,
+ "ClientHelloInfo": true,
+ "ClientSessionCache": true,
+ "ClientSessionState": true,
+ "Config": true,
+ "Conn": true,
+ "ConnectionState": true,
+ "CurveID": true,
+ "CurveP256": true,
+ "CurveP384": true,
+ "CurveP521": true,
+ "Dial": true,
+ "DialWithDialer": true,
+ "ECDSAWithP256AndSHA256": true,
+ "ECDSAWithP384AndSHA384": true,
+ "ECDSAWithP521AndSHA512": true,
+ "ECDSAWithSHA1": true,
+ "Listen": true,
+ "LoadX509KeyPair": true,
+ "NewLRUClientSessionCache": true,
+ "NewListener": true,
+ "NoClientCert": true,
+ "PKCS1WithSHA1": true,
+ "PKCS1WithSHA256": true,
+ "PKCS1WithSHA384": true,
+ "PKCS1WithSHA512": true,
+ "PSSWithSHA256": true,
+ "PSSWithSHA384": true,
+ "PSSWithSHA512": true,
+ "RecordHeaderError": true,
+ "RenegotiateFreelyAsClient": true,
+ "RenegotiateNever": true,
+ "RenegotiateOnceAsClient": true,
+ "RenegotiationSupport": true,
+ "RequestClientCert": true,
+ "RequireAndVerifyClientCert": true,
+ "RequireAnyClientCert": true,
+ "Server": true,
+ "SignatureScheme": true,
+ "TLS_AES_128_GCM_SHA256": true,
+ "TLS_AES_256_GCM_SHA384": true,
+ "TLS_CHACHA20_POLY1305_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": true,
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": true,
+ "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": true,
+ "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": true,
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": true,
+ "TLS_ECDHE_RSA_WITH_RC4_128_SHA": true,
+ "TLS_FALLBACK_SCSV": true,
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_128_CBC_SHA256": true,
+ "TLS_RSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_256_GCM_SHA384": true,
+ "TLS_RSA_WITH_RC4_128_SHA": true,
+ "VerifyClientCertIfGiven": true,
+ "VersionSSL30": true,
+ "VersionTLS10": true,
+ "VersionTLS11": true,
+ "VersionTLS12": true,
+ "VersionTLS13": true,
+ "X25519": true,
+ "X509KeyPair": true,
+ },
+ "crypto/x509": map[string]bool{
+ "CANotAuthorizedForExtKeyUsage": true,
+ "CANotAuthorizedForThisName": true,
+ "CertPool": true,
+ "Certificate": true,
+ "CertificateInvalidError": true,
+ "CertificateRequest": true,
+ "ConstraintViolationError": true,
+ "CreateCertificate": true,
+ "CreateCertificateRequest": true,
+ "DSA": true,
+ "DSAWithSHA1": true,
+ "DSAWithSHA256": true,
+ "DecryptPEMBlock": true,
+ "ECDSA": true,
+ "ECDSAWithSHA1": true,
+ "ECDSAWithSHA256": true,
+ "ECDSAWithSHA384": true,
+ "ECDSAWithSHA512": true,
+ "EncryptPEMBlock": true,
+ "ErrUnsupportedAlgorithm": true,
+ "Expired": true,
+ "ExtKeyUsage": true,
+ "ExtKeyUsageAny": true,
+ "ExtKeyUsageClientAuth": true,
+ "ExtKeyUsageCodeSigning": true,
+ "ExtKeyUsageEmailProtection": true,
+ "ExtKeyUsageIPSECEndSystem": true,
+ "ExtKeyUsageIPSECTunnel": true,
+ "ExtKeyUsageIPSECUser": true,
+ "ExtKeyUsageMicrosoftCommercialCodeSigning": true,
+ "ExtKeyUsageMicrosoftKernelCodeSigning": true,
+ "ExtKeyUsageMicrosoftServerGatedCrypto": true,
+ "ExtKeyUsageNetscapeServerGatedCrypto": true,
+ "ExtKeyUsageOCSPSigning": true,
+ "ExtKeyUsageServerAuth": true,
+ "ExtKeyUsageTimeStamping": true,
+ "HostnameError": true,
+ "IncompatibleUsage": true,
+ "IncorrectPasswordError": true,
+ "InsecureAlgorithmError": true,
+ "InvalidReason": true,
+ "IsEncryptedPEMBlock": true,
+ "KeyUsage": true,
+ "KeyUsageCRLSign": true,
+ "KeyUsageCertSign": true,
+ "KeyUsageContentCommitment": true,
+ "KeyUsageDataEncipherment": true,
+ "KeyUsageDecipherOnly": true,
+ "KeyUsageDigitalSignature": true,
+ "KeyUsageEncipherOnly": true,
+ "KeyUsageKeyAgreement": true,
+ "KeyUsageKeyEncipherment": true,
+ "MD2WithRSA": true,
+ "MD5WithRSA": true,
+ "MarshalECPrivateKey": true,
+ "MarshalPKCS1PrivateKey": true,
+ "MarshalPKCS1PublicKey": true,
+ "MarshalPKCS8PrivateKey": true,
+ "MarshalPKIXPublicKey": true,
+ "NameConstraintsWithoutSANs": true,
+ "NameMismatch": true,
+ "NewCertPool": true,
+ "NotAuthorizedToSign": true,
+ "PEMCipher": true,
+ "PEMCipher3DES": true,
+ "PEMCipherAES128": true,
+ "PEMCipherAES192": true,
+ "PEMCipherAES256": true,
+ "PEMCipherDES": true,
+ "ParseCRL": true,
+ "ParseCertificate": true,
+ "ParseCertificateRequest": true,
+ "ParseCertificates": true,
+ "ParseDERCRL": true,
+ "ParseECPrivateKey": true,
+ "ParsePKCS1PrivateKey": true,
+ "ParsePKCS1PublicKey": true,
+ "ParsePKCS8PrivateKey": true,
+ "ParsePKIXPublicKey": true,
+ "PublicKeyAlgorithm": true,
+ "RSA": true,
+ "SHA1WithRSA": true,
+ "SHA256WithRSA": true,
+ "SHA256WithRSAPSS": true,
+ "SHA384WithRSA": true,
+ "SHA384WithRSAPSS": true,
+ "SHA512WithRSA": true,
+ "SHA512WithRSAPSS": true,
+ "SignatureAlgorithm": true,
+ "SystemCertPool": true,
+ "SystemRootsError": true,
+ "TooManyConstraints": true,
+ "TooManyIntermediates": true,
+ "UnconstrainedName": true,
+ "UnhandledCriticalExtension": true,
+ "UnknownAuthorityError": true,
+ "UnknownPublicKeyAlgorithm": true,
+ "UnknownSignatureAlgorithm": true,
+ "VerifyOptions": true,
+ },
+ "crypto/x509/pkix": map[string]bool{
+ "AlgorithmIdentifier": true,
+ "AttributeTypeAndValue": true,
+ "AttributeTypeAndValueSET": true,
+ "CertificateList": true,
+ "Extension": true,
+ "Name": true,
+ "RDNSequence": true,
+ "RelativeDistinguishedNameSET": true,
+ "RevokedCertificate": true,
+ "TBSCertificateList": true,
+ },
+ "database/sql": map[string]bool{
+ "ColumnType": true,
+ "Conn": true,
+ "DB": true,
+ "DBStats": true,
+ "Drivers": true,
+ "ErrConnDone": true,
+ "ErrNoRows": true,
+ "ErrTxDone": true,
+ "IsolationLevel": true,
+ "LevelDefault": true,
+ "LevelLinearizable": true,
+ "LevelReadCommitted": true,
+ "LevelReadUncommitted": true,
+ "LevelRepeatableRead": true,
+ "LevelSerializable": true,
+ "LevelSnapshot": true,
+ "LevelWriteCommitted": true,
+ "Named": true,
+ "NamedArg": true,
+ "NullBool": true,
+ "NullFloat64": true,
+ "NullInt64": true,
+ "NullString": true,
+ "Open": true,
+ "OpenDB": true,
+ "Out": true,
+ "RawBytes": true,
+ "Register": true,
+ "Result": true,
+ "Row": true,
+ "Rows": true,
+ "Scanner": true,
+ "Stmt": true,
+ "Tx": true,
+ "TxOptions": true,
+ },
+ "database/sql/driver": map[string]bool{
+ "Bool": true,
+ "ColumnConverter": true,
+ "Conn": true,
+ "ConnBeginTx": true,
+ "ConnPrepareContext": true,
+ "Connector": true,
+ "DefaultParameterConverter": true,
+ "Driver": true,
+ "DriverContext": true,
+ "ErrBadConn": true,
+ "ErrRemoveArgument": true,
+ "ErrSkip": true,
+ "Execer": true,
+ "ExecerContext": true,
+ "Int32": true,
+ "IsScanValue": true,
+ "IsValue": true,
+ "IsolationLevel": true,
+ "NamedValue": true,
+ "NamedValueChecker": true,
+ "NotNull": true,
+ "Null": true,
+ "Pinger": true,
+ "Queryer": true,
+ "QueryerContext": true,
+ "Result": true,
+ "ResultNoRows": true,
+ "Rows": true,
+ "RowsAffected": true,
+ "RowsColumnTypeDatabaseTypeName": true,
+ "RowsColumnTypeLength": true,
+ "RowsColumnTypeNullable": true,
+ "RowsColumnTypePrecisionScale": true,
+ "RowsColumnTypeScanType": true,
+ "RowsNextResultSet": true,
+ "SessionResetter": true,
+ "Stmt": true,
+ "StmtExecContext": true,
+ "StmtQueryContext": true,
+ "String": true,
+ "Tx": true,
+ "TxOptions": true,
+ "Value": true,
+ "ValueConverter": true,
+ "Valuer": true,
+ },
+ "debug/dwarf": map[string]bool{
+ "AddrType": true,
+ "ArrayType": true,
+ "Attr": true,
+ "AttrAbstractOrigin": true,
+ "AttrAccessibility": true,
+ "AttrAddrClass": true,
+ "AttrAllocated": true,
+ "AttrArtificial": true,
+ "AttrAssociated": true,
+ "AttrBaseTypes": true,
+ "AttrBitOffset": true,
+ "AttrBitSize": true,
+ "AttrByteSize": true,
+ "AttrCallColumn": true,
+ "AttrCallFile": true,
+ "AttrCallLine": true,
+ "AttrCalling": true,
+ "AttrCommonRef": true,
+ "AttrCompDir": true,
+ "AttrConstValue": true,
+ "AttrContainingType": true,
+ "AttrCount": true,
+ "AttrDataLocation": true,
+ "AttrDataMemberLoc": true,
+ "AttrDeclColumn": true,
+ "AttrDeclFile": true,
+ "AttrDeclLine": true,
+ "AttrDeclaration": true,
+ "AttrDefaultValue": true,
+ "AttrDescription": true,
+ "AttrDiscr": true,
+ "AttrDiscrList": true,
+ "AttrDiscrValue": true,
+ "AttrEncoding": true,
+ "AttrEntrypc": true,
+ "AttrExtension": true,
+ "AttrExternal": true,
+ "AttrFrameBase": true,
+ "AttrFriend": true,
+ "AttrHighpc": true,
+ "AttrIdentifierCase": true,
+ "AttrImport": true,
+ "AttrInline": true,
+ "AttrIsOptional": true,
+ "AttrLanguage": true,
+ "AttrLocation": true,
+ "AttrLowerBound": true,
+ "AttrLowpc": true,
+ "AttrMacroInfo": true,
+ "AttrName": true,
+ "AttrNamelistItem": true,
+ "AttrOrdering": true,
+ "AttrPriority": true,
+ "AttrProducer": true,
+ "AttrPrototyped": true,
+ "AttrRanges": true,
+ "AttrReturnAddr": true,
+ "AttrSegment": true,
+ "AttrSibling": true,
+ "AttrSpecification": true,
+ "AttrStartScope": true,
+ "AttrStaticLink": true,
+ "AttrStmtList": true,
+ "AttrStride": true,
+ "AttrStrideSize": true,
+ "AttrStringLength": true,
+ "AttrTrampoline": true,
+ "AttrType": true,
+ "AttrUpperBound": true,
+ "AttrUseLocation": true,
+ "AttrUseUTF8": true,
+ "AttrVarParam": true,
+ "AttrVirtuality": true,
+ "AttrVisibility": true,
+ "AttrVtableElemLoc": true,
+ "BasicType": true,
+ "BoolType": true,
+ "CharType": true,
+ "Class": true,
+ "ClassAddress": true,
+ "ClassBlock": true,
+ "ClassConstant": true,
+ "ClassExprLoc": true,
+ "ClassFlag": true,
+ "ClassLinePtr": true,
+ "ClassLocListPtr": true,
+ "ClassMacPtr": true,
+ "ClassRangeListPtr": true,
+ "ClassReference": true,
+ "ClassReferenceAlt": true,
+ "ClassReferenceSig": true,
+ "ClassString": true,
+ "ClassStringAlt": true,
+ "ClassUnknown": true,
+ "CommonType": true,
+ "ComplexType": true,
+ "Data": true,
+ "DecodeError": true,
+ "DotDotDotType": true,
+ "Entry": true,
+ "EnumType": true,
+ "EnumValue": true,
+ "ErrUnknownPC": true,
+ "Field": true,
+ "FloatType": true,
+ "FuncType": true,
+ "IntType": true,
+ "LineEntry": true,
+ "LineFile": true,
+ "LineReader": true,
+ "LineReaderPos": true,
+ "New": true,
+ "Offset": true,
+ "PtrType": true,
+ "QualType": true,
+ "Reader": true,
+ "StructField": true,
+ "StructType": true,
+ "Tag": true,
+ "TagAccessDeclaration": true,
+ "TagArrayType": true,
+ "TagBaseType": true,
+ "TagCatchDwarfBlock": true,
+ "TagClassType": true,
+ "TagCommonDwarfBlock": true,
+ "TagCommonInclusion": true,
+ "TagCompileUnit": true,
+ "TagCondition": true,
+ "TagConstType": true,
+ "TagConstant": true,
+ "TagDwarfProcedure": true,
+ "TagEntryPoint": true,
+ "TagEnumerationType": true,
+ "TagEnumerator": true,
+ "TagFileType": true,
+ "TagFormalParameter": true,
+ "TagFriend": true,
+ "TagImportedDeclaration": true,
+ "TagImportedModule": true,
+ "TagImportedUnit": true,
+ "TagInheritance": true,
+ "TagInlinedSubroutine": true,
+ "TagInterfaceType": true,
+ "TagLabel": true,
+ "TagLexDwarfBlock": true,
+ "TagMember": true,
+ "TagModule": true,
+ "TagMutableType": true,
+ "TagNamelist": true,
+ "TagNamelistItem": true,
+ "TagNamespace": true,
+ "TagPackedType": true,
+ "TagPartialUnit": true,
+ "TagPointerType": true,
+ "TagPtrToMemberType": true,
+ "TagReferenceType": true,
+ "TagRestrictType": true,
+ "TagRvalueReferenceType": true,
+ "TagSetType": true,
+ "TagSharedType": true,
+ "TagStringType": true,
+ "TagStructType": true,
+ "TagSubprogram": true,
+ "TagSubrangeType": true,
+ "TagSubroutineType": true,
+ "TagTemplateAlias": true,
+ "TagTemplateTypeParameter": true,
+ "TagTemplateValueParameter": true,
+ "TagThrownType": true,
+ "TagTryDwarfBlock": true,
+ "TagTypeUnit": true,
+ "TagTypedef": true,
+ "TagUnionType": true,
+ "TagUnspecifiedParameters": true,
+ "TagUnspecifiedType": true,
+ "TagVariable": true,
+ "TagVariant": true,
+ "TagVariantPart": true,
+ "TagVolatileType": true,
+ "TagWithStmt": true,
+ "Type": true,
+ "TypedefType": true,
+ "UcharType": true,
+ "UintType": true,
+ "UnspecifiedType": true,
+ "VoidType": true,
+ },
+ "debug/elf": map[string]bool{
+ "ARM_MAGIC_TRAMP_NUMBER": true,
+ "COMPRESS_HIOS": true,
+ "COMPRESS_HIPROC": true,
+ "COMPRESS_LOOS": true,
+ "COMPRESS_LOPROC": true,
+ "COMPRESS_ZLIB": true,
+ "Chdr32": true,
+ "Chdr64": true,
+ "Class": true,
+ "CompressionType": true,
+ "DF_BIND_NOW": true,
+ "DF_ORIGIN": true,
+ "DF_STATIC_TLS": true,
+ "DF_SYMBOLIC": true,
+ "DF_TEXTREL": true,
+ "DT_BIND_NOW": true,
+ "DT_DEBUG": true,
+ "DT_ENCODING": true,
+ "DT_FINI": true,
+ "DT_FINI_ARRAY": true,
+ "DT_FINI_ARRAYSZ": true,
+ "DT_FLAGS": true,
+ "DT_HASH": true,
+ "DT_HIOS": true,
+ "DT_HIPROC": true,
+ "DT_INIT": true,
+ "DT_INIT_ARRAY": true,
+ "DT_INIT_ARRAYSZ": true,
+ "DT_JMPREL": true,
+ "DT_LOOS": true,
+ "DT_LOPROC": true,
+ "DT_NEEDED": true,
+ "DT_NULL": true,
+ "DT_PLTGOT": true,
+ "DT_PLTREL": true,
+ "DT_PLTRELSZ": true,
+ "DT_PREINIT_ARRAY": true,
+ "DT_PREINIT_ARRAYSZ": true,
+ "DT_REL": true,
+ "DT_RELA": true,
+ "DT_RELAENT": true,
+ "DT_RELASZ": true,
+ "DT_RELENT": true,
+ "DT_RELSZ": true,
+ "DT_RPATH": true,
+ "DT_RUNPATH": true,
+ "DT_SONAME": true,
+ "DT_STRSZ": true,
+ "DT_STRTAB": true,
+ "DT_SYMBOLIC": true,
+ "DT_SYMENT": true,
+ "DT_SYMTAB": true,
+ "DT_TEXTREL": true,
+ "DT_VERNEED": true,
+ "DT_VERNEEDNUM": true,
+ "DT_VERSYM": true,
+ "Data": true,
+ "Dyn32": true,
+ "Dyn64": true,
+ "DynFlag": true,
+ "DynTag": true,
+ "EI_ABIVERSION": true,
+ "EI_CLASS": true,
+ "EI_DATA": true,
+ "EI_NIDENT": true,
+ "EI_OSABI": true,
+ "EI_PAD": true,
+ "EI_VERSION": true,
+ "ELFCLASS32": true,
+ "ELFCLASS64": true,
+ "ELFCLASSNONE": true,
+ "ELFDATA2LSB": true,
+ "ELFDATA2MSB": true,
+ "ELFDATANONE": true,
+ "ELFMAG": true,
+ "ELFOSABI_86OPEN": true,
+ "ELFOSABI_AIX": true,
+ "ELFOSABI_ARM": true,
+ "ELFOSABI_AROS": true,
+ "ELFOSABI_CLOUDABI": true,
+ "ELFOSABI_FENIXOS": true,
+ "ELFOSABI_FREEBSD": true,
+ "ELFOSABI_HPUX": true,
+ "ELFOSABI_HURD": true,
+ "ELFOSABI_IRIX": true,
+ "ELFOSABI_LINUX": true,
+ "ELFOSABI_MODESTO": true,
+ "ELFOSABI_NETBSD": true,
+ "ELFOSABI_NONE": true,
+ "ELFOSABI_NSK": true,
+ "ELFOSABI_OPENBSD": true,
+ "ELFOSABI_OPENVMS": true,
+ "ELFOSABI_SOLARIS": true,
+ "ELFOSABI_STANDALONE": true,
+ "ELFOSABI_TRU64": true,
+ "EM_386": true,
+ "EM_486": true,
+ "EM_56800EX": true,
+ "EM_68HC05": true,
+ "EM_68HC08": true,
+ "EM_68HC11": true,
+ "EM_68HC12": true,
+ "EM_68HC16": true,
+ "EM_68K": true,
+ "EM_78KOR": true,
+ "EM_8051": true,
+ "EM_860": true,
+ "EM_88K": true,
+ "EM_960": true,
+ "EM_AARCH64": true,
+ "EM_ALPHA": true,
+ "EM_ALPHA_STD": true,
+ "EM_ALTERA_NIOS2": true,
+ "EM_AMDGPU": true,
+ "EM_ARC": true,
+ "EM_ARCA": true,
+ "EM_ARC_COMPACT": true,
+ "EM_ARC_COMPACT2": true,
+ "EM_ARM": true,
+ "EM_AVR": true,
+ "EM_AVR32": true,
+ "EM_BA1": true,
+ "EM_BA2": true,
+ "EM_BLACKFIN": true,
+ "EM_BPF": true,
+ "EM_C166": true,
+ "EM_CDP": true,
+ "EM_CE": true,
+ "EM_CLOUDSHIELD": true,
+ "EM_COGE": true,
+ "EM_COLDFIRE": true,
+ "EM_COOL": true,
+ "EM_COREA_1ST": true,
+ "EM_COREA_2ND": true,
+ "EM_CR": true,
+ "EM_CR16": true,
+ "EM_CRAYNV2": true,
+ "EM_CRIS": true,
+ "EM_CRX": true,
+ "EM_CSR_KALIMBA": true,
+ "EM_CUDA": true,
+ "EM_CYPRESS_M8C": true,
+ "EM_D10V": true,
+ "EM_D30V": true,
+ "EM_DSP24": true,
+ "EM_DSPIC30F": true,
+ "EM_DXP": true,
+ "EM_ECOG1": true,
+ "EM_ECOG16": true,
+ "EM_ECOG1X": true,
+ "EM_ECOG2": true,
+ "EM_ETPU": true,
+ "EM_EXCESS": true,
+ "EM_F2MC16": true,
+ "EM_FIREPATH": true,
+ "EM_FR20": true,
+ "EM_FR30": true,
+ "EM_FT32": true,
+ "EM_FX66": true,
+ "EM_H8S": true,
+ "EM_H8_300": true,
+ "EM_H8_300H": true,
+ "EM_H8_500": true,
+ "EM_HUANY": true,
+ "EM_IA_64": true,
+ "EM_INTEL205": true,
+ "EM_INTEL206": true,
+ "EM_INTEL207": true,
+ "EM_INTEL208": true,
+ "EM_INTEL209": true,
+ "EM_IP2K": true,
+ "EM_JAVELIN": true,
+ "EM_K10M": true,
+ "EM_KM32": true,
+ "EM_KMX16": true,
+ "EM_KMX32": true,
+ "EM_KMX8": true,
+ "EM_KVARC": true,
+ "EM_L10M": true,
+ "EM_LANAI": true,
+ "EM_LATTICEMICO32": true,
+ "EM_M16C": true,
+ "EM_M32": true,
+ "EM_M32C": true,
+ "EM_M32R": true,
+ "EM_MANIK": true,
+ "EM_MAX": true,
+ "EM_MAXQ30": true,
+ "EM_MCHP_PIC": true,
+ "EM_MCST_ELBRUS": true,
+ "EM_ME16": true,
+ "EM_METAG": true,
+ "EM_MICROBLAZE": true,
+ "EM_MIPS": true,
+ "EM_MIPS_RS3_LE": true,
+ "EM_MIPS_RS4_BE": true,
+ "EM_MIPS_X": true,
+ "EM_MMA": true,
+ "EM_MMDSP_PLUS": true,
+ "EM_MMIX": true,
+ "EM_MN10200": true,
+ "EM_MN10300": true,
+ "EM_MOXIE": true,
+ "EM_MSP430": true,
+ "EM_NCPU": true,
+ "EM_NDR1": true,
+ "EM_NDS32": true,
+ "EM_NONE": true,
+ "EM_NORC": true,
+ "EM_NS32K": true,
+ "EM_OPEN8": true,
+ "EM_OPENRISC": true,
+ "EM_PARISC": true,
+ "EM_PCP": true,
+ "EM_PDP10": true,
+ "EM_PDP11": true,
+ "EM_PDSP": true,
+ "EM_PJ": true,
+ "EM_PPC": true,
+ "EM_PPC64": true,
+ "EM_PRISM": true,
+ "EM_QDSP6": true,
+ "EM_R32C": true,
+ "EM_RCE": true,
+ "EM_RH32": true,
+ "EM_RISCV": true,
+ "EM_RL78": true,
+ "EM_RS08": true,
+ "EM_RX": true,
+ "EM_S370": true,
+ "EM_S390": true,
+ "EM_SCORE7": true,
+ "EM_SEP": true,
+ "EM_SE_C17": true,
+ "EM_SE_C33": true,
+ "EM_SH": true,
+ "EM_SHARC": true,
+ "EM_SLE9X": true,
+ "EM_SNP1K": true,
+ "EM_SPARC": true,
+ "EM_SPARC32PLUS": true,
+ "EM_SPARCV9": true,
+ "EM_ST100": true,
+ "EM_ST19": true,
+ "EM_ST200": true,
+ "EM_ST7": true,
+ "EM_ST9PLUS": true,
+ "EM_STARCORE": true,
+ "EM_STM8": true,
+ "EM_STXP7X": true,
+ "EM_SVX": true,
+ "EM_TILE64": true,
+ "EM_TILEGX": true,
+ "EM_TILEPRO": true,
+ "EM_TINYJ": true,
+ "EM_TI_ARP32": true,
+ "EM_TI_C2000": true,
+ "EM_TI_C5500": true,
+ "EM_TI_C6000": true,
+ "EM_TI_PRU": true,
+ "EM_TMM_GPP": true,
+ "EM_TPC": true,
+ "EM_TRICORE": true,
+ "EM_TRIMEDIA": true,
+ "EM_TSK3000": true,
+ "EM_UNICORE": true,
+ "EM_V800": true,
+ "EM_V850": true,
+ "EM_VAX": true,
+ "EM_VIDEOCORE": true,
+ "EM_VIDEOCORE3": true,
+ "EM_VIDEOCORE5": true,
+ "EM_VISIUM": true,
+ "EM_VPP500": true,
+ "EM_X86_64": true,
+ "EM_XCORE": true,
+ "EM_XGATE": true,
+ "EM_XIMO16": true,
+ "EM_XTENSA": true,
+ "EM_Z80": true,
+ "EM_ZSP": true,
+ "ET_CORE": true,
+ "ET_DYN": true,
+ "ET_EXEC": true,
+ "ET_HIOS": true,
+ "ET_HIPROC": true,
+ "ET_LOOS": true,
+ "ET_LOPROC": true,
+ "ET_NONE": true,
+ "ET_REL": true,
+ "EV_CURRENT": true,
+ "EV_NONE": true,
+ "ErrNoSymbols": true,
+ "File": true,
+ "FileHeader": true,
+ "FormatError": true,
+ "Header32": true,
+ "Header64": true,
+ "ImportedSymbol": true,
+ "Machine": true,
+ "NT_FPREGSET": true,
+ "NT_PRPSINFO": true,
+ "NT_PRSTATUS": true,
+ "NType": true,
+ "NewFile": true,
+ "OSABI": true,
+ "Open": true,
+ "PF_MASKOS": true,
+ "PF_MASKPROC": true,
+ "PF_R": true,
+ "PF_W": true,
+ "PF_X": true,
+ "PT_DYNAMIC": true,
+ "PT_HIOS": true,
+ "PT_HIPROC": true,
+ "PT_INTERP": true,
+ "PT_LOAD": true,
+ "PT_LOOS": true,
+ "PT_LOPROC": true,
+ "PT_NOTE": true,
+ "PT_NULL": true,
+ "PT_PHDR": true,
+ "PT_SHLIB": true,
+ "PT_TLS": true,
+ "Prog": true,
+ "Prog32": true,
+ "Prog64": true,
+ "ProgFlag": true,
+ "ProgHeader": true,
+ "ProgType": true,
+ "R_386": true,
+ "R_386_16": true,
+ "R_386_32": true,
+ "R_386_32PLT": true,
+ "R_386_8": true,
+ "R_386_COPY": true,
+ "R_386_GLOB_DAT": true,
+ "R_386_GOT32": true,
+ "R_386_GOT32X": true,
+ "R_386_GOTOFF": true,
+ "R_386_GOTPC": true,
+ "R_386_IRELATIVE": true,
+ "R_386_JMP_SLOT": true,
+ "R_386_NONE": true,
+ "R_386_PC16": true,
+ "R_386_PC32": true,
+ "R_386_PC8": true,
+ "R_386_PLT32": true,
+ "R_386_RELATIVE": true,
+ "R_386_SIZE32": true,
+ "R_386_TLS_DESC": true,
+ "R_386_TLS_DESC_CALL": true,
+ "R_386_TLS_DTPMOD32": true,
+ "R_386_TLS_DTPOFF32": true,
+ "R_386_TLS_GD": true,
+ "R_386_TLS_GD_32": true,
+ "R_386_TLS_GD_CALL": true,
+ "R_386_TLS_GD_POP": true,
+ "R_386_TLS_GD_PUSH": true,
+ "R_386_TLS_GOTDESC": true,
+ "R_386_TLS_GOTIE": true,
+ "R_386_TLS_IE": true,
+ "R_386_TLS_IE_32": true,
+ "R_386_TLS_LDM": true,
+ "R_386_TLS_LDM_32": true,
+ "R_386_TLS_LDM_CALL": true,
+ "R_386_TLS_LDM_POP": true,
+ "R_386_TLS_LDM_PUSH": true,
+ "R_386_TLS_LDO_32": true,
+ "R_386_TLS_LE": true,
+ "R_386_TLS_LE_32": true,
+ "R_386_TLS_TPOFF": true,
+ "R_386_TLS_TPOFF32": true,
+ "R_390": true,
+ "R_390_12": true,
+ "R_390_16": true,
+ "R_390_20": true,
+ "R_390_32": true,
+ "R_390_64": true,
+ "R_390_8": true,
+ "R_390_COPY": true,
+ "R_390_GLOB_DAT": true,
+ "R_390_GOT12": true,
+ "R_390_GOT16": true,
+ "R_390_GOT20": true,
+ "R_390_GOT32": true,
+ "R_390_GOT64": true,
+ "R_390_GOTENT": true,
+ "R_390_GOTOFF": true,
+ "R_390_GOTOFF16": true,
+ "R_390_GOTOFF64": true,
+ "R_390_GOTPC": true,
+ "R_390_GOTPCDBL": true,
+ "R_390_GOTPLT12": true,
+ "R_390_GOTPLT16": true,
+ "R_390_GOTPLT20": true,
+ "R_390_GOTPLT32": true,
+ "R_390_GOTPLT64": true,
+ "R_390_GOTPLTENT": true,
+ "R_390_GOTPLTOFF16": true,
+ "R_390_GOTPLTOFF32": true,
+ "R_390_GOTPLTOFF64": true,
+ "R_390_JMP_SLOT": true,
+ "R_390_NONE": true,
+ "R_390_PC16": true,
+ "R_390_PC16DBL": true,
+ "R_390_PC32": true,
+ "R_390_PC32DBL": true,
+ "R_390_PC64": true,
+ "R_390_PLT16DBL": true,
+ "R_390_PLT32": true,
+ "R_390_PLT32DBL": true,
+ "R_390_PLT64": true,
+ "R_390_RELATIVE": true,
+ "R_390_TLS_DTPMOD": true,
+ "R_390_TLS_DTPOFF": true,
+ "R_390_TLS_GD32": true,
+ "R_390_TLS_GD64": true,
+ "R_390_TLS_GDCALL": true,
+ "R_390_TLS_GOTIE12": true,
+ "R_390_TLS_GOTIE20": true,
+ "R_390_TLS_GOTIE32": true,
+ "R_390_TLS_GOTIE64": true,
+ "R_390_TLS_IE32": true,
+ "R_390_TLS_IE64": true,
+ "R_390_TLS_IEENT": true,
+ "R_390_TLS_LDCALL": true,
+ "R_390_TLS_LDM32": true,
+ "R_390_TLS_LDM64": true,
+ "R_390_TLS_LDO32": true,
+ "R_390_TLS_LDO64": true,
+ "R_390_TLS_LE32": true,
+ "R_390_TLS_LE64": true,
+ "R_390_TLS_LOAD": true,
+ "R_390_TLS_TPOFF": true,
+ "R_AARCH64": true,
+ "R_AARCH64_ABS16": true,
+ "R_AARCH64_ABS32": true,
+ "R_AARCH64_ABS64": true,
+ "R_AARCH64_ADD_ABS_LO12_NC": true,
+ "R_AARCH64_ADR_GOT_PAGE": true,
+ "R_AARCH64_ADR_PREL_LO21": true,
+ "R_AARCH64_ADR_PREL_PG_HI21": true,
+ "R_AARCH64_ADR_PREL_PG_HI21_NC": true,
+ "R_AARCH64_CALL26": true,
+ "R_AARCH64_CONDBR19": true,
+ "R_AARCH64_COPY": true,
+ "R_AARCH64_GLOB_DAT": true,
+ "R_AARCH64_GOT_LD_PREL19": true,
+ "R_AARCH64_IRELATIVE": true,
+ "R_AARCH64_JUMP26": true,
+ "R_AARCH64_JUMP_SLOT": true,
+ "R_AARCH64_LD64_GOTOFF_LO15": true,
+ "R_AARCH64_LD64_GOTPAGE_LO15": true,
+ "R_AARCH64_LD64_GOT_LO12_NC": true,
+ "R_AARCH64_LDST128_ABS_LO12_NC": true,
+ "R_AARCH64_LDST16_ABS_LO12_NC": true,
+ "R_AARCH64_LDST32_ABS_LO12_NC": true,
+ "R_AARCH64_LDST64_ABS_LO12_NC": true,
+ "R_AARCH64_LDST8_ABS_LO12_NC": true,
+ "R_AARCH64_LD_PREL_LO19": true,
+ "R_AARCH64_MOVW_SABS_G0": true,
+ "R_AARCH64_MOVW_SABS_G1": true,
+ "R_AARCH64_MOVW_SABS_G2": true,
+ "R_AARCH64_MOVW_UABS_G0": true,
+ "R_AARCH64_MOVW_UABS_G0_NC": true,
+ "R_AARCH64_MOVW_UABS_G1": true,
+ "R_AARCH64_MOVW_UABS_G1_NC": true,
+ "R_AARCH64_MOVW_UABS_G2": true,
+ "R_AARCH64_MOVW_UABS_G2_NC": true,
+ "R_AARCH64_MOVW_UABS_G3": true,
+ "R_AARCH64_NONE": true,
+ "R_AARCH64_NULL": true,
+ "R_AARCH64_P32_ABS16": true,
+ "R_AARCH64_P32_ABS32": true,
+ "R_AARCH64_P32_ADD_ABS_LO12_NC": true,
+ "R_AARCH64_P32_ADR_GOT_PAGE": true,
+ "R_AARCH64_P32_ADR_PREL_LO21": true,
+ "R_AARCH64_P32_ADR_PREL_PG_HI21": true,
+ "R_AARCH64_P32_CALL26": true,
+ "R_AARCH64_P32_CONDBR19": true,
+ "R_AARCH64_P32_COPY": true,
+ "R_AARCH64_P32_GLOB_DAT": true,
+ "R_AARCH64_P32_GOT_LD_PREL19": true,
+ "R_AARCH64_P32_IRELATIVE": true,
+ "R_AARCH64_P32_JUMP26": true,
+ "R_AARCH64_P32_JUMP_SLOT": true,
+ "R_AARCH64_P32_LD32_GOT_LO12_NC": true,
+ "R_AARCH64_P32_LDST128_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST16_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST32_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST64_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST8_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LD_PREL_LO19": true,
+ "R_AARCH64_P32_MOVW_SABS_G0": true,
+ "R_AARCH64_P32_MOVW_UABS_G0": true,
+ "R_AARCH64_P32_MOVW_UABS_G0_NC": true,
+ "R_AARCH64_P32_MOVW_UABS_G1": true,
+ "R_AARCH64_P32_PREL16": true,
+ "R_AARCH64_P32_PREL32": true,
+ "R_AARCH64_P32_RELATIVE": true,
+ "R_AARCH64_P32_TLSDESC": true,
+ "R_AARCH64_P32_TLSDESC_ADD_LO12_NC": true,
+ "R_AARCH64_P32_TLSDESC_ADR_PAGE21": true,
+ "R_AARCH64_P32_TLSDESC_ADR_PREL21": true,
+ "R_AARCH64_P32_TLSDESC_CALL": true,
+ "R_AARCH64_P32_TLSDESC_LD32_LO12_NC": true,
+ "R_AARCH64_P32_TLSDESC_LD_PREL19": true,
+ "R_AARCH64_P32_TLSGD_ADD_LO12_NC": true,
+ "R_AARCH64_P32_TLSGD_ADR_PAGE21": true,
+ "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21": true,
+ "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC": true,
+ "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19": true,
+ "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12": true,
+ "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12": true,
+ "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC": true,
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0": true,
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC": true,
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1": true,
+ "R_AARCH64_P32_TLS_DTPMOD": true,
+ "R_AARCH64_P32_TLS_DTPREL": true,
+ "R_AARCH64_P32_TLS_TPREL": true,
+ "R_AARCH64_P32_TSTBR14": true,
+ "R_AARCH64_PREL16": true,
+ "R_AARCH64_PREL32": true,
+ "R_AARCH64_PREL64": true,
+ "R_AARCH64_RELATIVE": true,
+ "R_AARCH64_TLSDESC": true,
+ "R_AARCH64_TLSDESC_ADD": true,
+ "R_AARCH64_TLSDESC_ADD_LO12_NC": true,
+ "R_AARCH64_TLSDESC_ADR_PAGE21": true,
+ "R_AARCH64_TLSDESC_ADR_PREL21": true,
+ "R_AARCH64_TLSDESC_CALL": true,
+ "R_AARCH64_TLSDESC_LD64_LO12_NC": true,
+ "R_AARCH64_TLSDESC_LDR": true,
+ "R_AARCH64_TLSDESC_LD_PREL19": true,
+ "R_AARCH64_TLSDESC_OFF_G0_NC": true,
+ "R_AARCH64_TLSDESC_OFF_G1": true,
+ "R_AARCH64_TLSGD_ADD_LO12_NC": true,
+ "R_AARCH64_TLSGD_ADR_PAGE21": true,
+ "R_AARCH64_TLSGD_ADR_PREL21": true,
+ "R_AARCH64_TLSGD_MOVW_G0_NC": true,
+ "R_AARCH64_TLSGD_MOVW_G1": true,
+ "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21": true,
+ "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC": true,
+ "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19": true,
+ "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC": true,
+ "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1": true,
+ "R_AARCH64_TLSLD_ADR_PAGE21": true,
+ "R_AARCH64_TLSLD_ADR_PREL21": true,
+ "R_AARCH64_TLSLD_LDST128_DTPREL_LO12": true,
+ "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC": true,
+ "R_AARCH64_TLSLE_ADD_TPREL_HI12": true,
+ "R_AARCH64_TLSLE_ADD_TPREL_LO12": true,
+ "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC": true,
+ "R_AARCH64_TLSLE_LDST128_TPREL_LO12": true,
+ "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G0": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G1": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G2": true,
+ "R_AARCH64_TLS_DTPMOD64": true,
+ "R_AARCH64_TLS_DTPREL64": true,
+ "R_AARCH64_TLS_TPREL64": true,
+ "R_AARCH64_TSTBR14": true,
+ "R_ALPHA": true,
+ "R_ALPHA_BRADDR": true,
+ "R_ALPHA_COPY": true,
+ "R_ALPHA_GLOB_DAT": true,
+ "R_ALPHA_GPDISP": true,
+ "R_ALPHA_GPREL32": true,
+ "R_ALPHA_GPRELHIGH": true,
+ "R_ALPHA_GPRELLOW": true,
+ "R_ALPHA_GPVALUE": true,
+ "R_ALPHA_HINT": true,
+ "R_ALPHA_IMMED_BR_HI32": true,
+ "R_ALPHA_IMMED_GP_16": true,
+ "R_ALPHA_IMMED_GP_HI32": true,
+ "R_ALPHA_IMMED_LO32": true,
+ "R_ALPHA_IMMED_SCN_HI32": true,
+ "R_ALPHA_JMP_SLOT": true,
+ "R_ALPHA_LITERAL": true,
+ "R_ALPHA_LITUSE": true,
+ "R_ALPHA_NONE": true,
+ "R_ALPHA_OP_PRSHIFT": true,
+ "R_ALPHA_OP_PSUB": true,
+ "R_ALPHA_OP_PUSH": true,
+ "R_ALPHA_OP_STORE": true,
+ "R_ALPHA_REFLONG": true,
+ "R_ALPHA_REFQUAD": true,
+ "R_ALPHA_RELATIVE": true,
+ "R_ALPHA_SREL16": true,
+ "R_ALPHA_SREL32": true,
+ "R_ALPHA_SREL64": true,
+ "R_ARM": true,
+ "R_ARM_ABS12": true,
+ "R_ARM_ABS16": true,
+ "R_ARM_ABS32": true,
+ "R_ARM_ABS32_NOI": true,
+ "R_ARM_ABS8": true,
+ "R_ARM_ALU_PCREL_15_8": true,
+ "R_ARM_ALU_PCREL_23_15": true,
+ "R_ARM_ALU_PCREL_7_0": true,
+ "R_ARM_ALU_PC_G0": true,
+ "R_ARM_ALU_PC_G0_NC": true,
+ "R_ARM_ALU_PC_G1": true,
+ "R_ARM_ALU_PC_G1_NC": true,
+ "R_ARM_ALU_PC_G2": true,
+ "R_ARM_ALU_SBREL_19_12_NC": true,
+ "R_ARM_ALU_SBREL_27_20_CK": true,
+ "R_ARM_ALU_SB_G0": true,
+ "R_ARM_ALU_SB_G0_NC": true,
+ "R_ARM_ALU_SB_G1": true,
+ "R_ARM_ALU_SB_G1_NC": true,
+ "R_ARM_ALU_SB_G2": true,
+ "R_ARM_AMP_VCALL9": true,
+ "R_ARM_BASE_ABS": true,
+ "R_ARM_CALL": true,
+ "R_ARM_COPY": true,
+ "R_ARM_GLOB_DAT": true,
+ "R_ARM_GNU_VTENTRY": true,
+ "R_ARM_GNU_VTINHERIT": true,
+ "R_ARM_GOT32": true,
+ "R_ARM_GOTOFF": true,
+ "R_ARM_GOTOFF12": true,
+ "R_ARM_GOTPC": true,
+ "R_ARM_GOTRELAX": true,
+ "R_ARM_GOT_ABS": true,
+ "R_ARM_GOT_BREL12": true,
+ "R_ARM_GOT_PREL": true,
+ "R_ARM_IRELATIVE": true,
+ "R_ARM_JUMP24": true,
+ "R_ARM_JUMP_SLOT": true,
+ "R_ARM_LDC_PC_G0": true,
+ "R_ARM_LDC_PC_G1": true,
+ "R_ARM_LDC_PC_G2": true,
+ "R_ARM_LDC_SB_G0": true,
+ "R_ARM_LDC_SB_G1": true,
+ "R_ARM_LDC_SB_G2": true,
+ "R_ARM_LDRS_PC_G0": true,
+ "R_ARM_LDRS_PC_G1": true,
+ "R_ARM_LDRS_PC_G2": true,
+ "R_ARM_LDRS_SB_G0": true,
+ "R_ARM_LDRS_SB_G1": true,
+ "R_ARM_LDRS_SB_G2": true,
+ "R_ARM_LDR_PC_G1": true,
+ "R_ARM_LDR_PC_G2": true,
+ "R_ARM_LDR_SBREL_11_10_NC": true,
+ "R_ARM_LDR_SB_G0": true,
+ "R_ARM_LDR_SB_G1": true,
+ "R_ARM_LDR_SB_G2": true,
+ "R_ARM_ME_TOO": true,
+ "R_ARM_MOVT_ABS": true,
+ "R_ARM_MOVT_BREL": true,
+ "R_ARM_MOVT_PREL": true,
+ "R_ARM_MOVW_ABS_NC": true,
+ "R_ARM_MOVW_BREL": true,
+ "R_ARM_MOVW_BREL_NC": true,
+ "R_ARM_MOVW_PREL_NC": true,
+ "R_ARM_NONE": true,
+ "R_ARM_PC13": true,
+ "R_ARM_PC24": true,
+ "R_ARM_PLT32": true,
+ "R_ARM_PLT32_ABS": true,
+ "R_ARM_PREL31": true,
+ "R_ARM_PRIVATE_0": true,
+ "R_ARM_PRIVATE_1": true,
+ "R_ARM_PRIVATE_10": true,
+ "R_ARM_PRIVATE_11": true,
+ "R_ARM_PRIVATE_12": true,
+ "R_ARM_PRIVATE_13": true,
+ "R_ARM_PRIVATE_14": true,
+ "R_ARM_PRIVATE_15": true,
+ "R_ARM_PRIVATE_2": true,
+ "R_ARM_PRIVATE_3": true,
+ "R_ARM_PRIVATE_4": true,
+ "R_ARM_PRIVATE_5": true,
+ "R_ARM_PRIVATE_6": true,
+ "R_ARM_PRIVATE_7": true,
+ "R_ARM_PRIVATE_8": true,
+ "R_ARM_PRIVATE_9": true,
+ "R_ARM_RABS32": true,
+ "R_ARM_RBASE": true,
+ "R_ARM_REL32": true,
+ "R_ARM_REL32_NOI": true,
+ "R_ARM_RELATIVE": true,
+ "R_ARM_RPC24": true,
+ "R_ARM_RREL32": true,
+ "R_ARM_RSBREL32": true,
+ "R_ARM_RXPC25": true,
+ "R_ARM_SBREL31": true,
+ "R_ARM_SBREL32": true,
+ "R_ARM_SWI24": true,
+ "R_ARM_TARGET1": true,
+ "R_ARM_TARGET2": true,
+ "R_ARM_THM_ABS5": true,
+ "R_ARM_THM_ALU_ABS_G0_NC": true,
+ "R_ARM_THM_ALU_ABS_G1_NC": true,
+ "R_ARM_THM_ALU_ABS_G2_NC": true,
+ "R_ARM_THM_ALU_ABS_G3": true,
+ "R_ARM_THM_ALU_PREL_11_0": true,
+ "R_ARM_THM_GOT_BREL12": true,
+ "R_ARM_THM_JUMP11": true,
+ "R_ARM_THM_JUMP19": true,
+ "R_ARM_THM_JUMP24": true,
+ "R_ARM_THM_JUMP6": true,
+ "R_ARM_THM_JUMP8": true,
+ "R_ARM_THM_MOVT_ABS": true,
+ "R_ARM_THM_MOVT_BREL": true,
+ "R_ARM_THM_MOVT_PREL": true,
+ "R_ARM_THM_MOVW_ABS_NC": true,
+ "R_ARM_THM_MOVW_BREL": true,
+ "R_ARM_THM_MOVW_BREL_NC": true,
+ "R_ARM_THM_MOVW_PREL_NC": true,
+ "R_ARM_THM_PC12": true,
+ "R_ARM_THM_PC22": true,
+ "R_ARM_THM_PC8": true,
+ "R_ARM_THM_RPC22": true,
+ "R_ARM_THM_SWI8": true,
+ "R_ARM_THM_TLS_CALL": true,
+ "R_ARM_THM_TLS_DESCSEQ16": true,
+ "R_ARM_THM_TLS_DESCSEQ32": true,
+ "R_ARM_THM_XPC22": true,
+ "R_ARM_TLS_CALL": true,
+ "R_ARM_TLS_DESCSEQ": true,
+ "R_ARM_TLS_DTPMOD32": true,
+ "R_ARM_TLS_DTPOFF32": true,
+ "R_ARM_TLS_GD32": true,
+ "R_ARM_TLS_GOTDESC": true,
+ "R_ARM_TLS_IE12GP": true,
+ "R_ARM_TLS_IE32": true,
+ "R_ARM_TLS_LDM32": true,
+ "R_ARM_TLS_LDO12": true,
+ "R_ARM_TLS_LDO32": true,
+ "R_ARM_TLS_LE12": true,
+ "R_ARM_TLS_LE32": true,
+ "R_ARM_TLS_TPOFF32": true,
+ "R_ARM_V4BX": true,
+ "R_ARM_XPC25": true,
+ "R_INFO": true,
+ "R_INFO32": true,
+ "R_MIPS": true,
+ "R_MIPS_16": true,
+ "R_MIPS_26": true,
+ "R_MIPS_32": true,
+ "R_MIPS_64": true,
+ "R_MIPS_ADD_IMMEDIATE": true,
+ "R_MIPS_CALL16": true,
+ "R_MIPS_CALL_HI16": true,
+ "R_MIPS_CALL_LO16": true,
+ "R_MIPS_DELETE": true,
+ "R_MIPS_GOT16": true,
+ "R_MIPS_GOT_DISP": true,
+ "R_MIPS_GOT_HI16": true,
+ "R_MIPS_GOT_LO16": true,
+ "R_MIPS_GOT_OFST": true,
+ "R_MIPS_GOT_PAGE": true,
+ "R_MIPS_GPREL16": true,
+ "R_MIPS_GPREL32": true,
+ "R_MIPS_HI16": true,
+ "R_MIPS_HIGHER": true,
+ "R_MIPS_HIGHEST": true,
+ "R_MIPS_INSERT_A": true,
+ "R_MIPS_INSERT_B": true,
+ "R_MIPS_JALR": true,
+ "R_MIPS_LITERAL": true,
+ "R_MIPS_LO16": true,
+ "R_MIPS_NONE": true,
+ "R_MIPS_PC16": true,
+ "R_MIPS_PJUMP": true,
+ "R_MIPS_REL16": true,
+ "R_MIPS_REL32": true,
+ "R_MIPS_RELGOT": true,
+ "R_MIPS_SCN_DISP": true,
+ "R_MIPS_SHIFT5": true,
+ "R_MIPS_SHIFT6": true,
+ "R_MIPS_SUB": true,
+ "R_MIPS_TLS_DTPMOD32": true,
+ "R_MIPS_TLS_DTPMOD64": true,
+ "R_MIPS_TLS_DTPREL32": true,
+ "R_MIPS_TLS_DTPREL64": true,
+ "R_MIPS_TLS_DTPREL_HI16": true,
+ "R_MIPS_TLS_DTPREL_LO16": true,
+ "R_MIPS_TLS_GD": true,
+ "R_MIPS_TLS_GOTTPREL": true,
+ "R_MIPS_TLS_LDM": true,
+ "R_MIPS_TLS_TPREL32": true,
+ "R_MIPS_TLS_TPREL64": true,
+ "R_MIPS_TLS_TPREL_HI16": true,
+ "R_MIPS_TLS_TPREL_LO16": true,
+ "R_PPC": true,
+ "R_PPC64": true,
+ "R_PPC64_ADDR14": true,
+ "R_PPC64_ADDR14_BRNTAKEN": true,
+ "R_PPC64_ADDR14_BRTAKEN": true,
+ "R_PPC64_ADDR16": true,
+ "R_PPC64_ADDR16_DS": true,
+ "R_PPC64_ADDR16_HA": true,
+ "R_PPC64_ADDR16_HI": true,
+ "R_PPC64_ADDR16_HIGH": true,
+ "R_PPC64_ADDR16_HIGHA": true,
+ "R_PPC64_ADDR16_HIGHER": true,
+ "R_PPC64_ADDR16_HIGHERA": true,
+ "R_PPC64_ADDR16_HIGHEST": true,
+ "R_PPC64_ADDR16_HIGHESTA": true,
+ "R_PPC64_ADDR16_LO": true,
+ "R_PPC64_ADDR16_LO_DS": true,
+ "R_PPC64_ADDR24": true,
+ "R_PPC64_ADDR32": true,
+ "R_PPC64_ADDR64": true,
+ "R_PPC64_ADDR64_LOCAL": true,
+ "R_PPC64_DTPMOD64": true,
+ "R_PPC64_DTPREL16": true,
+ "R_PPC64_DTPREL16_DS": true,
+ "R_PPC64_DTPREL16_HA": true,
+ "R_PPC64_DTPREL16_HI": true,
+ "R_PPC64_DTPREL16_HIGH": true,
+ "R_PPC64_DTPREL16_HIGHA": true,
+ "R_PPC64_DTPREL16_HIGHER": true,
+ "R_PPC64_DTPREL16_HIGHERA": true,
+ "R_PPC64_DTPREL16_HIGHEST": true,
+ "R_PPC64_DTPREL16_HIGHESTA": true,
+ "R_PPC64_DTPREL16_LO": true,
+ "R_PPC64_DTPREL16_LO_DS": true,
+ "R_PPC64_DTPREL64": true,
+ "R_PPC64_ENTRY": true,
+ "R_PPC64_GOT16": true,
+ "R_PPC64_GOT16_DS": true,
+ "R_PPC64_GOT16_HA": true,
+ "R_PPC64_GOT16_HI": true,
+ "R_PPC64_GOT16_LO": true,
+ "R_PPC64_GOT16_LO_DS": true,
+ "R_PPC64_GOT_DTPREL16_DS": true,
+ "R_PPC64_GOT_DTPREL16_HA": true,
+ "R_PPC64_GOT_DTPREL16_HI": true,
+ "R_PPC64_GOT_DTPREL16_LO_DS": true,
+ "R_PPC64_GOT_TLSGD16": true,
+ "R_PPC64_GOT_TLSGD16_HA": true,
+ "R_PPC64_GOT_TLSGD16_HI": true,
+ "R_PPC64_GOT_TLSGD16_LO": true,
+ "R_PPC64_GOT_TLSLD16": true,
+ "R_PPC64_GOT_TLSLD16_HA": true,
+ "R_PPC64_GOT_TLSLD16_HI": true,
+ "R_PPC64_GOT_TLSLD16_LO": true,
+ "R_PPC64_GOT_TPREL16_DS": true,
+ "R_PPC64_GOT_TPREL16_HA": true,
+ "R_PPC64_GOT_TPREL16_HI": true,
+ "R_PPC64_GOT_TPREL16_LO_DS": true,
+ "R_PPC64_IRELATIVE": true,
+ "R_PPC64_JMP_IREL": true,
+ "R_PPC64_JMP_SLOT": true,
+ "R_PPC64_NONE": true,
+ "R_PPC64_PLT16_LO_DS": true,
+ "R_PPC64_PLTGOT16": true,
+ "R_PPC64_PLTGOT16_DS": true,
+ "R_PPC64_PLTGOT16_HA": true,
+ "R_PPC64_PLTGOT16_HI": true,
+ "R_PPC64_PLTGOT16_LO": true,
+ "R_PPC64_PLTGOT_LO_DS": true,
+ "R_PPC64_REL14": true,
+ "R_PPC64_REL14_BRNTAKEN": true,
+ "R_PPC64_REL14_BRTAKEN": true,
+ "R_PPC64_REL16": true,
+ "R_PPC64_REL16DX_HA": true,
+ "R_PPC64_REL16_HA": true,
+ "R_PPC64_REL16_HI": true,
+ "R_PPC64_REL16_LO": true,
+ "R_PPC64_REL24": true,
+ "R_PPC64_REL24_NOTOC": true,
+ "R_PPC64_REL32": true,
+ "R_PPC64_REL64": true,
+ "R_PPC64_SECTOFF_DS": true,
+ "R_PPC64_SECTOFF_LO_DS": true,
+ "R_PPC64_TLS": true,
+ "R_PPC64_TLSGD": true,
+ "R_PPC64_TLSLD": true,
+ "R_PPC64_TOC": true,
+ "R_PPC64_TOC16": true,
+ "R_PPC64_TOC16_DS": true,
+ "R_PPC64_TOC16_HA": true,
+ "R_PPC64_TOC16_HI": true,
+ "R_PPC64_TOC16_LO": true,
+ "R_PPC64_TOC16_LO_DS": true,
+ "R_PPC64_TOCSAVE": true,
+ "R_PPC64_TPREL16": true,
+ "R_PPC64_TPREL16_DS": true,
+ "R_PPC64_TPREL16_HA": true,
+ "R_PPC64_TPREL16_HI": true,
+ "R_PPC64_TPREL16_HIGH": true,
+ "R_PPC64_TPREL16_HIGHA": true,
+ "R_PPC64_TPREL16_HIGHER": true,
+ "R_PPC64_TPREL16_HIGHERA": true,
+ "R_PPC64_TPREL16_HIGHEST": true,
+ "R_PPC64_TPREL16_HIGHESTA": true,
+ "R_PPC64_TPREL16_LO": true,
+ "R_PPC64_TPREL16_LO_DS": true,
+ "R_PPC64_TPREL64": true,
+ "R_PPC_ADDR14": true,
+ "R_PPC_ADDR14_BRNTAKEN": true,
+ "R_PPC_ADDR14_BRTAKEN": true,
+ "R_PPC_ADDR16": true,
+ "R_PPC_ADDR16_HA": true,
+ "R_PPC_ADDR16_HI": true,
+ "R_PPC_ADDR16_LO": true,
+ "R_PPC_ADDR24": true,
+ "R_PPC_ADDR32": true,
+ "R_PPC_COPY": true,
+ "R_PPC_DTPMOD32": true,
+ "R_PPC_DTPREL16": true,
+ "R_PPC_DTPREL16_HA": true,
+ "R_PPC_DTPREL16_HI": true,
+ "R_PPC_DTPREL16_LO": true,
+ "R_PPC_DTPREL32": true,
+ "R_PPC_EMB_BIT_FLD": true,
+ "R_PPC_EMB_MRKREF": true,
+ "R_PPC_EMB_NADDR16": true,
+ "R_PPC_EMB_NADDR16_HA": true,
+ "R_PPC_EMB_NADDR16_HI": true,
+ "R_PPC_EMB_NADDR16_LO": true,
+ "R_PPC_EMB_NADDR32": true,
+ "R_PPC_EMB_RELSDA": true,
+ "R_PPC_EMB_RELSEC16": true,
+ "R_PPC_EMB_RELST_HA": true,
+ "R_PPC_EMB_RELST_HI": true,
+ "R_PPC_EMB_RELST_LO": true,
+ "R_PPC_EMB_SDA21": true,
+ "R_PPC_EMB_SDA2I16": true,
+ "R_PPC_EMB_SDA2REL": true,
+ "R_PPC_EMB_SDAI16": true,
+ "R_PPC_GLOB_DAT": true,
+ "R_PPC_GOT16": true,
+ "R_PPC_GOT16_HA": true,
+ "R_PPC_GOT16_HI": true,
+ "R_PPC_GOT16_LO": true,
+ "R_PPC_GOT_TLSGD16": true,
+ "R_PPC_GOT_TLSGD16_HA": true,
+ "R_PPC_GOT_TLSGD16_HI": true,
+ "R_PPC_GOT_TLSGD16_LO": true,
+ "R_PPC_GOT_TLSLD16": true,
+ "R_PPC_GOT_TLSLD16_HA": true,
+ "R_PPC_GOT_TLSLD16_HI": true,
+ "R_PPC_GOT_TLSLD16_LO": true,
+ "R_PPC_GOT_TPREL16": true,
+ "R_PPC_GOT_TPREL16_HA": true,
+ "R_PPC_GOT_TPREL16_HI": true,
+ "R_PPC_GOT_TPREL16_LO": true,
+ "R_PPC_JMP_SLOT": true,
+ "R_PPC_LOCAL24PC": true,
+ "R_PPC_NONE": true,
+ "R_PPC_PLT16_HA": true,
+ "R_PPC_PLT16_HI": true,
+ "R_PPC_PLT16_LO": true,
+ "R_PPC_PLT32": true,
+ "R_PPC_PLTREL24": true,
+ "R_PPC_PLTREL32": true,
+ "R_PPC_REL14": true,
+ "R_PPC_REL14_BRNTAKEN": true,
+ "R_PPC_REL14_BRTAKEN": true,
+ "R_PPC_REL24": true,
+ "R_PPC_REL32": true,
+ "R_PPC_RELATIVE": true,
+ "R_PPC_SDAREL16": true,
+ "R_PPC_SECTOFF": true,
+ "R_PPC_SECTOFF_HA": true,
+ "R_PPC_SECTOFF_HI": true,
+ "R_PPC_SECTOFF_LO": true,
+ "R_PPC_TLS": true,
+ "R_PPC_TPREL16": true,
+ "R_PPC_TPREL16_HA": true,
+ "R_PPC_TPREL16_HI": true,
+ "R_PPC_TPREL16_LO": true,
+ "R_PPC_TPREL32": true,
+ "R_PPC_UADDR16": true,
+ "R_PPC_UADDR32": true,
+ "R_RISCV": true,
+ "R_RISCV_32": true,
+ "R_RISCV_32_PCREL": true,
+ "R_RISCV_64": true,
+ "R_RISCV_ADD16": true,
+ "R_RISCV_ADD32": true,
+ "R_RISCV_ADD64": true,
+ "R_RISCV_ADD8": true,
+ "R_RISCV_ALIGN": true,
+ "R_RISCV_BRANCH": true,
+ "R_RISCV_CALL": true,
+ "R_RISCV_CALL_PLT": true,
+ "R_RISCV_COPY": true,
+ "R_RISCV_GNU_VTENTRY": true,
+ "R_RISCV_GNU_VTINHERIT": true,
+ "R_RISCV_GOT_HI20": true,
+ "R_RISCV_GPREL_I": true,
+ "R_RISCV_GPREL_S": true,
+ "R_RISCV_HI20": true,
+ "R_RISCV_JAL": true,
+ "R_RISCV_JUMP_SLOT": true,
+ "R_RISCV_LO12_I": true,
+ "R_RISCV_LO12_S": true,
+ "R_RISCV_NONE": true,
+ "R_RISCV_PCREL_HI20": true,
+ "R_RISCV_PCREL_LO12_I": true,
+ "R_RISCV_PCREL_LO12_S": true,
+ "R_RISCV_RELATIVE": true,
+ "R_RISCV_RELAX": true,
+ "R_RISCV_RVC_BRANCH": true,
+ "R_RISCV_RVC_JUMP": true,
+ "R_RISCV_RVC_LUI": true,
+ "R_RISCV_SET16": true,
+ "R_RISCV_SET32": true,
+ "R_RISCV_SET6": true,
+ "R_RISCV_SET8": true,
+ "R_RISCV_SUB16": true,
+ "R_RISCV_SUB32": true,
+ "R_RISCV_SUB6": true,
+ "R_RISCV_SUB64": true,
+ "R_RISCV_SUB8": true,
+ "R_RISCV_TLS_DTPMOD32": true,
+ "R_RISCV_TLS_DTPMOD64": true,
+ "R_RISCV_TLS_DTPREL32": true,
+ "R_RISCV_TLS_DTPREL64": true,
+ "R_RISCV_TLS_GD_HI20": true,
+ "R_RISCV_TLS_GOT_HI20": true,
+ "R_RISCV_TLS_TPREL32": true,
+ "R_RISCV_TLS_TPREL64": true,
+ "R_RISCV_TPREL_ADD": true,
+ "R_RISCV_TPREL_HI20": true,
+ "R_RISCV_TPREL_I": true,
+ "R_RISCV_TPREL_LO12_I": true,
+ "R_RISCV_TPREL_LO12_S": true,
+ "R_RISCV_TPREL_S": true,
+ "R_SPARC": true,
+ "R_SPARC_10": true,
+ "R_SPARC_11": true,
+ "R_SPARC_13": true,
+ "R_SPARC_16": true,
+ "R_SPARC_22": true,
+ "R_SPARC_32": true,
+ "R_SPARC_5": true,
+ "R_SPARC_6": true,
+ "R_SPARC_64": true,
+ "R_SPARC_7": true,
+ "R_SPARC_8": true,
+ "R_SPARC_COPY": true,
+ "R_SPARC_DISP16": true,
+ "R_SPARC_DISP32": true,
+ "R_SPARC_DISP64": true,
+ "R_SPARC_DISP8": true,
+ "R_SPARC_GLOB_DAT": true,
+ "R_SPARC_GLOB_JMP": true,
+ "R_SPARC_GOT10": true,
+ "R_SPARC_GOT13": true,
+ "R_SPARC_GOT22": true,
+ "R_SPARC_H44": true,
+ "R_SPARC_HH22": true,
+ "R_SPARC_HI22": true,
+ "R_SPARC_HIPLT22": true,
+ "R_SPARC_HIX22": true,
+ "R_SPARC_HM10": true,
+ "R_SPARC_JMP_SLOT": true,
+ "R_SPARC_L44": true,
+ "R_SPARC_LM22": true,
+ "R_SPARC_LO10": true,
+ "R_SPARC_LOPLT10": true,
+ "R_SPARC_LOX10": true,
+ "R_SPARC_M44": true,
+ "R_SPARC_NONE": true,
+ "R_SPARC_OLO10": true,
+ "R_SPARC_PC10": true,
+ "R_SPARC_PC22": true,
+ "R_SPARC_PCPLT10": true,
+ "R_SPARC_PCPLT22": true,
+ "R_SPARC_PCPLT32": true,
+ "R_SPARC_PC_HH22": true,
+ "R_SPARC_PC_HM10": true,
+ "R_SPARC_PC_LM22": true,
+ "R_SPARC_PLT32": true,
+ "R_SPARC_PLT64": true,
+ "R_SPARC_REGISTER": true,
+ "R_SPARC_RELATIVE": true,
+ "R_SPARC_UA16": true,
+ "R_SPARC_UA32": true,
+ "R_SPARC_UA64": true,
+ "R_SPARC_WDISP16": true,
+ "R_SPARC_WDISP19": true,
+ "R_SPARC_WDISP22": true,
+ "R_SPARC_WDISP30": true,
+ "R_SPARC_WPLT30": true,
+ "R_SYM32": true,
+ "R_SYM64": true,
+ "R_TYPE32": true,
+ "R_TYPE64": true,
+ "R_X86_64": true,
+ "R_X86_64_16": true,
+ "R_X86_64_32": true,
+ "R_X86_64_32S": true,
+ "R_X86_64_64": true,
+ "R_X86_64_8": true,
+ "R_X86_64_COPY": true,
+ "R_X86_64_DTPMOD64": true,
+ "R_X86_64_DTPOFF32": true,
+ "R_X86_64_DTPOFF64": true,
+ "R_X86_64_GLOB_DAT": true,
+ "R_X86_64_GOT32": true,
+ "R_X86_64_GOT64": true,
+ "R_X86_64_GOTOFF64": true,
+ "R_X86_64_GOTPC32": true,
+ "R_X86_64_GOTPC32_TLSDESC": true,
+ "R_X86_64_GOTPC64": true,
+ "R_X86_64_GOTPCREL": true,
+ "R_X86_64_GOTPCREL64": true,
+ "R_X86_64_GOTPCRELX": true,
+ "R_X86_64_GOTPLT64": true,
+ "R_X86_64_GOTTPOFF": true,
+ "R_X86_64_IRELATIVE": true,
+ "R_X86_64_JMP_SLOT": true,
+ "R_X86_64_NONE": true,
+ "R_X86_64_PC16": true,
+ "R_X86_64_PC32": true,
+ "R_X86_64_PC32_BND": true,
+ "R_X86_64_PC64": true,
+ "R_X86_64_PC8": true,
+ "R_X86_64_PLT32": true,
+ "R_X86_64_PLT32_BND": true,
+ "R_X86_64_PLTOFF64": true,
+ "R_X86_64_RELATIVE": true,
+ "R_X86_64_RELATIVE64": true,
+ "R_X86_64_REX_GOTPCRELX": true,
+ "R_X86_64_SIZE32": true,
+ "R_X86_64_SIZE64": true,
+ "R_X86_64_TLSDESC": true,
+ "R_X86_64_TLSDESC_CALL": true,
+ "R_X86_64_TLSGD": true,
+ "R_X86_64_TLSLD": true,
+ "R_X86_64_TPOFF32": true,
+ "R_X86_64_TPOFF64": true,
+ "Rel32": true,
+ "Rel64": true,
+ "Rela32": true,
+ "Rela64": true,
+ "SHF_ALLOC": true,
+ "SHF_COMPRESSED": true,
+ "SHF_EXECINSTR": true,
+ "SHF_GROUP": true,
+ "SHF_INFO_LINK": true,
+ "SHF_LINK_ORDER": true,
+ "SHF_MASKOS": true,
+ "SHF_MASKPROC": true,
+ "SHF_MERGE": true,
+ "SHF_OS_NONCONFORMING": true,
+ "SHF_STRINGS": true,
+ "SHF_TLS": true,
+ "SHF_WRITE": true,
+ "SHN_ABS": true,
+ "SHN_COMMON": true,
+ "SHN_HIOS": true,
+ "SHN_HIPROC": true,
+ "SHN_HIRESERVE": true,
+ "SHN_LOOS": true,
+ "SHN_LOPROC": true,
+ "SHN_LORESERVE": true,
+ "SHN_UNDEF": true,
+ "SHN_XINDEX": true,
+ "SHT_DYNAMIC": true,
+ "SHT_DYNSYM": true,
+ "SHT_FINI_ARRAY": true,
+ "SHT_GNU_ATTRIBUTES": true,
+ "SHT_GNU_HASH": true,
+ "SHT_GNU_LIBLIST": true,
+ "SHT_GNU_VERDEF": true,
+ "SHT_GNU_VERNEED": true,
+ "SHT_GNU_VERSYM": true,
+ "SHT_GROUP": true,
+ "SHT_HASH": true,
+ "SHT_HIOS": true,
+ "SHT_HIPROC": true,
+ "SHT_HIUSER": true,
+ "SHT_INIT_ARRAY": true,
+ "SHT_LOOS": true,
+ "SHT_LOPROC": true,
+ "SHT_LOUSER": true,
+ "SHT_NOBITS": true,
+ "SHT_NOTE": true,
+ "SHT_NULL": true,
+ "SHT_PREINIT_ARRAY": true,
+ "SHT_PROGBITS": true,
+ "SHT_REL": true,
+ "SHT_RELA": true,
+ "SHT_SHLIB": true,
+ "SHT_STRTAB": true,
+ "SHT_SYMTAB": true,
+ "SHT_SYMTAB_SHNDX": true,
+ "STB_GLOBAL": true,
+ "STB_HIOS": true,
+ "STB_HIPROC": true,
+ "STB_LOCAL": true,
+ "STB_LOOS": true,
+ "STB_LOPROC": true,
+ "STB_WEAK": true,
+ "STT_COMMON": true,
+ "STT_FILE": true,
+ "STT_FUNC": true,
+ "STT_HIOS": true,
+ "STT_HIPROC": true,
+ "STT_LOOS": true,
+ "STT_LOPROC": true,
+ "STT_NOTYPE": true,
+ "STT_OBJECT": true,
+ "STT_SECTION": true,
+ "STT_TLS": true,
+ "STV_DEFAULT": true,
+ "STV_HIDDEN": true,
+ "STV_INTERNAL": true,
+ "STV_PROTECTED": true,
+ "ST_BIND": true,
+ "ST_INFO": true,
+ "ST_TYPE": true,
+ "ST_VISIBILITY": true,
+ "Section": true,
+ "Section32": true,
+ "Section64": true,
+ "SectionFlag": true,
+ "SectionHeader": true,
+ "SectionIndex": true,
+ "SectionType": true,
+ "Sym32": true,
+ "Sym32Size": true,
+ "Sym64": true,
+ "Sym64Size": true,
+ "SymBind": true,
+ "SymType": true,
+ "SymVis": true,
+ "Symbol": true,
+ "Type": true,
+ "Version": true,
+ },
+ "debug/gosym": map[string]bool{
+ "DecodingError": true,
+ "Func": true,
+ "LineTable": true,
+ "NewLineTable": true,
+ "NewTable": true,
+ "Obj": true,
+ "Sym": true,
+ "Table": true,
+ "UnknownFileError": true,
+ "UnknownLineError": true,
+ },
+ "debug/macho": map[string]bool{
+ "ARM64_RELOC_ADDEND": true,
+ "ARM64_RELOC_BRANCH26": true,
+ "ARM64_RELOC_GOT_LOAD_PAGE21": true,
+ "ARM64_RELOC_GOT_LOAD_PAGEOFF12": true,
+ "ARM64_RELOC_PAGE21": true,
+ "ARM64_RELOC_PAGEOFF12": true,
+ "ARM64_RELOC_POINTER_TO_GOT": true,
+ "ARM64_RELOC_SUBTRACTOR": true,
+ "ARM64_RELOC_TLVP_LOAD_PAGE21": true,
+ "ARM64_RELOC_TLVP_LOAD_PAGEOFF12": true,
+ "ARM64_RELOC_UNSIGNED": true,
+ "ARM_RELOC_BR24": true,
+ "ARM_RELOC_HALF": true,
+ "ARM_RELOC_HALF_SECTDIFF": true,
+ "ARM_RELOC_LOCAL_SECTDIFF": true,
+ "ARM_RELOC_PAIR": true,
+ "ARM_RELOC_PB_LA_PTR": true,
+ "ARM_RELOC_SECTDIFF": true,
+ "ARM_RELOC_VANILLA": true,
+ "ARM_THUMB_32BIT_BRANCH": true,
+ "ARM_THUMB_RELOC_BR22": true,
+ "Cpu": true,
+ "Cpu386": true,
+ "CpuAmd64": true,
+ "CpuArm": true,
+ "CpuArm64": true,
+ "CpuPpc": true,
+ "CpuPpc64": true,
+ "Dylib": true,
+ "DylibCmd": true,
+ "Dysymtab": true,
+ "DysymtabCmd": true,
+ "ErrNotFat": true,
+ "FatArch": true,
+ "FatArchHeader": true,
+ "FatFile": true,
+ "File": true,
+ "FileHeader": true,
+ "FlagAllModsBound": true,
+ "FlagAllowStackExecution": true,
+ "FlagAppExtensionSafe": true,
+ "FlagBindAtLoad": true,
+ "FlagBindsToWeak": true,
+ "FlagCanonical": true,
+ "FlagDeadStrippableDylib": true,
+ "FlagDyldLink": true,
+ "FlagForceFlat": true,
+ "FlagHasTLVDescriptors": true,
+ "FlagIncrLink": true,
+ "FlagLazyInit": true,
+ "FlagNoFixPrebinding": true,
+ "FlagNoHeapExecution": true,
+ "FlagNoMultiDefs": true,
+ "FlagNoReexportedDylibs": true,
+ "FlagNoUndefs": true,
+ "FlagPIE": true,
+ "FlagPrebindable": true,
+ "FlagPrebound": true,
+ "FlagRootSafe": true,
+ "FlagSetuidSafe": true,
+ "FlagSplitSegs": true,
+ "FlagSubsectionsViaSymbols": true,
+ "FlagTwoLevel": true,
+ "FlagWeakDefines": true,
+ "FormatError": true,
+ "GENERIC_RELOC_LOCAL_SECTDIFF": true,
+ "GENERIC_RELOC_PAIR": true,
+ "GENERIC_RELOC_PB_LA_PTR": true,
+ "GENERIC_RELOC_SECTDIFF": true,
+ "GENERIC_RELOC_TLV": true,
+ "GENERIC_RELOC_VANILLA": true,
+ "Load": true,
+ "LoadBytes": true,
+ "LoadCmd": true,
+ "LoadCmdDylib": true,
+ "LoadCmdDylinker": true,
+ "LoadCmdDysymtab": true,
+ "LoadCmdRpath": true,
+ "LoadCmdSegment": true,
+ "LoadCmdSegment64": true,
+ "LoadCmdSymtab": true,
+ "LoadCmdThread": true,
+ "LoadCmdUnixThread": true,
+ "Magic32": true,
+ "Magic64": true,
+ "MagicFat": true,
+ "NewFatFile": true,
+ "NewFile": true,
+ "Nlist32": true,
+ "Nlist64": true,
+ "Open": true,
+ "OpenFat": true,
+ "Regs386": true,
+ "RegsAMD64": true,
+ "Reloc": true,
+ "RelocTypeARM": true,
+ "RelocTypeARM64": true,
+ "RelocTypeGeneric": true,
+ "RelocTypeX86_64": true,
+ "Rpath": true,
+ "RpathCmd": true,
+ "Section": true,
+ "Section32": true,
+ "Section64": true,
+ "SectionHeader": true,
+ "Segment": true,
+ "Segment32": true,
+ "Segment64": true,
+ "SegmentHeader": true,
+ "Symbol": true,
+ "Symtab": true,
+ "SymtabCmd": true,
+ "Thread": true,
+ "Type": true,
+ "TypeBundle": true,
+ "TypeDylib": true,
+ "TypeExec": true,
+ "TypeObj": true,
+ "X86_64_RELOC_BRANCH": true,
+ "X86_64_RELOC_GOT": true,
+ "X86_64_RELOC_GOT_LOAD": true,
+ "X86_64_RELOC_SIGNED": true,
+ "X86_64_RELOC_SIGNED_1": true,
+ "X86_64_RELOC_SIGNED_2": true,
+ "X86_64_RELOC_SIGNED_4": true,
+ "X86_64_RELOC_SUBTRACTOR": true,
+ "X86_64_RELOC_TLV": true,
+ "X86_64_RELOC_UNSIGNED": true,
+ },
+ "debug/pe": map[string]bool{
+ "COFFSymbol": true,
+ "COFFSymbolSize": true,
+ "DataDirectory": true,
+ "File": true,
+ "FileHeader": true,
+ "FormatError": true,
+ "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE": true,
+ "IMAGE_DIRECTORY_ENTRY_BASERELOC": true,
+ "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR": true,
+ "IMAGE_DIRECTORY_ENTRY_DEBUG": true,
+ "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_EXCEPTION": true,
+ "IMAGE_DIRECTORY_ENTRY_EXPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_GLOBALPTR": true,
+ "IMAGE_DIRECTORY_ENTRY_IAT": true,
+ "IMAGE_DIRECTORY_ENTRY_IMPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG": true,
+ "IMAGE_DIRECTORY_ENTRY_RESOURCE": true,
+ "IMAGE_DIRECTORY_ENTRY_SECURITY": true,
+ "IMAGE_DIRECTORY_ENTRY_TLS": true,
+ "IMAGE_FILE_MACHINE_AM33": true,
+ "IMAGE_FILE_MACHINE_AMD64": true,
+ "IMAGE_FILE_MACHINE_ARM": true,
+ "IMAGE_FILE_MACHINE_ARM64": true,
+ "IMAGE_FILE_MACHINE_ARMNT": true,
+ "IMAGE_FILE_MACHINE_EBC": true,
+ "IMAGE_FILE_MACHINE_I386": true,
+ "IMAGE_FILE_MACHINE_IA64": true,
+ "IMAGE_FILE_MACHINE_M32R": true,
+ "IMAGE_FILE_MACHINE_MIPS16": true,
+ "IMAGE_FILE_MACHINE_MIPSFPU": true,
+ "IMAGE_FILE_MACHINE_MIPSFPU16": true,
+ "IMAGE_FILE_MACHINE_POWERPC": true,
+ "IMAGE_FILE_MACHINE_POWERPCFP": true,
+ "IMAGE_FILE_MACHINE_R4000": true,
+ "IMAGE_FILE_MACHINE_SH3": true,
+ "IMAGE_FILE_MACHINE_SH3DSP": true,
+ "IMAGE_FILE_MACHINE_SH4": true,
+ "IMAGE_FILE_MACHINE_SH5": true,
+ "IMAGE_FILE_MACHINE_THUMB": true,
+ "IMAGE_FILE_MACHINE_UNKNOWN": true,
+ "IMAGE_FILE_MACHINE_WCEMIPSV2": true,
+ "ImportDirectory": true,
+ "NewFile": true,
+ "Open": true,
+ "OptionalHeader32": true,
+ "OptionalHeader64": true,
+ "Reloc": true,
+ "Section": true,
+ "SectionHeader": true,
+ "SectionHeader32": true,
+ "StringTable": true,
+ "Symbol": true,
+ },
+ "debug/plan9obj": map[string]bool{
+ "File": true,
+ "FileHeader": true,
+ "Magic386": true,
+ "Magic64": true,
+ "MagicAMD64": true,
+ "MagicARM": true,
+ "NewFile": true,
+ "Open": true,
+ "Section": true,
+ "SectionHeader": true,
+ "Sym": true,
+ },
+ "encoding": map[string]bool{
+ "BinaryMarshaler": true,
+ "BinaryUnmarshaler": true,
+ "TextMarshaler": true,
+ "TextUnmarshaler": true,
+ },
+ "encoding/ascii85": map[string]bool{
+ "CorruptInputError": true,
+ "Decode": true,
+ "Encode": true,
+ "MaxEncodedLen": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ },
+ "encoding/asn1": map[string]bool{
+ "BitString": true,
+ "ClassApplication": true,
+ "ClassContextSpecific": true,
+ "ClassPrivate": true,
+ "ClassUniversal": true,
+ "Enumerated": true,
+ "Flag": true,
+ "Marshal": true,
+ "MarshalWithParams": true,
+ "NullBytes": true,
+ "NullRawValue": true,
+ "ObjectIdentifier": true,
+ "RawContent": true,
+ "RawValue": true,
+ "StructuralError": true,
+ "SyntaxError": true,
+ "TagBitString": true,
+ "TagBoolean": true,
+ "TagEnum": true,
+ "TagGeneralString": true,
+ "TagGeneralizedTime": true,
+ "TagIA5String": true,
+ "TagInteger": true,
+ "TagNull": true,
+ "TagNumericString": true,
+ "TagOID": true,
+ "TagOctetString": true,
+ "TagPrintableString": true,
+ "TagSequence": true,
+ "TagSet": true,
+ "TagT61String": true,
+ "TagUTCTime": true,
+ "TagUTF8String": true,
+ "Unmarshal": true,
+ "UnmarshalWithParams": true,
+ },
+ "encoding/base32": map[string]bool{
+ "CorruptInputError": true,
+ "Encoding": true,
+ "HexEncoding": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "NewEncoding": true,
+ "NoPadding": true,
+ "StdEncoding": true,
+ "StdPadding": true,
+ },
+ "encoding/base64": map[string]bool{
+ "CorruptInputError": true,
+ "Encoding": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "NewEncoding": true,
+ "NoPadding": true,
+ "RawStdEncoding": true,
+ "RawURLEncoding": true,
+ "StdEncoding": true,
+ "StdPadding": true,
+ "URLEncoding": true,
+ },
+ "encoding/binary": map[string]bool{
+ "BigEndian": true,
+ "ByteOrder": true,
+ "LittleEndian": true,
+ "MaxVarintLen16": true,
+ "MaxVarintLen32": true,
+ "MaxVarintLen64": true,
+ "PutUvarint": true,
+ "PutVarint": true,
+ "Read": true,
+ "ReadUvarint": true,
+ "ReadVarint": true,
+ "Size": true,
+ "Uvarint": true,
+ "Varint": true,
+ "Write": true,
+ },
+ "encoding/csv": map[string]bool{
+ "ErrBareQuote": true,
+ "ErrFieldCount": true,
+ "ErrQuote": true,
+ "ErrTrailingComma": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "ParseError": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "encoding/gob": map[string]bool{
+ "CommonType": true,
+ "Decoder": true,
+ "Encoder": true,
+ "GobDecoder": true,
+ "GobEncoder": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "Register": true,
+ "RegisterName": true,
+ },
+ "encoding/hex": map[string]bool{
+ "Decode": true,
+ "DecodeString": true,
+ "DecodedLen": true,
+ "Dump": true,
+ "Dumper": true,
+ "Encode": true,
+ "EncodeToString": true,
+ "EncodedLen": true,
+ "ErrLength": true,
+ "InvalidByteError": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ },
+ "encoding/json": map[string]bool{
+ "Compact": true,
+ "Decoder": true,
+ "Delim": true,
+ "Encoder": true,
+ "HTMLEscape": true,
+ "Indent": true,
+ "InvalidUTF8Error": true,
+ "InvalidUnmarshalError": true,
+ "Marshal": true,
+ "MarshalIndent": true,
+ "Marshaler": true,
+ "MarshalerError": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "Number": true,
+ "RawMessage": true,
+ "SyntaxError": true,
+ "Token": true,
+ "Unmarshal": true,
+ "UnmarshalFieldError": true,
+ "UnmarshalTypeError": true,
+ "Unmarshaler": true,
+ "UnsupportedTypeError": true,
+ "UnsupportedValueError": true,
+ "Valid": true,
+ },
+ "encoding/pem": map[string]bool{
+ "Block": true,
+ "Decode": true,
+ "Encode": true,
+ "EncodeToMemory": true,
+ },
+ "encoding/xml": map[string]bool{
+ "Attr": true,
+ "CharData": true,
+ "Comment": true,
+ "CopyToken": true,
+ "Decoder": true,
+ "Directive": true,
+ "Encoder": true,
+ "EndElement": true,
+ "Escape": true,
+ "EscapeText": true,
+ "HTMLAutoClose": true,
+ "HTMLEntity": true,
+ "Header": true,
+ "Marshal": true,
+ "MarshalIndent": true,
+ "Marshaler": true,
+ "MarshalerAttr": true,
+ "Name": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "NewTokenDecoder": true,
+ "ProcInst": true,
+ "StartElement": true,
+ "SyntaxError": true,
+ "TagPathError": true,
+ "Token": true,
+ "TokenReader": true,
+ "Unmarshal": true,
+ "UnmarshalError": true,
+ "Unmarshaler": true,
+ "UnmarshalerAttr": true,
+ "UnsupportedTypeError": true,
+ },
+ "errors": map[string]bool{
+ "New": true,
+ },
+ "expvar": map[string]bool{
+ "Do": true,
+ "Float": true,
+ "Func": true,
+ "Get": true,
+ "Handler": true,
+ "Int": true,
+ "KeyValue": true,
+ "Map": true,
+ "NewFloat": true,
+ "NewInt": true,
+ "NewMap": true,
+ "NewString": true,
+ "Publish": true,
+ "String": true,
+ "Var": true,
+ },
+ "flag": map[string]bool{
+ "Arg": true,
+ "Args": true,
+ "Bool": true,
+ "BoolVar": true,
+ "CommandLine": true,
+ "ContinueOnError": true,
+ "Duration": true,
+ "DurationVar": true,
+ "ErrHelp": true,
+ "ErrorHandling": true,
+ "ExitOnError": true,
+ "Flag": true,
+ "FlagSet": true,
+ "Float64": true,
+ "Float64Var": true,
+ "Getter": true,
+ "Int": true,
+ "Int64": true,
+ "Int64Var": true,
+ "IntVar": true,
+ "Lookup": true,
+ "NArg": true,
+ "NFlag": true,
+ "NewFlagSet": true,
+ "PanicOnError": true,
+ "Parse": true,
+ "Parsed": true,
+ "PrintDefaults": true,
+ "Set": true,
+ "String": true,
+ "StringVar": true,
+ "Uint": true,
+ "Uint64": true,
+ "Uint64Var": true,
+ "UintVar": true,
+ "UnquoteUsage": true,
+ "Usage": true,
+ "Value": true,
+ "Var": true,
+ "Visit": true,
+ "VisitAll": true,
+ },
+ "fmt": map[string]bool{
+ "Errorf": true,
+ "Formatter": true,
+ "Fprint": true,
+ "Fprintf": true,
+ "Fprintln": true,
+ "Fscan": true,
+ "Fscanf": true,
+ "Fscanln": true,
+ "GoStringer": true,
+ "Print": true,
+ "Printf": true,
+ "Println": true,
+ "Scan": true,
+ "ScanState": true,
+ "Scanf": true,
+ "Scanln": true,
+ "Scanner": true,
+ "Sprint": true,
+ "Sprintf": true,
+ "Sprintln": true,
+ "Sscan": true,
+ "Sscanf": true,
+ "Sscanln": true,
+ "State": true,
+ "Stringer": true,
+ },
+ "go/ast": map[string]bool{
+ "ArrayType": true,
+ "AssignStmt": true,
+ "Bad": true,
+ "BadDecl": true,
+ "BadExpr": true,
+ "BadStmt": true,
+ "BasicLit": true,
+ "BinaryExpr": true,
+ "BlockStmt": true,
+ "BranchStmt": true,
+ "CallExpr": true,
+ "CaseClause": true,
+ "ChanDir": true,
+ "ChanType": true,
+ "CommClause": true,
+ "Comment": true,
+ "CommentGroup": true,
+ "CommentMap": true,
+ "CompositeLit": true,
+ "Con": true,
+ "DeclStmt": true,
+ "DeferStmt": true,
+ "Ellipsis": true,
+ "EmptyStmt": true,
+ "ExprStmt": true,
+ "Field": true,
+ "FieldFilter": true,
+ "FieldList": true,
+ "File": true,
+ "FileExports": true,
+ "Filter": true,
+ "FilterDecl": true,
+ "FilterFile": true,
+ "FilterFuncDuplicates": true,
+ "FilterImportDuplicates": true,
+ "FilterPackage": true,
+ "FilterUnassociatedComments": true,
+ "ForStmt": true,
+ "Fprint": true,
+ "Fun": true,
+ "FuncDecl": true,
+ "FuncLit": true,
+ "FuncType": true,
+ "GenDecl": true,
+ "GoStmt": true,
+ "Ident": true,
+ "IfStmt": true,
+ "ImportSpec": true,
+ "Importer": true,
+ "IncDecStmt": true,
+ "IndexExpr": true,
+ "Inspect": true,
+ "InterfaceType": true,
+ "IsExported": true,
+ "KeyValueExpr": true,
+ "LabeledStmt": true,
+ "Lbl": true,
+ "MapType": true,
+ "MergeMode": true,
+ "MergePackageFiles": true,
+ "NewCommentMap": true,
+ "NewIdent": true,
+ "NewObj": true,
+ "NewPackage": true,
+ "NewScope": true,
+ "Node": true,
+ "NotNilFilter": true,
+ "ObjKind": true,
+ "Object": true,
+ "Package": true,
+ "PackageExports": true,
+ "ParenExpr": true,
+ "Pkg": true,
+ "Print": true,
+ "RECV": true,
+ "RangeStmt": true,
+ "ReturnStmt": true,
+ "SEND": true,
+ "Scope": true,
+ "SelectStmt": true,
+ "SelectorExpr": true,
+ "SendStmt": true,
+ "SliceExpr": true,
+ "SortImports": true,
+ "StarExpr": true,
+ "StructType": true,
+ "SwitchStmt": true,
+ "Typ": true,
+ "TypeAssertExpr": true,
+ "TypeSpec": true,
+ "TypeSwitchStmt": true,
+ "UnaryExpr": true,
+ "ValueSpec": true,
+ "Var": true,
+ "Visitor": true,
+ "Walk": true,
+ },
+ "go/build": map[string]bool{
+ "AllowBinary": true,
+ "ArchChar": true,
+ "Context": true,
+ "Default": true,
+ "FindOnly": true,
+ "IgnoreVendor": true,
+ "Import": true,
+ "ImportComment": true,
+ "ImportDir": true,
+ "ImportMode": true,
+ "IsLocalImport": true,
+ "MultiplePackageError": true,
+ "NoGoError": true,
+ "Package": true,
+ "ToolDir": true,
+ },
+ "go/constant": map[string]bool{
+ "BinaryOp": true,
+ "BitLen": true,
+ "Bool": true,
+ "BoolVal": true,
+ "Bytes": true,
+ "Compare": true,
+ "Complex": true,
+ "Denom": true,
+ "Float": true,
+ "Float32Val": true,
+ "Float64Val": true,
+ "Imag": true,
+ "Int": true,
+ "Int64Val": true,
+ "Kind": true,
+ "MakeBool": true,
+ "MakeFloat64": true,
+ "MakeFromBytes": true,
+ "MakeFromLiteral": true,
+ "MakeImag": true,
+ "MakeInt64": true,
+ "MakeString": true,
+ "MakeUint64": true,
+ "MakeUnknown": true,
+ "Num": true,
+ "Real": true,
+ "Shift": true,
+ "Sign": true,
+ "String": true,
+ "StringVal": true,
+ "ToComplex": true,
+ "ToFloat": true,
+ "ToInt": true,
+ "Uint64Val": true,
+ "UnaryOp": true,
+ "Unknown": true,
+ },
+ "go/doc": map[string]bool{
+ "AllDecls": true,
+ "AllMethods": true,
+ "Example": true,
+ "Examples": true,
+ "Filter": true,
+ "Func": true,
+ "IllegalPrefixes": true,
+ "IsPredeclared": true,
+ "Mode": true,
+ "New": true,
+ "Note": true,
+ "Package": true,
+ "PreserveAST": true,
+ "Synopsis": true,
+ "ToHTML": true,
+ "ToText": true,
+ "Type": true,
+ "Value": true,
+ },
+ "go/format": map[string]bool{
+ "Node": true,
+ "Source": true,
+ },
+ "go/importer": map[string]bool{
+ "Default": true,
+ "For": true,
+ "ForCompiler": true,
+ "Lookup": true,
+ },
+ "go/parser": map[string]bool{
+ "AllErrors": true,
+ "DeclarationErrors": true,
+ "ImportsOnly": true,
+ "Mode": true,
+ "PackageClauseOnly": true,
+ "ParseComments": true,
+ "ParseDir": true,
+ "ParseExpr": true,
+ "ParseExprFrom": true,
+ "ParseFile": true,
+ "SpuriousErrors": true,
+ "Trace": true,
+ },
+ "go/printer": map[string]bool{
+ "CommentedNode": true,
+ "Config": true,
+ "Fprint": true,
+ "Mode": true,
+ "RawFormat": true,
+ "SourcePos": true,
+ "TabIndent": true,
+ "UseSpaces": true,
+ },
+ "go/scanner": map[string]bool{
+ "Error": true,
+ "ErrorHandler": true,
+ "ErrorList": true,
+ "Mode": true,
+ "PrintError": true,
+ "ScanComments": true,
+ "Scanner": true,
+ },
+ "go/token": map[string]bool{
+ "ADD": true,
+ "ADD_ASSIGN": true,
+ "AND": true,
+ "AND_ASSIGN": true,
+ "AND_NOT": true,
+ "AND_NOT_ASSIGN": true,
+ "ARROW": true,
+ "ASSIGN": true,
+ "BREAK": true,
+ "CASE": true,
+ "CHAN": true,
+ "CHAR": true,
+ "COLON": true,
+ "COMMA": true,
+ "COMMENT": true,
+ "CONST": true,
+ "CONTINUE": true,
+ "DEC": true,
+ "DEFAULT": true,
+ "DEFER": true,
+ "DEFINE": true,
+ "ELLIPSIS": true,
+ "ELSE": true,
+ "EOF": true,
+ "EQL": true,
+ "FALLTHROUGH": true,
+ "FLOAT": true,
+ "FOR": true,
+ "FUNC": true,
+ "File": true,
+ "FileSet": true,
+ "GEQ": true,
+ "GO": true,
+ "GOTO": true,
+ "GTR": true,
+ "HighestPrec": true,
+ "IDENT": true,
+ "IF": true,
+ "ILLEGAL": true,
+ "IMAG": true,
+ "IMPORT": true,
+ "INC": true,
+ "INT": true,
+ "INTERFACE": true,
+ "LAND": true,
+ "LBRACE": true,
+ "LBRACK": true,
+ "LEQ": true,
+ "LOR": true,
+ "LPAREN": true,
+ "LSS": true,
+ "Lookup": true,
+ "LowestPrec": true,
+ "MAP": true,
+ "MUL": true,
+ "MUL_ASSIGN": true,
+ "NEQ": true,
+ "NOT": true,
+ "NewFileSet": true,
+ "NoPos": true,
+ "OR": true,
+ "OR_ASSIGN": true,
+ "PACKAGE": true,
+ "PERIOD": true,
+ "Pos": true,
+ "Position": true,
+ "QUO": true,
+ "QUO_ASSIGN": true,
+ "RANGE": true,
+ "RBRACE": true,
+ "RBRACK": true,
+ "REM": true,
+ "REM_ASSIGN": true,
+ "RETURN": true,
+ "RPAREN": true,
+ "SELECT": true,
+ "SEMICOLON": true,
+ "SHL": true,
+ "SHL_ASSIGN": true,
+ "SHR": true,
+ "SHR_ASSIGN": true,
+ "STRING": true,
+ "STRUCT": true,
+ "SUB": true,
+ "SUB_ASSIGN": true,
+ "SWITCH": true,
+ "TYPE": true,
+ "Token": true,
+ "UnaryPrec": true,
+ "VAR": true,
+ "XOR": true,
+ "XOR_ASSIGN": true,
+ },
+ "go/types": map[string]bool{
+ "Array": true,
+ "AssertableTo": true,
+ "AssignableTo": true,
+ "Basic": true,
+ "BasicInfo": true,
+ "BasicKind": true,
+ "Bool": true,
+ "Builtin": true,
+ "Byte": true,
+ "Chan": true,
+ "ChanDir": true,
+ "Checker": true,
+ "Comparable": true,
+ "Complex128": true,
+ "Complex64": true,
+ "Config": true,
+ "Const": true,
+ "ConvertibleTo": true,
+ "DefPredeclaredTestFuncs": true,
+ "Default": true,
+ "Error": true,
+ "Eval": true,
+ "ExprString": true,
+ "FieldVal": true,
+ "Float32": true,
+ "Float64": true,
+ "Func": true,
+ "Id": true,
+ "Identical": true,
+ "IdenticalIgnoreTags": true,
+ "Implements": true,
+ "ImportMode": true,
+ "Importer": true,
+ "ImporterFrom": true,
+ "Info": true,
+ "Initializer": true,
+ "Int": true,
+ "Int16": true,
+ "Int32": true,
+ "Int64": true,
+ "Int8": true,
+ "Interface": true,
+ "Invalid": true,
+ "IsBoolean": true,
+ "IsComplex": true,
+ "IsConstType": true,
+ "IsFloat": true,
+ "IsInteger": true,
+ "IsInterface": true,
+ "IsNumeric": true,
+ "IsOrdered": true,
+ "IsString": true,
+ "IsUnsigned": true,
+ "IsUntyped": true,
+ "Label": true,
+ "LookupFieldOrMethod": true,
+ "Map": true,
+ "MethodExpr": true,
+ "MethodSet": true,
+ "MethodVal": true,
+ "MissingMethod": true,
+ "Named": true,
+ "NewArray": true,
+ "NewChan": true,
+ "NewChecker": true,
+ "NewConst": true,
+ "NewField": true,
+ "NewFunc": true,
+ "NewInterface": true,
+ "NewInterfaceType": true,
+ "NewLabel": true,
+ "NewMap": true,
+ "NewMethodSet": true,
+ "NewNamed": true,
+ "NewPackage": true,
+ "NewParam": true,
+ "NewPkgName": true,
+ "NewPointer": true,
+ "NewScope": true,
+ "NewSignature": true,
+ "NewSlice": true,
+ "NewStruct": true,
+ "NewTuple": true,
+ "NewTypeName": true,
+ "NewVar": true,
+ "Nil": true,
+ "ObjectString": true,
+ "Package": true,
+ "PkgName": true,
+ "Pointer": true,
+ "Qualifier": true,
+ "RecvOnly": true,
+ "RelativeTo": true,
+ "Rune": true,
+ "Scope": true,
+ "Selection": true,
+ "SelectionKind": true,
+ "SelectionString": true,
+ "SendOnly": true,
+ "SendRecv": true,
+ "Signature": true,
+ "Sizes": true,
+ "SizesFor": true,
+ "Slice": true,
+ "StdSizes": true,
+ "String": true,
+ "Struct": true,
+ "Tuple": true,
+ "Typ": true,
+ "Type": true,
+ "TypeAndValue": true,
+ "TypeName": true,
+ "TypeString": true,
+ "Uint": true,
+ "Uint16": true,
+ "Uint32": true,
+ "Uint64": true,
+ "Uint8": true,
+ "Uintptr": true,
+ "Universe": true,
+ "Unsafe": true,
+ "UnsafePointer": true,
+ "UntypedBool": true,
+ "UntypedComplex": true,
+ "UntypedFloat": true,
+ "UntypedInt": true,
+ "UntypedNil": true,
+ "UntypedRune": true,
+ "UntypedString": true,
+ "Var": true,
+ "WriteExpr": true,
+ "WriteSignature": true,
+ "WriteType": true,
+ },
+ "hash": map[string]bool{
+ "Hash": true,
+ "Hash32": true,
+ "Hash64": true,
+ },
+ "hash/adler32": map[string]bool{
+ "Checksum": true,
+ "New": true,
+ "Size": true,
+ },
+ "hash/crc32": map[string]bool{
+ "Castagnoli": true,
+ "Checksum": true,
+ "ChecksumIEEE": true,
+ "IEEE": true,
+ "IEEETable": true,
+ "Koopman": true,
+ "MakeTable": true,
+ "New": true,
+ "NewIEEE": true,
+ "Size": true,
+ "Table": true,
+ "Update": true,
+ },
+ "hash/crc64": map[string]bool{
+ "Checksum": true,
+ "ECMA": true,
+ "ISO": true,
+ "MakeTable": true,
+ "New": true,
+ "Size": true,
+ "Table": true,
+ "Update": true,
+ },
+ "hash/fnv": map[string]bool{
+ "New128": true,
+ "New128a": true,
+ "New32": true,
+ "New32a": true,
+ "New64": true,
+ "New64a": true,
+ },
+ "html": map[string]bool{
+ "EscapeString": true,
+ "UnescapeString": true,
+ },
+ "html/template": map[string]bool{
+ "CSS": true,
+ "ErrAmbigContext": true,
+ "ErrBadHTML": true,
+ "ErrBranchEnd": true,
+ "ErrEndContext": true,
+ "ErrNoSuchTemplate": true,
+ "ErrOutputContext": true,
+ "ErrPartialCharset": true,
+ "ErrPartialEscape": true,
+ "ErrPredefinedEscaper": true,
+ "ErrRangeLoopReentry": true,
+ "ErrSlashAmbig": true,
+ "Error": true,
+ "ErrorCode": true,
+ "FuncMap": true,
+ "HTML": true,
+ "HTMLAttr": true,
+ "HTMLEscape": true,
+ "HTMLEscapeString": true,
+ "HTMLEscaper": true,
+ "IsTrue": true,
+ "JS": true,
+ "JSEscape": true,
+ "JSEscapeString": true,
+ "JSEscaper": true,
+ "JSStr": true,
+ "Must": true,
+ "New": true,
+ "OK": true,
+ "ParseFiles": true,
+ "ParseGlob": true,
+ "Srcset": true,
+ "Template": true,
+ "URL": true,
+ "URLQueryEscaper": true,
+ },
+ "image": map[string]bool{
+ "Alpha": true,
+ "Alpha16": true,
+ "Black": true,
+ "CMYK": true,
+ "Config": true,
+ "Decode": true,
+ "DecodeConfig": true,
+ "ErrFormat": true,
+ "Gray": true,
+ "Gray16": true,
+ "Image": true,
+ "NRGBA": true,
+ "NRGBA64": true,
+ "NYCbCrA": true,
+ "NewAlpha": true,
+ "NewAlpha16": true,
+ "NewCMYK": true,
+ "NewGray": true,
+ "NewGray16": true,
+ "NewNRGBA": true,
+ "NewNRGBA64": true,
+ "NewNYCbCrA": true,
+ "NewPaletted": true,
+ "NewRGBA": true,
+ "NewRGBA64": true,
+ "NewUniform": true,
+ "NewYCbCr": true,
+ "Opaque": true,
+ "Paletted": true,
+ "PalettedImage": true,
+ "Point": true,
+ "Pt": true,
+ "RGBA": true,
+ "RGBA64": true,
+ "Rect": true,
+ "Rectangle": true,
+ "RegisterFormat": true,
+ "Transparent": true,
+ "Uniform": true,
+ "White": true,
+ "YCbCr": true,
+ "YCbCrSubsampleRatio": true,
+ "YCbCrSubsampleRatio410": true,
+ "YCbCrSubsampleRatio411": true,
+ "YCbCrSubsampleRatio420": true,
+ "YCbCrSubsampleRatio422": true,
+ "YCbCrSubsampleRatio440": true,
+ "YCbCrSubsampleRatio444": true,
+ "ZP": true,
+ "ZR": true,
+ },
+ "image/color": map[string]bool{
+ "Alpha": true,
+ "Alpha16": true,
+ "Alpha16Model": true,
+ "AlphaModel": true,
+ "Black": true,
+ "CMYK": true,
+ "CMYKModel": true,
+ "CMYKToRGB": true,
+ "Color": true,
+ "Gray": true,
+ "Gray16": true,
+ "Gray16Model": true,
+ "GrayModel": true,
+ "Model": true,
+ "ModelFunc": true,
+ "NRGBA": true,
+ "NRGBA64": true,
+ "NRGBA64Model": true,
+ "NRGBAModel": true,
+ "NYCbCrA": true,
+ "NYCbCrAModel": true,
+ "Opaque": true,
+ "Palette": true,
+ "RGBA": true,
+ "RGBA64": true,
+ "RGBA64Model": true,
+ "RGBAModel": true,
+ "RGBToCMYK": true,
+ "RGBToYCbCr": true,
+ "Transparent": true,
+ "White": true,
+ "YCbCr": true,
+ "YCbCrModel": true,
+ "YCbCrToRGB": true,
+ },
+ "image/color/palette": map[string]bool{
+ "Plan9": true,
+ "WebSafe": true,
+ },
+ "image/draw": map[string]bool{
+ "Draw": true,
+ "DrawMask": true,
+ "Drawer": true,
+ "FloydSteinberg": true,
+ "Image": true,
+ "Op": true,
+ "Over": true,
+ "Quantizer": true,
+ "Src": true,
+ },
+ "image/gif": map[string]bool{
+ "Decode": true,
+ "DecodeAll": true,
+ "DecodeConfig": true,
+ "DisposalBackground": true,
+ "DisposalNone": true,
+ "DisposalPrevious": true,
+ "Encode": true,
+ "EncodeAll": true,
+ "GIF": true,
+ "Options": true,
+ },
+ "image/jpeg": map[string]bool{
+ "Decode": true,
+ "DecodeConfig": true,
+ "DefaultQuality": true,
+ "Encode": true,
+ "FormatError": true,
+ "Options": true,
+ "Reader": true,
+ "UnsupportedError": true,
+ },
+ "image/png": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "CompressionLevel": true,
+ "Decode": true,
+ "DecodeConfig": true,
+ "DefaultCompression": true,
+ "Encode": true,
+ "Encoder": true,
+ "EncoderBuffer": true,
+ "EncoderBufferPool": true,
+ "FormatError": true,
+ "NoCompression": true,
+ "UnsupportedError": true,
+ },
+ "index/suffixarray": map[string]bool{
+ "Index": true,
+ "New": true,
+ },
+ "io": map[string]bool{
+ "ByteReader": true,
+ "ByteScanner": true,
+ "ByteWriter": true,
+ "Closer": true,
+ "Copy": true,
+ "CopyBuffer": true,
+ "CopyN": true,
+ "EOF": true,
+ "ErrClosedPipe": true,
+ "ErrNoProgress": true,
+ "ErrShortBuffer": true,
+ "ErrShortWrite": true,
+ "ErrUnexpectedEOF": true,
+ "LimitReader": true,
+ "LimitedReader": true,
+ "MultiReader": true,
+ "MultiWriter": true,
+ "NewSectionReader": true,
+ "Pipe": true,
+ "PipeReader": true,
+ "PipeWriter": true,
+ "ReadAtLeast": true,
+ "ReadCloser": true,
+ "ReadFull": true,
+ "ReadSeeker": true,
+ "ReadWriteCloser": true,
+ "ReadWriteSeeker": true,
+ "ReadWriter": true,
+ "Reader": true,
+ "ReaderAt": true,
+ "ReaderFrom": true,
+ "RuneReader": true,
+ "RuneScanner": true,
+ "SectionReader": true,
+ "SeekCurrent": true,
+ "SeekEnd": true,
+ "SeekStart": true,
+ "Seeker": true,
+ "StringWriter": true,
+ "TeeReader": true,
+ "WriteCloser": true,
+ "WriteSeeker": true,
+ "WriteString": true,
+ "Writer": true,
+ "WriterAt": true,
+ "WriterTo": true,
+ },
+ "io/ioutil": map[string]bool{
+ "Discard": true,
+ "NopCloser": true,
+ "ReadAll": true,
+ "ReadDir": true,
+ "ReadFile": true,
+ "TempDir": true,
+ "TempFile": true,
+ "WriteFile": true,
+ },
+ "log": map[string]bool{
+ "Fatal": true,
+ "Fatalf": true,
+ "Fatalln": true,
+ "Flags": true,
+ "LUTC": true,
+ "Ldate": true,
+ "Llongfile": true,
+ "Lmicroseconds": true,
+ "Logger": true,
+ "Lshortfile": true,
+ "LstdFlags": true,
+ "Ltime": true,
+ "New": true,
+ "Output": true,
+ "Panic": true,
+ "Panicf": true,
+ "Panicln": true,
+ "Prefix": true,
+ "Print": true,
+ "Printf": true,
+ "Println": true,
+ "SetFlags": true,
+ "SetOutput": true,
+ "SetPrefix": true,
+ },
+ "log/syslog": map[string]bool{
+ "Dial": true,
+ "LOG_ALERT": true,
+ "LOG_AUTH": true,
+ "LOG_AUTHPRIV": true,
+ "LOG_CRIT": true,
+ "LOG_CRON": true,
+ "LOG_DAEMON": true,
+ "LOG_DEBUG": true,
+ "LOG_EMERG": true,
+ "LOG_ERR": true,
+ "LOG_FTP": true,
+ "LOG_INFO": true,
+ "LOG_KERN": true,
+ "LOG_LOCAL0": true,
+ "LOG_LOCAL1": true,
+ "LOG_LOCAL2": true,
+ "LOG_LOCAL3": true,
+ "LOG_LOCAL4": true,
+ "LOG_LOCAL5": true,
+ "LOG_LOCAL6": true,
+ "LOG_LOCAL7": true,
+ "LOG_LPR": true,
+ "LOG_MAIL": true,
+ "LOG_NEWS": true,
+ "LOG_NOTICE": true,
+ "LOG_SYSLOG": true,
+ "LOG_USER": true,
+ "LOG_UUCP": true,
+ "LOG_WARNING": true,
+ "New": true,
+ "NewLogger": true,
+ "Priority": true,
+ "Writer": true,
+ },
+ "math": map[string]bool{
+ "Abs": true,
+ "Acos": true,
+ "Acosh": true,
+ "Asin": true,
+ "Asinh": true,
+ "Atan": true,
+ "Atan2": true,
+ "Atanh": true,
+ "Cbrt": true,
+ "Ceil": true,
+ "Copysign": true,
+ "Cos": true,
+ "Cosh": true,
+ "Dim": true,
+ "E": true,
+ "Erf": true,
+ "Erfc": true,
+ "Erfcinv": true,
+ "Erfinv": true,
+ "Exp": true,
+ "Exp2": true,
+ "Expm1": true,
+ "Float32bits": true,
+ "Float32frombits": true,
+ "Float64bits": true,
+ "Float64frombits": true,
+ "Floor": true,
+ "Frexp": true,
+ "Gamma": true,
+ "Hypot": true,
+ "Ilogb": true,
+ "Inf": true,
+ "IsInf": true,
+ "IsNaN": true,
+ "J0": true,
+ "J1": true,
+ "Jn": true,
+ "Ldexp": true,
+ "Lgamma": true,
+ "Ln10": true,
+ "Ln2": true,
+ "Log": true,
+ "Log10": true,
+ "Log10E": true,
+ "Log1p": true,
+ "Log2": true,
+ "Log2E": true,
+ "Logb": true,
+ "Max": true,
+ "MaxFloat32": true,
+ "MaxFloat64": true,
+ "MaxInt16": true,
+ "MaxInt32": true,
+ "MaxInt64": true,
+ "MaxInt8": true,
+ "MaxUint16": true,
+ "MaxUint32": true,
+ "MaxUint64": true,
+ "MaxUint8": true,
+ "Min": true,
+ "MinInt16": true,
+ "MinInt32": true,
+ "MinInt64": true,
+ "MinInt8": true,
+ "Mod": true,
+ "Modf": true,
+ "NaN": true,
+ "Nextafter": true,
+ "Nextafter32": true,
+ "Phi": true,
+ "Pi": true,
+ "Pow": true,
+ "Pow10": true,
+ "Remainder": true,
+ "Round": true,
+ "RoundToEven": true,
+ "Signbit": true,
+ "Sin": true,
+ "Sincos": true,
+ "Sinh": true,
+ "SmallestNonzeroFloat32": true,
+ "SmallestNonzeroFloat64": true,
+ "Sqrt": true,
+ "Sqrt2": true,
+ "SqrtE": true,
+ "SqrtPhi": true,
+ "SqrtPi": true,
+ "Tan": true,
+ "Tanh": true,
+ "Trunc": true,
+ "Y0": true,
+ "Y1": true,
+ "Yn": true,
+ },
+ "math/big": map[string]bool{
+ "Above": true,
+ "Accuracy": true,
+ "AwayFromZero": true,
+ "Below": true,
+ "ErrNaN": true,
+ "Exact": true,
+ "Float": true,
+ "Int": true,
+ "Jacobi": true,
+ "MaxBase": true,
+ "MaxExp": true,
+ "MaxPrec": true,
+ "MinExp": true,
+ "NewFloat": true,
+ "NewInt": true,
+ "NewRat": true,
+ "ParseFloat": true,
+ "Rat": true,
+ "RoundingMode": true,
+ "ToNearestAway": true,
+ "ToNearestEven": true,
+ "ToNegativeInf": true,
+ "ToPositiveInf": true,
+ "ToZero": true,
+ "Word": true,
+ },
+ "math/bits": map[string]bool{
+ "Add": true,
+ "Add32": true,
+ "Add64": true,
+ "Div": true,
+ "Div32": true,
+ "Div64": true,
+ "LeadingZeros": true,
+ "LeadingZeros16": true,
+ "LeadingZeros32": true,
+ "LeadingZeros64": true,
+ "LeadingZeros8": true,
+ "Len": true,
+ "Len16": true,
+ "Len32": true,
+ "Len64": true,
+ "Len8": true,
+ "Mul": true,
+ "Mul32": true,
+ "Mul64": true,
+ "OnesCount": true,
+ "OnesCount16": true,
+ "OnesCount32": true,
+ "OnesCount64": true,
+ "OnesCount8": true,
+ "Reverse": true,
+ "Reverse16": true,
+ "Reverse32": true,
+ "Reverse64": true,
+ "Reverse8": true,
+ "ReverseBytes": true,
+ "ReverseBytes16": true,
+ "ReverseBytes32": true,
+ "ReverseBytes64": true,
+ "RotateLeft": true,
+ "RotateLeft16": true,
+ "RotateLeft32": true,
+ "RotateLeft64": true,
+ "RotateLeft8": true,
+ "Sub": true,
+ "Sub32": true,
+ "Sub64": true,
+ "TrailingZeros": true,
+ "TrailingZeros16": true,
+ "TrailingZeros32": true,
+ "TrailingZeros64": true,
+ "TrailingZeros8": true,
+ "UintSize": true,
+ },
+ "math/cmplx": map[string]bool{
+ "Abs": true,
+ "Acos": true,
+ "Acosh": true,
+ "Asin": true,
+ "Asinh": true,
+ "Atan": true,
+ "Atanh": true,
+ "Conj": true,
+ "Cos": true,
+ "Cosh": true,
+ "Cot": true,
+ "Exp": true,
+ "Inf": true,
+ "IsInf": true,
+ "IsNaN": true,
+ "Log": true,
+ "Log10": true,
+ "NaN": true,
+ "Phase": true,
+ "Polar": true,
+ "Pow": true,
+ "Rect": true,
+ "Sin": true,
+ "Sinh": true,
+ "Sqrt": true,
+ "Tan": true,
+ "Tanh": true,
+ },
+ "math/rand": map[string]bool{
+ "ExpFloat64": true,
+ "Float32": true,
+ "Float64": true,
+ "Int": true,
+ "Int31": true,
+ "Int31n": true,
+ "Int63": true,
+ "Int63n": true,
+ "Intn": true,
+ "New": true,
+ "NewSource": true,
+ "NewZipf": true,
+ "NormFloat64": true,
+ "Perm": true,
+ "Rand": true,
+ "Read": true,
+ "Seed": true,
+ "Shuffle": true,
+ "Source": true,
+ "Source64": true,
+ "Uint32": true,
+ "Uint64": true,
+ "Zipf": true,
+ },
+ "mime": map[string]bool{
+ "AddExtensionType": true,
+ "BEncoding": true,
+ "ErrInvalidMediaParameter": true,
+ "ExtensionsByType": true,
+ "FormatMediaType": true,
+ "ParseMediaType": true,
+ "QEncoding": true,
+ "TypeByExtension": true,
+ "WordDecoder": true,
+ "WordEncoder": true,
+ },
+ "mime/multipart": map[string]bool{
+ "ErrMessageTooLarge": true,
+ "File": true,
+ "FileHeader": true,
+ "Form": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Part": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "mime/quotedprintable": map[string]bool{
+ "NewReader": true,
+ "NewWriter": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "net": map[string]bool{
+ "Addr": true,
+ "AddrError": true,
+ "Buffers": true,
+ "CIDRMask": true,
+ "Conn": true,
+ "DNSConfigError": true,
+ "DNSError": true,
+ "DefaultResolver": true,
+ "Dial": true,
+ "DialIP": true,
+ "DialTCP": true,
+ "DialTimeout": true,
+ "DialUDP": true,
+ "DialUnix": true,
+ "Dialer": true,
+ "ErrWriteToConnected": true,
+ "Error": true,
+ "FileConn": true,
+ "FileListener": true,
+ "FilePacketConn": true,
+ "FlagBroadcast": true,
+ "FlagLoopback": true,
+ "FlagMulticast": true,
+ "FlagPointToPoint": true,
+ "FlagUp": true,
+ "Flags": true,
+ "HardwareAddr": true,
+ "IP": true,
+ "IPAddr": true,
+ "IPConn": true,
+ "IPMask": true,
+ "IPNet": true,
+ "IPv4": true,
+ "IPv4Mask": true,
+ "IPv4allrouter": true,
+ "IPv4allsys": true,
+ "IPv4bcast": true,
+ "IPv4len": true,
+ "IPv4zero": true,
+ "IPv6interfacelocalallnodes": true,
+ "IPv6len": true,
+ "IPv6linklocalallnodes": true,
+ "IPv6linklocalallrouters": true,
+ "IPv6loopback": true,
+ "IPv6unspecified": true,
+ "IPv6zero": true,
+ "Interface": true,
+ "InterfaceAddrs": true,
+ "InterfaceByIndex": true,
+ "InterfaceByName": true,
+ "Interfaces": true,
+ "InvalidAddrError": true,
+ "JoinHostPort": true,
+ "Listen": true,
+ "ListenConfig": true,
+ "ListenIP": true,
+ "ListenMulticastUDP": true,
+ "ListenPacket": true,
+ "ListenTCP": true,
+ "ListenUDP": true,
+ "ListenUnix": true,
+ "ListenUnixgram": true,
+ "Listener": true,
+ "LookupAddr": true,
+ "LookupCNAME": true,
+ "LookupHost": true,
+ "LookupIP": true,
+ "LookupMX": true,
+ "LookupNS": true,
+ "LookupPort": true,
+ "LookupSRV": true,
+ "LookupTXT": true,
+ "MX": true,
+ "NS": true,
+ "OpError": true,
+ "PacketConn": true,
+ "ParseCIDR": true,
+ "ParseError": true,
+ "ParseIP": true,
+ "ParseMAC": true,
+ "Pipe": true,
+ "ResolveIPAddr": true,
+ "ResolveTCPAddr": true,
+ "ResolveUDPAddr": true,
+ "ResolveUnixAddr": true,
+ "Resolver": true,
+ "SRV": true,
+ "SplitHostPort": true,
+ "TCPAddr": true,
+ "TCPConn": true,
+ "TCPListener": true,
+ "UDPAddr": true,
+ "UDPConn": true,
+ "UnixAddr": true,
+ "UnixConn": true,
+ "UnixListener": true,
+ "UnknownNetworkError": true,
+ },
+ "net/http": map[string]bool{
+ "CanonicalHeaderKey": true,
+ "Client": true,
+ "CloseNotifier": true,
+ "ConnState": true,
+ "Cookie": true,
+ "CookieJar": true,
+ "DefaultClient": true,
+ "DefaultMaxHeaderBytes": true,
+ "DefaultMaxIdleConnsPerHost": true,
+ "DefaultServeMux": true,
+ "DefaultTransport": true,
+ "DetectContentType": true,
+ "Dir": true,
+ "ErrAbortHandler": true,
+ "ErrBodyNotAllowed": true,
+ "ErrBodyReadAfterClose": true,
+ "ErrContentLength": true,
+ "ErrHandlerTimeout": true,
+ "ErrHeaderTooLong": true,
+ "ErrHijacked": true,
+ "ErrLineTooLong": true,
+ "ErrMissingBoundary": true,
+ "ErrMissingContentLength": true,
+ "ErrMissingFile": true,
+ "ErrNoCookie": true,
+ "ErrNoLocation": true,
+ "ErrNotMultipart": true,
+ "ErrNotSupported": true,
+ "ErrServerClosed": true,
+ "ErrShortBody": true,
+ "ErrSkipAltProtocol": true,
+ "ErrUnexpectedTrailer": true,
+ "ErrUseLastResponse": true,
+ "ErrWriteAfterFlush": true,
+ "Error": true,
+ "File": true,
+ "FileServer": true,
+ "FileSystem": true,
+ "Flusher": true,
+ "Get": true,
+ "Handle": true,
+ "HandleFunc": true,
+ "Handler": true,
+ "HandlerFunc": true,
+ "Head": true,
+ "Header": true,
+ "Hijacker": true,
+ "ListenAndServe": true,
+ "ListenAndServeTLS": true,
+ "LocalAddrContextKey": true,
+ "MaxBytesReader": true,
+ "MethodConnect": true,
+ "MethodDelete": true,
+ "MethodGet": true,
+ "MethodHead": true,
+ "MethodOptions": true,
+ "MethodPatch": true,
+ "MethodPost": true,
+ "MethodPut": true,
+ "MethodTrace": true,
+ "NewFileTransport": true,
+ "NewRequest": true,
+ "NewServeMux": true,
+ "NoBody": true,
+ "NotFound": true,
+ "NotFoundHandler": true,
+ "ParseHTTPVersion": true,
+ "ParseTime": true,
+ "Post": true,
+ "PostForm": true,
+ "ProtocolError": true,
+ "ProxyFromEnvironment": true,
+ "ProxyURL": true,
+ "PushOptions": true,
+ "Pusher": true,
+ "ReadRequest": true,
+ "ReadResponse": true,
+ "Redirect": true,
+ "RedirectHandler": true,
+ "Request": true,
+ "Response": true,
+ "ResponseWriter": true,
+ "RoundTripper": true,
+ "SameSite": true,
+ "SameSiteDefaultMode": true,
+ "SameSiteLaxMode": true,
+ "SameSiteStrictMode": true,
+ "Serve": true,
+ "ServeContent": true,
+ "ServeFile": true,
+ "ServeMux": true,
+ "ServeTLS": true,
+ "Server": true,
+ "ServerContextKey": true,
+ "SetCookie": true,
+ "StateActive": true,
+ "StateClosed": true,
+ "StateHijacked": true,
+ "StateIdle": true,
+ "StateNew": true,
+ "StatusAccepted": true,
+ "StatusAlreadyReported": true,
+ "StatusBadGateway": true,
+ "StatusBadRequest": true,
+ "StatusConflict": true,
+ "StatusContinue": true,
+ "StatusCreated": true,
+ "StatusExpectationFailed": true,
+ "StatusFailedDependency": true,
+ "StatusForbidden": true,
+ "StatusFound": true,
+ "StatusGatewayTimeout": true,
+ "StatusGone": true,
+ "StatusHTTPVersionNotSupported": true,
+ "StatusIMUsed": true,
+ "StatusInsufficientStorage": true,
+ "StatusInternalServerError": true,
+ "StatusLengthRequired": true,
+ "StatusLocked": true,
+ "StatusLoopDetected": true,
+ "StatusMethodNotAllowed": true,
+ "StatusMisdirectedRequest": true,
+ "StatusMovedPermanently": true,
+ "StatusMultiStatus": true,
+ "StatusMultipleChoices": true,
+ "StatusNetworkAuthenticationRequired": true,
+ "StatusNoContent": true,
+ "StatusNonAuthoritativeInfo": true,
+ "StatusNotAcceptable": true,
+ "StatusNotExtended": true,
+ "StatusNotFound": true,
+ "StatusNotImplemented": true,
+ "StatusNotModified": true,
+ "StatusOK": true,
+ "StatusPartialContent": true,
+ "StatusPaymentRequired": true,
+ "StatusPermanentRedirect": true,
+ "StatusPreconditionFailed": true,
+ "StatusPreconditionRequired": true,
+ "StatusProcessing": true,
+ "StatusProxyAuthRequired": true,
+ "StatusRequestEntityTooLarge": true,
+ "StatusRequestHeaderFieldsTooLarge": true,
+ "StatusRequestTimeout": true,
+ "StatusRequestURITooLong": true,
+ "StatusRequestedRangeNotSatisfiable": true,
+ "StatusResetContent": true,
+ "StatusSeeOther": true,
+ "StatusServiceUnavailable": true,
+ "StatusSwitchingProtocols": true,
+ "StatusTeapot": true,
+ "StatusTemporaryRedirect": true,
+ "StatusText": true,
+ "StatusTooEarly": true,
+ "StatusTooManyRequests": true,
+ "StatusUnauthorized": true,
+ "StatusUnavailableForLegalReasons": true,
+ "StatusUnprocessableEntity": true,
+ "StatusUnsupportedMediaType": true,
+ "StatusUpgradeRequired": true,
+ "StatusUseProxy": true,
+ "StatusVariantAlsoNegotiates": true,
+ "StripPrefix": true,
+ "TimeFormat": true,
+ "TimeoutHandler": true,
+ "TrailerPrefix": true,
+ "Transport": true,
+ },
+ "net/http/cgi": map[string]bool{
+ "Handler": true,
+ "Request": true,
+ "RequestFromMap": true,
+ "Serve": true,
+ },
+ "net/http/cookiejar": map[string]bool{
+ "Jar": true,
+ "New": true,
+ "Options": true,
+ "PublicSuffixList": true,
+ },
+ "net/http/fcgi": map[string]bool{
+ "ErrConnClosed": true,
+ "ErrRequestAborted": true,
+ "ProcessEnv": true,
+ "Serve": true,
+ },
+ "net/http/httptest": map[string]bool{
+ "DefaultRemoteAddr": true,
+ "NewRecorder": true,
+ "NewRequest": true,
+ "NewServer": true,
+ "NewTLSServer": true,
+ "NewUnstartedServer": true,
+ "ResponseRecorder": true,
+ "Server": true,
+ },
+ "net/http/httptrace": map[string]bool{
+ "ClientTrace": true,
+ "ContextClientTrace": true,
+ "DNSDoneInfo": true,
+ "DNSStartInfo": true,
+ "GotConnInfo": true,
+ "WithClientTrace": true,
+ "WroteRequestInfo": true,
+ },
+ "net/http/httputil": map[string]bool{
+ "BufferPool": true,
+ "ClientConn": true,
+ "DumpRequest": true,
+ "DumpRequestOut": true,
+ "DumpResponse": true,
+ "ErrClosed": true,
+ "ErrLineTooLong": true,
+ "ErrPersistEOF": true,
+ "ErrPipeline": true,
+ "NewChunkedReader": true,
+ "NewChunkedWriter": true,
+ "NewClientConn": true,
+ "NewProxyClientConn": true,
+ "NewServerConn": true,
+ "NewSingleHostReverseProxy": true,
+ "ReverseProxy": true,
+ "ServerConn": true,
+ },
+ "net/http/pprof": map[string]bool{
+ "Cmdline": true,
+ "Handler": true,
+ "Index": true,
+ "Profile": true,
+ "Symbol": true,
+ "Trace": true,
+ },
+ "net/mail": map[string]bool{
+ "Address": true,
+ "AddressParser": true,
+ "ErrHeaderNotPresent": true,
+ "Header": true,
+ "Message": true,
+ "ParseAddress": true,
+ "ParseAddressList": true,
+ "ParseDate": true,
+ "ReadMessage": true,
+ },
+ "net/rpc": map[string]bool{
+ "Accept": true,
+ "Call": true,
+ "Client": true,
+ "ClientCodec": true,
+ "DefaultDebugPath": true,
+ "DefaultRPCPath": true,
+ "DefaultServer": true,
+ "Dial": true,
+ "DialHTTP": true,
+ "DialHTTPPath": true,
+ "ErrShutdown": true,
+ "HandleHTTP": true,
+ "NewClient": true,
+ "NewClientWithCodec": true,
+ "NewServer": true,
+ "Register": true,
+ "RegisterName": true,
+ "Request": true,
+ "Response": true,
+ "ServeCodec": true,
+ "ServeConn": true,
+ "ServeRequest": true,
+ "Server": true,
+ "ServerCodec": true,
+ "ServerError": true,
+ },
+ "net/rpc/jsonrpc": map[string]bool{
+ "Dial": true,
+ "NewClient": true,
+ "NewClientCodec": true,
+ "NewServerCodec": true,
+ "ServeConn": true,
+ },
+ "net/smtp": map[string]bool{
+ "Auth": true,
+ "CRAMMD5Auth": true,
+ "Client": true,
+ "Dial": true,
+ "NewClient": true,
+ "PlainAuth": true,
+ "SendMail": true,
+ "ServerInfo": true,
+ },
+ "net/textproto": map[string]bool{
+ "CanonicalMIMEHeaderKey": true,
+ "Conn": true,
+ "Dial": true,
+ "Error": true,
+ "MIMEHeader": true,
+ "NewConn": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Pipeline": true,
+ "ProtocolError": true,
+ "Reader": true,
+ "TrimBytes": true,
+ "TrimString": true,
+ "Writer": true,
+ },
+ "net/url": map[string]bool{
+ "Error": true,
+ "EscapeError": true,
+ "InvalidHostError": true,
+ "Parse": true,
+ "ParseQuery": true,
+ "ParseRequestURI": true,
+ "PathEscape": true,
+ "PathUnescape": true,
+ "QueryEscape": true,
+ "QueryUnescape": true,
+ "URL": true,
+ "User": true,
+ "UserPassword": true,
+ "Userinfo": true,
+ "Values": true,
+ },
+ "os": map[string]bool{
+ "Args": true,
+ "Chdir": true,
+ "Chmod": true,
+ "Chown": true,
+ "Chtimes": true,
+ "Clearenv": true,
+ "Create": true,
+ "DevNull": true,
+ "Environ": true,
+ "ErrClosed": true,
+ "ErrExist": true,
+ "ErrInvalid": true,
+ "ErrNoDeadline": true,
+ "ErrNotExist": true,
+ "ErrPermission": true,
+ "Executable": true,
+ "Exit": true,
+ "Expand": true,
+ "ExpandEnv": true,
+ "File": true,
+ "FileInfo": true,
+ "FileMode": true,
+ "FindProcess": true,
+ "Getegid": true,
+ "Getenv": true,
+ "Geteuid": true,
+ "Getgid": true,
+ "Getgroups": true,
+ "Getpagesize": true,
+ "Getpid": true,
+ "Getppid": true,
+ "Getuid": true,
+ "Getwd": true,
+ "Hostname": true,
+ "Interrupt": true,
+ "IsExist": true,
+ "IsNotExist": true,
+ "IsPathSeparator": true,
+ "IsPermission": true,
+ "IsTimeout": true,
+ "Kill": true,
+ "Lchown": true,
+ "Link": true,
+ "LinkError": true,
+ "LookupEnv": true,
+ "Lstat": true,
+ "Mkdir": true,
+ "MkdirAll": true,
+ "ModeAppend": true,
+ "ModeCharDevice": true,
+ "ModeDevice": true,
+ "ModeDir": true,
+ "ModeExclusive": true,
+ "ModeIrregular": true,
+ "ModeNamedPipe": true,
+ "ModePerm": true,
+ "ModeSetgid": true,
+ "ModeSetuid": true,
+ "ModeSocket": true,
+ "ModeSticky": true,
+ "ModeSymlink": true,
+ "ModeTemporary": true,
+ "ModeType": true,
+ "NewFile": true,
+ "NewSyscallError": true,
+ "O_APPEND": true,
+ "O_CREATE": true,
+ "O_EXCL": true,
+ "O_RDONLY": true,
+ "O_RDWR": true,
+ "O_SYNC": true,
+ "O_TRUNC": true,
+ "O_WRONLY": true,
+ "Open": true,
+ "OpenFile": true,
+ "PathError": true,
+ "PathListSeparator": true,
+ "PathSeparator": true,
+ "Pipe": true,
+ "ProcAttr": true,
+ "Process": true,
+ "ProcessState": true,
+ "Readlink": true,
+ "Remove": true,
+ "RemoveAll": true,
+ "Rename": true,
+ "SEEK_CUR": true,
+ "SEEK_END": true,
+ "SEEK_SET": true,
+ "SameFile": true,
+ "Setenv": true,
+ "Signal": true,
+ "StartProcess": true,
+ "Stat": true,
+ "Stderr": true,
+ "Stdin": true,
+ "Stdout": true,
+ "Symlink": true,
+ "SyscallError": true,
+ "TempDir": true,
+ "Truncate": true,
+ "Unsetenv": true,
+ "UserCacheDir": true,
+ "UserHomeDir": true,
+ },
+ "os/exec": map[string]bool{
+ "Cmd": true,
+ "Command": true,
+ "CommandContext": true,
+ "ErrNotFound": true,
+ "Error": true,
+ "ExitError": true,
+ "LookPath": true,
+ },
+ "os/signal": map[string]bool{
+ "Ignore": true,
+ "Ignored": true,
+ "Notify": true,
+ "Reset": true,
+ "Stop": true,
+ },
+ "os/user": map[string]bool{
+ "Current": true,
+ "Group": true,
+ "Lookup": true,
+ "LookupGroup": true,
+ "LookupGroupId": true,
+ "LookupId": true,
+ "UnknownGroupError": true,
+ "UnknownGroupIdError": true,
+ "UnknownUserError": true,
+ "UnknownUserIdError": true,
+ "User": true,
+ },
+ "path": map[string]bool{
+ "Base": true,
+ "Clean": true,
+ "Dir": true,
+ "ErrBadPattern": true,
+ "Ext": true,
+ "IsAbs": true,
+ "Join": true,
+ "Match": true,
+ "Split": true,
+ },
+ "path/filepath": map[string]bool{
+ "Abs": true,
+ "Base": true,
+ "Clean": true,
+ "Dir": true,
+ "ErrBadPattern": true,
+ "EvalSymlinks": true,
+ "Ext": true,
+ "FromSlash": true,
+ "Glob": true,
+ "HasPrefix": true,
+ "IsAbs": true,
+ "Join": true,
+ "ListSeparator": true,
+ "Match": true,
+ "Rel": true,
+ "Separator": true,
+ "SkipDir": true,
+ "Split": true,
+ "SplitList": true,
+ "ToSlash": true,
+ "VolumeName": true,
+ "Walk": true,
+ "WalkFunc": true,
+ },
+ "plugin": map[string]bool{
+ "Open": true,
+ "Plugin": true,
+ "Symbol": true,
+ },
+ "reflect": map[string]bool{
+ "Append": true,
+ "AppendSlice": true,
+ "Array": true,
+ "ArrayOf": true,
+ "Bool": true,
+ "BothDir": true,
+ "Chan": true,
+ "ChanDir": true,
+ "ChanOf": true,
+ "Complex128": true,
+ "Complex64": true,
+ "Copy": true,
+ "DeepEqual": true,
+ "Float32": true,
+ "Float64": true,
+ "Func": true,
+ "FuncOf": true,
+ "Indirect": true,
+ "Int": true,
+ "Int16": true,
+ "Int32": true,
+ "Int64": true,
+ "Int8": true,
+ "Interface": true,
+ "Invalid": true,
+ "Kind": true,
+ "MakeChan": true,
+ "MakeFunc": true,
+ "MakeMap": true,
+ "MakeMapWithSize": true,
+ "MakeSlice": true,
+ "Map": true,
+ "MapIter": true,
+ "MapOf": true,
+ "Method": true,
+ "New": true,
+ "NewAt": true,
+ "Ptr": true,
+ "PtrTo": true,
+ "RecvDir": true,
+ "Select": true,
+ "SelectCase": true,
+ "SelectDefault": true,
+ "SelectDir": true,
+ "SelectRecv": true,
+ "SelectSend": true,
+ "SendDir": true,
+ "Slice": true,
+ "SliceHeader": true,
+ "SliceOf": true,
+ "String": true,
+ "StringHeader": true,
+ "Struct": true,
+ "StructField": true,
+ "StructOf": true,
+ "StructTag": true,
+ "Swapper": true,
+ "TypeOf": true,
+ "Uint": true,
+ "Uint16": true,
+ "Uint32": true,
+ "Uint64": true,
+ "Uint8": true,
+ "Uintptr": true,
+ "UnsafePointer": true,
+ "Value": true,
+ "ValueError": true,
+ "ValueOf": true,
+ "Zero": true,
+ },
+ "regexp": map[string]bool{
+ "Compile": true,
+ "CompilePOSIX": true,
+ "Match": true,
+ "MatchReader": true,
+ "MatchString": true,
+ "MustCompile": true,
+ "MustCompilePOSIX": true,
+ "QuoteMeta": true,
+ "Regexp": true,
+ },
+ "regexp/syntax": map[string]bool{
+ "ClassNL": true,
+ "Compile": true,
+ "DotNL": true,
+ "EmptyBeginLine": true,
+ "EmptyBeginText": true,
+ "EmptyEndLine": true,
+ "EmptyEndText": true,
+ "EmptyNoWordBoundary": true,
+ "EmptyOp": true,
+ "EmptyOpContext": true,
+ "EmptyWordBoundary": true,
+ "ErrInternalError": true,
+ "ErrInvalidCharClass": true,
+ "ErrInvalidCharRange": true,
+ "ErrInvalidEscape": true,
+ "ErrInvalidNamedCapture": true,
+ "ErrInvalidPerlOp": true,
+ "ErrInvalidRepeatOp": true,
+ "ErrInvalidRepeatSize": true,
+ "ErrInvalidUTF8": true,
+ "ErrMissingBracket": true,
+ "ErrMissingParen": true,
+ "ErrMissingRepeatArgument": true,
+ "ErrTrailingBackslash": true,
+ "ErrUnexpectedParen": true,
+ "Error": true,
+ "ErrorCode": true,
+ "Flags": true,
+ "FoldCase": true,
+ "Inst": true,
+ "InstAlt": true,
+ "InstAltMatch": true,
+ "InstCapture": true,
+ "InstEmptyWidth": true,
+ "InstFail": true,
+ "InstMatch": true,
+ "InstNop": true,
+ "InstOp": true,
+ "InstRune": true,
+ "InstRune1": true,
+ "InstRuneAny": true,
+ "InstRuneAnyNotNL": true,
+ "IsWordChar": true,
+ "Literal": true,
+ "MatchNL": true,
+ "NonGreedy": true,
+ "OneLine": true,
+ "Op": true,
+ "OpAlternate": true,
+ "OpAnyChar": true,
+ "OpAnyCharNotNL": true,
+ "OpBeginLine": true,
+ "OpBeginText": true,
+ "OpCapture": true,
+ "OpCharClass": true,
+ "OpConcat": true,
+ "OpEmptyMatch": true,
+ "OpEndLine": true,
+ "OpEndText": true,
+ "OpLiteral": true,
+ "OpNoMatch": true,
+ "OpNoWordBoundary": true,
+ "OpPlus": true,
+ "OpQuest": true,
+ "OpRepeat": true,
+ "OpStar": true,
+ "OpWordBoundary": true,
+ "POSIX": true,
+ "Parse": true,
+ "Perl": true,
+ "PerlX": true,
+ "Prog": true,
+ "Regexp": true,
+ "Simple": true,
+ "UnicodeGroups": true,
+ "WasDollar": true,
+ },
+ "runtime": map[string]bool{
+ "BlockProfile": true,
+ "BlockProfileRecord": true,
+ "Breakpoint": true,
+ "CPUProfile": true,
+ "Caller": true,
+ "Callers": true,
+ "CallersFrames": true,
+ "Compiler": true,
+ "Error": true,
+ "Frame": true,
+ "Frames": true,
+ "Func": true,
+ "FuncForPC": true,
+ "GC": true,
+ "GOARCH": true,
+ "GOMAXPROCS": true,
+ "GOOS": true,
+ "GOROOT": true,
+ "Goexit": true,
+ "GoroutineProfile": true,
+ "Gosched": true,
+ "KeepAlive": true,
+ "LockOSThread": true,
+ "MemProfile": true,
+ "MemProfileRate": true,
+ "MemProfileRecord": true,
+ "MemStats": true,
+ "MutexProfile": true,
+ "NumCPU": true,
+ "NumCgoCall": true,
+ "NumGoroutine": true,
+ "ReadMemStats": true,
+ "ReadTrace": true,
+ "SetBlockProfileRate": true,
+ "SetCPUProfileRate": true,
+ "SetCgoTraceback": true,
+ "SetFinalizer": true,
+ "SetMutexProfileFraction": true,
+ "Stack": true,
+ "StackRecord": true,
+ "StartTrace": true,
+ "StopTrace": true,
+ "ThreadCreateProfile": true,
+ "TypeAssertionError": true,
+ "UnlockOSThread": true,
+ "Version": true,
+ },
+ "runtime/debug": map[string]bool{
+ "BuildInfo": true,
+ "FreeOSMemory": true,
+ "GCStats": true,
+ "Module": true,
+ "PrintStack": true,
+ "ReadBuildInfo": true,
+ "ReadGCStats": true,
+ "SetGCPercent": true,
+ "SetMaxStack": true,
+ "SetMaxThreads": true,
+ "SetPanicOnFault": true,
+ "SetTraceback": true,
+ "Stack": true,
+ "WriteHeapDump": true,
+ },
+ "runtime/pprof": map[string]bool{
+ "Do": true,
+ "ForLabels": true,
+ "Label": true,
+ "LabelSet": true,
+ "Labels": true,
+ "Lookup": true,
+ "NewProfile": true,
+ "Profile": true,
+ "Profiles": true,
+ "SetGoroutineLabels": true,
+ "StartCPUProfile": true,
+ "StopCPUProfile": true,
+ "WithLabels": true,
+ "WriteHeapProfile": true,
+ },
+ "runtime/trace": map[string]bool{
+ "IsEnabled": true,
+ "Log": true,
+ "Logf": true,
+ "NewTask": true,
+ "Region": true,
+ "Start": true,
+ "StartRegion": true,
+ "Stop": true,
+ "Task": true,
+ "WithRegion": true,
+ },
+ "sort": map[string]bool{
+ "Float64Slice": true,
+ "Float64s": true,
+ "Float64sAreSorted": true,
+ "IntSlice": true,
+ "Interface": true,
+ "Ints": true,
+ "IntsAreSorted": true,
+ "IsSorted": true,
+ "Reverse": true,
+ "Search": true,
+ "SearchFloat64s": true,
+ "SearchInts": true,
+ "SearchStrings": true,
+ "Slice": true,
+ "SliceIsSorted": true,
+ "SliceStable": true,
+ "Sort": true,
+ "Stable": true,
+ "StringSlice": true,
+ "Strings": true,
+ "StringsAreSorted": true,
+ },
+ "strconv": map[string]bool{
+ "AppendBool": true,
+ "AppendFloat": true,
+ "AppendInt": true,
+ "AppendQuote": true,
+ "AppendQuoteRune": true,
+ "AppendQuoteRuneToASCII": true,
+ "AppendQuoteRuneToGraphic": true,
+ "AppendQuoteToASCII": true,
+ "AppendQuoteToGraphic": true,
+ "AppendUint": true,
+ "Atoi": true,
+ "CanBackquote": true,
+ "ErrRange": true,
+ "ErrSyntax": true,
+ "FormatBool": true,
+ "FormatFloat": true,
+ "FormatInt": true,
+ "FormatUint": true,
+ "IntSize": true,
+ "IsGraphic": true,
+ "IsPrint": true,
+ "Itoa": true,
+ "NumError": true,
+ "ParseBool": true,
+ "ParseFloat": true,
+ "ParseInt": true,
+ "ParseUint": true,
+ "Quote": true,
+ "QuoteRune": true,
+ "QuoteRuneToASCII": true,
+ "QuoteRuneToGraphic": true,
+ "QuoteToASCII": true,
+ "QuoteToGraphic": true,
+ "Unquote": true,
+ "UnquoteChar": true,
+ },
+ "strings": map[string]bool{
+ "Builder": true,
+ "Compare": true,
+ "Contains": true,
+ "ContainsAny": true,
+ "ContainsRune": true,
+ "Count": true,
+ "EqualFold": true,
+ "Fields": true,
+ "FieldsFunc": true,
+ "HasPrefix": true,
+ "HasSuffix": true,
+ "Index": true,
+ "IndexAny": true,
+ "IndexByte": true,
+ "IndexFunc": true,
+ "IndexRune": true,
+ "Join": true,
+ "LastIndex": true,
+ "LastIndexAny": true,
+ "LastIndexByte": true,
+ "LastIndexFunc": true,
+ "Map": true,
+ "NewReader": true,
+ "NewReplacer": true,
+ "Reader": true,
+ "Repeat": true,
+ "Replace": true,
+ "ReplaceAll": true,
+ "Replacer": true,
+ "Split": true,
+ "SplitAfter": true,
+ "SplitAfterN": true,
+ "SplitN": true,
+ "Title": true,
+ "ToLower": true,
+ "ToLowerSpecial": true,
+ "ToTitle": true,
+ "ToTitleSpecial": true,
+ "ToUpper": true,
+ "ToUpperSpecial": true,
+ "Trim": true,
+ "TrimFunc": true,
+ "TrimLeft": true,
+ "TrimLeftFunc": true,
+ "TrimPrefix": true,
+ "TrimRight": true,
+ "TrimRightFunc": true,
+ "TrimSpace": true,
+ "TrimSuffix": true,
+ },
+ "sync": map[string]bool{
+ "Cond": true,
+ "Locker": true,
+ "Map": true,
+ "Mutex": true,
+ "NewCond": true,
+ "Once": true,
+ "Pool": true,
+ "RWMutex": true,
+ "WaitGroup": true,
+ },
+ "sync/atomic": map[string]bool{
+ "AddInt32": true,
+ "AddInt64": true,
+ "AddUint32": true,
+ "AddUint64": true,
+ "AddUintptr": true,
+ "CompareAndSwapInt32": true,
+ "CompareAndSwapInt64": true,
+ "CompareAndSwapPointer": true,
+ "CompareAndSwapUint32": true,
+ "CompareAndSwapUint64": true,
+ "CompareAndSwapUintptr": true,
+ "LoadInt32": true,
+ "LoadInt64": true,
+ "LoadPointer": true,
+ "LoadUint32": true,
+ "LoadUint64": true,
+ "LoadUintptr": true,
+ "StoreInt32": true,
+ "StoreInt64": true,
+ "StorePointer": true,
+ "StoreUint32": true,
+ "StoreUint64": true,
+ "StoreUintptr": true,
+ "SwapInt32": true,
+ "SwapInt64": true,
+ "SwapPointer": true,
+ "SwapUint32": true,
+ "SwapUint64": true,
+ "SwapUintptr": true,
+ "Value": true,
+ },
+ "syscall": map[string]bool{
+ "AF_ALG": true,
+ "AF_APPLETALK": true,
+ "AF_ARP": true,
+ "AF_ASH": true,
+ "AF_ATM": true,
+ "AF_ATMPVC": true,
+ "AF_ATMSVC": true,
+ "AF_AX25": true,
+ "AF_BLUETOOTH": true,
+ "AF_BRIDGE": true,
+ "AF_CAIF": true,
+ "AF_CAN": true,
+ "AF_CCITT": true,
+ "AF_CHAOS": true,
+ "AF_CNT": true,
+ "AF_COIP": true,
+ "AF_DATAKIT": true,
+ "AF_DECnet": true,
+ "AF_DLI": true,
+ "AF_E164": true,
+ "AF_ECMA": true,
+ "AF_ECONET": true,
+ "AF_ENCAP": true,
+ "AF_FILE": true,
+ "AF_HYLINK": true,
+ "AF_IEEE80211": true,
+ "AF_IEEE802154": true,
+ "AF_IMPLINK": true,
+ "AF_INET": true,
+ "AF_INET6": true,
+ "AF_INET6_SDP": true,
+ "AF_INET_SDP": true,
+ "AF_IPX": true,
+ "AF_IRDA": true,
+ "AF_ISDN": true,
+ "AF_ISO": true,
+ "AF_IUCV": true,
+ "AF_KEY": true,
+ "AF_LAT": true,
+ "AF_LINK": true,
+ "AF_LLC": true,
+ "AF_LOCAL": true,
+ "AF_MAX": true,
+ "AF_MPLS": true,
+ "AF_NATM": true,
+ "AF_NDRV": true,
+ "AF_NETBEUI": true,
+ "AF_NETBIOS": true,
+ "AF_NETGRAPH": true,
+ "AF_NETLINK": true,
+ "AF_NETROM": true,
+ "AF_NS": true,
+ "AF_OROUTE": true,
+ "AF_OSI": true,
+ "AF_PACKET": true,
+ "AF_PHONET": true,
+ "AF_PPP": true,
+ "AF_PPPOX": true,
+ "AF_PUP": true,
+ "AF_RDS": true,
+ "AF_RESERVED_36": true,
+ "AF_ROSE": true,
+ "AF_ROUTE": true,
+ "AF_RXRPC": true,
+ "AF_SCLUSTER": true,
+ "AF_SECURITY": true,
+ "AF_SIP": true,
+ "AF_SLOW": true,
+ "AF_SNA": true,
+ "AF_SYSTEM": true,
+ "AF_TIPC": true,
+ "AF_UNIX": true,
+ "AF_UNSPEC": true,
+ "AF_VENDOR00": true,
+ "AF_VENDOR01": true,
+ "AF_VENDOR02": true,
+ "AF_VENDOR03": true,
+ "AF_VENDOR04": true,
+ "AF_VENDOR05": true,
+ "AF_VENDOR06": true,
+ "AF_VENDOR07": true,
+ "AF_VENDOR08": true,
+ "AF_VENDOR09": true,
+ "AF_VENDOR10": true,
+ "AF_VENDOR11": true,
+ "AF_VENDOR12": true,
+ "AF_VENDOR13": true,
+ "AF_VENDOR14": true,
+ "AF_VENDOR15": true,
+ "AF_VENDOR16": true,
+ "AF_VENDOR17": true,
+ "AF_VENDOR18": true,
+ "AF_VENDOR19": true,
+ "AF_VENDOR20": true,
+ "AF_VENDOR21": true,
+ "AF_VENDOR22": true,
+ "AF_VENDOR23": true,
+ "AF_VENDOR24": true,
+ "AF_VENDOR25": true,
+ "AF_VENDOR26": true,
+ "AF_VENDOR27": true,
+ "AF_VENDOR28": true,
+ "AF_VENDOR29": true,
+ "AF_VENDOR30": true,
+ "AF_VENDOR31": true,
+ "AF_VENDOR32": true,
+ "AF_VENDOR33": true,
+ "AF_VENDOR34": true,
+ "AF_VENDOR35": true,
+ "AF_VENDOR36": true,
+ "AF_VENDOR37": true,
+ "AF_VENDOR38": true,
+ "AF_VENDOR39": true,
+ "AF_VENDOR40": true,
+ "AF_VENDOR41": true,
+ "AF_VENDOR42": true,
+ "AF_VENDOR43": true,
+ "AF_VENDOR44": true,
+ "AF_VENDOR45": true,
+ "AF_VENDOR46": true,
+ "AF_VENDOR47": true,
+ "AF_WANPIPE": true,
+ "AF_X25": true,
+ "AI_CANONNAME": true,
+ "AI_NUMERICHOST": true,
+ "AI_PASSIVE": true,
+ "APPLICATION_ERROR": true,
+ "ARPHRD_ADAPT": true,
+ "ARPHRD_APPLETLK": true,
+ "ARPHRD_ARCNET": true,
+ "ARPHRD_ASH": true,
+ "ARPHRD_ATM": true,
+ "ARPHRD_AX25": true,
+ "ARPHRD_BIF": true,
+ "ARPHRD_CHAOS": true,
+ "ARPHRD_CISCO": true,
+ "ARPHRD_CSLIP": true,
+ "ARPHRD_CSLIP6": true,
+ "ARPHRD_DDCMP": true,
+ "ARPHRD_DLCI": true,
+ "ARPHRD_ECONET": true,
+ "ARPHRD_EETHER": true,
+ "ARPHRD_ETHER": true,
+ "ARPHRD_EUI64": true,
+ "ARPHRD_FCAL": true,
+ "ARPHRD_FCFABRIC": true,
+ "ARPHRD_FCPL": true,
+ "ARPHRD_FCPP": true,
+ "ARPHRD_FDDI": true,
+ "ARPHRD_FRAD": true,
+ "ARPHRD_FRELAY": true,
+ "ARPHRD_HDLC": true,
+ "ARPHRD_HIPPI": true,
+ "ARPHRD_HWX25": true,
+ "ARPHRD_IEEE1394": true,
+ "ARPHRD_IEEE802": true,
+ "ARPHRD_IEEE80211": true,
+ "ARPHRD_IEEE80211_PRISM": true,
+ "ARPHRD_IEEE80211_RADIOTAP": true,
+ "ARPHRD_IEEE802154": true,
+ "ARPHRD_IEEE802154_PHY": true,
+ "ARPHRD_IEEE802_TR": true,
+ "ARPHRD_INFINIBAND": true,
+ "ARPHRD_IPDDP": true,
+ "ARPHRD_IPGRE": true,
+ "ARPHRD_IRDA": true,
+ "ARPHRD_LAPB": true,
+ "ARPHRD_LOCALTLK": true,
+ "ARPHRD_LOOPBACK": true,
+ "ARPHRD_METRICOM": true,
+ "ARPHRD_NETROM": true,
+ "ARPHRD_NONE": true,
+ "ARPHRD_PIMREG": true,
+ "ARPHRD_PPP": true,
+ "ARPHRD_PRONET": true,
+ "ARPHRD_RAWHDLC": true,
+ "ARPHRD_ROSE": true,
+ "ARPHRD_RSRVD": true,
+ "ARPHRD_SIT": true,
+ "ARPHRD_SKIP": true,
+ "ARPHRD_SLIP": true,
+ "ARPHRD_SLIP6": true,
+ "ARPHRD_STRIP": true,
+ "ARPHRD_TUNNEL": true,
+ "ARPHRD_TUNNEL6": true,
+ "ARPHRD_VOID": true,
+ "ARPHRD_X25": true,
+ "AUTHTYPE_CLIENT": true,
+ "AUTHTYPE_SERVER": true,
+ "Accept": true,
+ "Accept4": true,
+ "AcceptEx": true,
+ "Access": true,
+ "Acct": true,
+ "AddrinfoW": true,
+ "Adjtime": true,
+ "Adjtimex": true,
+ "AttachLsf": true,
+ "B0": true,
+ "B1000000": true,
+ "B110": true,
+ "B115200": true,
+ "B1152000": true,
+ "B1200": true,
+ "B134": true,
+ "B14400": true,
+ "B150": true,
+ "B1500000": true,
+ "B1800": true,
+ "B19200": true,
+ "B200": true,
+ "B2000000": true,
+ "B230400": true,
+ "B2400": true,
+ "B2500000": true,
+ "B28800": true,
+ "B300": true,
+ "B3000000": true,
+ "B3500000": true,
+ "B38400": true,
+ "B4000000": true,
+ "B460800": true,
+ "B4800": true,
+ "B50": true,
+ "B500000": true,
+ "B57600": true,
+ "B576000": true,
+ "B600": true,
+ "B7200": true,
+ "B75": true,
+ "B76800": true,
+ "B921600": true,
+ "B9600": true,
+ "BASE_PROTOCOL": true,
+ "BIOCFEEDBACK": true,
+ "BIOCFLUSH": true,
+ "BIOCGBLEN": true,
+ "BIOCGDIRECTION": true,
+ "BIOCGDIRFILT": true,
+ "BIOCGDLT": true,
+ "BIOCGDLTLIST": true,
+ "BIOCGETBUFMODE": true,
+ "BIOCGETIF": true,
+ "BIOCGETZMAX": true,
+ "BIOCGFEEDBACK": true,
+ "BIOCGFILDROP": true,
+ "BIOCGHDRCMPLT": true,
+ "BIOCGRSIG": true,
+ "BIOCGRTIMEOUT": true,
+ "BIOCGSEESENT": true,
+ "BIOCGSTATS": true,
+ "BIOCGSTATSOLD": true,
+ "BIOCGTSTAMP": true,
+ "BIOCIMMEDIATE": true,
+ "BIOCLOCK": true,
+ "BIOCPROMISC": true,
+ "BIOCROTZBUF": true,
+ "BIOCSBLEN": true,
+ "BIOCSDIRECTION": true,
+ "BIOCSDIRFILT": true,
+ "BIOCSDLT": true,
+ "BIOCSETBUFMODE": true,
+ "BIOCSETF": true,
+ "BIOCSETFNR": true,
+ "BIOCSETIF": true,
+ "BIOCSETWF": true,
+ "BIOCSETZBUF": true,
+ "BIOCSFEEDBACK": true,
+ "BIOCSFILDROP": true,
+ "BIOCSHDRCMPLT": true,
+ "BIOCSRSIG": true,
+ "BIOCSRTIMEOUT": true,
+ "BIOCSSEESENT": true,
+ "BIOCSTCPF": true,
+ "BIOCSTSTAMP": true,
+ "BIOCSUDPF": true,
+ "BIOCVERSION": true,
+ "BPF_A": true,
+ "BPF_ABS": true,
+ "BPF_ADD": true,
+ "BPF_ALIGNMENT": true,
+ "BPF_ALIGNMENT32": true,
+ "BPF_ALU": true,
+ "BPF_AND": true,
+ "BPF_B": true,
+ "BPF_BUFMODE_BUFFER": true,
+ "BPF_BUFMODE_ZBUF": true,
+ "BPF_DFLTBUFSIZE": true,
+ "BPF_DIRECTION_IN": true,
+ "BPF_DIRECTION_OUT": true,
+ "BPF_DIV": true,
+ "BPF_H": true,
+ "BPF_IMM": true,
+ "BPF_IND": true,
+ "BPF_JA": true,
+ "BPF_JEQ": true,
+ "BPF_JGE": true,
+ "BPF_JGT": true,
+ "BPF_JMP": true,
+ "BPF_JSET": true,
+ "BPF_K": true,
+ "BPF_LD": true,
+ "BPF_LDX": true,
+ "BPF_LEN": true,
+ "BPF_LSH": true,
+ "BPF_MAJOR_VERSION": true,
+ "BPF_MAXBUFSIZE": true,
+ "BPF_MAXINSNS": true,
+ "BPF_MEM": true,
+ "BPF_MEMWORDS": true,
+ "BPF_MINBUFSIZE": true,
+ "BPF_MINOR_VERSION": true,
+ "BPF_MISC": true,
+ "BPF_MSH": true,
+ "BPF_MUL": true,
+ "BPF_NEG": true,
+ "BPF_OR": true,
+ "BPF_RELEASE": true,
+ "BPF_RET": true,
+ "BPF_RSH": true,
+ "BPF_ST": true,
+ "BPF_STX": true,
+ "BPF_SUB": true,
+ "BPF_TAX": true,
+ "BPF_TXA": true,
+ "BPF_T_BINTIME": true,
+ "BPF_T_BINTIME_FAST": true,
+ "BPF_T_BINTIME_MONOTONIC": true,
+ "BPF_T_BINTIME_MONOTONIC_FAST": true,
+ "BPF_T_FAST": true,
+ "BPF_T_FLAG_MASK": true,
+ "BPF_T_FORMAT_MASK": true,
+ "BPF_T_MICROTIME": true,
+ "BPF_T_MICROTIME_FAST": true,
+ "BPF_T_MICROTIME_MONOTONIC": true,
+ "BPF_T_MICROTIME_MONOTONIC_FAST": true,
+ "BPF_T_MONOTONIC": true,
+ "BPF_T_MONOTONIC_FAST": true,
+ "BPF_T_NANOTIME": true,
+ "BPF_T_NANOTIME_FAST": true,
+ "BPF_T_NANOTIME_MONOTONIC": true,
+ "BPF_T_NANOTIME_MONOTONIC_FAST": true,
+ "BPF_T_NONE": true,
+ "BPF_T_NORMAL": true,
+ "BPF_W": true,
+ "BPF_X": true,
+ "BRKINT": true,
+ "Bind": true,
+ "BindToDevice": true,
+ "BpfBuflen": true,
+ "BpfDatalink": true,
+ "BpfHdr": true,
+ "BpfHeadercmpl": true,
+ "BpfInsn": true,
+ "BpfInterface": true,
+ "BpfJump": true,
+ "BpfProgram": true,
+ "BpfStat": true,
+ "BpfStats": true,
+ "BpfStmt": true,
+ "BpfTimeout": true,
+ "BpfTimeval": true,
+ "BpfVersion": true,
+ "BpfZbuf": true,
+ "BpfZbufHeader": true,
+ "ByHandleFileInformation": true,
+ "BytePtrFromString": true,
+ "ByteSliceFromString": true,
+ "CCR0_FLUSH": true,
+ "CERT_CHAIN_POLICY_AUTHENTICODE": true,
+ "CERT_CHAIN_POLICY_AUTHENTICODE_TS": true,
+ "CERT_CHAIN_POLICY_BASE": true,
+ "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": true,
+ "CERT_CHAIN_POLICY_EV": true,
+ "CERT_CHAIN_POLICY_MICROSOFT_ROOT": true,
+ "CERT_CHAIN_POLICY_NT_AUTH": true,
+ "CERT_CHAIN_POLICY_SSL": true,
+ "CERT_E_CN_NO_MATCH": true,
+ "CERT_E_EXPIRED": true,
+ "CERT_E_PURPOSE": true,
+ "CERT_E_ROLE": true,
+ "CERT_E_UNTRUSTEDROOT": true,
+ "CERT_STORE_ADD_ALWAYS": true,
+ "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": true,
+ "CERT_STORE_PROV_MEMORY": true,
+ "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": true,
+ "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_INVALID_BASIC_CONSTRAINTS": true,
+ "CERT_TRUST_INVALID_EXTENSION": true,
+ "CERT_TRUST_INVALID_NAME_CONSTRAINTS": true,
+ "CERT_TRUST_INVALID_POLICY_CONSTRAINTS": true,
+ "CERT_TRUST_IS_CYCLIC": true,
+ "CERT_TRUST_IS_EXPLICIT_DISTRUST": true,
+ "CERT_TRUST_IS_NOT_SIGNATURE_VALID": true,
+ "CERT_TRUST_IS_NOT_TIME_VALID": true,
+ "CERT_TRUST_IS_NOT_VALID_FOR_USAGE": true,
+ "CERT_TRUST_IS_OFFLINE_REVOCATION": true,
+ "CERT_TRUST_IS_REVOKED": true,
+ "CERT_TRUST_IS_UNTRUSTED_ROOT": true,
+ "CERT_TRUST_NO_ERROR": true,
+ "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": true,
+ "CERT_TRUST_REVOCATION_STATUS_UNKNOWN": true,
+ "CFLUSH": true,
+ "CLOCAL": true,
+ "CLONE_CHILD_CLEARTID": true,
+ "CLONE_CHILD_SETTID": true,
+ "CLONE_CSIGNAL": true,
+ "CLONE_DETACHED": true,
+ "CLONE_FILES": true,
+ "CLONE_FS": true,
+ "CLONE_IO": true,
+ "CLONE_NEWIPC": true,
+ "CLONE_NEWNET": true,
+ "CLONE_NEWNS": true,
+ "CLONE_NEWPID": true,
+ "CLONE_NEWUSER": true,
+ "CLONE_NEWUTS": true,
+ "CLONE_PARENT": true,
+ "CLONE_PARENT_SETTID": true,
+ "CLONE_PID": true,
+ "CLONE_PTRACE": true,
+ "CLONE_SETTLS": true,
+ "CLONE_SIGHAND": true,
+ "CLONE_SYSVSEM": true,
+ "CLONE_THREAD": true,
+ "CLONE_UNTRACED": true,
+ "CLONE_VFORK": true,
+ "CLONE_VM": true,
+ "CPUID_CFLUSH": true,
+ "CREAD": true,
+ "CREATE_ALWAYS": true,
+ "CREATE_NEW": true,
+ "CREATE_NEW_PROCESS_GROUP": true,
+ "CREATE_UNICODE_ENVIRONMENT": true,
+ "CRYPT_DEFAULT_CONTAINER_OPTIONAL": true,
+ "CRYPT_DELETEKEYSET": true,
+ "CRYPT_MACHINE_KEYSET": true,
+ "CRYPT_NEWKEYSET": true,
+ "CRYPT_SILENT": true,
+ "CRYPT_VERIFYCONTEXT": true,
+ "CS5": true,
+ "CS6": true,
+ "CS7": true,
+ "CS8": true,
+ "CSIZE": true,
+ "CSTART": true,
+ "CSTATUS": true,
+ "CSTOP": true,
+ "CSTOPB": true,
+ "CSUSP": true,
+ "CTL_MAXNAME": true,
+ "CTL_NET": true,
+ "CTL_QUERY": true,
+ "CTRL_BREAK_EVENT": true,
+ "CTRL_C_EVENT": true,
+ "CancelIo": true,
+ "CancelIoEx": true,
+ "CertAddCertificateContextToStore": true,
+ "CertChainContext": true,
+ "CertChainElement": true,
+ "CertChainPara": true,
+ "CertChainPolicyPara": true,
+ "CertChainPolicyStatus": true,
+ "CertCloseStore": true,
+ "CertContext": true,
+ "CertCreateCertificateContext": true,
+ "CertEnhKeyUsage": true,
+ "CertEnumCertificatesInStore": true,
+ "CertFreeCertificateChain": true,
+ "CertFreeCertificateContext": true,
+ "CertGetCertificateChain": true,
+ "CertInfo": true,
+ "CertOpenStore": true,
+ "CertOpenSystemStore": true,
+ "CertRevocationCrlInfo": true,
+ "CertRevocationInfo": true,
+ "CertSimpleChain": true,
+ "CertTrustListInfo": true,
+ "CertTrustStatus": true,
+ "CertUsageMatch": true,
+ "CertVerifyCertificateChainPolicy": true,
+ "Chdir": true,
+ "CheckBpfVersion": true,
+ "Chflags": true,
+ "Chmod": true,
+ "Chown": true,
+ "Chroot": true,
+ "Clearenv": true,
+ "Close": true,
+ "CloseHandle": true,
+ "CloseOnExec": true,
+ "Closesocket": true,
+ "CmsgLen": true,
+ "CmsgSpace": true,
+ "Cmsghdr": true,
+ "CommandLineToArgv": true,
+ "ComputerName": true,
+ "Conn": true,
+ "Connect": true,
+ "ConnectEx": true,
+ "ConvertSidToStringSid": true,
+ "ConvertStringSidToSid": true,
+ "CopySid": true,
+ "Creat": true,
+ "CreateDirectory": true,
+ "CreateFile": true,
+ "CreateFileMapping": true,
+ "CreateHardLink": true,
+ "CreateIoCompletionPort": true,
+ "CreatePipe": true,
+ "CreateProcess": true,
+ "CreateProcessAsUser": true,
+ "CreateSymbolicLink": true,
+ "CreateToolhelp32Snapshot": true,
+ "Credential": true,
+ "CryptAcquireContext": true,
+ "CryptGenRandom": true,
+ "CryptReleaseContext": true,
+ "DIOCBSFLUSH": true,
+ "DIOCOSFPFLUSH": true,
+ "DLL": true,
+ "DLLError": true,
+ "DLT_A429": true,
+ "DLT_A653_ICM": true,
+ "DLT_AIRONET_HEADER": true,
+ "DLT_AOS": true,
+ "DLT_APPLE_IP_OVER_IEEE1394": true,
+ "DLT_ARCNET": true,
+ "DLT_ARCNET_LINUX": true,
+ "DLT_ATM_CLIP": true,
+ "DLT_ATM_RFC1483": true,
+ "DLT_AURORA": true,
+ "DLT_AX25": true,
+ "DLT_AX25_KISS": true,
+ "DLT_BACNET_MS_TP": true,
+ "DLT_BLUETOOTH_HCI_H4": true,
+ "DLT_BLUETOOTH_HCI_H4_WITH_PHDR": true,
+ "DLT_CAN20B": true,
+ "DLT_CAN_SOCKETCAN": true,
+ "DLT_CHAOS": true,
+ "DLT_CHDLC": true,
+ "DLT_CISCO_IOS": true,
+ "DLT_C_HDLC": true,
+ "DLT_C_HDLC_WITH_DIR": true,
+ "DLT_DBUS": true,
+ "DLT_DECT": true,
+ "DLT_DOCSIS": true,
+ "DLT_DVB_CI": true,
+ "DLT_ECONET": true,
+ "DLT_EN10MB": true,
+ "DLT_EN3MB": true,
+ "DLT_ENC": true,
+ "DLT_ERF": true,
+ "DLT_ERF_ETH": true,
+ "DLT_ERF_POS": true,
+ "DLT_FC_2": true,
+ "DLT_FC_2_WITH_FRAME_DELIMS": true,
+ "DLT_FDDI": true,
+ "DLT_FLEXRAY": true,
+ "DLT_FRELAY": true,
+ "DLT_FRELAY_WITH_DIR": true,
+ "DLT_GCOM_SERIAL": true,
+ "DLT_GCOM_T1E1": true,
+ "DLT_GPF_F": true,
+ "DLT_GPF_T": true,
+ "DLT_GPRS_LLC": true,
+ "DLT_GSMTAP_ABIS": true,
+ "DLT_GSMTAP_UM": true,
+ "DLT_HDLC": true,
+ "DLT_HHDLC": true,
+ "DLT_HIPPI": true,
+ "DLT_IBM_SN": true,
+ "DLT_IBM_SP": true,
+ "DLT_IEEE802": true,
+ "DLT_IEEE802_11": true,
+ "DLT_IEEE802_11_RADIO": true,
+ "DLT_IEEE802_11_RADIO_AVS": true,
+ "DLT_IEEE802_15_4": true,
+ "DLT_IEEE802_15_4_LINUX": true,
+ "DLT_IEEE802_15_4_NOFCS": true,
+ "DLT_IEEE802_15_4_NONASK_PHY": true,
+ "DLT_IEEE802_16_MAC_CPS": true,
+ "DLT_IEEE802_16_MAC_CPS_RADIO": true,
+ "DLT_IPFILTER": true,
+ "DLT_IPMB": true,
+ "DLT_IPMB_LINUX": true,
+ "DLT_IPNET": true,
+ "DLT_IPOIB": true,
+ "DLT_IPV4": true,
+ "DLT_IPV6": true,
+ "DLT_IP_OVER_FC": true,
+ "DLT_JUNIPER_ATM1": true,
+ "DLT_JUNIPER_ATM2": true,
+ "DLT_JUNIPER_ATM_CEMIC": true,
+ "DLT_JUNIPER_CHDLC": true,
+ "DLT_JUNIPER_ES": true,
+ "DLT_JUNIPER_ETHER": true,
+ "DLT_JUNIPER_FIBRECHANNEL": true,
+ "DLT_JUNIPER_FRELAY": true,
+ "DLT_JUNIPER_GGSN": true,
+ "DLT_JUNIPER_ISM": true,
+ "DLT_JUNIPER_MFR": true,
+ "DLT_JUNIPER_MLFR": true,
+ "DLT_JUNIPER_MLPPP": true,
+ "DLT_JUNIPER_MONITOR": true,
+ "DLT_JUNIPER_PIC_PEER": true,
+ "DLT_JUNIPER_PPP": true,
+ "DLT_JUNIPER_PPPOE": true,
+ "DLT_JUNIPER_PPPOE_ATM": true,
+ "DLT_JUNIPER_SERVICES": true,
+ "DLT_JUNIPER_SRX_E2E": true,
+ "DLT_JUNIPER_ST": true,
+ "DLT_JUNIPER_VP": true,
+ "DLT_JUNIPER_VS": true,
+ "DLT_LAPB_WITH_DIR": true,
+ "DLT_LAPD": true,
+ "DLT_LIN": true,
+ "DLT_LINUX_EVDEV": true,
+ "DLT_LINUX_IRDA": true,
+ "DLT_LINUX_LAPD": true,
+ "DLT_LINUX_PPP_WITHDIRECTION": true,
+ "DLT_LINUX_SLL": true,
+ "DLT_LOOP": true,
+ "DLT_LTALK": true,
+ "DLT_MATCHING_MAX": true,
+ "DLT_MATCHING_MIN": true,
+ "DLT_MFR": true,
+ "DLT_MOST": true,
+ "DLT_MPEG_2_TS": true,
+ "DLT_MPLS": true,
+ "DLT_MTP2": true,
+ "DLT_MTP2_WITH_PHDR": true,
+ "DLT_MTP3": true,
+ "DLT_MUX27010": true,
+ "DLT_NETANALYZER": true,
+ "DLT_NETANALYZER_TRANSPARENT": true,
+ "DLT_NFC_LLCP": true,
+ "DLT_NFLOG": true,
+ "DLT_NG40": true,
+ "DLT_NULL": true,
+ "DLT_PCI_EXP": true,
+ "DLT_PFLOG": true,
+ "DLT_PFSYNC": true,
+ "DLT_PPI": true,
+ "DLT_PPP": true,
+ "DLT_PPP_BSDOS": true,
+ "DLT_PPP_ETHER": true,
+ "DLT_PPP_PPPD": true,
+ "DLT_PPP_SERIAL": true,
+ "DLT_PPP_WITH_DIR": true,
+ "DLT_PPP_WITH_DIRECTION": true,
+ "DLT_PRISM_HEADER": true,
+ "DLT_PRONET": true,
+ "DLT_RAIF1": true,
+ "DLT_RAW": true,
+ "DLT_RAWAF_MASK": true,
+ "DLT_RIO": true,
+ "DLT_SCCP": true,
+ "DLT_SITA": true,
+ "DLT_SLIP": true,
+ "DLT_SLIP_BSDOS": true,
+ "DLT_STANAG_5066_D_PDU": true,
+ "DLT_SUNATM": true,
+ "DLT_SYMANTEC_FIREWALL": true,
+ "DLT_TZSP": true,
+ "DLT_USB": true,
+ "DLT_USB_LINUX": true,
+ "DLT_USB_LINUX_MMAPPED": true,
+ "DLT_USER0": true,
+ "DLT_USER1": true,
+ "DLT_USER10": true,
+ "DLT_USER11": true,
+ "DLT_USER12": true,
+ "DLT_USER13": true,
+ "DLT_USER14": true,
+ "DLT_USER15": true,
+ "DLT_USER2": true,
+ "DLT_USER3": true,
+ "DLT_USER4": true,
+ "DLT_USER5": true,
+ "DLT_USER6": true,
+ "DLT_USER7": true,
+ "DLT_USER8": true,
+ "DLT_USER9": true,
+ "DLT_WIHART": true,
+ "DLT_X2E_SERIAL": true,
+ "DLT_X2E_XORAYA": true,
+ "DNSMXData": true,
+ "DNSPTRData": true,
+ "DNSRecord": true,
+ "DNSSRVData": true,
+ "DNSTXTData": true,
+ "DNS_INFO_NO_RECORDS": true,
+ "DNS_TYPE_A": true,
+ "DNS_TYPE_A6": true,
+ "DNS_TYPE_AAAA": true,
+ "DNS_TYPE_ADDRS": true,
+ "DNS_TYPE_AFSDB": true,
+ "DNS_TYPE_ALL": true,
+ "DNS_TYPE_ANY": true,
+ "DNS_TYPE_ATMA": true,
+ "DNS_TYPE_AXFR": true,
+ "DNS_TYPE_CERT": true,
+ "DNS_TYPE_CNAME": true,
+ "DNS_TYPE_DHCID": true,
+ "DNS_TYPE_DNAME": true,
+ "DNS_TYPE_DNSKEY": true,
+ "DNS_TYPE_DS": true,
+ "DNS_TYPE_EID": true,
+ "DNS_TYPE_GID": true,
+ "DNS_TYPE_GPOS": true,
+ "DNS_TYPE_HINFO": true,
+ "DNS_TYPE_ISDN": true,
+ "DNS_TYPE_IXFR": true,
+ "DNS_TYPE_KEY": true,
+ "DNS_TYPE_KX": true,
+ "DNS_TYPE_LOC": true,
+ "DNS_TYPE_MAILA": true,
+ "DNS_TYPE_MAILB": true,
+ "DNS_TYPE_MB": true,
+ "DNS_TYPE_MD": true,
+ "DNS_TYPE_MF": true,
+ "DNS_TYPE_MG": true,
+ "DNS_TYPE_MINFO": true,
+ "DNS_TYPE_MR": true,
+ "DNS_TYPE_MX": true,
+ "DNS_TYPE_NAPTR": true,
+ "DNS_TYPE_NBSTAT": true,
+ "DNS_TYPE_NIMLOC": true,
+ "DNS_TYPE_NS": true,
+ "DNS_TYPE_NSAP": true,
+ "DNS_TYPE_NSAPPTR": true,
+ "DNS_TYPE_NSEC": true,
+ "DNS_TYPE_NULL": true,
+ "DNS_TYPE_NXT": true,
+ "DNS_TYPE_OPT": true,
+ "DNS_TYPE_PTR": true,
+ "DNS_TYPE_PX": true,
+ "DNS_TYPE_RP": true,
+ "DNS_TYPE_RRSIG": true,
+ "DNS_TYPE_RT": true,
+ "DNS_TYPE_SIG": true,
+ "DNS_TYPE_SINK": true,
+ "DNS_TYPE_SOA": true,
+ "DNS_TYPE_SRV": true,
+ "DNS_TYPE_TEXT": true,
+ "DNS_TYPE_TKEY": true,
+ "DNS_TYPE_TSIG": true,
+ "DNS_TYPE_UID": true,
+ "DNS_TYPE_UINFO": true,
+ "DNS_TYPE_UNSPEC": true,
+ "DNS_TYPE_WINS": true,
+ "DNS_TYPE_WINSR": true,
+ "DNS_TYPE_WKS": true,
+ "DNS_TYPE_X25": true,
+ "DT_BLK": true,
+ "DT_CHR": true,
+ "DT_DIR": true,
+ "DT_FIFO": true,
+ "DT_LNK": true,
+ "DT_REG": true,
+ "DT_SOCK": true,
+ "DT_UNKNOWN": true,
+ "DT_WHT": true,
+ "DUPLICATE_CLOSE_SOURCE": true,
+ "DUPLICATE_SAME_ACCESS": true,
+ "DeleteFile": true,
+ "DetachLsf": true,
+ "DeviceIoControl": true,
+ "Dirent": true,
+ "DnsNameCompare": true,
+ "DnsQuery": true,
+ "DnsRecordListFree": true,
+ "DnsSectionAdditional": true,
+ "DnsSectionAnswer": true,
+ "DnsSectionAuthority": true,
+ "DnsSectionQuestion": true,
+ "Dup": true,
+ "Dup2": true,
+ "Dup3": true,
+ "DuplicateHandle": true,
+ "E2BIG": true,
+ "EACCES": true,
+ "EADDRINUSE": true,
+ "EADDRNOTAVAIL": true,
+ "EADV": true,
+ "EAFNOSUPPORT": true,
+ "EAGAIN": true,
+ "EALREADY": true,
+ "EAUTH": true,
+ "EBADARCH": true,
+ "EBADE": true,
+ "EBADEXEC": true,
+ "EBADF": true,
+ "EBADFD": true,
+ "EBADMACHO": true,
+ "EBADMSG": true,
+ "EBADR": true,
+ "EBADRPC": true,
+ "EBADRQC": true,
+ "EBADSLT": true,
+ "EBFONT": true,
+ "EBUSY": true,
+ "ECANCELED": true,
+ "ECAPMODE": true,
+ "ECHILD": true,
+ "ECHO": true,
+ "ECHOCTL": true,
+ "ECHOE": true,
+ "ECHOK": true,
+ "ECHOKE": true,
+ "ECHONL": true,
+ "ECHOPRT": true,
+ "ECHRNG": true,
+ "ECOMM": true,
+ "ECONNABORTED": true,
+ "ECONNREFUSED": true,
+ "ECONNRESET": true,
+ "EDEADLK": true,
+ "EDEADLOCK": true,
+ "EDESTADDRREQ": true,
+ "EDEVERR": true,
+ "EDOM": true,
+ "EDOOFUS": true,
+ "EDOTDOT": true,
+ "EDQUOT": true,
+ "EEXIST": true,
+ "EFAULT": true,
+ "EFBIG": true,
+ "EFER_LMA": true,
+ "EFER_LME": true,
+ "EFER_NXE": true,
+ "EFER_SCE": true,
+ "EFTYPE": true,
+ "EHOSTDOWN": true,
+ "EHOSTUNREACH": true,
+ "EHWPOISON": true,
+ "EIDRM": true,
+ "EILSEQ": true,
+ "EINPROGRESS": true,
+ "EINTR": true,
+ "EINVAL": true,
+ "EIO": true,
+ "EIPSEC": true,
+ "EISCONN": true,
+ "EISDIR": true,
+ "EISNAM": true,
+ "EKEYEXPIRED": true,
+ "EKEYREJECTED": true,
+ "EKEYREVOKED": true,
+ "EL2HLT": true,
+ "EL2NSYNC": true,
+ "EL3HLT": true,
+ "EL3RST": true,
+ "ELAST": true,
+ "ELF_NGREG": true,
+ "ELF_PRARGSZ": true,
+ "ELIBACC": true,
+ "ELIBBAD": true,
+ "ELIBEXEC": true,
+ "ELIBMAX": true,
+ "ELIBSCN": true,
+ "ELNRNG": true,
+ "ELOOP": true,
+ "EMEDIUMTYPE": true,
+ "EMFILE": true,
+ "EMLINK": true,
+ "EMSGSIZE": true,
+ "EMT_TAGOVF": true,
+ "EMULTIHOP": true,
+ "EMUL_ENABLED": true,
+ "EMUL_LINUX": true,
+ "EMUL_LINUX32": true,
+ "EMUL_MAXID": true,
+ "EMUL_NATIVE": true,
+ "ENAMETOOLONG": true,
+ "ENAVAIL": true,
+ "ENDRUNDISC": true,
+ "ENEEDAUTH": true,
+ "ENETDOWN": true,
+ "ENETRESET": true,
+ "ENETUNREACH": true,
+ "ENFILE": true,
+ "ENOANO": true,
+ "ENOATTR": true,
+ "ENOBUFS": true,
+ "ENOCSI": true,
+ "ENODATA": true,
+ "ENODEV": true,
+ "ENOENT": true,
+ "ENOEXEC": true,
+ "ENOKEY": true,
+ "ENOLCK": true,
+ "ENOLINK": true,
+ "ENOMEDIUM": true,
+ "ENOMEM": true,
+ "ENOMSG": true,
+ "ENONET": true,
+ "ENOPKG": true,
+ "ENOPOLICY": true,
+ "ENOPROTOOPT": true,
+ "ENOSPC": true,
+ "ENOSR": true,
+ "ENOSTR": true,
+ "ENOSYS": true,
+ "ENOTBLK": true,
+ "ENOTCAPABLE": true,
+ "ENOTCONN": true,
+ "ENOTDIR": true,
+ "ENOTEMPTY": true,
+ "ENOTNAM": true,
+ "ENOTRECOVERABLE": true,
+ "ENOTSOCK": true,
+ "ENOTSUP": true,
+ "ENOTTY": true,
+ "ENOTUNIQ": true,
+ "ENXIO": true,
+ "EN_SW_CTL_INF": true,
+ "EN_SW_CTL_PREC": true,
+ "EN_SW_CTL_ROUND": true,
+ "EN_SW_DATACHAIN": true,
+ "EN_SW_DENORM": true,
+ "EN_SW_INVOP": true,
+ "EN_SW_OVERFLOW": true,
+ "EN_SW_PRECLOSS": true,
+ "EN_SW_UNDERFLOW": true,
+ "EN_SW_ZERODIV": true,
+ "EOPNOTSUPP": true,
+ "EOVERFLOW": true,
+ "EOWNERDEAD": true,
+ "EPERM": true,
+ "EPFNOSUPPORT": true,
+ "EPIPE": true,
+ "EPOLLERR": true,
+ "EPOLLET": true,
+ "EPOLLHUP": true,
+ "EPOLLIN": true,
+ "EPOLLMSG": true,
+ "EPOLLONESHOT": true,
+ "EPOLLOUT": true,
+ "EPOLLPRI": true,
+ "EPOLLRDBAND": true,
+ "EPOLLRDHUP": true,
+ "EPOLLRDNORM": true,
+ "EPOLLWRBAND": true,
+ "EPOLLWRNORM": true,
+ "EPOLL_CLOEXEC": true,
+ "EPOLL_CTL_ADD": true,
+ "EPOLL_CTL_DEL": true,
+ "EPOLL_CTL_MOD": true,
+ "EPOLL_NONBLOCK": true,
+ "EPROCLIM": true,
+ "EPROCUNAVAIL": true,
+ "EPROGMISMATCH": true,
+ "EPROGUNAVAIL": true,
+ "EPROTO": true,
+ "EPROTONOSUPPORT": true,
+ "EPROTOTYPE": true,
+ "EPWROFF": true,
+ "ERANGE": true,
+ "EREMCHG": true,
+ "EREMOTE": true,
+ "EREMOTEIO": true,
+ "ERESTART": true,
+ "ERFKILL": true,
+ "EROFS": true,
+ "ERPCMISMATCH": true,
+ "ERROR_ACCESS_DENIED": true,
+ "ERROR_ALREADY_EXISTS": true,
+ "ERROR_BROKEN_PIPE": true,
+ "ERROR_BUFFER_OVERFLOW": true,
+ "ERROR_DIR_NOT_EMPTY": true,
+ "ERROR_ENVVAR_NOT_FOUND": true,
+ "ERROR_FILE_EXISTS": true,
+ "ERROR_FILE_NOT_FOUND": true,
+ "ERROR_HANDLE_EOF": true,
+ "ERROR_INSUFFICIENT_BUFFER": true,
+ "ERROR_IO_PENDING": true,
+ "ERROR_MOD_NOT_FOUND": true,
+ "ERROR_MORE_DATA": true,
+ "ERROR_NETNAME_DELETED": true,
+ "ERROR_NOT_FOUND": true,
+ "ERROR_NO_MORE_FILES": true,
+ "ERROR_OPERATION_ABORTED": true,
+ "ERROR_PATH_NOT_FOUND": true,
+ "ERROR_PRIVILEGE_NOT_HELD": true,
+ "ERROR_PROC_NOT_FOUND": true,
+ "ESHLIBVERS": true,
+ "ESHUTDOWN": true,
+ "ESOCKTNOSUPPORT": true,
+ "ESPIPE": true,
+ "ESRCH": true,
+ "ESRMNT": true,
+ "ESTALE": true,
+ "ESTRPIPE": true,
+ "ETHERCAP_JUMBO_MTU": true,
+ "ETHERCAP_VLAN_HWTAGGING": true,
+ "ETHERCAP_VLAN_MTU": true,
+ "ETHERMIN": true,
+ "ETHERMTU": true,
+ "ETHERMTU_JUMBO": true,
+ "ETHERTYPE_8023": true,
+ "ETHERTYPE_AARP": true,
+ "ETHERTYPE_ACCTON": true,
+ "ETHERTYPE_AEONIC": true,
+ "ETHERTYPE_ALPHA": true,
+ "ETHERTYPE_AMBER": true,
+ "ETHERTYPE_AMOEBA": true,
+ "ETHERTYPE_AOE": true,
+ "ETHERTYPE_APOLLO": true,
+ "ETHERTYPE_APOLLODOMAIN": true,
+ "ETHERTYPE_APPLETALK": true,
+ "ETHERTYPE_APPLITEK": true,
+ "ETHERTYPE_ARGONAUT": true,
+ "ETHERTYPE_ARP": true,
+ "ETHERTYPE_AT": true,
+ "ETHERTYPE_ATALK": true,
+ "ETHERTYPE_ATOMIC": true,
+ "ETHERTYPE_ATT": true,
+ "ETHERTYPE_ATTSTANFORD": true,
+ "ETHERTYPE_AUTOPHON": true,
+ "ETHERTYPE_AXIS": true,
+ "ETHERTYPE_BCLOOP": true,
+ "ETHERTYPE_BOFL": true,
+ "ETHERTYPE_CABLETRON": true,
+ "ETHERTYPE_CHAOS": true,
+ "ETHERTYPE_COMDESIGN": true,
+ "ETHERTYPE_COMPUGRAPHIC": true,
+ "ETHERTYPE_COUNTERPOINT": true,
+ "ETHERTYPE_CRONUS": true,
+ "ETHERTYPE_CRONUSVLN": true,
+ "ETHERTYPE_DCA": true,
+ "ETHERTYPE_DDE": true,
+ "ETHERTYPE_DEBNI": true,
+ "ETHERTYPE_DECAM": true,
+ "ETHERTYPE_DECCUST": true,
+ "ETHERTYPE_DECDIAG": true,
+ "ETHERTYPE_DECDNS": true,
+ "ETHERTYPE_DECDTS": true,
+ "ETHERTYPE_DECEXPER": true,
+ "ETHERTYPE_DECLAST": true,
+ "ETHERTYPE_DECLTM": true,
+ "ETHERTYPE_DECMUMPS": true,
+ "ETHERTYPE_DECNETBIOS": true,
+ "ETHERTYPE_DELTACON": true,
+ "ETHERTYPE_DIDDLE": true,
+ "ETHERTYPE_DLOG1": true,
+ "ETHERTYPE_DLOG2": true,
+ "ETHERTYPE_DN": true,
+ "ETHERTYPE_DOGFIGHT": true,
+ "ETHERTYPE_DSMD": true,
+ "ETHERTYPE_ECMA": true,
+ "ETHERTYPE_ENCRYPT": true,
+ "ETHERTYPE_ES": true,
+ "ETHERTYPE_EXCELAN": true,
+ "ETHERTYPE_EXPERDATA": true,
+ "ETHERTYPE_FLIP": true,
+ "ETHERTYPE_FLOWCONTROL": true,
+ "ETHERTYPE_FRARP": true,
+ "ETHERTYPE_GENDYN": true,
+ "ETHERTYPE_HAYES": true,
+ "ETHERTYPE_HIPPI_FP": true,
+ "ETHERTYPE_HITACHI": true,
+ "ETHERTYPE_HP": true,
+ "ETHERTYPE_IEEEPUP": true,
+ "ETHERTYPE_IEEEPUPAT": true,
+ "ETHERTYPE_IMLBL": true,
+ "ETHERTYPE_IMLBLDIAG": true,
+ "ETHERTYPE_IP": true,
+ "ETHERTYPE_IPAS": true,
+ "ETHERTYPE_IPV6": true,
+ "ETHERTYPE_IPX": true,
+ "ETHERTYPE_IPXNEW": true,
+ "ETHERTYPE_KALPANA": true,
+ "ETHERTYPE_LANBRIDGE": true,
+ "ETHERTYPE_LANPROBE": true,
+ "ETHERTYPE_LAT": true,
+ "ETHERTYPE_LBACK": true,
+ "ETHERTYPE_LITTLE": true,
+ "ETHERTYPE_LLDP": true,
+ "ETHERTYPE_LOGICRAFT": true,
+ "ETHERTYPE_LOOPBACK": true,
+ "ETHERTYPE_MATRA": true,
+ "ETHERTYPE_MAX": true,
+ "ETHERTYPE_MERIT": true,
+ "ETHERTYPE_MICP": true,
+ "ETHERTYPE_MOPDL": true,
+ "ETHERTYPE_MOPRC": true,
+ "ETHERTYPE_MOTOROLA": true,
+ "ETHERTYPE_MPLS": true,
+ "ETHERTYPE_MPLS_MCAST": true,
+ "ETHERTYPE_MUMPS": true,
+ "ETHERTYPE_NBPCC": true,
+ "ETHERTYPE_NBPCLAIM": true,
+ "ETHERTYPE_NBPCLREQ": true,
+ "ETHERTYPE_NBPCLRSP": true,
+ "ETHERTYPE_NBPCREQ": true,
+ "ETHERTYPE_NBPCRSP": true,
+ "ETHERTYPE_NBPDG": true,
+ "ETHERTYPE_NBPDGB": true,
+ "ETHERTYPE_NBPDLTE": true,
+ "ETHERTYPE_NBPRAR": true,
+ "ETHERTYPE_NBPRAS": true,
+ "ETHERTYPE_NBPRST": true,
+ "ETHERTYPE_NBPSCD": true,
+ "ETHERTYPE_NBPVCD": true,
+ "ETHERTYPE_NBS": true,
+ "ETHERTYPE_NCD": true,
+ "ETHERTYPE_NESTAR": true,
+ "ETHERTYPE_NETBEUI": true,
+ "ETHERTYPE_NOVELL": true,
+ "ETHERTYPE_NS": true,
+ "ETHERTYPE_NSAT": true,
+ "ETHERTYPE_NSCOMPAT": true,
+ "ETHERTYPE_NTRAILER": true,
+ "ETHERTYPE_OS9": true,
+ "ETHERTYPE_OS9NET": true,
+ "ETHERTYPE_PACER": true,
+ "ETHERTYPE_PAE": true,
+ "ETHERTYPE_PCS": true,
+ "ETHERTYPE_PLANNING": true,
+ "ETHERTYPE_PPP": true,
+ "ETHERTYPE_PPPOE": true,
+ "ETHERTYPE_PPPOEDISC": true,
+ "ETHERTYPE_PRIMENTS": true,
+ "ETHERTYPE_PUP": true,
+ "ETHERTYPE_PUPAT": true,
+ "ETHERTYPE_QINQ": true,
+ "ETHERTYPE_RACAL": true,
+ "ETHERTYPE_RATIONAL": true,
+ "ETHERTYPE_RAWFR": true,
+ "ETHERTYPE_RCL": true,
+ "ETHERTYPE_RDP": true,
+ "ETHERTYPE_RETIX": true,
+ "ETHERTYPE_REVARP": true,
+ "ETHERTYPE_SCA": true,
+ "ETHERTYPE_SECTRA": true,
+ "ETHERTYPE_SECUREDATA": true,
+ "ETHERTYPE_SGITW": true,
+ "ETHERTYPE_SG_BOUNCE": true,
+ "ETHERTYPE_SG_DIAG": true,
+ "ETHERTYPE_SG_NETGAMES": true,
+ "ETHERTYPE_SG_RESV": true,
+ "ETHERTYPE_SIMNET": true,
+ "ETHERTYPE_SLOW": true,
+ "ETHERTYPE_SLOWPROTOCOLS": true,
+ "ETHERTYPE_SNA": true,
+ "ETHERTYPE_SNMP": true,
+ "ETHERTYPE_SONIX": true,
+ "ETHERTYPE_SPIDER": true,
+ "ETHERTYPE_SPRITE": true,
+ "ETHERTYPE_STP": true,
+ "ETHERTYPE_TALARIS": true,
+ "ETHERTYPE_TALARISMC": true,
+ "ETHERTYPE_TCPCOMP": true,
+ "ETHERTYPE_TCPSM": true,
+ "ETHERTYPE_TEC": true,
+ "ETHERTYPE_TIGAN": true,
+ "ETHERTYPE_TRAIL": true,
+ "ETHERTYPE_TRANSETHER": true,
+ "ETHERTYPE_TYMSHARE": true,
+ "ETHERTYPE_UBBST": true,
+ "ETHERTYPE_UBDEBUG": true,
+ "ETHERTYPE_UBDIAGLOOP": true,
+ "ETHERTYPE_UBDL": true,
+ "ETHERTYPE_UBNIU": true,
+ "ETHERTYPE_UBNMC": true,
+ "ETHERTYPE_VALID": true,
+ "ETHERTYPE_VARIAN": true,
+ "ETHERTYPE_VAXELN": true,
+ "ETHERTYPE_VEECO": true,
+ "ETHERTYPE_VEXP": true,
+ "ETHERTYPE_VGLAB": true,
+ "ETHERTYPE_VINES": true,
+ "ETHERTYPE_VINESECHO": true,
+ "ETHERTYPE_VINESLOOP": true,
+ "ETHERTYPE_VITAL": true,
+ "ETHERTYPE_VLAN": true,
+ "ETHERTYPE_VLTLMAN": true,
+ "ETHERTYPE_VPROD": true,
+ "ETHERTYPE_VURESERVED": true,
+ "ETHERTYPE_WATERLOO": true,
+ "ETHERTYPE_WELLFLEET": true,
+ "ETHERTYPE_X25": true,
+ "ETHERTYPE_X75": true,
+ "ETHERTYPE_XNSSM": true,
+ "ETHERTYPE_XTP": true,
+ "ETHER_ADDR_LEN": true,
+ "ETHER_ALIGN": true,
+ "ETHER_CRC_LEN": true,
+ "ETHER_CRC_POLY_BE": true,
+ "ETHER_CRC_POLY_LE": true,
+ "ETHER_HDR_LEN": true,
+ "ETHER_MAX_DIX_LEN": true,
+ "ETHER_MAX_LEN": true,
+ "ETHER_MAX_LEN_JUMBO": true,
+ "ETHER_MIN_LEN": true,
+ "ETHER_PPPOE_ENCAP_LEN": true,
+ "ETHER_TYPE_LEN": true,
+ "ETHER_VLAN_ENCAP_LEN": true,
+ "ETH_P_1588": true,
+ "ETH_P_8021Q": true,
+ "ETH_P_802_2": true,
+ "ETH_P_802_3": true,
+ "ETH_P_AARP": true,
+ "ETH_P_ALL": true,
+ "ETH_P_AOE": true,
+ "ETH_P_ARCNET": true,
+ "ETH_P_ARP": true,
+ "ETH_P_ATALK": true,
+ "ETH_P_ATMFATE": true,
+ "ETH_P_ATMMPOA": true,
+ "ETH_P_AX25": true,
+ "ETH_P_BPQ": true,
+ "ETH_P_CAIF": true,
+ "ETH_P_CAN": true,
+ "ETH_P_CONTROL": true,
+ "ETH_P_CUST": true,
+ "ETH_P_DDCMP": true,
+ "ETH_P_DEC": true,
+ "ETH_P_DIAG": true,
+ "ETH_P_DNA_DL": true,
+ "ETH_P_DNA_RC": true,
+ "ETH_P_DNA_RT": true,
+ "ETH_P_DSA": true,
+ "ETH_P_ECONET": true,
+ "ETH_P_EDSA": true,
+ "ETH_P_FCOE": true,
+ "ETH_P_FIP": true,
+ "ETH_P_HDLC": true,
+ "ETH_P_IEEE802154": true,
+ "ETH_P_IEEEPUP": true,
+ "ETH_P_IEEEPUPAT": true,
+ "ETH_P_IP": true,
+ "ETH_P_IPV6": true,
+ "ETH_P_IPX": true,
+ "ETH_P_IRDA": true,
+ "ETH_P_LAT": true,
+ "ETH_P_LINK_CTL": true,
+ "ETH_P_LOCALTALK": true,
+ "ETH_P_LOOP": true,
+ "ETH_P_MOBITEX": true,
+ "ETH_P_MPLS_MC": true,
+ "ETH_P_MPLS_UC": true,
+ "ETH_P_PAE": true,
+ "ETH_P_PAUSE": true,
+ "ETH_P_PHONET": true,
+ "ETH_P_PPPTALK": true,
+ "ETH_P_PPP_DISC": true,
+ "ETH_P_PPP_MP": true,
+ "ETH_P_PPP_SES": true,
+ "ETH_P_PUP": true,
+ "ETH_P_PUPAT": true,
+ "ETH_P_RARP": true,
+ "ETH_P_SCA": true,
+ "ETH_P_SLOW": true,
+ "ETH_P_SNAP": true,
+ "ETH_P_TEB": true,
+ "ETH_P_TIPC": true,
+ "ETH_P_TRAILER": true,
+ "ETH_P_TR_802_2": true,
+ "ETH_P_WAN_PPP": true,
+ "ETH_P_WCCP": true,
+ "ETH_P_X25": true,
+ "ETIME": true,
+ "ETIMEDOUT": true,
+ "ETOOMANYREFS": true,
+ "ETXTBSY": true,
+ "EUCLEAN": true,
+ "EUNATCH": true,
+ "EUSERS": true,
+ "EVFILT_AIO": true,
+ "EVFILT_FS": true,
+ "EVFILT_LIO": true,
+ "EVFILT_MACHPORT": true,
+ "EVFILT_PROC": true,
+ "EVFILT_READ": true,
+ "EVFILT_SIGNAL": true,
+ "EVFILT_SYSCOUNT": true,
+ "EVFILT_THREADMARKER": true,
+ "EVFILT_TIMER": true,
+ "EVFILT_USER": true,
+ "EVFILT_VM": true,
+ "EVFILT_VNODE": true,
+ "EVFILT_WRITE": true,
+ "EV_ADD": true,
+ "EV_CLEAR": true,
+ "EV_DELETE": true,
+ "EV_DISABLE": true,
+ "EV_DISPATCH": true,
+ "EV_DROP": true,
+ "EV_ENABLE": true,
+ "EV_EOF": true,
+ "EV_ERROR": true,
+ "EV_FLAG0": true,
+ "EV_FLAG1": true,
+ "EV_ONESHOT": true,
+ "EV_OOBAND": true,
+ "EV_POLL": true,
+ "EV_RECEIPT": true,
+ "EV_SYSFLAGS": true,
+ "EWINDOWS": true,
+ "EWOULDBLOCK": true,
+ "EXDEV": true,
+ "EXFULL": true,
+ "EXTA": true,
+ "EXTB": true,
+ "EXTPROC": true,
+ "Environ": true,
+ "EpollCreate": true,
+ "EpollCreate1": true,
+ "EpollCtl": true,
+ "EpollEvent": true,
+ "EpollWait": true,
+ "Errno": true,
+ "EscapeArg": true,
+ "Exchangedata": true,
+ "Exec": true,
+ "Exit": true,
+ "ExitProcess": true,
+ "FD_CLOEXEC": true,
+ "FD_SETSIZE": true,
+ "FILE_ACTION_ADDED": true,
+ "FILE_ACTION_MODIFIED": true,
+ "FILE_ACTION_REMOVED": true,
+ "FILE_ACTION_RENAMED_NEW_NAME": true,
+ "FILE_ACTION_RENAMED_OLD_NAME": true,
+ "FILE_APPEND_DATA": true,
+ "FILE_ATTRIBUTE_ARCHIVE": true,
+ "FILE_ATTRIBUTE_DIRECTORY": true,
+ "FILE_ATTRIBUTE_HIDDEN": true,
+ "FILE_ATTRIBUTE_NORMAL": true,
+ "FILE_ATTRIBUTE_READONLY": true,
+ "FILE_ATTRIBUTE_REPARSE_POINT": true,
+ "FILE_ATTRIBUTE_SYSTEM": true,
+ "FILE_BEGIN": true,
+ "FILE_CURRENT": true,
+ "FILE_END": true,
+ "FILE_FLAG_BACKUP_SEMANTICS": true,
+ "FILE_FLAG_OPEN_REPARSE_POINT": true,
+ "FILE_FLAG_OVERLAPPED": true,
+ "FILE_LIST_DIRECTORY": true,
+ "FILE_MAP_COPY": true,
+ "FILE_MAP_EXECUTE": true,
+ "FILE_MAP_READ": true,
+ "FILE_MAP_WRITE": true,
+ "FILE_NOTIFY_CHANGE_ATTRIBUTES": true,
+ "FILE_NOTIFY_CHANGE_CREATION": true,
+ "FILE_NOTIFY_CHANGE_DIR_NAME": true,
+ "FILE_NOTIFY_CHANGE_FILE_NAME": true,
+ "FILE_NOTIFY_CHANGE_LAST_ACCESS": true,
+ "FILE_NOTIFY_CHANGE_LAST_WRITE": true,
+ "FILE_NOTIFY_CHANGE_SIZE": true,
+ "FILE_SHARE_DELETE": true,
+ "FILE_SHARE_READ": true,
+ "FILE_SHARE_WRITE": true,
+ "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": true,
+ "FILE_SKIP_SET_EVENT_ON_HANDLE": true,
+ "FILE_TYPE_CHAR": true,
+ "FILE_TYPE_DISK": true,
+ "FILE_TYPE_PIPE": true,
+ "FILE_TYPE_REMOTE": true,
+ "FILE_TYPE_UNKNOWN": true,
+ "FILE_WRITE_ATTRIBUTES": true,
+ "FLUSHO": true,
+ "FORMAT_MESSAGE_ALLOCATE_BUFFER": true,
+ "FORMAT_MESSAGE_ARGUMENT_ARRAY": true,
+ "FORMAT_MESSAGE_FROM_HMODULE": true,
+ "FORMAT_MESSAGE_FROM_STRING": true,
+ "FORMAT_MESSAGE_FROM_SYSTEM": true,
+ "FORMAT_MESSAGE_IGNORE_INSERTS": true,
+ "FORMAT_MESSAGE_MAX_WIDTH_MASK": true,
+ "FSCTL_GET_REPARSE_POINT": true,
+ "F_ADDFILESIGS": true,
+ "F_ADDSIGS": true,
+ "F_ALLOCATEALL": true,
+ "F_ALLOCATECONTIG": true,
+ "F_CANCEL": true,
+ "F_CHKCLEAN": true,
+ "F_CLOSEM": true,
+ "F_DUP2FD": true,
+ "F_DUP2FD_CLOEXEC": true,
+ "F_DUPFD": true,
+ "F_DUPFD_CLOEXEC": true,
+ "F_EXLCK": true,
+ "F_FLUSH_DATA": true,
+ "F_FREEZE_FS": true,
+ "F_FSCTL": true,
+ "F_FSDIRMASK": true,
+ "F_FSIN": true,
+ "F_FSINOUT": true,
+ "F_FSOUT": true,
+ "F_FSPRIV": true,
+ "F_FSVOID": true,
+ "F_FULLFSYNC": true,
+ "F_GETFD": true,
+ "F_GETFL": true,
+ "F_GETLEASE": true,
+ "F_GETLK": true,
+ "F_GETLK64": true,
+ "F_GETLKPID": true,
+ "F_GETNOSIGPIPE": true,
+ "F_GETOWN": true,
+ "F_GETOWN_EX": true,
+ "F_GETPATH": true,
+ "F_GETPATH_MTMINFO": true,
+ "F_GETPIPE_SZ": true,
+ "F_GETPROTECTIONCLASS": true,
+ "F_GETSIG": true,
+ "F_GLOBAL_NOCACHE": true,
+ "F_LOCK": true,
+ "F_LOG2PHYS": true,
+ "F_LOG2PHYS_EXT": true,
+ "F_MARKDEPENDENCY": true,
+ "F_MAXFD": true,
+ "F_NOCACHE": true,
+ "F_NODIRECT": true,
+ "F_NOTIFY": true,
+ "F_OGETLK": true,
+ "F_OK": true,
+ "F_OSETLK": true,
+ "F_OSETLKW": true,
+ "F_PARAM_MASK": true,
+ "F_PARAM_MAX": true,
+ "F_PATHPKG_CHECK": true,
+ "F_PEOFPOSMODE": true,
+ "F_PREALLOCATE": true,
+ "F_RDADVISE": true,
+ "F_RDAHEAD": true,
+ "F_RDLCK": true,
+ "F_READAHEAD": true,
+ "F_READBOOTSTRAP": true,
+ "F_SETBACKINGSTORE": true,
+ "F_SETFD": true,
+ "F_SETFL": true,
+ "F_SETLEASE": true,
+ "F_SETLK": true,
+ "F_SETLK64": true,
+ "F_SETLKW": true,
+ "F_SETLKW64": true,
+ "F_SETLK_REMOTE": true,
+ "F_SETNOSIGPIPE": true,
+ "F_SETOWN": true,
+ "F_SETOWN_EX": true,
+ "F_SETPIPE_SZ": true,
+ "F_SETPROTECTIONCLASS": true,
+ "F_SETSIG": true,
+ "F_SETSIZE": true,
+ "F_SHLCK": true,
+ "F_TEST": true,
+ "F_THAW_FS": true,
+ "F_TLOCK": true,
+ "F_ULOCK": true,
+ "F_UNLCK": true,
+ "F_UNLCKSYS": true,
+ "F_VOLPOSMODE": true,
+ "F_WRITEBOOTSTRAP": true,
+ "F_WRLCK": true,
+ "Faccessat": true,
+ "Fallocate": true,
+ "Fbootstraptransfer_t": true,
+ "Fchdir": true,
+ "Fchflags": true,
+ "Fchmod": true,
+ "Fchmodat": true,
+ "Fchown": true,
+ "Fchownat": true,
+ "FcntlFlock": true,
+ "FdSet": true,
+ "Fdatasync": true,
+ "FileNotifyInformation": true,
+ "Filetime": true,
+ "FindClose": true,
+ "FindFirstFile": true,
+ "FindNextFile": true,
+ "Flock": true,
+ "Flock_t": true,
+ "FlushBpf": true,
+ "FlushFileBuffers": true,
+ "FlushViewOfFile": true,
+ "ForkExec": true,
+ "ForkLock": true,
+ "FormatMessage": true,
+ "Fpathconf": true,
+ "FreeAddrInfoW": true,
+ "FreeEnvironmentStrings": true,
+ "FreeLibrary": true,
+ "Fsid": true,
+ "Fstat": true,
+ "Fstatat": true,
+ "Fstatfs": true,
+ "Fstore_t": true,
+ "Fsync": true,
+ "Ftruncate": true,
+ "FullPath": true,
+ "Futimes": true,
+ "Futimesat": true,
+ "GENERIC_ALL": true,
+ "GENERIC_EXECUTE": true,
+ "GENERIC_READ": true,
+ "GENERIC_WRITE": true,
+ "GUID": true,
+ "GetAcceptExSockaddrs": true,
+ "GetAdaptersInfo": true,
+ "GetAddrInfoW": true,
+ "GetCommandLine": true,
+ "GetComputerName": true,
+ "GetConsoleMode": true,
+ "GetCurrentDirectory": true,
+ "GetCurrentProcess": true,
+ "GetEnvironmentStrings": true,
+ "GetEnvironmentVariable": true,
+ "GetExitCodeProcess": true,
+ "GetFileAttributes": true,
+ "GetFileAttributesEx": true,
+ "GetFileExInfoStandard": true,
+ "GetFileExMaxInfoLevel": true,
+ "GetFileInformationByHandle": true,
+ "GetFileType": true,
+ "GetFullPathName": true,
+ "GetHostByName": true,
+ "GetIfEntry": true,
+ "GetLastError": true,
+ "GetLengthSid": true,
+ "GetLongPathName": true,
+ "GetProcAddress": true,
+ "GetProcessTimes": true,
+ "GetProtoByName": true,
+ "GetQueuedCompletionStatus": true,
+ "GetServByName": true,
+ "GetShortPathName": true,
+ "GetStartupInfo": true,
+ "GetStdHandle": true,
+ "GetSystemTimeAsFileTime": true,
+ "GetTempPath": true,
+ "GetTimeZoneInformation": true,
+ "GetTokenInformation": true,
+ "GetUserNameEx": true,
+ "GetUserProfileDirectory": true,
+ "GetVersion": true,
+ "Getcwd": true,
+ "Getdents": true,
+ "Getdirentries": true,
+ "Getdtablesize": true,
+ "Getegid": true,
+ "Getenv": true,
+ "Geteuid": true,
+ "Getfsstat": true,
+ "Getgid": true,
+ "Getgroups": true,
+ "Getpagesize": true,
+ "Getpeername": true,
+ "Getpgid": true,
+ "Getpgrp": true,
+ "Getpid": true,
+ "Getppid": true,
+ "Getpriority": true,
+ "Getrlimit": true,
+ "Getrusage": true,
+ "Getsid": true,
+ "Getsockname": true,
+ "Getsockopt": true,
+ "GetsockoptByte": true,
+ "GetsockoptICMPv6Filter": true,
+ "GetsockoptIPMreq": true,
+ "GetsockoptIPMreqn": true,
+ "GetsockoptIPv6MTUInfo": true,
+ "GetsockoptIPv6Mreq": true,
+ "GetsockoptInet4Addr": true,
+ "GetsockoptInt": true,
+ "GetsockoptUcred": true,
+ "Gettid": true,
+ "Gettimeofday": true,
+ "Getuid": true,
+ "Getwd": true,
+ "Getxattr": true,
+ "HANDLE_FLAG_INHERIT": true,
+ "HKEY_CLASSES_ROOT": true,
+ "HKEY_CURRENT_CONFIG": true,
+ "HKEY_CURRENT_USER": true,
+ "HKEY_DYN_DATA": true,
+ "HKEY_LOCAL_MACHINE": true,
+ "HKEY_PERFORMANCE_DATA": true,
+ "HKEY_USERS": true,
+ "HUPCL": true,
+ "Handle": true,
+ "Hostent": true,
+ "ICANON": true,
+ "ICMP6_FILTER": true,
+ "ICMPV6_FILTER": true,
+ "ICMPv6Filter": true,
+ "ICRNL": true,
+ "IEXTEN": true,
+ "IFAN_ARRIVAL": true,
+ "IFAN_DEPARTURE": true,
+ "IFA_ADDRESS": true,
+ "IFA_ANYCAST": true,
+ "IFA_BROADCAST": true,
+ "IFA_CACHEINFO": true,
+ "IFA_F_DADFAILED": true,
+ "IFA_F_DEPRECATED": true,
+ "IFA_F_HOMEADDRESS": true,
+ "IFA_F_NODAD": true,
+ "IFA_F_OPTIMISTIC": true,
+ "IFA_F_PERMANENT": true,
+ "IFA_F_SECONDARY": true,
+ "IFA_F_TEMPORARY": true,
+ "IFA_F_TENTATIVE": true,
+ "IFA_LABEL": true,
+ "IFA_LOCAL": true,
+ "IFA_MAX": true,
+ "IFA_MULTICAST": true,
+ "IFA_ROUTE": true,
+ "IFA_UNSPEC": true,
+ "IFF_ALLMULTI": true,
+ "IFF_ALTPHYS": true,
+ "IFF_AUTOMEDIA": true,
+ "IFF_BROADCAST": true,
+ "IFF_CANTCHANGE": true,
+ "IFF_CANTCONFIG": true,
+ "IFF_DEBUG": true,
+ "IFF_DRV_OACTIVE": true,
+ "IFF_DRV_RUNNING": true,
+ "IFF_DYING": true,
+ "IFF_DYNAMIC": true,
+ "IFF_LINK0": true,
+ "IFF_LINK1": true,
+ "IFF_LINK2": true,
+ "IFF_LOOPBACK": true,
+ "IFF_MASTER": true,
+ "IFF_MONITOR": true,
+ "IFF_MULTICAST": true,
+ "IFF_NOARP": true,
+ "IFF_NOTRAILERS": true,
+ "IFF_NO_PI": true,
+ "IFF_OACTIVE": true,
+ "IFF_ONE_QUEUE": true,
+ "IFF_POINTOPOINT": true,
+ "IFF_POINTTOPOINT": true,
+ "IFF_PORTSEL": true,
+ "IFF_PPROMISC": true,
+ "IFF_PROMISC": true,
+ "IFF_RENAMING": true,
+ "IFF_RUNNING": true,
+ "IFF_SIMPLEX": true,
+ "IFF_SLAVE": true,
+ "IFF_SMART": true,
+ "IFF_STATICARP": true,
+ "IFF_TAP": true,
+ "IFF_TUN": true,
+ "IFF_TUN_EXCL": true,
+ "IFF_UP": true,
+ "IFF_VNET_HDR": true,
+ "IFLA_ADDRESS": true,
+ "IFLA_BROADCAST": true,
+ "IFLA_COST": true,
+ "IFLA_IFALIAS": true,
+ "IFLA_IFNAME": true,
+ "IFLA_LINK": true,
+ "IFLA_LINKINFO": true,
+ "IFLA_LINKMODE": true,
+ "IFLA_MAP": true,
+ "IFLA_MASTER": true,
+ "IFLA_MAX": true,
+ "IFLA_MTU": true,
+ "IFLA_NET_NS_PID": true,
+ "IFLA_OPERSTATE": true,
+ "IFLA_PRIORITY": true,
+ "IFLA_PROTINFO": true,
+ "IFLA_QDISC": true,
+ "IFLA_STATS": true,
+ "IFLA_TXQLEN": true,
+ "IFLA_UNSPEC": true,
+ "IFLA_WEIGHT": true,
+ "IFLA_WIRELESS": true,
+ "IFNAMSIZ": true,
+ "IFT_1822": true,
+ "IFT_A12MPPSWITCH": true,
+ "IFT_AAL2": true,
+ "IFT_AAL5": true,
+ "IFT_ADSL": true,
+ "IFT_AFLANE8023": true,
+ "IFT_AFLANE8025": true,
+ "IFT_ARAP": true,
+ "IFT_ARCNET": true,
+ "IFT_ARCNETPLUS": true,
+ "IFT_ASYNC": true,
+ "IFT_ATM": true,
+ "IFT_ATMDXI": true,
+ "IFT_ATMFUNI": true,
+ "IFT_ATMIMA": true,
+ "IFT_ATMLOGICAL": true,
+ "IFT_ATMRADIO": true,
+ "IFT_ATMSUBINTERFACE": true,
+ "IFT_ATMVCIENDPT": true,
+ "IFT_ATMVIRTUAL": true,
+ "IFT_BGPPOLICYACCOUNTING": true,
+ "IFT_BLUETOOTH": true,
+ "IFT_BRIDGE": true,
+ "IFT_BSC": true,
+ "IFT_CARP": true,
+ "IFT_CCTEMUL": true,
+ "IFT_CELLULAR": true,
+ "IFT_CEPT": true,
+ "IFT_CES": true,
+ "IFT_CHANNEL": true,
+ "IFT_CNR": true,
+ "IFT_COFFEE": true,
+ "IFT_COMPOSITELINK": true,
+ "IFT_DCN": true,
+ "IFT_DIGITALPOWERLINE": true,
+ "IFT_DIGITALWRAPPEROVERHEADCHANNEL": true,
+ "IFT_DLSW": true,
+ "IFT_DOCSCABLEDOWNSTREAM": true,
+ "IFT_DOCSCABLEMACLAYER": true,
+ "IFT_DOCSCABLEUPSTREAM": true,
+ "IFT_DOCSCABLEUPSTREAMCHANNEL": true,
+ "IFT_DS0": true,
+ "IFT_DS0BUNDLE": true,
+ "IFT_DS1FDL": true,
+ "IFT_DS3": true,
+ "IFT_DTM": true,
+ "IFT_DUMMY": true,
+ "IFT_DVBASILN": true,
+ "IFT_DVBASIOUT": true,
+ "IFT_DVBRCCDOWNSTREAM": true,
+ "IFT_DVBRCCMACLAYER": true,
+ "IFT_DVBRCCUPSTREAM": true,
+ "IFT_ECONET": true,
+ "IFT_ENC": true,
+ "IFT_EON": true,
+ "IFT_EPLRS": true,
+ "IFT_ESCON": true,
+ "IFT_ETHER": true,
+ "IFT_FAITH": true,
+ "IFT_FAST": true,
+ "IFT_FASTETHER": true,
+ "IFT_FASTETHERFX": true,
+ "IFT_FDDI": true,
+ "IFT_FIBRECHANNEL": true,
+ "IFT_FRAMERELAYINTERCONNECT": true,
+ "IFT_FRAMERELAYMPI": true,
+ "IFT_FRDLCIENDPT": true,
+ "IFT_FRELAY": true,
+ "IFT_FRELAYDCE": true,
+ "IFT_FRF16MFRBUNDLE": true,
+ "IFT_FRFORWARD": true,
+ "IFT_G703AT2MB": true,
+ "IFT_G703AT64K": true,
+ "IFT_GIF": true,
+ "IFT_GIGABITETHERNET": true,
+ "IFT_GR303IDT": true,
+ "IFT_GR303RDT": true,
+ "IFT_H323GATEKEEPER": true,
+ "IFT_H323PROXY": true,
+ "IFT_HDH1822": true,
+ "IFT_HDLC": true,
+ "IFT_HDSL2": true,
+ "IFT_HIPERLAN2": true,
+ "IFT_HIPPI": true,
+ "IFT_HIPPIINTERFACE": true,
+ "IFT_HOSTPAD": true,
+ "IFT_HSSI": true,
+ "IFT_HY": true,
+ "IFT_IBM370PARCHAN": true,
+ "IFT_IDSL": true,
+ "IFT_IEEE1394": true,
+ "IFT_IEEE80211": true,
+ "IFT_IEEE80212": true,
+ "IFT_IEEE8023ADLAG": true,
+ "IFT_IFGSN": true,
+ "IFT_IMT": true,
+ "IFT_INFINIBAND": true,
+ "IFT_INTERLEAVE": true,
+ "IFT_IP": true,
+ "IFT_IPFORWARD": true,
+ "IFT_IPOVERATM": true,
+ "IFT_IPOVERCDLC": true,
+ "IFT_IPOVERCLAW": true,
+ "IFT_IPSWITCH": true,
+ "IFT_IPXIP": true,
+ "IFT_ISDN": true,
+ "IFT_ISDNBASIC": true,
+ "IFT_ISDNPRIMARY": true,
+ "IFT_ISDNS": true,
+ "IFT_ISDNU": true,
+ "IFT_ISO88022LLC": true,
+ "IFT_ISO88023": true,
+ "IFT_ISO88024": true,
+ "IFT_ISO88025": true,
+ "IFT_ISO88025CRFPINT": true,
+ "IFT_ISO88025DTR": true,
+ "IFT_ISO88025FIBER": true,
+ "IFT_ISO88026": true,
+ "IFT_ISUP": true,
+ "IFT_L2VLAN": true,
+ "IFT_L3IPVLAN": true,
+ "IFT_L3IPXVLAN": true,
+ "IFT_LAPB": true,
+ "IFT_LAPD": true,
+ "IFT_LAPF": true,
+ "IFT_LINEGROUP": true,
+ "IFT_LOCALTALK": true,
+ "IFT_LOOP": true,
+ "IFT_MEDIAMAILOVERIP": true,
+ "IFT_MFSIGLINK": true,
+ "IFT_MIOX25": true,
+ "IFT_MODEM": true,
+ "IFT_MPC": true,
+ "IFT_MPLS": true,
+ "IFT_MPLSTUNNEL": true,
+ "IFT_MSDSL": true,
+ "IFT_MVL": true,
+ "IFT_MYRINET": true,
+ "IFT_NFAS": true,
+ "IFT_NSIP": true,
+ "IFT_OPTICALCHANNEL": true,
+ "IFT_OPTICALTRANSPORT": true,
+ "IFT_OTHER": true,
+ "IFT_P10": true,
+ "IFT_P80": true,
+ "IFT_PARA": true,
+ "IFT_PDP": true,
+ "IFT_PFLOG": true,
+ "IFT_PFLOW": true,
+ "IFT_PFSYNC": true,
+ "IFT_PLC": true,
+ "IFT_PON155": true,
+ "IFT_PON622": true,
+ "IFT_POS": true,
+ "IFT_PPP": true,
+ "IFT_PPPMULTILINKBUNDLE": true,
+ "IFT_PROPATM": true,
+ "IFT_PROPBWAP2MP": true,
+ "IFT_PROPCNLS": true,
+ "IFT_PROPDOCSWIRELESSDOWNSTREAM": true,
+ "IFT_PROPDOCSWIRELESSMACLAYER": true,
+ "IFT_PROPDOCSWIRELESSUPSTREAM": true,
+ "IFT_PROPMUX": true,
+ "IFT_PROPVIRTUAL": true,
+ "IFT_PROPWIRELESSP2P": true,
+ "IFT_PTPSERIAL": true,
+ "IFT_PVC": true,
+ "IFT_Q2931": true,
+ "IFT_QLLC": true,
+ "IFT_RADIOMAC": true,
+ "IFT_RADSL": true,
+ "IFT_REACHDSL": true,
+ "IFT_RFC1483": true,
+ "IFT_RS232": true,
+ "IFT_RSRB": true,
+ "IFT_SDLC": true,
+ "IFT_SDSL": true,
+ "IFT_SHDSL": true,
+ "IFT_SIP": true,
+ "IFT_SIPSIG": true,
+ "IFT_SIPTG": true,
+ "IFT_SLIP": true,
+ "IFT_SMDSDXI": true,
+ "IFT_SMDSICIP": true,
+ "IFT_SONET": true,
+ "IFT_SONETOVERHEADCHANNEL": true,
+ "IFT_SONETPATH": true,
+ "IFT_SONETVT": true,
+ "IFT_SRP": true,
+ "IFT_SS7SIGLINK": true,
+ "IFT_STACKTOSTACK": true,
+ "IFT_STARLAN": true,
+ "IFT_STF": true,
+ "IFT_T1": true,
+ "IFT_TDLC": true,
+ "IFT_TELINK": true,
+ "IFT_TERMPAD": true,
+ "IFT_TR008": true,
+ "IFT_TRANSPHDLC": true,
+ "IFT_TUNNEL": true,
+ "IFT_ULTRA": true,
+ "IFT_USB": true,
+ "IFT_V11": true,
+ "IFT_V35": true,
+ "IFT_V36": true,
+ "IFT_V37": true,
+ "IFT_VDSL": true,
+ "IFT_VIRTUALIPADDRESS": true,
+ "IFT_VIRTUALTG": true,
+ "IFT_VOICEDID": true,
+ "IFT_VOICEEM": true,
+ "IFT_VOICEEMFGD": true,
+ "IFT_VOICEENCAP": true,
+ "IFT_VOICEFGDEANA": true,
+ "IFT_VOICEFXO": true,
+ "IFT_VOICEFXS": true,
+ "IFT_VOICEOVERATM": true,
+ "IFT_VOICEOVERCABLE": true,
+ "IFT_VOICEOVERFRAMERELAY": true,
+ "IFT_VOICEOVERIP": true,
+ "IFT_X213": true,
+ "IFT_X25": true,
+ "IFT_X25DDN": true,
+ "IFT_X25HUNTGROUP": true,
+ "IFT_X25MLP": true,
+ "IFT_X25PLE": true,
+ "IFT_XETHER": true,
+ "IGNBRK": true,
+ "IGNCR": true,
+ "IGNORE": true,
+ "IGNPAR": true,
+ "IMAXBEL": true,
+ "INFINITE": true,
+ "INLCR": true,
+ "INPCK": true,
+ "INVALID_FILE_ATTRIBUTES": true,
+ "IN_ACCESS": true,
+ "IN_ALL_EVENTS": true,
+ "IN_ATTRIB": true,
+ "IN_CLASSA_HOST": true,
+ "IN_CLASSA_MAX": true,
+ "IN_CLASSA_NET": true,
+ "IN_CLASSA_NSHIFT": true,
+ "IN_CLASSB_HOST": true,
+ "IN_CLASSB_MAX": true,
+ "IN_CLASSB_NET": true,
+ "IN_CLASSB_NSHIFT": true,
+ "IN_CLASSC_HOST": true,
+ "IN_CLASSC_NET": true,
+ "IN_CLASSC_NSHIFT": true,
+ "IN_CLASSD_HOST": true,
+ "IN_CLASSD_NET": true,
+ "IN_CLASSD_NSHIFT": true,
+ "IN_CLOEXEC": true,
+ "IN_CLOSE": true,
+ "IN_CLOSE_NOWRITE": true,
+ "IN_CLOSE_WRITE": true,
+ "IN_CREATE": true,
+ "IN_DELETE": true,
+ "IN_DELETE_SELF": true,
+ "IN_DONT_FOLLOW": true,
+ "IN_EXCL_UNLINK": true,
+ "IN_IGNORED": true,
+ "IN_ISDIR": true,
+ "IN_LINKLOCALNETNUM": true,
+ "IN_LOOPBACKNET": true,
+ "IN_MASK_ADD": true,
+ "IN_MODIFY": true,
+ "IN_MOVE": true,
+ "IN_MOVED_FROM": true,
+ "IN_MOVED_TO": true,
+ "IN_MOVE_SELF": true,
+ "IN_NONBLOCK": true,
+ "IN_ONESHOT": true,
+ "IN_ONLYDIR": true,
+ "IN_OPEN": true,
+ "IN_Q_OVERFLOW": true,
+ "IN_RFC3021_HOST": true,
+ "IN_RFC3021_MASK": true,
+ "IN_RFC3021_NET": true,
+ "IN_RFC3021_NSHIFT": true,
+ "IN_UNMOUNT": true,
+ "IOC_IN": true,
+ "IOC_INOUT": true,
+ "IOC_OUT": true,
+ "IOC_VENDOR": true,
+ "IOC_WS2": true,
+ "IO_REPARSE_TAG_SYMLINK": true,
+ "IPMreq": true,
+ "IPMreqn": true,
+ "IPPROTO_3PC": true,
+ "IPPROTO_ADFS": true,
+ "IPPROTO_AH": true,
+ "IPPROTO_AHIP": true,
+ "IPPROTO_APES": true,
+ "IPPROTO_ARGUS": true,
+ "IPPROTO_AX25": true,
+ "IPPROTO_BHA": true,
+ "IPPROTO_BLT": true,
+ "IPPROTO_BRSATMON": true,
+ "IPPROTO_CARP": true,
+ "IPPROTO_CFTP": true,
+ "IPPROTO_CHAOS": true,
+ "IPPROTO_CMTP": true,
+ "IPPROTO_COMP": true,
+ "IPPROTO_CPHB": true,
+ "IPPROTO_CPNX": true,
+ "IPPROTO_DCCP": true,
+ "IPPROTO_DDP": true,
+ "IPPROTO_DGP": true,
+ "IPPROTO_DIVERT": true,
+ "IPPROTO_DIVERT_INIT": true,
+ "IPPROTO_DIVERT_RESP": true,
+ "IPPROTO_DONE": true,
+ "IPPROTO_DSTOPTS": true,
+ "IPPROTO_EGP": true,
+ "IPPROTO_EMCON": true,
+ "IPPROTO_ENCAP": true,
+ "IPPROTO_EON": true,
+ "IPPROTO_ESP": true,
+ "IPPROTO_ETHERIP": true,
+ "IPPROTO_FRAGMENT": true,
+ "IPPROTO_GGP": true,
+ "IPPROTO_GMTP": true,
+ "IPPROTO_GRE": true,
+ "IPPROTO_HELLO": true,
+ "IPPROTO_HMP": true,
+ "IPPROTO_HOPOPTS": true,
+ "IPPROTO_ICMP": true,
+ "IPPROTO_ICMPV6": true,
+ "IPPROTO_IDP": true,
+ "IPPROTO_IDPR": true,
+ "IPPROTO_IDRP": true,
+ "IPPROTO_IGMP": true,
+ "IPPROTO_IGP": true,
+ "IPPROTO_IGRP": true,
+ "IPPROTO_IL": true,
+ "IPPROTO_INLSP": true,
+ "IPPROTO_INP": true,
+ "IPPROTO_IP": true,
+ "IPPROTO_IPCOMP": true,
+ "IPPROTO_IPCV": true,
+ "IPPROTO_IPEIP": true,
+ "IPPROTO_IPIP": true,
+ "IPPROTO_IPPC": true,
+ "IPPROTO_IPV4": true,
+ "IPPROTO_IPV6": true,
+ "IPPROTO_IPV6_ICMP": true,
+ "IPPROTO_IRTP": true,
+ "IPPROTO_KRYPTOLAN": true,
+ "IPPROTO_LARP": true,
+ "IPPROTO_LEAF1": true,
+ "IPPROTO_LEAF2": true,
+ "IPPROTO_MAX": true,
+ "IPPROTO_MAXID": true,
+ "IPPROTO_MEAS": true,
+ "IPPROTO_MH": true,
+ "IPPROTO_MHRP": true,
+ "IPPROTO_MICP": true,
+ "IPPROTO_MOBILE": true,
+ "IPPROTO_MPLS": true,
+ "IPPROTO_MTP": true,
+ "IPPROTO_MUX": true,
+ "IPPROTO_ND": true,
+ "IPPROTO_NHRP": true,
+ "IPPROTO_NONE": true,
+ "IPPROTO_NSP": true,
+ "IPPROTO_NVPII": true,
+ "IPPROTO_OLD_DIVERT": true,
+ "IPPROTO_OSPFIGP": true,
+ "IPPROTO_PFSYNC": true,
+ "IPPROTO_PGM": true,
+ "IPPROTO_PIGP": true,
+ "IPPROTO_PIM": true,
+ "IPPROTO_PRM": true,
+ "IPPROTO_PUP": true,
+ "IPPROTO_PVP": true,
+ "IPPROTO_RAW": true,
+ "IPPROTO_RCCMON": true,
+ "IPPROTO_RDP": true,
+ "IPPROTO_ROUTING": true,
+ "IPPROTO_RSVP": true,
+ "IPPROTO_RVD": true,
+ "IPPROTO_SATEXPAK": true,
+ "IPPROTO_SATMON": true,
+ "IPPROTO_SCCSP": true,
+ "IPPROTO_SCTP": true,
+ "IPPROTO_SDRP": true,
+ "IPPROTO_SEND": true,
+ "IPPROTO_SEP": true,
+ "IPPROTO_SKIP": true,
+ "IPPROTO_SPACER": true,
+ "IPPROTO_SRPC": true,
+ "IPPROTO_ST": true,
+ "IPPROTO_SVMTP": true,
+ "IPPROTO_SWIPE": true,
+ "IPPROTO_TCF": true,
+ "IPPROTO_TCP": true,
+ "IPPROTO_TLSP": true,
+ "IPPROTO_TP": true,
+ "IPPROTO_TPXX": true,
+ "IPPROTO_TRUNK1": true,
+ "IPPROTO_TRUNK2": true,
+ "IPPROTO_TTP": true,
+ "IPPROTO_UDP": true,
+ "IPPROTO_UDPLITE": true,
+ "IPPROTO_VINES": true,
+ "IPPROTO_VISA": true,
+ "IPPROTO_VMTP": true,
+ "IPPROTO_VRRP": true,
+ "IPPROTO_WBEXPAK": true,
+ "IPPROTO_WBMON": true,
+ "IPPROTO_WSN": true,
+ "IPPROTO_XNET": true,
+ "IPPROTO_XTP": true,
+ "IPV6_2292DSTOPTS": true,
+ "IPV6_2292HOPLIMIT": true,
+ "IPV6_2292HOPOPTS": true,
+ "IPV6_2292NEXTHOP": true,
+ "IPV6_2292PKTINFO": true,
+ "IPV6_2292PKTOPTIONS": true,
+ "IPV6_2292RTHDR": true,
+ "IPV6_ADDRFORM": true,
+ "IPV6_ADD_MEMBERSHIP": true,
+ "IPV6_AUTHHDR": true,
+ "IPV6_AUTH_LEVEL": true,
+ "IPV6_AUTOFLOWLABEL": true,
+ "IPV6_BINDANY": true,
+ "IPV6_BINDV6ONLY": true,
+ "IPV6_BOUND_IF": true,
+ "IPV6_CHECKSUM": true,
+ "IPV6_DEFAULT_MULTICAST_HOPS": true,
+ "IPV6_DEFAULT_MULTICAST_LOOP": true,
+ "IPV6_DEFHLIM": true,
+ "IPV6_DONTFRAG": true,
+ "IPV6_DROP_MEMBERSHIP": true,
+ "IPV6_DSTOPTS": true,
+ "IPV6_ESP_NETWORK_LEVEL": true,
+ "IPV6_ESP_TRANS_LEVEL": true,
+ "IPV6_FAITH": true,
+ "IPV6_FLOWINFO_MASK": true,
+ "IPV6_FLOWLABEL_MASK": true,
+ "IPV6_FRAGTTL": true,
+ "IPV6_FW_ADD": true,
+ "IPV6_FW_DEL": true,
+ "IPV6_FW_FLUSH": true,
+ "IPV6_FW_GET": true,
+ "IPV6_FW_ZERO": true,
+ "IPV6_HLIMDEC": true,
+ "IPV6_HOPLIMIT": true,
+ "IPV6_HOPOPTS": true,
+ "IPV6_IPCOMP_LEVEL": true,
+ "IPV6_IPSEC_POLICY": true,
+ "IPV6_JOIN_ANYCAST": true,
+ "IPV6_JOIN_GROUP": true,
+ "IPV6_LEAVE_ANYCAST": true,
+ "IPV6_LEAVE_GROUP": true,
+ "IPV6_MAXHLIM": true,
+ "IPV6_MAXOPTHDR": true,
+ "IPV6_MAXPACKET": true,
+ "IPV6_MAX_GROUP_SRC_FILTER": true,
+ "IPV6_MAX_MEMBERSHIPS": true,
+ "IPV6_MAX_SOCK_SRC_FILTER": true,
+ "IPV6_MIN_MEMBERSHIPS": true,
+ "IPV6_MMTU": true,
+ "IPV6_MSFILTER": true,
+ "IPV6_MTU": true,
+ "IPV6_MTU_DISCOVER": true,
+ "IPV6_MULTICAST_HOPS": true,
+ "IPV6_MULTICAST_IF": true,
+ "IPV6_MULTICAST_LOOP": true,
+ "IPV6_NEXTHOP": true,
+ "IPV6_OPTIONS": true,
+ "IPV6_PATHMTU": true,
+ "IPV6_PIPEX": true,
+ "IPV6_PKTINFO": true,
+ "IPV6_PMTUDISC_DO": true,
+ "IPV6_PMTUDISC_DONT": true,
+ "IPV6_PMTUDISC_PROBE": true,
+ "IPV6_PMTUDISC_WANT": true,
+ "IPV6_PORTRANGE": true,
+ "IPV6_PORTRANGE_DEFAULT": true,
+ "IPV6_PORTRANGE_HIGH": true,
+ "IPV6_PORTRANGE_LOW": true,
+ "IPV6_PREFER_TEMPADDR": true,
+ "IPV6_RECVDSTOPTS": true,
+ "IPV6_RECVDSTPORT": true,
+ "IPV6_RECVERR": true,
+ "IPV6_RECVHOPLIMIT": true,
+ "IPV6_RECVHOPOPTS": true,
+ "IPV6_RECVPATHMTU": true,
+ "IPV6_RECVPKTINFO": true,
+ "IPV6_RECVRTHDR": true,
+ "IPV6_RECVTCLASS": true,
+ "IPV6_ROUTER_ALERT": true,
+ "IPV6_RTABLE": true,
+ "IPV6_RTHDR": true,
+ "IPV6_RTHDRDSTOPTS": true,
+ "IPV6_RTHDR_LOOSE": true,
+ "IPV6_RTHDR_STRICT": true,
+ "IPV6_RTHDR_TYPE_0": true,
+ "IPV6_RXDSTOPTS": true,
+ "IPV6_RXHOPOPTS": true,
+ "IPV6_SOCKOPT_RESERVED1": true,
+ "IPV6_TCLASS": true,
+ "IPV6_UNICAST_HOPS": true,
+ "IPV6_USE_MIN_MTU": true,
+ "IPV6_V6ONLY": true,
+ "IPV6_VERSION": true,
+ "IPV6_VERSION_MASK": true,
+ "IPV6_XFRM_POLICY": true,
+ "IP_ADD_MEMBERSHIP": true,
+ "IP_ADD_SOURCE_MEMBERSHIP": true,
+ "IP_AUTH_LEVEL": true,
+ "IP_BINDANY": true,
+ "IP_BLOCK_SOURCE": true,
+ "IP_BOUND_IF": true,
+ "IP_DEFAULT_MULTICAST_LOOP": true,
+ "IP_DEFAULT_MULTICAST_TTL": true,
+ "IP_DF": true,
+ "IP_DIVERTFL": true,
+ "IP_DONTFRAG": true,
+ "IP_DROP_MEMBERSHIP": true,
+ "IP_DROP_SOURCE_MEMBERSHIP": true,
+ "IP_DUMMYNET3": true,
+ "IP_DUMMYNET_CONFIGURE": true,
+ "IP_DUMMYNET_DEL": true,
+ "IP_DUMMYNET_FLUSH": true,
+ "IP_DUMMYNET_GET": true,
+ "IP_EF": true,
+ "IP_ERRORMTU": true,
+ "IP_ESP_NETWORK_LEVEL": true,
+ "IP_ESP_TRANS_LEVEL": true,
+ "IP_FAITH": true,
+ "IP_FREEBIND": true,
+ "IP_FW3": true,
+ "IP_FW_ADD": true,
+ "IP_FW_DEL": true,
+ "IP_FW_FLUSH": true,
+ "IP_FW_GET": true,
+ "IP_FW_NAT_CFG": true,
+ "IP_FW_NAT_DEL": true,
+ "IP_FW_NAT_GET_CONFIG": true,
+ "IP_FW_NAT_GET_LOG": true,
+ "IP_FW_RESETLOG": true,
+ "IP_FW_TABLE_ADD": true,
+ "IP_FW_TABLE_DEL": true,
+ "IP_FW_TABLE_FLUSH": true,
+ "IP_FW_TABLE_GETSIZE": true,
+ "IP_FW_TABLE_LIST": true,
+ "IP_FW_ZERO": true,
+ "IP_HDRINCL": true,
+ "IP_IPCOMP_LEVEL": true,
+ "IP_IPSECFLOWINFO": true,
+ "IP_IPSEC_LOCAL_AUTH": true,
+ "IP_IPSEC_LOCAL_CRED": true,
+ "IP_IPSEC_LOCAL_ID": true,
+ "IP_IPSEC_POLICY": true,
+ "IP_IPSEC_REMOTE_AUTH": true,
+ "IP_IPSEC_REMOTE_CRED": true,
+ "IP_IPSEC_REMOTE_ID": true,
+ "IP_MAXPACKET": true,
+ "IP_MAX_GROUP_SRC_FILTER": true,
+ "IP_MAX_MEMBERSHIPS": true,
+ "IP_MAX_SOCK_MUTE_FILTER": true,
+ "IP_MAX_SOCK_SRC_FILTER": true,
+ "IP_MAX_SOURCE_FILTER": true,
+ "IP_MF": true,
+ "IP_MINFRAGSIZE": true,
+ "IP_MINTTL": true,
+ "IP_MIN_MEMBERSHIPS": true,
+ "IP_MSFILTER": true,
+ "IP_MSS": true,
+ "IP_MTU": true,
+ "IP_MTU_DISCOVER": true,
+ "IP_MULTICAST_IF": true,
+ "IP_MULTICAST_IFINDEX": true,
+ "IP_MULTICAST_LOOP": true,
+ "IP_MULTICAST_TTL": true,
+ "IP_MULTICAST_VIF": true,
+ "IP_NAT__XXX": true,
+ "IP_OFFMASK": true,
+ "IP_OLD_FW_ADD": true,
+ "IP_OLD_FW_DEL": true,
+ "IP_OLD_FW_FLUSH": true,
+ "IP_OLD_FW_GET": true,
+ "IP_OLD_FW_RESETLOG": true,
+ "IP_OLD_FW_ZERO": true,
+ "IP_ONESBCAST": true,
+ "IP_OPTIONS": true,
+ "IP_ORIGDSTADDR": true,
+ "IP_PASSSEC": true,
+ "IP_PIPEX": true,
+ "IP_PKTINFO": true,
+ "IP_PKTOPTIONS": true,
+ "IP_PMTUDISC": true,
+ "IP_PMTUDISC_DO": true,
+ "IP_PMTUDISC_DONT": true,
+ "IP_PMTUDISC_PROBE": true,
+ "IP_PMTUDISC_WANT": true,
+ "IP_PORTRANGE": true,
+ "IP_PORTRANGE_DEFAULT": true,
+ "IP_PORTRANGE_HIGH": true,
+ "IP_PORTRANGE_LOW": true,
+ "IP_RECVDSTADDR": true,
+ "IP_RECVDSTPORT": true,
+ "IP_RECVERR": true,
+ "IP_RECVIF": true,
+ "IP_RECVOPTS": true,
+ "IP_RECVORIGDSTADDR": true,
+ "IP_RECVPKTINFO": true,
+ "IP_RECVRETOPTS": true,
+ "IP_RECVRTABLE": true,
+ "IP_RECVTOS": true,
+ "IP_RECVTTL": true,
+ "IP_RETOPTS": true,
+ "IP_RF": true,
+ "IP_ROUTER_ALERT": true,
+ "IP_RSVP_OFF": true,
+ "IP_RSVP_ON": true,
+ "IP_RSVP_VIF_OFF": true,
+ "IP_RSVP_VIF_ON": true,
+ "IP_RTABLE": true,
+ "IP_SENDSRCADDR": true,
+ "IP_STRIPHDR": true,
+ "IP_TOS": true,
+ "IP_TRAFFIC_MGT_BACKGROUND": true,
+ "IP_TRANSPARENT": true,
+ "IP_TTL": true,
+ "IP_UNBLOCK_SOURCE": true,
+ "IP_XFRM_POLICY": true,
+ "IPv6MTUInfo": true,
+ "IPv6Mreq": true,
+ "ISIG": true,
+ "ISTRIP": true,
+ "IUCLC": true,
+ "IUTF8": true,
+ "IXANY": true,
+ "IXOFF": true,
+ "IXON": true,
+ "IfAddrmsg": true,
+ "IfAnnounceMsghdr": true,
+ "IfData": true,
+ "IfInfomsg": true,
+ "IfMsghdr": true,
+ "IfaMsghdr": true,
+ "IfmaMsghdr": true,
+ "IfmaMsghdr2": true,
+ "ImplementsGetwd": true,
+ "Inet4Pktinfo": true,
+ "Inet6Pktinfo": true,
+ "InotifyAddWatch": true,
+ "InotifyEvent": true,
+ "InotifyInit": true,
+ "InotifyInit1": true,
+ "InotifyRmWatch": true,
+ "InterfaceAddrMessage": true,
+ "InterfaceAnnounceMessage": true,
+ "InterfaceInfo": true,
+ "InterfaceMessage": true,
+ "InterfaceMulticastAddrMessage": true,
+ "InvalidHandle": true,
+ "Ioperm": true,
+ "Iopl": true,
+ "Iovec": true,
+ "IpAdapterInfo": true,
+ "IpAddrString": true,
+ "IpAddressString": true,
+ "IpMaskString": true,
+ "Issetugid": true,
+ "KEY_ALL_ACCESS": true,
+ "KEY_CREATE_LINK": true,
+ "KEY_CREATE_SUB_KEY": true,
+ "KEY_ENUMERATE_SUB_KEYS": true,
+ "KEY_EXECUTE": true,
+ "KEY_NOTIFY": true,
+ "KEY_QUERY_VALUE": true,
+ "KEY_READ": true,
+ "KEY_SET_VALUE": true,
+ "KEY_WOW64_32KEY": true,
+ "KEY_WOW64_64KEY": true,
+ "KEY_WRITE": true,
+ "Kevent": true,
+ "Kevent_t": true,
+ "Kill": true,
+ "Klogctl": true,
+ "Kqueue": true,
+ "LANG_ENGLISH": true,
+ "LAYERED_PROTOCOL": true,
+ "LCNT_OVERLOAD_FLUSH": true,
+ "LINUX_REBOOT_CMD_CAD_OFF": true,
+ "LINUX_REBOOT_CMD_CAD_ON": true,
+ "LINUX_REBOOT_CMD_HALT": true,
+ "LINUX_REBOOT_CMD_KEXEC": true,
+ "LINUX_REBOOT_CMD_POWER_OFF": true,
+ "LINUX_REBOOT_CMD_RESTART": true,
+ "LINUX_REBOOT_CMD_RESTART2": true,
+ "LINUX_REBOOT_CMD_SW_SUSPEND": true,
+ "LINUX_REBOOT_MAGIC1": true,
+ "LINUX_REBOOT_MAGIC2": true,
+ "LOCK_EX": true,
+ "LOCK_NB": true,
+ "LOCK_SH": true,
+ "LOCK_UN": true,
+ "LazyDLL": true,
+ "LazyProc": true,
+ "Lchown": true,
+ "Linger": true,
+ "Link": true,
+ "Listen": true,
+ "Listxattr": true,
+ "LoadCancelIoEx": true,
+ "LoadConnectEx": true,
+ "LoadCreateSymbolicLink": true,
+ "LoadDLL": true,
+ "LoadGetAddrInfo": true,
+ "LoadLibrary": true,
+ "LoadSetFileCompletionNotificationModes": true,
+ "LocalFree": true,
+ "Log2phys_t": true,
+ "LookupAccountName": true,
+ "LookupAccountSid": true,
+ "LookupSID": true,
+ "LsfJump": true,
+ "LsfSocket": true,
+ "LsfStmt": true,
+ "Lstat": true,
+ "MADV_AUTOSYNC": true,
+ "MADV_CAN_REUSE": true,
+ "MADV_CORE": true,
+ "MADV_DOFORK": true,
+ "MADV_DONTFORK": true,
+ "MADV_DONTNEED": true,
+ "MADV_FREE": true,
+ "MADV_FREE_REUSABLE": true,
+ "MADV_FREE_REUSE": true,
+ "MADV_HUGEPAGE": true,
+ "MADV_HWPOISON": true,
+ "MADV_MERGEABLE": true,
+ "MADV_NOCORE": true,
+ "MADV_NOHUGEPAGE": true,
+ "MADV_NORMAL": true,
+ "MADV_NOSYNC": true,
+ "MADV_PROTECT": true,
+ "MADV_RANDOM": true,
+ "MADV_REMOVE": true,
+ "MADV_SEQUENTIAL": true,
+ "MADV_SPACEAVAIL": true,
+ "MADV_UNMERGEABLE": true,
+ "MADV_WILLNEED": true,
+ "MADV_ZERO_WIRED_PAGES": true,
+ "MAP_32BIT": true,
+ "MAP_ALIGNED_SUPER": true,
+ "MAP_ALIGNMENT_16MB": true,
+ "MAP_ALIGNMENT_1TB": true,
+ "MAP_ALIGNMENT_256TB": true,
+ "MAP_ALIGNMENT_4GB": true,
+ "MAP_ALIGNMENT_64KB": true,
+ "MAP_ALIGNMENT_64PB": true,
+ "MAP_ALIGNMENT_MASK": true,
+ "MAP_ALIGNMENT_SHIFT": true,
+ "MAP_ANON": true,
+ "MAP_ANONYMOUS": true,
+ "MAP_COPY": true,
+ "MAP_DENYWRITE": true,
+ "MAP_EXECUTABLE": true,
+ "MAP_FILE": true,
+ "MAP_FIXED": true,
+ "MAP_FLAGMASK": true,
+ "MAP_GROWSDOWN": true,
+ "MAP_HASSEMAPHORE": true,
+ "MAP_HUGETLB": true,
+ "MAP_INHERIT": true,
+ "MAP_INHERIT_COPY": true,
+ "MAP_INHERIT_DEFAULT": true,
+ "MAP_INHERIT_DONATE_COPY": true,
+ "MAP_INHERIT_NONE": true,
+ "MAP_INHERIT_SHARE": true,
+ "MAP_JIT": true,
+ "MAP_LOCKED": true,
+ "MAP_NOCACHE": true,
+ "MAP_NOCORE": true,
+ "MAP_NOEXTEND": true,
+ "MAP_NONBLOCK": true,
+ "MAP_NORESERVE": true,
+ "MAP_NOSYNC": true,
+ "MAP_POPULATE": true,
+ "MAP_PREFAULT_READ": true,
+ "MAP_PRIVATE": true,
+ "MAP_RENAME": true,
+ "MAP_RESERVED0080": true,
+ "MAP_RESERVED0100": true,
+ "MAP_SHARED": true,
+ "MAP_STACK": true,
+ "MAP_TRYFIXED": true,
+ "MAP_TYPE": true,
+ "MAP_WIRED": true,
+ "MAXIMUM_REPARSE_DATA_BUFFER_SIZE": true,
+ "MAXLEN_IFDESCR": true,
+ "MAXLEN_PHYSADDR": true,
+ "MAX_ADAPTER_ADDRESS_LENGTH": true,
+ "MAX_ADAPTER_DESCRIPTION_LENGTH": true,
+ "MAX_ADAPTER_NAME_LENGTH": true,
+ "MAX_COMPUTERNAME_LENGTH": true,
+ "MAX_INTERFACE_NAME_LEN": true,
+ "MAX_LONG_PATH": true,
+ "MAX_PATH": true,
+ "MAX_PROTOCOL_CHAIN": true,
+ "MCL_CURRENT": true,
+ "MCL_FUTURE": true,
+ "MNT_DETACH": true,
+ "MNT_EXPIRE": true,
+ "MNT_FORCE": true,
+ "MSG_BCAST": true,
+ "MSG_CMSG_CLOEXEC": true,
+ "MSG_COMPAT": true,
+ "MSG_CONFIRM": true,
+ "MSG_CONTROLMBUF": true,
+ "MSG_CTRUNC": true,
+ "MSG_DONTROUTE": true,
+ "MSG_DONTWAIT": true,
+ "MSG_EOF": true,
+ "MSG_EOR": true,
+ "MSG_ERRQUEUE": true,
+ "MSG_FASTOPEN": true,
+ "MSG_FIN": true,
+ "MSG_FLUSH": true,
+ "MSG_HAVEMORE": true,
+ "MSG_HOLD": true,
+ "MSG_IOVUSRSPACE": true,
+ "MSG_LENUSRSPACE": true,
+ "MSG_MCAST": true,
+ "MSG_MORE": true,
+ "MSG_NAMEMBUF": true,
+ "MSG_NBIO": true,
+ "MSG_NEEDSA": true,
+ "MSG_NOSIGNAL": true,
+ "MSG_NOTIFICATION": true,
+ "MSG_OOB": true,
+ "MSG_PEEK": true,
+ "MSG_PROXY": true,
+ "MSG_RCVMORE": true,
+ "MSG_RST": true,
+ "MSG_SEND": true,
+ "MSG_SYN": true,
+ "MSG_TRUNC": true,
+ "MSG_TRYHARD": true,
+ "MSG_USERFLAGS": true,
+ "MSG_WAITALL": true,
+ "MSG_WAITFORONE": true,
+ "MSG_WAITSTREAM": true,
+ "MS_ACTIVE": true,
+ "MS_ASYNC": true,
+ "MS_BIND": true,
+ "MS_DEACTIVATE": true,
+ "MS_DIRSYNC": true,
+ "MS_INVALIDATE": true,
+ "MS_I_VERSION": true,
+ "MS_KERNMOUNT": true,
+ "MS_KILLPAGES": true,
+ "MS_MANDLOCK": true,
+ "MS_MGC_MSK": true,
+ "MS_MGC_VAL": true,
+ "MS_MOVE": true,
+ "MS_NOATIME": true,
+ "MS_NODEV": true,
+ "MS_NODIRATIME": true,
+ "MS_NOEXEC": true,
+ "MS_NOSUID": true,
+ "MS_NOUSER": true,
+ "MS_POSIXACL": true,
+ "MS_PRIVATE": true,
+ "MS_RDONLY": true,
+ "MS_REC": true,
+ "MS_RELATIME": true,
+ "MS_REMOUNT": true,
+ "MS_RMT_MASK": true,
+ "MS_SHARED": true,
+ "MS_SILENT": true,
+ "MS_SLAVE": true,
+ "MS_STRICTATIME": true,
+ "MS_SYNC": true,
+ "MS_SYNCHRONOUS": true,
+ "MS_UNBINDABLE": true,
+ "Madvise": true,
+ "MapViewOfFile": true,
+ "MaxTokenInfoClass": true,
+ "Mclpool": true,
+ "MibIfRow": true,
+ "Mkdir": true,
+ "Mkdirat": true,
+ "Mkfifo": true,
+ "Mknod": true,
+ "Mknodat": true,
+ "Mlock": true,
+ "Mlockall": true,
+ "Mmap": true,
+ "Mount": true,
+ "MoveFile": true,
+ "Mprotect": true,
+ "Msghdr": true,
+ "Munlock": true,
+ "Munlockall": true,
+ "Munmap": true,
+ "MustLoadDLL": true,
+ "NAME_MAX": true,
+ "NETLINK_ADD_MEMBERSHIP": true,
+ "NETLINK_AUDIT": true,
+ "NETLINK_BROADCAST_ERROR": true,
+ "NETLINK_CONNECTOR": true,
+ "NETLINK_DNRTMSG": true,
+ "NETLINK_DROP_MEMBERSHIP": true,
+ "NETLINK_ECRYPTFS": true,
+ "NETLINK_FIB_LOOKUP": true,
+ "NETLINK_FIREWALL": true,
+ "NETLINK_GENERIC": true,
+ "NETLINK_INET_DIAG": true,
+ "NETLINK_IP6_FW": true,
+ "NETLINK_ISCSI": true,
+ "NETLINK_KOBJECT_UEVENT": true,
+ "NETLINK_NETFILTER": true,
+ "NETLINK_NFLOG": true,
+ "NETLINK_NO_ENOBUFS": true,
+ "NETLINK_PKTINFO": true,
+ "NETLINK_RDMA": true,
+ "NETLINK_ROUTE": true,
+ "NETLINK_SCSITRANSPORT": true,
+ "NETLINK_SELINUX": true,
+ "NETLINK_UNUSED": true,
+ "NETLINK_USERSOCK": true,
+ "NETLINK_XFRM": true,
+ "NET_RT_DUMP": true,
+ "NET_RT_DUMP2": true,
+ "NET_RT_FLAGS": true,
+ "NET_RT_IFLIST": true,
+ "NET_RT_IFLIST2": true,
+ "NET_RT_IFLISTL": true,
+ "NET_RT_IFMALIST": true,
+ "NET_RT_MAXID": true,
+ "NET_RT_OIFLIST": true,
+ "NET_RT_OOIFLIST": true,
+ "NET_RT_STAT": true,
+ "NET_RT_STATS": true,
+ "NET_RT_TABLE": true,
+ "NET_RT_TRASH": true,
+ "NLA_ALIGNTO": true,
+ "NLA_F_NESTED": true,
+ "NLA_F_NET_BYTEORDER": true,
+ "NLA_HDRLEN": true,
+ "NLMSG_ALIGNTO": true,
+ "NLMSG_DONE": true,
+ "NLMSG_ERROR": true,
+ "NLMSG_HDRLEN": true,
+ "NLMSG_MIN_TYPE": true,
+ "NLMSG_NOOP": true,
+ "NLMSG_OVERRUN": true,
+ "NLM_F_ACK": true,
+ "NLM_F_APPEND": true,
+ "NLM_F_ATOMIC": true,
+ "NLM_F_CREATE": true,
+ "NLM_F_DUMP": true,
+ "NLM_F_ECHO": true,
+ "NLM_F_EXCL": true,
+ "NLM_F_MATCH": true,
+ "NLM_F_MULTI": true,
+ "NLM_F_REPLACE": true,
+ "NLM_F_REQUEST": true,
+ "NLM_F_ROOT": true,
+ "NOFLSH": true,
+ "NOTE_ABSOLUTE": true,
+ "NOTE_ATTRIB": true,
+ "NOTE_CHILD": true,
+ "NOTE_DELETE": true,
+ "NOTE_EOF": true,
+ "NOTE_EXEC": true,
+ "NOTE_EXIT": true,
+ "NOTE_EXITSTATUS": true,
+ "NOTE_EXTEND": true,
+ "NOTE_FFAND": true,
+ "NOTE_FFCOPY": true,
+ "NOTE_FFCTRLMASK": true,
+ "NOTE_FFLAGSMASK": true,
+ "NOTE_FFNOP": true,
+ "NOTE_FFOR": true,
+ "NOTE_FORK": true,
+ "NOTE_LINK": true,
+ "NOTE_LOWAT": true,
+ "NOTE_NONE": true,
+ "NOTE_NSECONDS": true,
+ "NOTE_PCTRLMASK": true,
+ "NOTE_PDATAMASK": true,
+ "NOTE_REAP": true,
+ "NOTE_RENAME": true,
+ "NOTE_RESOURCEEND": true,
+ "NOTE_REVOKE": true,
+ "NOTE_SECONDS": true,
+ "NOTE_SIGNAL": true,
+ "NOTE_TRACK": true,
+ "NOTE_TRACKERR": true,
+ "NOTE_TRIGGER": true,
+ "NOTE_TRUNCATE": true,
+ "NOTE_USECONDS": true,
+ "NOTE_VM_ERROR": true,
+ "NOTE_VM_PRESSURE": true,
+ "NOTE_VM_PRESSURE_SUDDEN_TERMINATE": true,
+ "NOTE_VM_PRESSURE_TERMINATE": true,
+ "NOTE_WRITE": true,
+ "NameCanonical": true,
+ "NameCanonicalEx": true,
+ "NameDisplay": true,
+ "NameDnsDomain": true,
+ "NameFullyQualifiedDN": true,
+ "NameSamCompatible": true,
+ "NameServicePrincipal": true,
+ "NameUniqueId": true,
+ "NameUnknown": true,
+ "NameUserPrincipal": true,
+ "Nanosleep": true,
+ "NetApiBufferFree": true,
+ "NetGetJoinInformation": true,
+ "NetSetupDomainName": true,
+ "NetSetupUnjoined": true,
+ "NetSetupUnknownStatus": true,
+ "NetSetupWorkgroupName": true,
+ "NetUserGetInfo": true,
+ "NetlinkMessage": true,
+ "NetlinkRIB": true,
+ "NetlinkRouteAttr": true,
+ "NetlinkRouteRequest": true,
+ "NewCallback": true,
+ "NewCallbackCDecl": true,
+ "NewLazyDLL": true,
+ "NlAttr": true,
+ "NlMsgerr": true,
+ "NlMsghdr": true,
+ "NsecToFiletime": true,
+ "NsecToTimespec": true,
+ "NsecToTimeval": true,
+ "Ntohs": true,
+ "OCRNL": true,
+ "OFDEL": true,
+ "OFILL": true,
+ "OFIOGETBMAP": true,
+ "OID_PKIX_KP_SERVER_AUTH": true,
+ "OID_SERVER_GATED_CRYPTO": true,
+ "OID_SGC_NETSCAPE": true,
+ "OLCUC": true,
+ "ONLCR": true,
+ "ONLRET": true,
+ "ONOCR": true,
+ "ONOEOT": true,
+ "OPEN_ALWAYS": true,
+ "OPEN_EXISTING": true,
+ "OPOST": true,
+ "O_ACCMODE": true,
+ "O_ALERT": true,
+ "O_ALT_IO": true,
+ "O_APPEND": true,
+ "O_ASYNC": true,
+ "O_CLOEXEC": true,
+ "O_CREAT": true,
+ "O_DIRECT": true,
+ "O_DIRECTORY": true,
+ "O_DSYNC": true,
+ "O_EVTONLY": true,
+ "O_EXCL": true,
+ "O_EXEC": true,
+ "O_EXLOCK": true,
+ "O_FSYNC": true,
+ "O_LARGEFILE": true,
+ "O_NDELAY": true,
+ "O_NOATIME": true,
+ "O_NOCTTY": true,
+ "O_NOFOLLOW": true,
+ "O_NONBLOCK": true,
+ "O_NOSIGPIPE": true,
+ "O_POPUP": true,
+ "O_RDONLY": true,
+ "O_RDWR": true,
+ "O_RSYNC": true,
+ "O_SHLOCK": true,
+ "O_SYMLINK": true,
+ "O_SYNC": true,
+ "O_TRUNC": true,
+ "O_TTY_INIT": true,
+ "O_WRONLY": true,
+ "Open": true,
+ "OpenCurrentProcessToken": true,
+ "OpenProcess": true,
+ "OpenProcessToken": true,
+ "Openat": true,
+ "Overlapped": true,
+ "PACKET_ADD_MEMBERSHIP": true,
+ "PACKET_BROADCAST": true,
+ "PACKET_DROP_MEMBERSHIP": true,
+ "PACKET_FASTROUTE": true,
+ "PACKET_HOST": true,
+ "PACKET_LOOPBACK": true,
+ "PACKET_MR_ALLMULTI": true,
+ "PACKET_MR_MULTICAST": true,
+ "PACKET_MR_PROMISC": true,
+ "PACKET_MULTICAST": true,
+ "PACKET_OTHERHOST": true,
+ "PACKET_OUTGOING": true,
+ "PACKET_RECV_OUTPUT": true,
+ "PACKET_RX_RING": true,
+ "PACKET_STATISTICS": true,
+ "PAGE_EXECUTE_READ": true,
+ "PAGE_EXECUTE_READWRITE": true,
+ "PAGE_EXECUTE_WRITECOPY": true,
+ "PAGE_READONLY": true,
+ "PAGE_READWRITE": true,
+ "PAGE_WRITECOPY": true,
+ "PARENB": true,
+ "PARMRK": true,
+ "PARODD": true,
+ "PENDIN": true,
+ "PFL_HIDDEN": true,
+ "PFL_MATCHES_PROTOCOL_ZERO": true,
+ "PFL_MULTIPLE_PROTO_ENTRIES": true,
+ "PFL_NETWORKDIRECT_PROVIDER": true,
+ "PFL_RECOMMENDED_PROTO_ENTRY": true,
+ "PF_FLUSH": true,
+ "PKCS_7_ASN_ENCODING": true,
+ "PMC5_PIPELINE_FLUSH": true,
+ "PRIO_PGRP": true,
+ "PRIO_PROCESS": true,
+ "PRIO_USER": true,
+ "PRI_IOFLUSH": true,
+ "PROCESS_QUERY_INFORMATION": true,
+ "PROCESS_TERMINATE": true,
+ "PROT_EXEC": true,
+ "PROT_GROWSDOWN": true,
+ "PROT_GROWSUP": true,
+ "PROT_NONE": true,
+ "PROT_READ": true,
+ "PROT_WRITE": true,
+ "PROV_DH_SCHANNEL": true,
+ "PROV_DSS": true,
+ "PROV_DSS_DH": true,
+ "PROV_EC_ECDSA_FULL": true,
+ "PROV_EC_ECDSA_SIG": true,
+ "PROV_EC_ECNRA_FULL": true,
+ "PROV_EC_ECNRA_SIG": true,
+ "PROV_FORTEZZA": true,
+ "PROV_INTEL_SEC": true,
+ "PROV_MS_EXCHANGE": true,
+ "PROV_REPLACE_OWF": true,
+ "PROV_RNG": true,
+ "PROV_RSA_AES": true,
+ "PROV_RSA_FULL": true,
+ "PROV_RSA_SCHANNEL": true,
+ "PROV_RSA_SIG": true,
+ "PROV_SPYRUS_LYNKS": true,
+ "PROV_SSL": true,
+ "PR_CAPBSET_DROP": true,
+ "PR_CAPBSET_READ": true,
+ "PR_CLEAR_SECCOMP_FILTER": true,
+ "PR_ENDIAN_BIG": true,
+ "PR_ENDIAN_LITTLE": true,
+ "PR_ENDIAN_PPC_LITTLE": true,
+ "PR_FPEMU_NOPRINT": true,
+ "PR_FPEMU_SIGFPE": true,
+ "PR_FP_EXC_ASYNC": true,
+ "PR_FP_EXC_DISABLED": true,
+ "PR_FP_EXC_DIV": true,
+ "PR_FP_EXC_INV": true,
+ "PR_FP_EXC_NONRECOV": true,
+ "PR_FP_EXC_OVF": true,
+ "PR_FP_EXC_PRECISE": true,
+ "PR_FP_EXC_RES": true,
+ "PR_FP_EXC_SW_ENABLE": true,
+ "PR_FP_EXC_UND": true,
+ "PR_GET_DUMPABLE": true,
+ "PR_GET_ENDIAN": true,
+ "PR_GET_FPEMU": true,
+ "PR_GET_FPEXC": true,
+ "PR_GET_KEEPCAPS": true,
+ "PR_GET_NAME": true,
+ "PR_GET_PDEATHSIG": true,
+ "PR_GET_SECCOMP": true,
+ "PR_GET_SECCOMP_FILTER": true,
+ "PR_GET_SECUREBITS": true,
+ "PR_GET_TIMERSLACK": true,
+ "PR_GET_TIMING": true,
+ "PR_GET_TSC": true,
+ "PR_GET_UNALIGN": true,
+ "PR_MCE_KILL": true,
+ "PR_MCE_KILL_CLEAR": true,
+ "PR_MCE_KILL_DEFAULT": true,
+ "PR_MCE_KILL_EARLY": true,
+ "PR_MCE_KILL_GET": true,
+ "PR_MCE_KILL_LATE": true,
+ "PR_MCE_KILL_SET": true,
+ "PR_SECCOMP_FILTER_EVENT": true,
+ "PR_SECCOMP_FILTER_SYSCALL": true,
+ "PR_SET_DUMPABLE": true,
+ "PR_SET_ENDIAN": true,
+ "PR_SET_FPEMU": true,
+ "PR_SET_FPEXC": true,
+ "PR_SET_KEEPCAPS": true,
+ "PR_SET_NAME": true,
+ "PR_SET_PDEATHSIG": true,
+ "PR_SET_PTRACER": true,
+ "PR_SET_SECCOMP": true,
+ "PR_SET_SECCOMP_FILTER": true,
+ "PR_SET_SECUREBITS": true,
+ "PR_SET_TIMERSLACK": true,
+ "PR_SET_TIMING": true,
+ "PR_SET_TSC": true,
+ "PR_SET_UNALIGN": true,
+ "PR_TASK_PERF_EVENTS_DISABLE": true,
+ "PR_TASK_PERF_EVENTS_ENABLE": true,
+ "PR_TIMING_STATISTICAL": true,
+ "PR_TIMING_TIMESTAMP": true,
+ "PR_TSC_ENABLE": true,
+ "PR_TSC_SIGSEGV": true,
+ "PR_UNALIGN_NOPRINT": true,
+ "PR_UNALIGN_SIGBUS": true,
+ "PTRACE_ARCH_PRCTL": true,
+ "PTRACE_ATTACH": true,
+ "PTRACE_CONT": true,
+ "PTRACE_DETACH": true,
+ "PTRACE_EVENT_CLONE": true,
+ "PTRACE_EVENT_EXEC": true,
+ "PTRACE_EVENT_EXIT": true,
+ "PTRACE_EVENT_FORK": true,
+ "PTRACE_EVENT_VFORK": true,
+ "PTRACE_EVENT_VFORK_DONE": true,
+ "PTRACE_GETCRUNCHREGS": true,
+ "PTRACE_GETEVENTMSG": true,
+ "PTRACE_GETFPREGS": true,
+ "PTRACE_GETFPXREGS": true,
+ "PTRACE_GETHBPREGS": true,
+ "PTRACE_GETREGS": true,
+ "PTRACE_GETREGSET": true,
+ "PTRACE_GETSIGINFO": true,
+ "PTRACE_GETVFPREGS": true,
+ "PTRACE_GETWMMXREGS": true,
+ "PTRACE_GET_THREAD_AREA": true,
+ "PTRACE_KILL": true,
+ "PTRACE_OLDSETOPTIONS": true,
+ "PTRACE_O_MASK": true,
+ "PTRACE_O_TRACECLONE": true,
+ "PTRACE_O_TRACEEXEC": true,
+ "PTRACE_O_TRACEEXIT": true,
+ "PTRACE_O_TRACEFORK": true,
+ "PTRACE_O_TRACESYSGOOD": true,
+ "PTRACE_O_TRACEVFORK": true,
+ "PTRACE_O_TRACEVFORKDONE": true,
+ "PTRACE_PEEKDATA": true,
+ "PTRACE_PEEKTEXT": true,
+ "PTRACE_PEEKUSR": true,
+ "PTRACE_POKEDATA": true,
+ "PTRACE_POKETEXT": true,
+ "PTRACE_POKEUSR": true,
+ "PTRACE_SETCRUNCHREGS": true,
+ "PTRACE_SETFPREGS": true,
+ "PTRACE_SETFPXREGS": true,
+ "PTRACE_SETHBPREGS": true,
+ "PTRACE_SETOPTIONS": true,
+ "PTRACE_SETREGS": true,
+ "PTRACE_SETREGSET": true,
+ "PTRACE_SETSIGINFO": true,
+ "PTRACE_SETVFPREGS": true,
+ "PTRACE_SETWMMXREGS": true,
+ "PTRACE_SET_SYSCALL": true,
+ "PTRACE_SET_THREAD_AREA": true,
+ "PTRACE_SINGLEBLOCK": true,
+ "PTRACE_SINGLESTEP": true,
+ "PTRACE_SYSCALL": true,
+ "PTRACE_SYSEMU": true,
+ "PTRACE_SYSEMU_SINGLESTEP": true,
+ "PTRACE_TRACEME": true,
+ "PT_ATTACH": true,
+ "PT_ATTACHEXC": true,
+ "PT_CONTINUE": true,
+ "PT_DATA_ADDR": true,
+ "PT_DENY_ATTACH": true,
+ "PT_DETACH": true,
+ "PT_FIRSTMACH": true,
+ "PT_FORCEQUOTA": true,
+ "PT_KILL": true,
+ "PT_MASK": true,
+ "PT_READ_D": true,
+ "PT_READ_I": true,
+ "PT_READ_U": true,
+ "PT_SIGEXC": true,
+ "PT_STEP": true,
+ "PT_TEXT_ADDR": true,
+ "PT_TEXT_END_ADDR": true,
+ "PT_THUPDATE": true,
+ "PT_TRACE_ME": true,
+ "PT_WRITE_D": true,
+ "PT_WRITE_I": true,
+ "PT_WRITE_U": true,
+ "ParseDirent": true,
+ "ParseNetlinkMessage": true,
+ "ParseNetlinkRouteAttr": true,
+ "ParseRoutingMessage": true,
+ "ParseRoutingSockaddr": true,
+ "ParseSocketControlMessage": true,
+ "ParseUnixCredentials": true,
+ "ParseUnixRights": true,
+ "PathMax": true,
+ "Pathconf": true,
+ "Pause": true,
+ "Pipe": true,
+ "Pipe2": true,
+ "PivotRoot": true,
+ "Pointer": true,
+ "PostQueuedCompletionStatus": true,
+ "Pread": true,
+ "Proc": true,
+ "ProcAttr": true,
+ "Process32First": true,
+ "Process32Next": true,
+ "ProcessEntry32": true,
+ "ProcessInformation": true,
+ "Protoent": true,
+ "PtraceAttach": true,
+ "PtraceCont": true,
+ "PtraceDetach": true,
+ "PtraceGetEventMsg": true,
+ "PtraceGetRegs": true,
+ "PtracePeekData": true,
+ "PtracePeekText": true,
+ "PtracePokeData": true,
+ "PtracePokeText": true,
+ "PtraceRegs": true,
+ "PtraceSetOptions": true,
+ "PtraceSetRegs": true,
+ "PtraceSingleStep": true,
+ "PtraceSyscall": true,
+ "Pwrite": true,
+ "REG_BINARY": true,
+ "REG_DWORD": true,
+ "REG_DWORD_BIG_ENDIAN": true,
+ "REG_DWORD_LITTLE_ENDIAN": true,
+ "REG_EXPAND_SZ": true,
+ "REG_FULL_RESOURCE_DESCRIPTOR": true,
+ "REG_LINK": true,
+ "REG_MULTI_SZ": true,
+ "REG_NONE": true,
+ "REG_QWORD": true,
+ "REG_QWORD_LITTLE_ENDIAN": true,
+ "REG_RESOURCE_LIST": true,
+ "REG_RESOURCE_REQUIREMENTS_LIST": true,
+ "REG_SZ": true,
+ "RLIMIT_AS": true,
+ "RLIMIT_CORE": true,
+ "RLIMIT_CPU": true,
+ "RLIMIT_DATA": true,
+ "RLIMIT_FSIZE": true,
+ "RLIMIT_NOFILE": true,
+ "RLIMIT_STACK": true,
+ "RLIM_INFINITY": true,
+ "RTAX_ADVMSS": true,
+ "RTAX_AUTHOR": true,
+ "RTAX_BRD": true,
+ "RTAX_CWND": true,
+ "RTAX_DST": true,
+ "RTAX_FEATURES": true,
+ "RTAX_FEATURE_ALLFRAG": true,
+ "RTAX_FEATURE_ECN": true,
+ "RTAX_FEATURE_SACK": true,
+ "RTAX_FEATURE_TIMESTAMP": true,
+ "RTAX_GATEWAY": true,
+ "RTAX_GENMASK": true,
+ "RTAX_HOPLIMIT": true,
+ "RTAX_IFA": true,
+ "RTAX_IFP": true,
+ "RTAX_INITCWND": true,
+ "RTAX_INITRWND": true,
+ "RTAX_LABEL": true,
+ "RTAX_LOCK": true,
+ "RTAX_MAX": true,
+ "RTAX_MTU": true,
+ "RTAX_NETMASK": true,
+ "RTAX_REORDERING": true,
+ "RTAX_RTO_MIN": true,
+ "RTAX_RTT": true,
+ "RTAX_RTTVAR": true,
+ "RTAX_SRC": true,
+ "RTAX_SRCMASK": true,
+ "RTAX_SSTHRESH": true,
+ "RTAX_TAG": true,
+ "RTAX_UNSPEC": true,
+ "RTAX_WINDOW": true,
+ "RTA_ALIGNTO": true,
+ "RTA_AUTHOR": true,
+ "RTA_BRD": true,
+ "RTA_CACHEINFO": true,
+ "RTA_DST": true,
+ "RTA_FLOW": true,
+ "RTA_GATEWAY": true,
+ "RTA_GENMASK": true,
+ "RTA_IFA": true,
+ "RTA_IFP": true,
+ "RTA_IIF": true,
+ "RTA_LABEL": true,
+ "RTA_MAX": true,
+ "RTA_METRICS": true,
+ "RTA_MULTIPATH": true,
+ "RTA_NETMASK": true,
+ "RTA_OIF": true,
+ "RTA_PREFSRC": true,
+ "RTA_PRIORITY": true,
+ "RTA_SRC": true,
+ "RTA_SRCMASK": true,
+ "RTA_TABLE": true,
+ "RTA_TAG": true,
+ "RTA_UNSPEC": true,
+ "RTCF_DIRECTSRC": true,
+ "RTCF_DOREDIRECT": true,
+ "RTCF_LOG": true,
+ "RTCF_MASQ": true,
+ "RTCF_NAT": true,
+ "RTCF_VALVE": true,
+ "RTF_ADDRCLASSMASK": true,
+ "RTF_ADDRCONF": true,
+ "RTF_ALLONLINK": true,
+ "RTF_ANNOUNCE": true,
+ "RTF_BLACKHOLE": true,
+ "RTF_BROADCAST": true,
+ "RTF_CACHE": true,
+ "RTF_CLONED": true,
+ "RTF_CLONING": true,
+ "RTF_CONDEMNED": true,
+ "RTF_DEFAULT": true,
+ "RTF_DELCLONE": true,
+ "RTF_DONE": true,
+ "RTF_DYNAMIC": true,
+ "RTF_FLOW": true,
+ "RTF_FMASK": true,
+ "RTF_GATEWAY": true,
+ "RTF_GWFLAG_COMPAT": true,
+ "RTF_HOST": true,
+ "RTF_IFREF": true,
+ "RTF_IFSCOPE": true,
+ "RTF_INTERFACE": true,
+ "RTF_IRTT": true,
+ "RTF_LINKRT": true,
+ "RTF_LLDATA": true,
+ "RTF_LLINFO": true,
+ "RTF_LOCAL": true,
+ "RTF_MASK": true,
+ "RTF_MODIFIED": true,
+ "RTF_MPATH": true,
+ "RTF_MPLS": true,
+ "RTF_MSS": true,
+ "RTF_MTU": true,
+ "RTF_MULTICAST": true,
+ "RTF_NAT": true,
+ "RTF_NOFORWARD": true,
+ "RTF_NONEXTHOP": true,
+ "RTF_NOPMTUDISC": true,
+ "RTF_PERMANENT_ARP": true,
+ "RTF_PINNED": true,
+ "RTF_POLICY": true,
+ "RTF_PRCLONING": true,
+ "RTF_PROTO1": true,
+ "RTF_PROTO2": true,
+ "RTF_PROTO3": true,
+ "RTF_REINSTATE": true,
+ "RTF_REJECT": true,
+ "RTF_RNH_LOCKED": true,
+ "RTF_SOURCE": true,
+ "RTF_SRC": true,
+ "RTF_STATIC": true,
+ "RTF_STICKY": true,
+ "RTF_THROW": true,
+ "RTF_TUNNEL": true,
+ "RTF_UP": true,
+ "RTF_USETRAILERS": true,
+ "RTF_WASCLONED": true,
+ "RTF_WINDOW": true,
+ "RTF_XRESOLVE": true,
+ "RTM_ADD": true,
+ "RTM_BASE": true,
+ "RTM_CHANGE": true,
+ "RTM_CHGADDR": true,
+ "RTM_DELACTION": true,
+ "RTM_DELADDR": true,
+ "RTM_DELADDRLABEL": true,
+ "RTM_DELETE": true,
+ "RTM_DELLINK": true,
+ "RTM_DELMADDR": true,
+ "RTM_DELNEIGH": true,
+ "RTM_DELQDISC": true,
+ "RTM_DELROUTE": true,
+ "RTM_DELRULE": true,
+ "RTM_DELTCLASS": true,
+ "RTM_DELTFILTER": true,
+ "RTM_DESYNC": true,
+ "RTM_F_CLONED": true,
+ "RTM_F_EQUALIZE": true,
+ "RTM_F_NOTIFY": true,
+ "RTM_F_PREFIX": true,
+ "RTM_GET": true,
+ "RTM_GET2": true,
+ "RTM_GETACTION": true,
+ "RTM_GETADDR": true,
+ "RTM_GETADDRLABEL": true,
+ "RTM_GETANYCAST": true,
+ "RTM_GETDCB": true,
+ "RTM_GETLINK": true,
+ "RTM_GETMULTICAST": true,
+ "RTM_GETNEIGH": true,
+ "RTM_GETNEIGHTBL": true,
+ "RTM_GETQDISC": true,
+ "RTM_GETROUTE": true,
+ "RTM_GETRULE": true,
+ "RTM_GETTCLASS": true,
+ "RTM_GETTFILTER": true,
+ "RTM_IEEE80211": true,
+ "RTM_IFANNOUNCE": true,
+ "RTM_IFINFO": true,
+ "RTM_IFINFO2": true,
+ "RTM_LLINFO_UPD": true,
+ "RTM_LOCK": true,
+ "RTM_LOSING": true,
+ "RTM_MAX": true,
+ "RTM_MAXSIZE": true,
+ "RTM_MISS": true,
+ "RTM_NEWACTION": true,
+ "RTM_NEWADDR": true,
+ "RTM_NEWADDRLABEL": true,
+ "RTM_NEWLINK": true,
+ "RTM_NEWMADDR": true,
+ "RTM_NEWMADDR2": true,
+ "RTM_NEWNDUSEROPT": true,
+ "RTM_NEWNEIGH": true,
+ "RTM_NEWNEIGHTBL": true,
+ "RTM_NEWPREFIX": true,
+ "RTM_NEWQDISC": true,
+ "RTM_NEWROUTE": true,
+ "RTM_NEWRULE": true,
+ "RTM_NEWTCLASS": true,
+ "RTM_NEWTFILTER": true,
+ "RTM_NR_FAMILIES": true,
+ "RTM_NR_MSGTYPES": true,
+ "RTM_OIFINFO": true,
+ "RTM_OLDADD": true,
+ "RTM_OLDDEL": true,
+ "RTM_OOIFINFO": true,
+ "RTM_REDIRECT": true,
+ "RTM_RESOLVE": true,
+ "RTM_RTTUNIT": true,
+ "RTM_SETDCB": true,
+ "RTM_SETGATE": true,
+ "RTM_SETLINK": true,
+ "RTM_SETNEIGHTBL": true,
+ "RTM_VERSION": true,
+ "RTNH_ALIGNTO": true,
+ "RTNH_F_DEAD": true,
+ "RTNH_F_ONLINK": true,
+ "RTNH_F_PERVASIVE": true,
+ "RTNLGRP_IPV4_IFADDR": true,
+ "RTNLGRP_IPV4_MROUTE": true,
+ "RTNLGRP_IPV4_ROUTE": true,
+ "RTNLGRP_IPV4_RULE": true,
+ "RTNLGRP_IPV6_IFADDR": true,
+ "RTNLGRP_IPV6_IFINFO": true,
+ "RTNLGRP_IPV6_MROUTE": true,
+ "RTNLGRP_IPV6_PREFIX": true,
+ "RTNLGRP_IPV6_ROUTE": true,
+ "RTNLGRP_IPV6_RULE": true,
+ "RTNLGRP_LINK": true,
+ "RTNLGRP_ND_USEROPT": true,
+ "RTNLGRP_NEIGH": true,
+ "RTNLGRP_NONE": true,
+ "RTNLGRP_NOTIFY": true,
+ "RTNLGRP_TC": true,
+ "RTN_ANYCAST": true,
+ "RTN_BLACKHOLE": true,
+ "RTN_BROADCAST": true,
+ "RTN_LOCAL": true,
+ "RTN_MAX": true,
+ "RTN_MULTICAST": true,
+ "RTN_NAT": true,
+ "RTN_PROHIBIT": true,
+ "RTN_THROW": true,
+ "RTN_UNICAST": true,
+ "RTN_UNREACHABLE": true,
+ "RTN_UNSPEC": true,
+ "RTN_XRESOLVE": true,
+ "RTPROT_BIRD": true,
+ "RTPROT_BOOT": true,
+ "RTPROT_DHCP": true,
+ "RTPROT_DNROUTED": true,
+ "RTPROT_GATED": true,
+ "RTPROT_KERNEL": true,
+ "RTPROT_MRT": true,
+ "RTPROT_NTK": true,
+ "RTPROT_RA": true,
+ "RTPROT_REDIRECT": true,
+ "RTPROT_STATIC": true,
+ "RTPROT_UNSPEC": true,
+ "RTPROT_XORP": true,
+ "RTPROT_ZEBRA": true,
+ "RTV_EXPIRE": true,
+ "RTV_HOPCOUNT": true,
+ "RTV_MTU": true,
+ "RTV_RPIPE": true,
+ "RTV_RTT": true,
+ "RTV_RTTVAR": true,
+ "RTV_SPIPE": true,
+ "RTV_SSTHRESH": true,
+ "RTV_WEIGHT": true,
+ "RT_CACHING_CONTEXT": true,
+ "RT_CLASS_DEFAULT": true,
+ "RT_CLASS_LOCAL": true,
+ "RT_CLASS_MAIN": true,
+ "RT_CLASS_MAX": true,
+ "RT_CLASS_UNSPEC": true,
+ "RT_DEFAULT_FIB": true,
+ "RT_NORTREF": true,
+ "RT_SCOPE_HOST": true,
+ "RT_SCOPE_LINK": true,
+ "RT_SCOPE_NOWHERE": true,
+ "RT_SCOPE_SITE": true,
+ "RT_SCOPE_UNIVERSE": true,
+ "RT_TABLEID_MAX": true,
+ "RT_TABLE_COMPAT": true,
+ "RT_TABLE_DEFAULT": true,
+ "RT_TABLE_LOCAL": true,
+ "RT_TABLE_MAIN": true,
+ "RT_TABLE_MAX": true,
+ "RT_TABLE_UNSPEC": true,
+ "RUSAGE_CHILDREN": true,
+ "RUSAGE_SELF": true,
+ "RUSAGE_THREAD": true,
+ "Radvisory_t": true,
+ "RawConn": true,
+ "RawSockaddr": true,
+ "RawSockaddrAny": true,
+ "RawSockaddrDatalink": true,
+ "RawSockaddrInet4": true,
+ "RawSockaddrInet6": true,
+ "RawSockaddrLinklayer": true,
+ "RawSockaddrNetlink": true,
+ "RawSockaddrUnix": true,
+ "RawSyscall": true,
+ "RawSyscall6": true,
+ "Read": true,
+ "ReadConsole": true,
+ "ReadDirectoryChanges": true,
+ "ReadDirent": true,
+ "ReadFile": true,
+ "Readlink": true,
+ "Reboot": true,
+ "Recvfrom": true,
+ "Recvmsg": true,
+ "RegCloseKey": true,
+ "RegEnumKeyEx": true,
+ "RegOpenKeyEx": true,
+ "RegQueryInfoKey": true,
+ "RegQueryValueEx": true,
+ "RemoveDirectory": true,
+ "Removexattr": true,
+ "Rename": true,
+ "Renameat": true,
+ "Revoke": true,
+ "Rlimit": true,
+ "Rmdir": true,
+ "RouteMessage": true,
+ "RouteRIB": true,
+ "RtAttr": true,
+ "RtGenmsg": true,
+ "RtMetrics": true,
+ "RtMsg": true,
+ "RtMsghdr": true,
+ "RtNexthop": true,
+ "Rusage": true,
+ "SCM_BINTIME": true,
+ "SCM_CREDENTIALS": true,
+ "SCM_CREDS": true,
+ "SCM_RIGHTS": true,
+ "SCM_TIMESTAMP": true,
+ "SCM_TIMESTAMPING": true,
+ "SCM_TIMESTAMPNS": true,
+ "SCM_TIMESTAMP_MONOTONIC": true,
+ "SHUT_RD": true,
+ "SHUT_RDWR": true,
+ "SHUT_WR": true,
+ "SID": true,
+ "SIDAndAttributes": true,
+ "SIGABRT": true,
+ "SIGALRM": true,
+ "SIGBUS": true,
+ "SIGCHLD": true,
+ "SIGCLD": true,
+ "SIGCONT": true,
+ "SIGEMT": true,
+ "SIGFPE": true,
+ "SIGHUP": true,
+ "SIGILL": true,
+ "SIGINFO": true,
+ "SIGINT": true,
+ "SIGIO": true,
+ "SIGIOT": true,
+ "SIGKILL": true,
+ "SIGLIBRT": true,
+ "SIGLWP": true,
+ "SIGPIPE": true,
+ "SIGPOLL": true,
+ "SIGPROF": true,
+ "SIGPWR": true,
+ "SIGQUIT": true,
+ "SIGSEGV": true,
+ "SIGSTKFLT": true,
+ "SIGSTOP": true,
+ "SIGSYS": true,
+ "SIGTERM": true,
+ "SIGTHR": true,
+ "SIGTRAP": true,
+ "SIGTSTP": true,
+ "SIGTTIN": true,
+ "SIGTTOU": true,
+ "SIGUNUSED": true,
+ "SIGURG": true,
+ "SIGUSR1": true,
+ "SIGUSR2": true,
+ "SIGVTALRM": true,
+ "SIGWINCH": true,
+ "SIGXCPU": true,
+ "SIGXFSZ": true,
+ "SIOCADDDLCI": true,
+ "SIOCADDMULTI": true,
+ "SIOCADDRT": true,
+ "SIOCAIFADDR": true,
+ "SIOCAIFGROUP": true,
+ "SIOCALIFADDR": true,
+ "SIOCARPIPLL": true,
+ "SIOCATMARK": true,
+ "SIOCAUTOADDR": true,
+ "SIOCAUTONETMASK": true,
+ "SIOCBRDGADD": true,
+ "SIOCBRDGADDS": true,
+ "SIOCBRDGARL": true,
+ "SIOCBRDGDADDR": true,
+ "SIOCBRDGDEL": true,
+ "SIOCBRDGDELS": true,
+ "SIOCBRDGFLUSH": true,
+ "SIOCBRDGFRL": true,
+ "SIOCBRDGGCACHE": true,
+ "SIOCBRDGGFD": true,
+ "SIOCBRDGGHT": true,
+ "SIOCBRDGGIFFLGS": true,
+ "SIOCBRDGGMA": true,
+ "SIOCBRDGGPARAM": true,
+ "SIOCBRDGGPRI": true,
+ "SIOCBRDGGRL": true,
+ "SIOCBRDGGSIFS": true,
+ "SIOCBRDGGTO": true,
+ "SIOCBRDGIFS": true,
+ "SIOCBRDGRTS": true,
+ "SIOCBRDGSADDR": true,
+ "SIOCBRDGSCACHE": true,
+ "SIOCBRDGSFD": true,
+ "SIOCBRDGSHT": true,
+ "SIOCBRDGSIFCOST": true,
+ "SIOCBRDGSIFFLGS": true,
+ "SIOCBRDGSIFPRIO": true,
+ "SIOCBRDGSMA": true,
+ "SIOCBRDGSPRI": true,
+ "SIOCBRDGSPROTO": true,
+ "SIOCBRDGSTO": true,
+ "SIOCBRDGSTXHC": true,
+ "SIOCDARP": true,
+ "SIOCDELDLCI": true,
+ "SIOCDELMULTI": true,
+ "SIOCDELRT": true,
+ "SIOCDEVPRIVATE": true,
+ "SIOCDIFADDR": true,
+ "SIOCDIFGROUP": true,
+ "SIOCDIFPHYADDR": true,
+ "SIOCDLIFADDR": true,
+ "SIOCDRARP": true,
+ "SIOCGARP": true,
+ "SIOCGDRVSPEC": true,
+ "SIOCGETKALIVE": true,
+ "SIOCGETLABEL": true,
+ "SIOCGETPFLOW": true,
+ "SIOCGETPFSYNC": true,
+ "SIOCGETSGCNT": true,
+ "SIOCGETVIFCNT": true,
+ "SIOCGETVLAN": true,
+ "SIOCGHIWAT": true,
+ "SIOCGIFADDR": true,
+ "SIOCGIFADDRPREF": true,
+ "SIOCGIFALIAS": true,
+ "SIOCGIFALTMTU": true,
+ "SIOCGIFASYNCMAP": true,
+ "SIOCGIFBOND": true,
+ "SIOCGIFBR": true,
+ "SIOCGIFBRDADDR": true,
+ "SIOCGIFCAP": true,
+ "SIOCGIFCONF": true,
+ "SIOCGIFCOUNT": true,
+ "SIOCGIFDATA": true,
+ "SIOCGIFDESCR": true,
+ "SIOCGIFDEVMTU": true,
+ "SIOCGIFDLT": true,
+ "SIOCGIFDSTADDR": true,
+ "SIOCGIFENCAP": true,
+ "SIOCGIFFIB": true,
+ "SIOCGIFFLAGS": true,
+ "SIOCGIFGATTR": true,
+ "SIOCGIFGENERIC": true,
+ "SIOCGIFGMEMB": true,
+ "SIOCGIFGROUP": true,
+ "SIOCGIFHARDMTU": true,
+ "SIOCGIFHWADDR": true,
+ "SIOCGIFINDEX": true,
+ "SIOCGIFKPI": true,
+ "SIOCGIFMAC": true,
+ "SIOCGIFMAP": true,
+ "SIOCGIFMEDIA": true,
+ "SIOCGIFMEM": true,
+ "SIOCGIFMETRIC": true,
+ "SIOCGIFMTU": true,
+ "SIOCGIFNAME": true,
+ "SIOCGIFNETMASK": true,
+ "SIOCGIFPDSTADDR": true,
+ "SIOCGIFPFLAGS": true,
+ "SIOCGIFPHYS": true,
+ "SIOCGIFPRIORITY": true,
+ "SIOCGIFPSRCADDR": true,
+ "SIOCGIFRDOMAIN": true,
+ "SIOCGIFRTLABEL": true,
+ "SIOCGIFSLAVE": true,
+ "SIOCGIFSTATUS": true,
+ "SIOCGIFTIMESLOT": true,
+ "SIOCGIFTXQLEN": true,
+ "SIOCGIFVLAN": true,
+ "SIOCGIFWAKEFLAGS": true,
+ "SIOCGIFXFLAGS": true,
+ "SIOCGLIFADDR": true,
+ "SIOCGLIFPHYADDR": true,
+ "SIOCGLIFPHYRTABLE": true,
+ "SIOCGLIFPHYTTL": true,
+ "SIOCGLINKSTR": true,
+ "SIOCGLOWAT": true,
+ "SIOCGPGRP": true,
+ "SIOCGPRIVATE_0": true,
+ "SIOCGPRIVATE_1": true,
+ "SIOCGRARP": true,
+ "SIOCGSPPPPARAMS": true,
+ "SIOCGSTAMP": true,
+ "SIOCGSTAMPNS": true,
+ "SIOCGVH": true,
+ "SIOCGVNETID": true,
+ "SIOCIFCREATE": true,
+ "SIOCIFCREATE2": true,
+ "SIOCIFDESTROY": true,
+ "SIOCIFGCLONERS": true,
+ "SIOCINITIFADDR": true,
+ "SIOCPROTOPRIVATE": true,
+ "SIOCRSLVMULTI": true,
+ "SIOCRTMSG": true,
+ "SIOCSARP": true,
+ "SIOCSDRVSPEC": true,
+ "SIOCSETKALIVE": true,
+ "SIOCSETLABEL": true,
+ "SIOCSETPFLOW": true,
+ "SIOCSETPFSYNC": true,
+ "SIOCSETVLAN": true,
+ "SIOCSHIWAT": true,
+ "SIOCSIFADDR": true,
+ "SIOCSIFADDRPREF": true,
+ "SIOCSIFALTMTU": true,
+ "SIOCSIFASYNCMAP": true,
+ "SIOCSIFBOND": true,
+ "SIOCSIFBR": true,
+ "SIOCSIFBRDADDR": true,
+ "SIOCSIFCAP": true,
+ "SIOCSIFDESCR": true,
+ "SIOCSIFDSTADDR": true,
+ "SIOCSIFENCAP": true,
+ "SIOCSIFFIB": true,
+ "SIOCSIFFLAGS": true,
+ "SIOCSIFGATTR": true,
+ "SIOCSIFGENERIC": true,
+ "SIOCSIFHWADDR": true,
+ "SIOCSIFHWBROADCAST": true,
+ "SIOCSIFKPI": true,
+ "SIOCSIFLINK": true,
+ "SIOCSIFLLADDR": true,
+ "SIOCSIFMAC": true,
+ "SIOCSIFMAP": true,
+ "SIOCSIFMEDIA": true,
+ "SIOCSIFMEM": true,
+ "SIOCSIFMETRIC": true,
+ "SIOCSIFMTU": true,
+ "SIOCSIFNAME": true,
+ "SIOCSIFNETMASK": true,
+ "SIOCSIFPFLAGS": true,
+ "SIOCSIFPHYADDR": true,
+ "SIOCSIFPHYS": true,
+ "SIOCSIFPRIORITY": true,
+ "SIOCSIFRDOMAIN": true,
+ "SIOCSIFRTLABEL": true,
+ "SIOCSIFRVNET": true,
+ "SIOCSIFSLAVE": true,
+ "SIOCSIFTIMESLOT": true,
+ "SIOCSIFTXQLEN": true,
+ "SIOCSIFVLAN": true,
+ "SIOCSIFVNET": true,
+ "SIOCSIFXFLAGS": true,
+ "SIOCSLIFPHYADDR": true,
+ "SIOCSLIFPHYRTABLE": true,
+ "SIOCSLIFPHYTTL": true,
+ "SIOCSLINKSTR": true,
+ "SIOCSLOWAT": true,
+ "SIOCSPGRP": true,
+ "SIOCSRARP": true,
+ "SIOCSSPPPPARAMS": true,
+ "SIOCSVH": true,
+ "SIOCSVNETID": true,
+ "SIOCZIFDATA": true,
+ "SIO_GET_EXTENSION_FUNCTION_POINTER": true,
+ "SIO_GET_INTERFACE_LIST": true,
+ "SIO_KEEPALIVE_VALS": true,
+ "SIO_UDP_CONNRESET": true,
+ "SOCK_CLOEXEC": true,
+ "SOCK_DCCP": true,
+ "SOCK_DGRAM": true,
+ "SOCK_FLAGS_MASK": true,
+ "SOCK_MAXADDRLEN": true,
+ "SOCK_NONBLOCK": true,
+ "SOCK_NOSIGPIPE": true,
+ "SOCK_PACKET": true,
+ "SOCK_RAW": true,
+ "SOCK_RDM": true,
+ "SOCK_SEQPACKET": true,
+ "SOCK_STREAM": true,
+ "SOL_AAL": true,
+ "SOL_ATM": true,
+ "SOL_DECNET": true,
+ "SOL_ICMPV6": true,
+ "SOL_IP": true,
+ "SOL_IPV6": true,
+ "SOL_IRDA": true,
+ "SOL_PACKET": true,
+ "SOL_RAW": true,
+ "SOL_SOCKET": true,
+ "SOL_TCP": true,
+ "SOL_X25": true,
+ "SOMAXCONN": true,
+ "SO_ACCEPTCONN": true,
+ "SO_ACCEPTFILTER": true,
+ "SO_ATTACH_FILTER": true,
+ "SO_BINDANY": true,
+ "SO_BINDTODEVICE": true,
+ "SO_BINTIME": true,
+ "SO_BROADCAST": true,
+ "SO_BSDCOMPAT": true,
+ "SO_DEBUG": true,
+ "SO_DETACH_FILTER": true,
+ "SO_DOMAIN": true,
+ "SO_DONTROUTE": true,
+ "SO_DONTTRUNC": true,
+ "SO_ERROR": true,
+ "SO_KEEPALIVE": true,
+ "SO_LABEL": true,
+ "SO_LINGER": true,
+ "SO_LINGER_SEC": true,
+ "SO_LISTENINCQLEN": true,
+ "SO_LISTENQLEN": true,
+ "SO_LISTENQLIMIT": true,
+ "SO_MARK": true,
+ "SO_NETPROC": true,
+ "SO_NKE": true,
+ "SO_NOADDRERR": true,
+ "SO_NOHEADER": true,
+ "SO_NOSIGPIPE": true,
+ "SO_NOTIFYCONFLICT": true,
+ "SO_NO_CHECK": true,
+ "SO_NO_DDP": true,
+ "SO_NO_OFFLOAD": true,
+ "SO_NP_EXTENSIONS": true,
+ "SO_NREAD": true,
+ "SO_NWRITE": true,
+ "SO_OOBINLINE": true,
+ "SO_OVERFLOWED": true,
+ "SO_PASSCRED": true,
+ "SO_PASSSEC": true,
+ "SO_PEERCRED": true,
+ "SO_PEERLABEL": true,
+ "SO_PEERNAME": true,
+ "SO_PEERSEC": true,
+ "SO_PRIORITY": true,
+ "SO_PROTOCOL": true,
+ "SO_PROTOTYPE": true,
+ "SO_RANDOMPORT": true,
+ "SO_RCVBUF": true,
+ "SO_RCVBUFFORCE": true,
+ "SO_RCVLOWAT": true,
+ "SO_RCVTIMEO": true,
+ "SO_RESTRICTIONS": true,
+ "SO_RESTRICT_DENYIN": true,
+ "SO_RESTRICT_DENYOUT": true,
+ "SO_RESTRICT_DENYSET": true,
+ "SO_REUSEADDR": true,
+ "SO_REUSEPORT": true,
+ "SO_REUSESHAREUID": true,
+ "SO_RTABLE": true,
+ "SO_RXQ_OVFL": true,
+ "SO_SECURITY_AUTHENTICATION": true,
+ "SO_SECURITY_ENCRYPTION_NETWORK": true,
+ "SO_SECURITY_ENCRYPTION_TRANSPORT": true,
+ "SO_SETFIB": true,
+ "SO_SNDBUF": true,
+ "SO_SNDBUFFORCE": true,
+ "SO_SNDLOWAT": true,
+ "SO_SNDTIMEO": true,
+ "SO_SPLICE": true,
+ "SO_TIMESTAMP": true,
+ "SO_TIMESTAMPING": true,
+ "SO_TIMESTAMPNS": true,
+ "SO_TIMESTAMP_MONOTONIC": true,
+ "SO_TYPE": true,
+ "SO_UPCALLCLOSEWAIT": true,
+ "SO_UPDATE_ACCEPT_CONTEXT": true,
+ "SO_UPDATE_CONNECT_CONTEXT": true,
+ "SO_USELOOPBACK": true,
+ "SO_USER_COOKIE": true,
+ "SO_VENDOR": true,
+ "SO_WANTMORE": true,
+ "SO_WANTOOBFLAG": true,
+ "SSLExtraCertChainPolicyPara": true,
+ "STANDARD_RIGHTS_ALL": true,
+ "STANDARD_RIGHTS_EXECUTE": true,
+ "STANDARD_RIGHTS_READ": true,
+ "STANDARD_RIGHTS_REQUIRED": true,
+ "STANDARD_RIGHTS_WRITE": true,
+ "STARTF_USESHOWWINDOW": true,
+ "STARTF_USESTDHANDLES": true,
+ "STD_ERROR_HANDLE": true,
+ "STD_INPUT_HANDLE": true,
+ "STD_OUTPUT_HANDLE": true,
+ "SUBLANG_ENGLISH_US": true,
+ "SW_FORCEMINIMIZE": true,
+ "SW_HIDE": true,
+ "SW_MAXIMIZE": true,
+ "SW_MINIMIZE": true,
+ "SW_NORMAL": true,
+ "SW_RESTORE": true,
+ "SW_SHOW": true,
+ "SW_SHOWDEFAULT": true,
+ "SW_SHOWMAXIMIZED": true,
+ "SW_SHOWMINIMIZED": true,
+ "SW_SHOWMINNOACTIVE": true,
+ "SW_SHOWNA": true,
+ "SW_SHOWNOACTIVATE": true,
+ "SW_SHOWNORMAL": true,
+ "SYMBOLIC_LINK_FLAG_DIRECTORY": true,
+ "SYNCHRONIZE": true,
+ "SYSCTL_VERSION": true,
+ "SYSCTL_VERS_0": true,
+ "SYSCTL_VERS_1": true,
+ "SYSCTL_VERS_MASK": true,
+ "SYS_ABORT2": true,
+ "SYS_ACCEPT": true,
+ "SYS_ACCEPT4": true,
+ "SYS_ACCEPT_NOCANCEL": true,
+ "SYS_ACCESS": true,
+ "SYS_ACCESS_EXTENDED": true,
+ "SYS_ACCT": true,
+ "SYS_ADD_KEY": true,
+ "SYS_ADD_PROFIL": true,
+ "SYS_ADJFREQ": true,
+ "SYS_ADJTIME": true,
+ "SYS_ADJTIMEX": true,
+ "SYS_AFS_SYSCALL": true,
+ "SYS_AIO_CANCEL": true,
+ "SYS_AIO_ERROR": true,
+ "SYS_AIO_FSYNC": true,
+ "SYS_AIO_READ": true,
+ "SYS_AIO_RETURN": true,
+ "SYS_AIO_SUSPEND": true,
+ "SYS_AIO_SUSPEND_NOCANCEL": true,
+ "SYS_AIO_WRITE": true,
+ "SYS_ALARM": true,
+ "SYS_ARCH_PRCTL": true,
+ "SYS_ARM_FADVISE64_64": true,
+ "SYS_ARM_SYNC_FILE_RANGE": true,
+ "SYS_ATGETMSG": true,
+ "SYS_ATPGETREQ": true,
+ "SYS_ATPGETRSP": true,
+ "SYS_ATPSNDREQ": true,
+ "SYS_ATPSNDRSP": true,
+ "SYS_ATPUTMSG": true,
+ "SYS_ATSOCKET": true,
+ "SYS_AUDIT": true,
+ "SYS_AUDITCTL": true,
+ "SYS_AUDITON": true,
+ "SYS_AUDIT_SESSION_JOIN": true,
+ "SYS_AUDIT_SESSION_PORT": true,
+ "SYS_AUDIT_SESSION_SELF": true,
+ "SYS_BDFLUSH": true,
+ "SYS_BIND": true,
+ "SYS_BINDAT": true,
+ "SYS_BREAK": true,
+ "SYS_BRK": true,
+ "SYS_BSDTHREAD_CREATE": true,
+ "SYS_BSDTHREAD_REGISTER": true,
+ "SYS_BSDTHREAD_TERMINATE": true,
+ "SYS_CAPGET": true,
+ "SYS_CAPSET": true,
+ "SYS_CAP_ENTER": true,
+ "SYS_CAP_FCNTLS_GET": true,
+ "SYS_CAP_FCNTLS_LIMIT": true,
+ "SYS_CAP_GETMODE": true,
+ "SYS_CAP_GETRIGHTS": true,
+ "SYS_CAP_IOCTLS_GET": true,
+ "SYS_CAP_IOCTLS_LIMIT": true,
+ "SYS_CAP_NEW": true,
+ "SYS_CAP_RIGHTS_GET": true,
+ "SYS_CAP_RIGHTS_LIMIT": true,
+ "SYS_CHDIR": true,
+ "SYS_CHFLAGS": true,
+ "SYS_CHFLAGSAT": true,
+ "SYS_CHMOD": true,
+ "SYS_CHMOD_EXTENDED": true,
+ "SYS_CHOWN": true,
+ "SYS_CHOWN32": true,
+ "SYS_CHROOT": true,
+ "SYS_CHUD": true,
+ "SYS_CLOCK_ADJTIME": true,
+ "SYS_CLOCK_GETCPUCLOCKID2": true,
+ "SYS_CLOCK_GETRES": true,
+ "SYS_CLOCK_GETTIME": true,
+ "SYS_CLOCK_NANOSLEEP": true,
+ "SYS_CLOCK_SETTIME": true,
+ "SYS_CLONE": true,
+ "SYS_CLOSE": true,
+ "SYS_CLOSEFROM": true,
+ "SYS_CLOSE_NOCANCEL": true,
+ "SYS_CONNECT": true,
+ "SYS_CONNECTAT": true,
+ "SYS_CONNECT_NOCANCEL": true,
+ "SYS_COPYFILE": true,
+ "SYS_CPUSET": true,
+ "SYS_CPUSET_GETAFFINITY": true,
+ "SYS_CPUSET_GETID": true,
+ "SYS_CPUSET_SETAFFINITY": true,
+ "SYS_CPUSET_SETID": true,
+ "SYS_CREAT": true,
+ "SYS_CREATE_MODULE": true,
+ "SYS_CSOPS": true,
+ "SYS_DELETE": true,
+ "SYS_DELETE_MODULE": true,
+ "SYS_DUP": true,
+ "SYS_DUP2": true,
+ "SYS_DUP3": true,
+ "SYS_EACCESS": true,
+ "SYS_EPOLL_CREATE": true,
+ "SYS_EPOLL_CREATE1": true,
+ "SYS_EPOLL_CTL": true,
+ "SYS_EPOLL_CTL_OLD": true,
+ "SYS_EPOLL_PWAIT": true,
+ "SYS_EPOLL_WAIT": true,
+ "SYS_EPOLL_WAIT_OLD": true,
+ "SYS_EVENTFD": true,
+ "SYS_EVENTFD2": true,
+ "SYS_EXCHANGEDATA": true,
+ "SYS_EXECVE": true,
+ "SYS_EXIT": true,
+ "SYS_EXIT_GROUP": true,
+ "SYS_EXTATTRCTL": true,
+ "SYS_EXTATTR_DELETE_FD": true,
+ "SYS_EXTATTR_DELETE_FILE": true,
+ "SYS_EXTATTR_DELETE_LINK": true,
+ "SYS_EXTATTR_GET_FD": true,
+ "SYS_EXTATTR_GET_FILE": true,
+ "SYS_EXTATTR_GET_LINK": true,
+ "SYS_EXTATTR_LIST_FD": true,
+ "SYS_EXTATTR_LIST_FILE": true,
+ "SYS_EXTATTR_LIST_LINK": true,
+ "SYS_EXTATTR_SET_FD": true,
+ "SYS_EXTATTR_SET_FILE": true,
+ "SYS_EXTATTR_SET_LINK": true,
+ "SYS_FACCESSAT": true,
+ "SYS_FADVISE64": true,
+ "SYS_FADVISE64_64": true,
+ "SYS_FALLOCATE": true,
+ "SYS_FANOTIFY_INIT": true,
+ "SYS_FANOTIFY_MARK": true,
+ "SYS_FCHDIR": true,
+ "SYS_FCHFLAGS": true,
+ "SYS_FCHMOD": true,
+ "SYS_FCHMODAT": true,
+ "SYS_FCHMOD_EXTENDED": true,
+ "SYS_FCHOWN": true,
+ "SYS_FCHOWN32": true,
+ "SYS_FCHOWNAT": true,
+ "SYS_FCHROOT": true,
+ "SYS_FCNTL": true,
+ "SYS_FCNTL64": true,
+ "SYS_FCNTL_NOCANCEL": true,
+ "SYS_FDATASYNC": true,
+ "SYS_FEXECVE": true,
+ "SYS_FFCLOCK_GETCOUNTER": true,
+ "SYS_FFCLOCK_GETESTIMATE": true,
+ "SYS_FFCLOCK_SETESTIMATE": true,
+ "SYS_FFSCTL": true,
+ "SYS_FGETATTRLIST": true,
+ "SYS_FGETXATTR": true,
+ "SYS_FHOPEN": true,
+ "SYS_FHSTAT": true,
+ "SYS_FHSTATFS": true,
+ "SYS_FILEPORT_MAKEFD": true,
+ "SYS_FILEPORT_MAKEPORT": true,
+ "SYS_FKTRACE": true,
+ "SYS_FLISTXATTR": true,
+ "SYS_FLOCK": true,
+ "SYS_FORK": true,
+ "SYS_FPATHCONF": true,
+ "SYS_FREEBSD6_FTRUNCATE": true,
+ "SYS_FREEBSD6_LSEEK": true,
+ "SYS_FREEBSD6_MMAP": true,
+ "SYS_FREEBSD6_PREAD": true,
+ "SYS_FREEBSD6_PWRITE": true,
+ "SYS_FREEBSD6_TRUNCATE": true,
+ "SYS_FREMOVEXATTR": true,
+ "SYS_FSCTL": true,
+ "SYS_FSETATTRLIST": true,
+ "SYS_FSETXATTR": true,
+ "SYS_FSGETPATH": true,
+ "SYS_FSTAT": true,
+ "SYS_FSTAT64": true,
+ "SYS_FSTAT64_EXTENDED": true,
+ "SYS_FSTATAT": true,
+ "SYS_FSTATAT64": true,
+ "SYS_FSTATFS": true,
+ "SYS_FSTATFS64": true,
+ "SYS_FSTATV": true,
+ "SYS_FSTATVFS1": true,
+ "SYS_FSTAT_EXTENDED": true,
+ "SYS_FSYNC": true,
+ "SYS_FSYNC_NOCANCEL": true,
+ "SYS_FSYNC_RANGE": true,
+ "SYS_FTIME": true,
+ "SYS_FTRUNCATE": true,
+ "SYS_FTRUNCATE64": true,
+ "SYS_FUTEX": true,
+ "SYS_FUTIMENS": true,
+ "SYS_FUTIMES": true,
+ "SYS_FUTIMESAT": true,
+ "SYS_GETATTRLIST": true,
+ "SYS_GETAUDIT": true,
+ "SYS_GETAUDIT_ADDR": true,
+ "SYS_GETAUID": true,
+ "SYS_GETCONTEXT": true,
+ "SYS_GETCPU": true,
+ "SYS_GETCWD": true,
+ "SYS_GETDENTS": true,
+ "SYS_GETDENTS64": true,
+ "SYS_GETDIRENTRIES": true,
+ "SYS_GETDIRENTRIES64": true,
+ "SYS_GETDIRENTRIESATTR": true,
+ "SYS_GETDTABLECOUNT": true,
+ "SYS_GETDTABLESIZE": true,
+ "SYS_GETEGID": true,
+ "SYS_GETEGID32": true,
+ "SYS_GETEUID": true,
+ "SYS_GETEUID32": true,
+ "SYS_GETFH": true,
+ "SYS_GETFSSTAT": true,
+ "SYS_GETFSSTAT64": true,
+ "SYS_GETGID": true,
+ "SYS_GETGID32": true,
+ "SYS_GETGROUPS": true,
+ "SYS_GETGROUPS32": true,
+ "SYS_GETHOSTUUID": true,
+ "SYS_GETITIMER": true,
+ "SYS_GETLCID": true,
+ "SYS_GETLOGIN": true,
+ "SYS_GETLOGINCLASS": true,
+ "SYS_GETPEERNAME": true,
+ "SYS_GETPGID": true,
+ "SYS_GETPGRP": true,
+ "SYS_GETPID": true,
+ "SYS_GETPMSG": true,
+ "SYS_GETPPID": true,
+ "SYS_GETPRIORITY": true,
+ "SYS_GETRESGID": true,
+ "SYS_GETRESGID32": true,
+ "SYS_GETRESUID": true,
+ "SYS_GETRESUID32": true,
+ "SYS_GETRLIMIT": true,
+ "SYS_GETRTABLE": true,
+ "SYS_GETRUSAGE": true,
+ "SYS_GETSGROUPS": true,
+ "SYS_GETSID": true,
+ "SYS_GETSOCKNAME": true,
+ "SYS_GETSOCKOPT": true,
+ "SYS_GETTHRID": true,
+ "SYS_GETTID": true,
+ "SYS_GETTIMEOFDAY": true,
+ "SYS_GETUID": true,
+ "SYS_GETUID32": true,
+ "SYS_GETVFSSTAT": true,
+ "SYS_GETWGROUPS": true,
+ "SYS_GETXATTR": true,
+ "SYS_GET_KERNEL_SYMS": true,
+ "SYS_GET_MEMPOLICY": true,
+ "SYS_GET_ROBUST_LIST": true,
+ "SYS_GET_THREAD_AREA": true,
+ "SYS_GTTY": true,
+ "SYS_IDENTITYSVC": true,
+ "SYS_IDLE": true,
+ "SYS_INITGROUPS": true,
+ "SYS_INIT_MODULE": true,
+ "SYS_INOTIFY_ADD_WATCH": true,
+ "SYS_INOTIFY_INIT": true,
+ "SYS_INOTIFY_INIT1": true,
+ "SYS_INOTIFY_RM_WATCH": true,
+ "SYS_IOCTL": true,
+ "SYS_IOPERM": true,
+ "SYS_IOPL": true,
+ "SYS_IOPOLICYSYS": true,
+ "SYS_IOPRIO_GET": true,
+ "SYS_IOPRIO_SET": true,
+ "SYS_IO_CANCEL": true,
+ "SYS_IO_DESTROY": true,
+ "SYS_IO_GETEVENTS": true,
+ "SYS_IO_SETUP": true,
+ "SYS_IO_SUBMIT": true,
+ "SYS_IPC": true,
+ "SYS_ISSETUGID": true,
+ "SYS_JAIL": true,
+ "SYS_JAIL_ATTACH": true,
+ "SYS_JAIL_GET": true,
+ "SYS_JAIL_REMOVE": true,
+ "SYS_JAIL_SET": true,
+ "SYS_KDEBUG_TRACE": true,
+ "SYS_KENV": true,
+ "SYS_KEVENT": true,
+ "SYS_KEVENT64": true,
+ "SYS_KEXEC_LOAD": true,
+ "SYS_KEYCTL": true,
+ "SYS_KILL": true,
+ "SYS_KLDFIND": true,
+ "SYS_KLDFIRSTMOD": true,
+ "SYS_KLDLOAD": true,
+ "SYS_KLDNEXT": true,
+ "SYS_KLDSTAT": true,
+ "SYS_KLDSYM": true,
+ "SYS_KLDUNLOAD": true,
+ "SYS_KLDUNLOADF": true,
+ "SYS_KQUEUE": true,
+ "SYS_KQUEUE1": true,
+ "SYS_KTIMER_CREATE": true,
+ "SYS_KTIMER_DELETE": true,
+ "SYS_KTIMER_GETOVERRUN": true,
+ "SYS_KTIMER_GETTIME": true,
+ "SYS_KTIMER_SETTIME": true,
+ "SYS_KTRACE": true,
+ "SYS_LCHFLAGS": true,
+ "SYS_LCHMOD": true,
+ "SYS_LCHOWN": true,
+ "SYS_LCHOWN32": true,
+ "SYS_LGETFH": true,
+ "SYS_LGETXATTR": true,
+ "SYS_LINK": true,
+ "SYS_LINKAT": true,
+ "SYS_LIO_LISTIO": true,
+ "SYS_LISTEN": true,
+ "SYS_LISTXATTR": true,
+ "SYS_LLISTXATTR": true,
+ "SYS_LOCK": true,
+ "SYS_LOOKUP_DCOOKIE": true,
+ "SYS_LPATHCONF": true,
+ "SYS_LREMOVEXATTR": true,
+ "SYS_LSEEK": true,
+ "SYS_LSETXATTR": true,
+ "SYS_LSTAT": true,
+ "SYS_LSTAT64": true,
+ "SYS_LSTAT64_EXTENDED": true,
+ "SYS_LSTATV": true,
+ "SYS_LSTAT_EXTENDED": true,
+ "SYS_LUTIMES": true,
+ "SYS_MAC_SYSCALL": true,
+ "SYS_MADVISE": true,
+ "SYS_MADVISE1": true,
+ "SYS_MAXSYSCALL": true,
+ "SYS_MBIND": true,
+ "SYS_MIGRATE_PAGES": true,
+ "SYS_MINCORE": true,
+ "SYS_MINHERIT": true,
+ "SYS_MKCOMPLEX": true,
+ "SYS_MKDIR": true,
+ "SYS_MKDIRAT": true,
+ "SYS_MKDIR_EXTENDED": true,
+ "SYS_MKFIFO": true,
+ "SYS_MKFIFOAT": true,
+ "SYS_MKFIFO_EXTENDED": true,
+ "SYS_MKNOD": true,
+ "SYS_MKNODAT": true,
+ "SYS_MLOCK": true,
+ "SYS_MLOCKALL": true,
+ "SYS_MMAP": true,
+ "SYS_MMAP2": true,
+ "SYS_MODCTL": true,
+ "SYS_MODFIND": true,
+ "SYS_MODFNEXT": true,
+ "SYS_MODIFY_LDT": true,
+ "SYS_MODNEXT": true,
+ "SYS_MODSTAT": true,
+ "SYS_MODWATCH": true,
+ "SYS_MOUNT": true,
+ "SYS_MOVE_PAGES": true,
+ "SYS_MPROTECT": true,
+ "SYS_MPX": true,
+ "SYS_MQUERY": true,
+ "SYS_MQ_GETSETATTR": true,
+ "SYS_MQ_NOTIFY": true,
+ "SYS_MQ_OPEN": true,
+ "SYS_MQ_TIMEDRECEIVE": true,
+ "SYS_MQ_TIMEDSEND": true,
+ "SYS_MQ_UNLINK": true,
+ "SYS_MREMAP": true,
+ "SYS_MSGCTL": true,
+ "SYS_MSGGET": true,
+ "SYS_MSGRCV": true,
+ "SYS_MSGRCV_NOCANCEL": true,
+ "SYS_MSGSND": true,
+ "SYS_MSGSND_NOCANCEL": true,
+ "SYS_MSGSYS": true,
+ "SYS_MSYNC": true,
+ "SYS_MSYNC_NOCANCEL": true,
+ "SYS_MUNLOCK": true,
+ "SYS_MUNLOCKALL": true,
+ "SYS_MUNMAP": true,
+ "SYS_NAME_TO_HANDLE_AT": true,
+ "SYS_NANOSLEEP": true,
+ "SYS_NEWFSTATAT": true,
+ "SYS_NFSCLNT": true,
+ "SYS_NFSSERVCTL": true,
+ "SYS_NFSSVC": true,
+ "SYS_NFSTAT": true,
+ "SYS_NICE": true,
+ "SYS_NLSTAT": true,
+ "SYS_NMOUNT": true,
+ "SYS_NSTAT": true,
+ "SYS_NTP_ADJTIME": true,
+ "SYS_NTP_GETTIME": true,
+ "SYS_OABI_SYSCALL_BASE": true,
+ "SYS_OBREAK": true,
+ "SYS_OLDFSTAT": true,
+ "SYS_OLDLSTAT": true,
+ "SYS_OLDOLDUNAME": true,
+ "SYS_OLDSTAT": true,
+ "SYS_OLDUNAME": true,
+ "SYS_OPEN": true,
+ "SYS_OPENAT": true,
+ "SYS_OPENBSD_POLL": true,
+ "SYS_OPEN_BY_HANDLE_AT": true,
+ "SYS_OPEN_EXTENDED": true,
+ "SYS_OPEN_NOCANCEL": true,
+ "SYS_OVADVISE": true,
+ "SYS_PACCEPT": true,
+ "SYS_PATHCONF": true,
+ "SYS_PAUSE": true,
+ "SYS_PCICONFIG_IOBASE": true,
+ "SYS_PCICONFIG_READ": true,
+ "SYS_PCICONFIG_WRITE": true,
+ "SYS_PDFORK": true,
+ "SYS_PDGETPID": true,
+ "SYS_PDKILL": true,
+ "SYS_PERF_EVENT_OPEN": true,
+ "SYS_PERSONALITY": true,
+ "SYS_PID_HIBERNATE": true,
+ "SYS_PID_RESUME": true,
+ "SYS_PID_SHUTDOWN_SOCKETS": true,
+ "SYS_PID_SUSPEND": true,
+ "SYS_PIPE": true,
+ "SYS_PIPE2": true,
+ "SYS_PIVOT_ROOT": true,
+ "SYS_PMC_CONTROL": true,
+ "SYS_PMC_GET_INFO": true,
+ "SYS_POLL": true,
+ "SYS_POLLTS": true,
+ "SYS_POLL_NOCANCEL": true,
+ "SYS_POSIX_FADVISE": true,
+ "SYS_POSIX_FALLOCATE": true,
+ "SYS_POSIX_OPENPT": true,
+ "SYS_POSIX_SPAWN": true,
+ "SYS_PPOLL": true,
+ "SYS_PRCTL": true,
+ "SYS_PREAD": true,
+ "SYS_PREAD64": true,
+ "SYS_PREADV": true,
+ "SYS_PREAD_NOCANCEL": true,
+ "SYS_PRLIMIT64": true,
+ "SYS_PROCCTL": true,
+ "SYS_PROCESS_POLICY": true,
+ "SYS_PROCESS_VM_READV": true,
+ "SYS_PROCESS_VM_WRITEV": true,
+ "SYS_PROC_INFO": true,
+ "SYS_PROF": true,
+ "SYS_PROFIL": true,
+ "SYS_PSELECT": true,
+ "SYS_PSELECT6": true,
+ "SYS_PSET_ASSIGN": true,
+ "SYS_PSET_CREATE": true,
+ "SYS_PSET_DESTROY": true,
+ "SYS_PSYNCH_CVBROAD": true,
+ "SYS_PSYNCH_CVCLRPREPOST": true,
+ "SYS_PSYNCH_CVSIGNAL": true,
+ "SYS_PSYNCH_CVWAIT": true,
+ "SYS_PSYNCH_MUTEXDROP": true,
+ "SYS_PSYNCH_MUTEXWAIT": true,
+ "SYS_PSYNCH_RW_DOWNGRADE": true,
+ "SYS_PSYNCH_RW_LONGRDLOCK": true,
+ "SYS_PSYNCH_RW_RDLOCK": true,
+ "SYS_PSYNCH_RW_UNLOCK": true,
+ "SYS_PSYNCH_RW_UNLOCK2": true,
+ "SYS_PSYNCH_RW_UPGRADE": true,
+ "SYS_PSYNCH_RW_WRLOCK": true,
+ "SYS_PSYNCH_RW_YIELDWRLOCK": true,
+ "SYS_PTRACE": true,
+ "SYS_PUTPMSG": true,
+ "SYS_PWRITE": true,
+ "SYS_PWRITE64": true,
+ "SYS_PWRITEV": true,
+ "SYS_PWRITE_NOCANCEL": true,
+ "SYS_QUERY_MODULE": true,
+ "SYS_QUOTACTL": true,
+ "SYS_RASCTL": true,
+ "SYS_RCTL_ADD_RULE": true,
+ "SYS_RCTL_GET_LIMITS": true,
+ "SYS_RCTL_GET_RACCT": true,
+ "SYS_RCTL_GET_RULES": true,
+ "SYS_RCTL_REMOVE_RULE": true,
+ "SYS_READ": true,
+ "SYS_READAHEAD": true,
+ "SYS_READDIR": true,
+ "SYS_READLINK": true,
+ "SYS_READLINKAT": true,
+ "SYS_READV": true,
+ "SYS_READV_NOCANCEL": true,
+ "SYS_READ_NOCANCEL": true,
+ "SYS_REBOOT": true,
+ "SYS_RECV": true,
+ "SYS_RECVFROM": true,
+ "SYS_RECVFROM_NOCANCEL": true,
+ "SYS_RECVMMSG": true,
+ "SYS_RECVMSG": true,
+ "SYS_RECVMSG_NOCANCEL": true,
+ "SYS_REMAP_FILE_PAGES": true,
+ "SYS_REMOVEXATTR": true,
+ "SYS_RENAME": true,
+ "SYS_RENAMEAT": true,
+ "SYS_REQUEST_KEY": true,
+ "SYS_RESTART_SYSCALL": true,
+ "SYS_REVOKE": true,
+ "SYS_RFORK": true,
+ "SYS_RMDIR": true,
+ "SYS_RTPRIO": true,
+ "SYS_RTPRIO_THREAD": true,
+ "SYS_RT_SIGACTION": true,
+ "SYS_RT_SIGPENDING": true,
+ "SYS_RT_SIGPROCMASK": true,
+ "SYS_RT_SIGQUEUEINFO": true,
+ "SYS_RT_SIGRETURN": true,
+ "SYS_RT_SIGSUSPEND": true,
+ "SYS_RT_SIGTIMEDWAIT": true,
+ "SYS_RT_TGSIGQUEUEINFO": true,
+ "SYS_SBRK": true,
+ "SYS_SCHED_GETAFFINITY": true,
+ "SYS_SCHED_GETPARAM": true,
+ "SYS_SCHED_GETSCHEDULER": true,
+ "SYS_SCHED_GET_PRIORITY_MAX": true,
+ "SYS_SCHED_GET_PRIORITY_MIN": true,
+ "SYS_SCHED_RR_GET_INTERVAL": true,
+ "SYS_SCHED_SETAFFINITY": true,
+ "SYS_SCHED_SETPARAM": true,
+ "SYS_SCHED_SETSCHEDULER": true,
+ "SYS_SCHED_YIELD": true,
+ "SYS_SCTP_GENERIC_RECVMSG": true,
+ "SYS_SCTP_GENERIC_SENDMSG": true,
+ "SYS_SCTP_GENERIC_SENDMSG_IOV": true,
+ "SYS_SCTP_PEELOFF": true,
+ "SYS_SEARCHFS": true,
+ "SYS_SECURITY": true,
+ "SYS_SELECT": true,
+ "SYS_SELECT_NOCANCEL": true,
+ "SYS_SEMCONFIG": true,
+ "SYS_SEMCTL": true,
+ "SYS_SEMGET": true,
+ "SYS_SEMOP": true,
+ "SYS_SEMSYS": true,
+ "SYS_SEMTIMEDOP": true,
+ "SYS_SEM_CLOSE": true,
+ "SYS_SEM_DESTROY": true,
+ "SYS_SEM_GETVALUE": true,
+ "SYS_SEM_INIT": true,
+ "SYS_SEM_OPEN": true,
+ "SYS_SEM_POST": true,
+ "SYS_SEM_TRYWAIT": true,
+ "SYS_SEM_UNLINK": true,
+ "SYS_SEM_WAIT": true,
+ "SYS_SEM_WAIT_NOCANCEL": true,
+ "SYS_SEND": true,
+ "SYS_SENDFILE": true,
+ "SYS_SENDFILE64": true,
+ "SYS_SENDMMSG": true,
+ "SYS_SENDMSG": true,
+ "SYS_SENDMSG_NOCANCEL": true,
+ "SYS_SENDTO": true,
+ "SYS_SENDTO_NOCANCEL": true,
+ "SYS_SETATTRLIST": true,
+ "SYS_SETAUDIT": true,
+ "SYS_SETAUDIT_ADDR": true,
+ "SYS_SETAUID": true,
+ "SYS_SETCONTEXT": true,
+ "SYS_SETDOMAINNAME": true,
+ "SYS_SETEGID": true,
+ "SYS_SETEUID": true,
+ "SYS_SETFIB": true,
+ "SYS_SETFSGID": true,
+ "SYS_SETFSGID32": true,
+ "SYS_SETFSUID": true,
+ "SYS_SETFSUID32": true,
+ "SYS_SETGID": true,
+ "SYS_SETGID32": true,
+ "SYS_SETGROUPS": true,
+ "SYS_SETGROUPS32": true,
+ "SYS_SETHOSTNAME": true,
+ "SYS_SETITIMER": true,
+ "SYS_SETLCID": true,
+ "SYS_SETLOGIN": true,
+ "SYS_SETLOGINCLASS": true,
+ "SYS_SETNS": true,
+ "SYS_SETPGID": true,
+ "SYS_SETPRIORITY": true,
+ "SYS_SETPRIVEXEC": true,
+ "SYS_SETREGID": true,
+ "SYS_SETREGID32": true,
+ "SYS_SETRESGID": true,
+ "SYS_SETRESGID32": true,
+ "SYS_SETRESUID": true,
+ "SYS_SETRESUID32": true,
+ "SYS_SETREUID": true,
+ "SYS_SETREUID32": true,
+ "SYS_SETRLIMIT": true,
+ "SYS_SETRTABLE": true,
+ "SYS_SETSGROUPS": true,
+ "SYS_SETSID": true,
+ "SYS_SETSOCKOPT": true,
+ "SYS_SETTID": true,
+ "SYS_SETTID_WITH_PID": true,
+ "SYS_SETTIMEOFDAY": true,
+ "SYS_SETUID": true,
+ "SYS_SETUID32": true,
+ "SYS_SETWGROUPS": true,
+ "SYS_SETXATTR": true,
+ "SYS_SET_MEMPOLICY": true,
+ "SYS_SET_ROBUST_LIST": true,
+ "SYS_SET_THREAD_AREA": true,
+ "SYS_SET_TID_ADDRESS": true,
+ "SYS_SGETMASK": true,
+ "SYS_SHARED_REGION_CHECK_NP": true,
+ "SYS_SHARED_REGION_MAP_AND_SLIDE_NP": true,
+ "SYS_SHMAT": true,
+ "SYS_SHMCTL": true,
+ "SYS_SHMDT": true,
+ "SYS_SHMGET": true,
+ "SYS_SHMSYS": true,
+ "SYS_SHM_OPEN": true,
+ "SYS_SHM_UNLINK": true,
+ "SYS_SHUTDOWN": true,
+ "SYS_SIGACTION": true,
+ "SYS_SIGALTSTACK": true,
+ "SYS_SIGNAL": true,
+ "SYS_SIGNALFD": true,
+ "SYS_SIGNALFD4": true,
+ "SYS_SIGPENDING": true,
+ "SYS_SIGPROCMASK": true,
+ "SYS_SIGQUEUE": true,
+ "SYS_SIGQUEUEINFO": true,
+ "SYS_SIGRETURN": true,
+ "SYS_SIGSUSPEND": true,
+ "SYS_SIGSUSPEND_NOCANCEL": true,
+ "SYS_SIGTIMEDWAIT": true,
+ "SYS_SIGWAIT": true,
+ "SYS_SIGWAITINFO": true,
+ "SYS_SOCKET": true,
+ "SYS_SOCKETCALL": true,
+ "SYS_SOCKETPAIR": true,
+ "SYS_SPLICE": true,
+ "SYS_SSETMASK": true,
+ "SYS_SSTK": true,
+ "SYS_STACK_SNAPSHOT": true,
+ "SYS_STAT": true,
+ "SYS_STAT64": true,
+ "SYS_STAT64_EXTENDED": true,
+ "SYS_STATFS": true,
+ "SYS_STATFS64": true,
+ "SYS_STATV": true,
+ "SYS_STATVFS1": true,
+ "SYS_STAT_EXTENDED": true,
+ "SYS_STIME": true,
+ "SYS_STTY": true,
+ "SYS_SWAPCONTEXT": true,
+ "SYS_SWAPCTL": true,
+ "SYS_SWAPOFF": true,
+ "SYS_SWAPON": true,
+ "SYS_SYMLINK": true,
+ "SYS_SYMLINKAT": true,
+ "SYS_SYNC": true,
+ "SYS_SYNCFS": true,
+ "SYS_SYNC_FILE_RANGE": true,
+ "SYS_SYSARCH": true,
+ "SYS_SYSCALL": true,
+ "SYS_SYSCALL_BASE": true,
+ "SYS_SYSFS": true,
+ "SYS_SYSINFO": true,
+ "SYS_SYSLOG": true,
+ "SYS_TEE": true,
+ "SYS_TGKILL": true,
+ "SYS_THREAD_SELFID": true,
+ "SYS_THR_CREATE": true,
+ "SYS_THR_EXIT": true,
+ "SYS_THR_KILL": true,
+ "SYS_THR_KILL2": true,
+ "SYS_THR_NEW": true,
+ "SYS_THR_SELF": true,
+ "SYS_THR_SET_NAME": true,
+ "SYS_THR_SUSPEND": true,
+ "SYS_THR_WAKE": true,
+ "SYS_TIME": true,
+ "SYS_TIMERFD_CREATE": true,
+ "SYS_TIMERFD_GETTIME": true,
+ "SYS_TIMERFD_SETTIME": true,
+ "SYS_TIMER_CREATE": true,
+ "SYS_TIMER_DELETE": true,
+ "SYS_TIMER_GETOVERRUN": true,
+ "SYS_TIMER_GETTIME": true,
+ "SYS_TIMER_SETTIME": true,
+ "SYS_TIMES": true,
+ "SYS_TKILL": true,
+ "SYS_TRUNCATE": true,
+ "SYS_TRUNCATE64": true,
+ "SYS_TUXCALL": true,
+ "SYS_UGETRLIMIT": true,
+ "SYS_ULIMIT": true,
+ "SYS_UMASK": true,
+ "SYS_UMASK_EXTENDED": true,
+ "SYS_UMOUNT": true,
+ "SYS_UMOUNT2": true,
+ "SYS_UNAME": true,
+ "SYS_UNDELETE": true,
+ "SYS_UNLINK": true,
+ "SYS_UNLINKAT": true,
+ "SYS_UNMOUNT": true,
+ "SYS_UNSHARE": true,
+ "SYS_USELIB": true,
+ "SYS_USTAT": true,
+ "SYS_UTIME": true,
+ "SYS_UTIMENSAT": true,
+ "SYS_UTIMES": true,
+ "SYS_UTRACE": true,
+ "SYS_UUIDGEN": true,
+ "SYS_VADVISE": true,
+ "SYS_VFORK": true,
+ "SYS_VHANGUP": true,
+ "SYS_VM86": true,
+ "SYS_VM86OLD": true,
+ "SYS_VMSPLICE": true,
+ "SYS_VM_PRESSURE_MONITOR": true,
+ "SYS_VSERVER": true,
+ "SYS_WAIT4": true,
+ "SYS_WAIT4_NOCANCEL": true,
+ "SYS_WAIT6": true,
+ "SYS_WAITEVENT": true,
+ "SYS_WAITID": true,
+ "SYS_WAITID_NOCANCEL": true,
+ "SYS_WAITPID": true,
+ "SYS_WATCHEVENT": true,
+ "SYS_WORKQ_KERNRETURN": true,
+ "SYS_WORKQ_OPEN": true,
+ "SYS_WRITE": true,
+ "SYS_WRITEV": true,
+ "SYS_WRITEV_NOCANCEL": true,
+ "SYS_WRITE_NOCANCEL": true,
+ "SYS_YIELD": true,
+ "SYS__LLSEEK": true,
+ "SYS__LWP_CONTINUE": true,
+ "SYS__LWP_CREATE": true,
+ "SYS__LWP_CTL": true,
+ "SYS__LWP_DETACH": true,
+ "SYS__LWP_EXIT": true,
+ "SYS__LWP_GETNAME": true,
+ "SYS__LWP_GETPRIVATE": true,
+ "SYS__LWP_KILL": true,
+ "SYS__LWP_PARK": true,
+ "SYS__LWP_SELF": true,
+ "SYS__LWP_SETNAME": true,
+ "SYS__LWP_SETPRIVATE": true,
+ "SYS__LWP_SUSPEND": true,
+ "SYS__LWP_UNPARK": true,
+ "SYS__LWP_UNPARK_ALL": true,
+ "SYS__LWP_WAIT": true,
+ "SYS__LWP_WAKEUP": true,
+ "SYS__NEWSELECT": true,
+ "SYS__PSET_BIND": true,
+ "SYS__SCHED_GETAFFINITY": true,
+ "SYS__SCHED_GETPARAM": true,
+ "SYS__SCHED_SETAFFINITY": true,
+ "SYS__SCHED_SETPARAM": true,
+ "SYS__SYSCTL": true,
+ "SYS__UMTX_LOCK": true,
+ "SYS__UMTX_OP": true,
+ "SYS__UMTX_UNLOCK": true,
+ "SYS___ACL_ACLCHECK_FD": true,
+ "SYS___ACL_ACLCHECK_FILE": true,
+ "SYS___ACL_ACLCHECK_LINK": true,
+ "SYS___ACL_DELETE_FD": true,
+ "SYS___ACL_DELETE_FILE": true,
+ "SYS___ACL_DELETE_LINK": true,
+ "SYS___ACL_GET_FD": true,
+ "SYS___ACL_GET_FILE": true,
+ "SYS___ACL_GET_LINK": true,
+ "SYS___ACL_SET_FD": true,
+ "SYS___ACL_SET_FILE": true,
+ "SYS___ACL_SET_LINK": true,
+ "SYS___CLONE": true,
+ "SYS___DISABLE_THREADSIGNAL": true,
+ "SYS___GETCWD": true,
+ "SYS___GETLOGIN": true,
+ "SYS___GET_TCB": true,
+ "SYS___MAC_EXECVE": true,
+ "SYS___MAC_GETFSSTAT": true,
+ "SYS___MAC_GET_FD": true,
+ "SYS___MAC_GET_FILE": true,
+ "SYS___MAC_GET_LCID": true,
+ "SYS___MAC_GET_LCTX": true,
+ "SYS___MAC_GET_LINK": true,
+ "SYS___MAC_GET_MOUNT": true,
+ "SYS___MAC_GET_PID": true,
+ "SYS___MAC_GET_PROC": true,
+ "SYS___MAC_MOUNT": true,
+ "SYS___MAC_SET_FD": true,
+ "SYS___MAC_SET_FILE": true,
+ "SYS___MAC_SET_LCTX": true,
+ "SYS___MAC_SET_LINK": true,
+ "SYS___MAC_SET_PROC": true,
+ "SYS___MAC_SYSCALL": true,
+ "SYS___OLD_SEMWAIT_SIGNAL": true,
+ "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL": true,
+ "SYS___POSIX_CHOWN": true,
+ "SYS___POSIX_FCHOWN": true,
+ "SYS___POSIX_LCHOWN": true,
+ "SYS___POSIX_RENAME": true,
+ "SYS___PTHREAD_CANCELED": true,
+ "SYS___PTHREAD_CHDIR": true,
+ "SYS___PTHREAD_FCHDIR": true,
+ "SYS___PTHREAD_KILL": true,
+ "SYS___PTHREAD_MARKCANCEL": true,
+ "SYS___PTHREAD_SIGMASK": true,
+ "SYS___QUOTACTL": true,
+ "SYS___SEMCTL": true,
+ "SYS___SEMWAIT_SIGNAL": true,
+ "SYS___SEMWAIT_SIGNAL_NOCANCEL": true,
+ "SYS___SETLOGIN": true,
+ "SYS___SETUGID": true,
+ "SYS___SET_TCB": true,
+ "SYS___SIGACTION_SIGTRAMP": true,
+ "SYS___SIGTIMEDWAIT": true,
+ "SYS___SIGWAIT": true,
+ "SYS___SIGWAIT_NOCANCEL": true,
+ "SYS___SYSCTL": true,
+ "SYS___TFORK": true,
+ "SYS___THREXIT": true,
+ "SYS___THRSIGDIVERT": true,
+ "SYS___THRSLEEP": true,
+ "SYS___THRWAKEUP": true,
+ "S_ARCH1": true,
+ "S_ARCH2": true,
+ "S_BLKSIZE": true,
+ "S_IEXEC": true,
+ "S_IFBLK": true,
+ "S_IFCHR": true,
+ "S_IFDIR": true,
+ "S_IFIFO": true,
+ "S_IFLNK": true,
+ "S_IFMT": true,
+ "S_IFREG": true,
+ "S_IFSOCK": true,
+ "S_IFWHT": true,
+ "S_IREAD": true,
+ "S_IRGRP": true,
+ "S_IROTH": true,
+ "S_IRUSR": true,
+ "S_IRWXG": true,
+ "S_IRWXO": true,
+ "S_IRWXU": true,
+ "S_ISGID": true,
+ "S_ISTXT": true,
+ "S_ISUID": true,
+ "S_ISVTX": true,
+ "S_IWGRP": true,
+ "S_IWOTH": true,
+ "S_IWRITE": true,
+ "S_IWUSR": true,
+ "S_IXGRP": true,
+ "S_IXOTH": true,
+ "S_IXUSR": true,
+ "S_LOGIN_SET": true,
+ "SecurityAttributes": true,
+ "Seek": true,
+ "Select": true,
+ "Sendfile": true,
+ "Sendmsg": true,
+ "SendmsgN": true,
+ "Sendto": true,
+ "Servent": true,
+ "SetBpf": true,
+ "SetBpfBuflen": true,
+ "SetBpfDatalink": true,
+ "SetBpfHeadercmpl": true,
+ "SetBpfImmediate": true,
+ "SetBpfInterface": true,
+ "SetBpfPromisc": true,
+ "SetBpfTimeout": true,
+ "SetCurrentDirectory": true,
+ "SetEndOfFile": true,
+ "SetEnvironmentVariable": true,
+ "SetFileAttributes": true,
+ "SetFileCompletionNotificationModes": true,
+ "SetFilePointer": true,
+ "SetFileTime": true,
+ "SetHandleInformation": true,
+ "SetKevent": true,
+ "SetLsfPromisc": true,
+ "SetNonblock": true,
+ "Setdomainname": true,
+ "Setegid": true,
+ "Setenv": true,
+ "Seteuid": true,
+ "Setfsgid": true,
+ "Setfsuid": true,
+ "Setgid": true,
+ "Setgroups": true,
+ "Sethostname": true,
+ "Setlogin": true,
+ "Setpgid": true,
+ "Setpriority": true,
+ "Setprivexec": true,
+ "Setregid": true,
+ "Setresgid": true,
+ "Setresuid": true,
+ "Setreuid": true,
+ "Setrlimit": true,
+ "Setsid": true,
+ "Setsockopt": true,
+ "SetsockoptByte": true,
+ "SetsockoptICMPv6Filter": true,
+ "SetsockoptIPMreq": true,
+ "SetsockoptIPMreqn": true,
+ "SetsockoptIPv6Mreq": true,
+ "SetsockoptInet4Addr": true,
+ "SetsockoptInt": true,
+ "SetsockoptLinger": true,
+ "SetsockoptString": true,
+ "SetsockoptTimeval": true,
+ "Settimeofday": true,
+ "Setuid": true,
+ "Setxattr": true,
+ "Shutdown": true,
+ "SidTypeAlias": true,
+ "SidTypeComputer": true,
+ "SidTypeDeletedAccount": true,
+ "SidTypeDomain": true,
+ "SidTypeGroup": true,
+ "SidTypeInvalid": true,
+ "SidTypeLabel": true,
+ "SidTypeUnknown": true,
+ "SidTypeUser": true,
+ "SidTypeWellKnownGroup": true,
+ "Signal": true,
+ "SizeofBpfHdr": true,
+ "SizeofBpfInsn": true,
+ "SizeofBpfProgram": true,
+ "SizeofBpfStat": true,
+ "SizeofBpfVersion": true,
+ "SizeofBpfZbuf": true,
+ "SizeofBpfZbufHeader": true,
+ "SizeofCmsghdr": true,
+ "SizeofICMPv6Filter": true,
+ "SizeofIPMreq": true,
+ "SizeofIPMreqn": true,
+ "SizeofIPv6MTUInfo": true,
+ "SizeofIPv6Mreq": true,
+ "SizeofIfAddrmsg": true,
+ "SizeofIfAnnounceMsghdr": true,
+ "SizeofIfData": true,
+ "SizeofIfInfomsg": true,
+ "SizeofIfMsghdr": true,
+ "SizeofIfaMsghdr": true,
+ "SizeofIfmaMsghdr": true,
+ "SizeofIfmaMsghdr2": true,
+ "SizeofInet4Pktinfo": true,
+ "SizeofInet6Pktinfo": true,
+ "SizeofInotifyEvent": true,
+ "SizeofLinger": true,
+ "SizeofMsghdr": true,
+ "SizeofNlAttr": true,
+ "SizeofNlMsgerr": true,
+ "SizeofNlMsghdr": true,
+ "SizeofRtAttr": true,
+ "SizeofRtGenmsg": true,
+ "SizeofRtMetrics": true,
+ "SizeofRtMsg": true,
+ "SizeofRtMsghdr": true,
+ "SizeofRtNexthop": true,
+ "SizeofSockFilter": true,
+ "SizeofSockFprog": true,
+ "SizeofSockaddrAny": true,
+ "SizeofSockaddrDatalink": true,
+ "SizeofSockaddrInet4": true,
+ "SizeofSockaddrInet6": true,
+ "SizeofSockaddrLinklayer": true,
+ "SizeofSockaddrNetlink": true,
+ "SizeofSockaddrUnix": true,
+ "SizeofTCPInfo": true,
+ "SizeofUcred": true,
+ "SlicePtrFromStrings": true,
+ "SockFilter": true,
+ "SockFprog": true,
+ "SockaddrDatalink": true,
+ "SockaddrGen": true,
+ "SockaddrInet4": true,
+ "SockaddrInet6": true,
+ "SockaddrLinklayer": true,
+ "SockaddrNetlink": true,
+ "SockaddrUnix": true,
+ "Socket": true,
+ "SocketControlMessage": true,
+ "SocketDisableIPv6": true,
+ "Socketpair": true,
+ "Splice": true,
+ "StartProcess": true,
+ "StartupInfo": true,
+ "Stat": true,
+ "Stat_t": true,
+ "Statfs": true,
+ "Statfs_t": true,
+ "Stderr": true,
+ "Stdin": true,
+ "Stdout": true,
+ "StringBytePtr": true,
+ "StringByteSlice": true,
+ "StringSlicePtr": true,
+ "StringToSid": true,
+ "StringToUTF16": true,
+ "StringToUTF16Ptr": true,
+ "Symlink": true,
+ "Sync": true,
+ "SyncFileRange": true,
+ "SysProcAttr": true,
+ "SysProcIDMap": true,
+ "Syscall": true,
+ "Syscall12": true,
+ "Syscall15": true,
+ "Syscall18": true,
+ "Syscall6": true,
+ "Syscall9": true,
+ "Sysctl": true,
+ "SysctlUint32": true,
+ "Sysctlnode": true,
+ "Sysinfo": true,
+ "Sysinfo_t": true,
+ "Systemtime": true,
+ "TCGETS": true,
+ "TCIFLUSH": true,
+ "TCIOFLUSH": true,
+ "TCOFLUSH": true,
+ "TCPInfo": true,
+ "TCPKeepalive": true,
+ "TCP_CA_NAME_MAX": true,
+ "TCP_CONGCTL": true,
+ "TCP_CONGESTION": true,
+ "TCP_CONNECTIONTIMEOUT": true,
+ "TCP_CORK": true,
+ "TCP_DEFER_ACCEPT": true,
+ "TCP_INFO": true,
+ "TCP_KEEPALIVE": true,
+ "TCP_KEEPCNT": true,
+ "TCP_KEEPIDLE": true,
+ "TCP_KEEPINIT": true,
+ "TCP_KEEPINTVL": true,
+ "TCP_LINGER2": true,
+ "TCP_MAXBURST": true,
+ "TCP_MAXHLEN": true,
+ "TCP_MAXOLEN": true,
+ "TCP_MAXSEG": true,
+ "TCP_MAXWIN": true,
+ "TCP_MAX_SACK": true,
+ "TCP_MAX_WINSHIFT": true,
+ "TCP_MD5SIG": true,
+ "TCP_MD5SIG_MAXKEYLEN": true,
+ "TCP_MINMSS": true,
+ "TCP_MINMSSOVERLOAD": true,
+ "TCP_MSS": true,
+ "TCP_NODELAY": true,
+ "TCP_NOOPT": true,
+ "TCP_NOPUSH": true,
+ "TCP_NSTATES": true,
+ "TCP_QUICKACK": true,
+ "TCP_RXT_CONNDROPTIME": true,
+ "TCP_RXT_FINDROP": true,
+ "TCP_SACK_ENABLE": true,
+ "TCP_SYNCNT": true,
+ "TCP_VENDOR": true,
+ "TCP_WINDOW_CLAMP": true,
+ "TCSAFLUSH": true,
+ "TCSETS": true,
+ "TF_DISCONNECT": true,
+ "TF_REUSE_SOCKET": true,
+ "TF_USE_DEFAULT_WORKER": true,
+ "TF_USE_KERNEL_APC": true,
+ "TF_USE_SYSTEM_THREAD": true,
+ "TF_WRITE_BEHIND": true,
+ "TH32CS_INHERIT": true,
+ "TH32CS_SNAPALL": true,
+ "TH32CS_SNAPHEAPLIST": true,
+ "TH32CS_SNAPMODULE": true,
+ "TH32CS_SNAPMODULE32": true,
+ "TH32CS_SNAPPROCESS": true,
+ "TH32CS_SNAPTHREAD": true,
+ "TIME_ZONE_ID_DAYLIGHT": true,
+ "TIME_ZONE_ID_STANDARD": true,
+ "TIME_ZONE_ID_UNKNOWN": true,
+ "TIOCCBRK": true,
+ "TIOCCDTR": true,
+ "TIOCCONS": true,
+ "TIOCDCDTIMESTAMP": true,
+ "TIOCDRAIN": true,
+ "TIOCDSIMICROCODE": true,
+ "TIOCEXCL": true,
+ "TIOCEXT": true,
+ "TIOCFLAG_CDTRCTS": true,
+ "TIOCFLAG_CLOCAL": true,
+ "TIOCFLAG_CRTSCTS": true,
+ "TIOCFLAG_MDMBUF": true,
+ "TIOCFLAG_PPS": true,
+ "TIOCFLAG_SOFTCAR": true,
+ "TIOCFLUSH": true,
+ "TIOCGDEV": true,
+ "TIOCGDRAINWAIT": true,
+ "TIOCGETA": true,
+ "TIOCGETD": true,
+ "TIOCGFLAGS": true,
+ "TIOCGICOUNT": true,
+ "TIOCGLCKTRMIOS": true,
+ "TIOCGLINED": true,
+ "TIOCGPGRP": true,
+ "TIOCGPTN": true,
+ "TIOCGQSIZE": true,
+ "TIOCGRANTPT": true,
+ "TIOCGRS485": true,
+ "TIOCGSERIAL": true,
+ "TIOCGSID": true,
+ "TIOCGSIZE": true,
+ "TIOCGSOFTCAR": true,
+ "TIOCGTSTAMP": true,
+ "TIOCGWINSZ": true,
+ "TIOCINQ": true,
+ "TIOCIXOFF": true,
+ "TIOCIXON": true,
+ "TIOCLINUX": true,
+ "TIOCMBIC": true,
+ "TIOCMBIS": true,
+ "TIOCMGDTRWAIT": true,
+ "TIOCMGET": true,
+ "TIOCMIWAIT": true,
+ "TIOCMODG": true,
+ "TIOCMODS": true,
+ "TIOCMSDTRWAIT": true,
+ "TIOCMSET": true,
+ "TIOCM_CAR": true,
+ "TIOCM_CD": true,
+ "TIOCM_CTS": true,
+ "TIOCM_DCD": true,
+ "TIOCM_DSR": true,
+ "TIOCM_DTR": true,
+ "TIOCM_LE": true,
+ "TIOCM_RI": true,
+ "TIOCM_RNG": true,
+ "TIOCM_RTS": true,
+ "TIOCM_SR": true,
+ "TIOCM_ST": true,
+ "TIOCNOTTY": true,
+ "TIOCNXCL": true,
+ "TIOCOUTQ": true,
+ "TIOCPKT": true,
+ "TIOCPKT_DATA": true,
+ "TIOCPKT_DOSTOP": true,
+ "TIOCPKT_FLUSHREAD": true,
+ "TIOCPKT_FLUSHWRITE": true,
+ "TIOCPKT_IOCTL": true,
+ "TIOCPKT_NOSTOP": true,
+ "TIOCPKT_START": true,
+ "TIOCPKT_STOP": true,
+ "TIOCPTMASTER": true,
+ "TIOCPTMGET": true,
+ "TIOCPTSNAME": true,
+ "TIOCPTYGNAME": true,
+ "TIOCPTYGRANT": true,
+ "TIOCPTYUNLK": true,
+ "TIOCRCVFRAME": true,
+ "TIOCREMOTE": true,
+ "TIOCSBRK": true,
+ "TIOCSCONS": true,
+ "TIOCSCTTY": true,
+ "TIOCSDRAINWAIT": true,
+ "TIOCSDTR": true,
+ "TIOCSERCONFIG": true,
+ "TIOCSERGETLSR": true,
+ "TIOCSERGETMULTI": true,
+ "TIOCSERGSTRUCT": true,
+ "TIOCSERGWILD": true,
+ "TIOCSERSETMULTI": true,
+ "TIOCSERSWILD": true,
+ "TIOCSER_TEMT": true,
+ "TIOCSETA": true,
+ "TIOCSETAF": true,
+ "TIOCSETAW": true,
+ "TIOCSETD": true,
+ "TIOCSFLAGS": true,
+ "TIOCSIG": true,
+ "TIOCSLCKTRMIOS": true,
+ "TIOCSLINED": true,
+ "TIOCSPGRP": true,
+ "TIOCSPTLCK": true,
+ "TIOCSQSIZE": true,
+ "TIOCSRS485": true,
+ "TIOCSSERIAL": true,
+ "TIOCSSIZE": true,
+ "TIOCSSOFTCAR": true,
+ "TIOCSTART": true,
+ "TIOCSTAT": true,
+ "TIOCSTI": true,
+ "TIOCSTOP": true,
+ "TIOCSTSTAMP": true,
+ "TIOCSWINSZ": true,
+ "TIOCTIMESTAMP": true,
+ "TIOCUCNTL": true,
+ "TIOCVHANGUP": true,
+ "TIOCXMTFRAME": true,
+ "TOKEN_ADJUST_DEFAULT": true,
+ "TOKEN_ADJUST_GROUPS": true,
+ "TOKEN_ADJUST_PRIVILEGES": true,
+ "TOKEN_ADJUST_SESSIONID": true,
+ "TOKEN_ALL_ACCESS": true,
+ "TOKEN_ASSIGN_PRIMARY": true,
+ "TOKEN_DUPLICATE": true,
+ "TOKEN_EXECUTE": true,
+ "TOKEN_IMPERSONATE": true,
+ "TOKEN_QUERY": true,
+ "TOKEN_QUERY_SOURCE": true,
+ "TOKEN_READ": true,
+ "TOKEN_WRITE": true,
+ "TOSTOP": true,
+ "TRUNCATE_EXISTING": true,
+ "TUNATTACHFILTER": true,
+ "TUNDETACHFILTER": true,
+ "TUNGETFEATURES": true,
+ "TUNGETIFF": true,
+ "TUNGETSNDBUF": true,
+ "TUNGETVNETHDRSZ": true,
+ "TUNSETDEBUG": true,
+ "TUNSETGROUP": true,
+ "TUNSETIFF": true,
+ "TUNSETLINK": true,
+ "TUNSETNOCSUM": true,
+ "TUNSETOFFLOAD": true,
+ "TUNSETOWNER": true,
+ "TUNSETPERSIST": true,
+ "TUNSETSNDBUF": true,
+ "TUNSETTXFILTER": true,
+ "TUNSETVNETHDRSZ": true,
+ "Tee": true,
+ "TerminateProcess": true,
+ "Termios": true,
+ "Tgkill": true,
+ "Time": true,
+ "Time_t": true,
+ "Times": true,
+ "Timespec": true,
+ "TimespecToNsec": true,
+ "Timeval": true,
+ "Timeval32": true,
+ "TimevalToNsec": true,
+ "Timex": true,
+ "Timezoneinformation": true,
+ "Tms": true,
+ "Token": true,
+ "TokenAccessInformation": true,
+ "TokenAuditPolicy": true,
+ "TokenDefaultDacl": true,
+ "TokenElevation": true,
+ "TokenElevationType": true,
+ "TokenGroups": true,
+ "TokenGroupsAndPrivileges": true,
+ "TokenHasRestrictions": true,
+ "TokenImpersonationLevel": true,
+ "TokenIntegrityLevel": true,
+ "TokenLinkedToken": true,
+ "TokenLogonSid": true,
+ "TokenMandatoryPolicy": true,
+ "TokenOrigin": true,
+ "TokenOwner": true,
+ "TokenPrimaryGroup": true,
+ "TokenPrivileges": true,
+ "TokenRestrictedSids": true,
+ "TokenSandBoxInert": true,
+ "TokenSessionId": true,
+ "TokenSessionReference": true,
+ "TokenSource": true,
+ "TokenStatistics": true,
+ "TokenType": true,
+ "TokenUIAccess": true,
+ "TokenUser": true,
+ "TokenVirtualizationAllowed": true,
+ "TokenVirtualizationEnabled": true,
+ "Tokenprimarygroup": true,
+ "Tokenuser": true,
+ "TranslateAccountName": true,
+ "TranslateName": true,
+ "TransmitFile": true,
+ "TransmitFileBuffers": true,
+ "Truncate": true,
+ "UNIX_PATH_MAX": true,
+ "USAGE_MATCH_TYPE_AND": true,
+ "USAGE_MATCH_TYPE_OR": true,
+ "UTF16FromString": true,
+ "UTF16PtrFromString": true,
+ "UTF16ToString": true,
+ "Ucred": true,
+ "Umask": true,
+ "Uname": true,
+ "Undelete": true,
+ "UnixCredentials": true,
+ "UnixRights": true,
+ "Unlink": true,
+ "Unlinkat": true,
+ "UnmapViewOfFile": true,
+ "Unmount": true,
+ "Unsetenv": true,
+ "Unshare": true,
+ "UserInfo10": true,
+ "Ustat": true,
+ "Ustat_t": true,
+ "Utimbuf": true,
+ "Utime": true,
+ "Utimes": true,
+ "UtimesNano": true,
+ "Utsname": true,
+ "VDISCARD": true,
+ "VDSUSP": true,
+ "VEOF": true,
+ "VEOL": true,
+ "VEOL2": true,
+ "VERASE": true,
+ "VERASE2": true,
+ "VINTR": true,
+ "VKILL": true,
+ "VLNEXT": true,
+ "VMIN": true,
+ "VQUIT": true,
+ "VREPRINT": true,
+ "VSTART": true,
+ "VSTATUS": true,
+ "VSTOP": true,
+ "VSUSP": true,
+ "VSWTC": true,
+ "VT0": true,
+ "VT1": true,
+ "VTDLY": true,
+ "VTIME": true,
+ "VWERASE": true,
+ "VirtualLock": true,
+ "VirtualUnlock": true,
+ "WAIT_ABANDONED": true,
+ "WAIT_FAILED": true,
+ "WAIT_OBJECT_0": true,
+ "WAIT_TIMEOUT": true,
+ "WALL": true,
+ "WALLSIG": true,
+ "WALTSIG": true,
+ "WCLONE": true,
+ "WCONTINUED": true,
+ "WCOREFLAG": true,
+ "WEXITED": true,
+ "WLINUXCLONE": true,
+ "WNOHANG": true,
+ "WNOTHREAD": true,
+ "WNOWAIT": true,
+ "WNOZOMBIE": true,
+ "WOPTSCHECKED": true,
+ "WORDSIZE": true,
+ "WSABuf": true,
+ "WSACleanup": true,
+ "WSADESCRIPTION_LEN": true,
+ "WSAData": true,
+ "WSAEACCES": true,
+ "WSAECONNABORTED": true,
+ "WSAECONNRESET": true,
+ "WSAEnumProtocols": true,
+ "WSAID_CONNECTEX": true,
+ "WSAIoctl": true,
+ "WSAPROTOCOL_LEN": true,
+ "WSAProtocolChain": true,
+ "WSAProtocolInfo": true,
+ "WSARecv": true,
+ "WSARecvFrom": true,
+ "WSASYS_STATUS_LEN": true,
+ "WSASend": true,
+ "WSASendTo": true,
+ "WSASendto": true,
+ "WSAStartup": true,
+ "WSTOPPED": true,
+ "WTRAPPED": true,
+ "WUNTRACED": true,
+ "Wait4": true,
+ "WaitForSingleObject": true,
+ "WaitStatus": true,
+ "Win32FileAttributeData": true,
+ "Win32finddata": true,
+ "Write": true,
+ "WriteConsole": true,
+ "WriteFile": true,
+ "X509_ASN_ENCODING": true,
+ "XCASE": true,
+ "XP1_CONNECTIONLESS": true,
+ "XP1_CONNECT_DATA": true,
+ "XP1_DISCONNECT_DATA": true,
+ "XP1_EXPEDITED_DATA": true,
+ "XP1_GRACEFUL_CLOSE": true,
+ "XP1_GUARANTEED_DELIVERY": true,
+ "XP1_GUARANTEED_ORDER": true,
+ "XP1_IFS_HANDLES": true,
+ "XP1_MESSAGE_ORIENTED": true,
+ "XP1_MULTIPOINT_CONTROL_PLANE": true,
+ "XP1_MULTIPOINT_DATA_PLANE": true,
+ "XP1_PARTIAL_MESSAGE": true,
+ "XP1_PSEUDO_STREAM": true,
+ "XP1_QOS_SUPPORTED": true,
+ "XP1_SAN_SUPPORT_SDP": true,
+ "XP1_SUPPORT_BROADCAST": true,
+ "XP1_SUPPORT_MULTIPOINT": true,
+ "XP1_UNI_RECV": true,
+ "XP1_UNI_SEND": true,
+ },
+ "syscall/js": map[string]bool{
+ "Error": true,
+ "Func": true,
+ "FuncOf": true,
+ "Global": true,
+ "Null": true,
+ "Type": true,
+ "TypeBoolean": true,
+ "TypeFunction": true,
+ "TypeNull": true,
+ "TypeNumber": true,
+ "TypeObject": true,
+ "TypeString": true,
+ "TypeSymbol": true,
+ "TypeUndefined": true,
+ "TypedArray": true,
+ "TypedArrayOf": true,
+ "Undefined": true,
+ "Value": true,
+ "ValueError": true,
+ "ValueOf": true,
+ "Wrapper": true,
+ },
+ "testing": map[string]bool{
+ "AllocsPerRun": true,
+ "B": true,
+ "Benchmark": true,
+ "BenchmarkResult": true,
+ "Cover": true,
+ "CoverBlock": true,
+ "CoverMode": true,
+ "Coverage": true,
+ "InternalBenchmark": true,
+ "InternalExample": true,
+ "InternalTest": true,
+ "M": true,
+ "Main": true,
+ "MainStart": true,
+ "PB": true,
+ "RegisterCover": true,
+ "RunBenchmarks": true,
+ "RunExamples": true,
+ "RunTests": true,
+ "Short": true,
+ "T": true,
+ "Verbose": true,
+ },
+ "testing/iotest": map[string]bool{
+ "DataErrReader": true,
+ "ErrTimeout": true,
+ "HalfReader": true,
+ "NewReadLogger": true,
+ "NewWriteLogger": true,
+ "OneByteReader": true,
+ "TimeoutReader": true,
+ "TruncateWriter": true,
+ },
+ "testing/quick": map[string]bool{
+ "Check": true,
+ "CheckEqual": true,
+ "CheckEqualError": true,
+ "CheckError": true,
+ "Config": true,
+ "Generator": true,
+ "SetupError": true,
+ "Value": true,
+ },
+ "text/scanner": map[string]bool{
+ "Char": true,
+ "Comment": true,
+ "EOF": true,
+ "Float": true,
+ "GoTokens": true,
+ "GoWhitespace": true,
+ "Ident": true,
+ "Int": true,
+ "Position": true,
+ "RawString": true,
+ "ScanChars": true,
+ "ScanComments": true,
+ "ScanFloats": true,
+ "ScanIdents": true,
+ "ScanInts": true,
+ "ScanRawStrings": true,
+ "ScanStrings": true,
+ "Scanner": true,
+ "SkipComments": true,
+ "String": true,
+ "TokenString": true,
+ },
+ "text/tabwriter": map[string]bool{
+ "AlignRight": true,
+ "Debug": true,
+ "DiscardEmptyColumns": true,
+ "Escape": true,
+ "FilterHTML": true,
+ "NewWriter": true,
+ "StripEscape": true,
+ "TabIndent": true,
+ "Writer": true,
+ },
+ "text/template": map[string]bool{
+ "ExecError": true,
+ "FuncMap": true,
+ "HTMLEscape": true,
+ "HTMLEscapeString": true,
+ "HTMLEscaper": true,
+ "IsTrue": true,
+ "JSEscape": true,
+ "JSEscapeString": true,
+ "JSEscaper": true,
+ "Must": true,
+ "New": true,
+ "ParseFiles": true,
+ "ParseGlob": true,
+ "Template": true,
+ "URLQueryEscaper": true,
+ },
+ "text/template/parse": map[string]bool{
+ "ActionNode": true,
+ "BoolNode": true,
+ "BranchNode": true,
+ "ChainNode": true,
+ "CommandNode": true,
+ "DotNode": true,
+ "FieldNode": true,
+ "IdentifierNode": true,
+ "IfNode": true,
+ "IsEmptyTree": true,
+ "ListNode": true,
+ "New": true,
+ "NewIdentifier": true,
+ "NilNode": true,
+ "Node": true,
+ "NodeAction": true,
+ "NodeBool": true,
+ "NodeChain": true,
+ "NodeCommand": true,
+ "NodeDot": true,
+ "NodeField": true,
+ "NodeIdentifier": true,
+ "NodeIf": true,
+ "NodeList": true,
+ "NodeNil": true,
+ "NodeNumber": true,
+ "NodePipe": true,
+ "NodeRange": true,
+ "NodeString": true,
+ "NodeTemplate": true,
+ "NodeText": true,
+ "NodeType": true,
+ "NodeVariable": true,
+ "NodeWith": true,
+ "NumberNode": true,
+ "Parse": true,
+ "PipeNode": true,
+ "Pos": true,
+ "RangeNode": true,
+ "StringNode": true,
+ "TemplateNode": true,
+ "TextNode": true,
+ "Tree": true,
+ "VariableNode": true,
+ "WithNode": true,
+ },
+ "time": map[string]bool{
+ "ANSIC": true,
+ "After": true,
+ "AfterFunc": true,
+ "April": true,
+ "August": true,
+ "Date": true,
+ "December": true,
+ "Duration": true,
+ "February": true,
+ "FixedZone": true,
+ "Friday": true,
+ "Hour": true,
+ "January": true,
+ "July": true,
+ "June": true,
+ "Kitchen": true,
+ "LoadLocation": true,
+ "LoadLocationFromTZData": true,
+ "Local": true,
+ "Location": true,
+ "March": true,
+ "May": true,
+ "Microsecond": true,
+ "Millisecond": true,
+ "Minute": true,
+ "Monday": true,
+ "Month": true,
+ "Nanosecond": true,
+ "NewTicker": true,
+ "NewTimer": true,
+ "November": true,
+ "Now": true,
+ "October": true,
+ "Parse": true,
+ "ParseDuration": true,
+ "ParseError": true,
+ "ParseInLocation": true,
+ "RFC1123": true,
+ "RFC1123Z": true,
+ "RFC3339": true,
+ "RFC3339Nano": true,
+ "RFC822": true,
+ "RFC822Z": true,
+ "RFC850": true,
+ "RubyDate": true,
+ "Saturday": true,
+ "Second": true,
+ "September": true,
+ "Since": true,
+ "Sleep": true,
+ "Stamp": true,
+ "StampMicro": true,
+ "StampMilli": true,
+ "StampNano": true,
+ "Sunday": true,
+ "Thursday": true,
+ "Tick": true,
+ "Ticker": true,
+ "Time": true,
+ "Timer": true,
+ "Tuesday": true,
+ "UTC": true,
+ "Unix": true,
+ "UnixDate": true,
+ "Until": true,
+ "Wednesday": true,
+ "Weekday": true,
+ },
+ "unicode": map[string]bool{
+ "ASCII_Hex_Digit": true,
+ "Adlam": true,
+ "Ahom": true,
+ "Anatolian_Hieroglyphs": true,
+ "Arabic": true,
+ "Armenian": true,
+ "Avestan": true,
+ "AzeriCase": true,
+ "Balinese": true,
+ "Bamum": true,
+ "Bassa_Vah": true,
+ "Batak": true,
+ "Bengali": true,
+ "Bhaiksuki": true,
+ "Bidi_Control": true,
+ "Bopomofo": true,
+ "Brahmi": true,
+ "Braille": true,
+ "Buginese": true,
+ "Buhid": true,
+ "C": true,
+ "Canadian_Aboriginal": true,
+ "Carian": true,
+ "CaseRange": true,
+ "CaseRanges": true,
+ "Categories": true,
+ "Caucasian_Albanian": true,
+ "Cc": true,
+ "Cf": true,
+ "Chakma": true,
+ "Cham": true,
+ "Cherokee": true,
+ "Co": true,
+ "Common": true,
+ "Coptic": true,
+ "Cs": true,
+ "Cuneiform": true,
+ "Cypriot": true,
+ "Cyrillic": true,
+ "Dash": true,
+ "Deprecated": true,
+ "Deseret": true,
+ "Devanagari": true,
+ "Diacritic": true,
+ "Digit": true,
+ "Duployan": true,
+ "Egyptian_Hieroglyphs": true,
+ "Elbasan": true,
+ "Ethiopic": true,
+ "Extender": true,
+ "FoldCategory": true,
+ "FoldScript": true,
+ "Georgian": true,
+ "Glagolitic": true,
+ "Gothic": true,
+ "Grantha": true,
+ "GraphicRanges": true,
+ "Greek": true,
+ "Gujarati": true,
+ "Gurmukhi": true,
+ "Han": true,
+ "Hangul": true,
+ "Hanunoo": true,
+ "Hatran": true,
+ "Hebrew": true,
+ "Hex_Digit": true,
+ "Hiragana": true,
+ "Hyphen": true,
+ "IDS_Binary_Operator": true,
+ "IDS_Trinary_Operator": true,
+ "Ideographic": true,
+ "Imperial_Aramaic": true,
+ "In": true,
+ "Inherited": true,
+ "Inscriptional_Pahlavi": true,
+ "Inscriptional_Parthian": true,
+ "Is": true,
+ "IsControl": true,
+ "IsDigit": true,
+ "IsGraphic": true,
+ "IsLetter": true,
+ "IsLower": true,
+ "IsMark": true,
+ "IsNumber": true,
+ "IsOneOf": true,
+ "IsPrint": true,
+ "IsPunct": true,
+ "IsSpace": true,
+ "IsSymbol": true,
+ "IsTitle": true,
+ "IsUpper": true,
+ "Javanese": true,
+ "Join_Control": true,
+ "Kaithi": true,
+ "Kannada": true,
+ "Katakana": true,
+ "Kayah_Li": true,
+ "Kharoshthi": true,
+ "Khmer": true,
+ "Khojki": true,
+ "Khudawadi": true,
+ "L": true,
+ "Lao": true,
+ "Latin": true,
+ "Lepcha": true,
+ "Letter": true,
+ "Limbu": true,
+ "Linear_A": true,
+ "Linear_B": true,
+ "Lisu": true,
+ "Ll": true,
+ "Lm": true,
+ "Lo": true,
+ "Logical_Order_Exception": true,
+ "Lower": true,
+ "LowerCase": true,
+ "Lt": true,
+ "Lu": true,
+ "Lycian": true,
+ "Lydian": true,
+ "M": true,
+ "Mahajani": true,
+ "Malayalam": true,
+ "Mandaic": true,
+ "Manichaean": true,
+ "Marchen": true,
+ "Mark": true,
+ "Masaram_Gondi": true,
+ "MaxASCII": true,
+ "MaxCase": true,
+ "MaxLatin1": true,
+ "MaxRune": true,
+ "Mc": true,
+ "Me": true,
+ "Meetei_Mayek": true,
+ "Mende_Kikakui": true,
+ "Meroitic_Cursive": true,
+ "Meroitic_Hieroglyphs": true,
+ "Miao": true,
+ "Mn": true,
+ "Modi": true,
+ "Mongolian": true,
+ "Mro": true,
+ "Multani": true,
+ "Myanmar": true,
+ "N": true,
+ "Nabataean": true,
+ "Nd": true,
+ "New_Tai_Lue": true,
+ "Newa": true,
+ "Nko": true,
+ "Nl": true,
+ "No": true,
+ "Noncharacter_Code_Point": true,
+ "Number": true,
+ "Nushu": true,
+ "Ogham": true,
+ "Ol_Chiki": true,
+ "Old_Hungarian": true,
+ "Old_Italic": true,
+ "Old_North_Arabian": true,
+ "Old_Permic": true,
+ "Old_Persian": true,
+ "Old_South_Arabian": true,
+ "Old_Turkic": true,
+ "Oriya": true,
+ "Osage": true,
+ "Osmanya": true,
+ "Other": true,
+ "Other_Alphabetic": true,
+ "Other_Default_Ignorable_Code_Point": true,
+ "Other_Grapheme_Extend": true,
+ "Other_ID_Continue": true,
+ "Other_ID_Start": true,
+ "Other_Lowercase": true,
+ "Other_Math": true,
+ "Other_Uppercase": true,
+ "P": true,
+ "Pahawh_Hmong": true,
+ "Palmyrene": true,
+ "Pattern_Syntax": true,
+ "Pattern_White_Space": true,
+ "Pau_Cin_Hau": true,
+ "Pc": true,
+ "Pd": true,
+ "Pe": true,
+ "Pf": true,
+ "Phags_Pa": true,
+ "Phoenician": true,
+ "Pi": true,
+ "Po": true,
+ "Prepended_Concatenation_Mark": true,
+ "PrintRanges": true,
+ "Properties": true,
+ "Ps": true,
+ "Psalter_Pahlavi": true,
+ "Punct": true,
+ "Quotation_Mark": true,
+ "Radical": true,
+ "Range16": true,
+ "Range32": true,
+ "RangeTable": true,
+ "Regional_Indicator": true,
+ "Rejang": true,
+ "ReplacementChar": true,
+ "Runic": true,
+ "S": true,
+ "STerm": true,
+ "Samaritan": true,
+ "Saurashtra": true,
+ "Sc": true,
+ "Scripts": true,
+ "Sentence_Terminal": true,
+ "Sharada": true,
+ "Shavian": true,
+ "Siddham": true,
+ "SignWriting": true,
+ "SimpleFold": true,
+ "Sinhala": true,
+ "Sk": true,
+ "Sm": true,
+ "So": true,
+ "Soft_Dotted": true,
+ "Sora_Sompeng": true,
+ "Soyombo": true,
+ "Space": true,
+ "SpecialCase": true,
+ "Sundanese": true,
+ "Syloti_Nagri": true,
+ "Symbol": true,
+ "Syriac": true,
+ "Tagalog": true,
+ "Tagbanwa": true,
+ "Tai_Le": true,
+ "Tai_Tham": true,
+ "Tai_Viet": true,
+ "Takri": true,
+ "Tamil": true,
+ "Tangut": true,
+ "Telugu": true,
+ "Terminal_Punctuation": true,
+ "Thaana": true,
+ "Thai": true,
+ "Tibetan": true,
+ "Tifinagh": true,
+ "Tirhuta": true,
+ "Title": true,
+ "TitleCase": true,
+ "To": true,
+ "ToLower": true,
+ "ToTitle": true,
+ "ToUpper": true,
+ "TurkishCase": true,
+ "Ugaritic": true,
+ "Unified_Ideograph": true,
+ "Upper": true,
+ "UpperCase": true,
+ "UpperLower": true,
+ "Vai": true,
+ "Variation_Selector": true,
+ "Version": true,
+ "Warang_Citi": true,
+ "White_Space": true,
+ "Yi": true,
+ "Z": true,
+ "Zanabazar_Square": true,
+ "Zl": true,
+ "Zp": true,
+ "Zs": true,
+ },
+ "unicode/utf16": map[string]bool{
+ "Decode": true,
+ "DecodeRune": true,
+ "Encode": true,
+ "EncodeRune": true,
+ "IsSurrogate": true,
+ },
+ "unicode/utf8": map[string]bool{
+ "DecodeLastRune": true,
+ "DecodeLastRuneInString": true,
+ "DecodeRune": true,
+ "DecodeRuneInString": true,
+ "EncodeRune": true,
+ "FullRune": true,
+ "FullRuneInString": true,
+ "MaxRune": true,
+ "RuneCount": true,
+ "RuneCountInString": true,
+ "RuneError": true,
+ "RuneLen": true,
+ "RuneSelf": true,
+ "RuneStart": true,
+ "UTFMax": true,
+ "Valid": true,
+ "ValidRune": true,
+ "ValidString": true,
+ },
+ "unsafe": map[string]bool{
+ "Alignof": true,
+ "ArbitraryType": true,
+ "Offsetof": true,
+ "Pointer": true,
+ "Sizeof": true,
+ },
+}
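
The table that closes above is goimports' generated index of standard-library symbols: each import path maps to the set of identifiers that package exports. A minimal, self-contained sketch of consulting such an index to resolve an unqualified identifier, assuming a stdlib variable shaped like the literal in the diff (the truncated literal and the packagesExporting helper are illustrative, not part of the vendored file):

package main

import "fmt"

// stdlib mirrors the shape of the generated table above,
// truncated to two packages for illustration.
var stdlib = map[string]map[string]bool{
	"time":         {"Now": true, "Since": true, "Duration": true},
	"unicode/utf8": {"RuneLen": true, "Valid": true},
}

// packagesExporting returns every package path whose export set
// contains the given identifier.
func packagesExporting(ident string) []string {
	var paths []string
	for path, symbols := range stdlib {
		if symbols[ident] {
			paths = append(paths, path)
		}
	}
	return paths
}

func main() {
	fmt.Println(packagesExporting("Now")) // [time]
}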
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
new file mode 100644
index 000000000..7219c8e9f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -0,0 +1,196 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fastwalk provides a faster version of filepath.Walk for file system
+// scanning tools.
+package fastwalk
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+)
+
+// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// symlink named in the call may be traversed.
+var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+
+// SkipFiles is used as a return value from WalkFuncs to indicate that the
+// callback should not be called for any other files in the current directory.
+// Child directories will still be traversed.
+var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
+
+// Walk is a faster implementation of filepath.Walk.
+//
+// filepath.Walk's design necessarily calls os.Lstat on each file,
+// even if the caller needs less info.
+// Many tools need only the type of each file.
+// On some platforms, this information is provided directly by the readdir
+// system call, avoiding the need to stat each file individually.
+// fastwalk_unix.go contains a fork of the syscall routines.
+//
+// See golang.org/issue/16399
+//
+// Walk walks the file tree rooted at root, calling walkFn for
+// each file or directory in the tree, including root.
+//
+// If walkFn returns filepath.SkipDir, the directory is skipped.
+//
+// Unlike filepath.Walk:
+// * file stat calls must be done by the user.
+// The only provided metadata is the file type, which does not include
+// any permission bits.
+// * multiple goroutines stat the filesystem concurrently. The provided
+// walkFn must be safe for concurrent use.
+// * Walk can follow symlinks if walkFn returns the TraverseLink
+// sentinel error. It is the walkFn's responsibility to prevent
+// Walk from going into symlink cycles.
+func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
+ // TODO(bradfitz): make numWorkers configurable? We used a
+ // minimum of 4 to give the kernel more info about multiple
+ // things we want, in hopes its I/O scheduling can take
+ // advantage of that. Hopefully most are in cache. Maybe 4 is
+ // even too low of a minimum. Profile more.
+ numWorkers := 4
+ if n := runtime.NumCPU(); n > numWorkers {
+ numWorkers = n
+ }
+
+ // Make sure to wait for all workers to finish, otherwise
+ // walkFn could still be called after returning. This Wait call
+ // runs after close(e.donec) below.
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ w := &walker{
+ fn: walkFn,
+ enqueuec: make(chan walkItem, numWorkers), // buffered for performance
+ workc: make(chan walkItem, numWorkers), // buffered for performance
+ donec: make(chan struct{}),
+
+ // buffered for correctness & not leaking goroutines:
+ resc: make(chan error, numWorkers),
+ }
+ defer close(w.donec)
+
+ for i := 0; i < numWorkers; i++ {
+ wg.Add(1)
+ go w.doWork(&wg)
+ }
+ todo := []walkItem{{dir: root}}
+ out := 0
+ for {
+ workc := w.workc
+ var workItem walkItem
+ if len(todo) == 0 {
+ workc = nil
+ } else {
+ workItem = todo[len(todo)-1]
+ }
+ select {
+ case workc <- workItem:
+ todo = todo[:len(todo)-1]
+ out++
+ case it := <-w.enqueuec:
+ todo = append(todo, it)
+ case err := <-w.resc:
+ out--
+ if err != nil {
+ return err
+ }
+ if out == 0 && len(todo) == 0 {
+ // It's safe to quit here, as long as the buffered
+ // enqueue channel isn't also readable, which might
+ // happen if the worker sends both another unit of
+ // work and its result before the other select was
+ // scheduled and both w.resc and w.enqueuec were
+ // readable.
+ select {
+ case it := <-w.enqueuec:
+ todo = append(todo, it)
+ default:
+ return nil
+ }
+ }
+ }
+ }
+}
+
+// doWork reads directories as instructed (via workc) and runs the
+// user's callback function.
+func (w *walker) doWork(wg *sync.WaitGroup) {
+ defer wg.Done()
+ for {
+ select {
+ case <-w.donec:
+ return
+ case it := <-w.workc:
+ select {
+ case <-w.donec:
+ return
+ case w.resc <- w.walk(it.dir, !it.callbackDone):
+ }
+ }
+ }
+}
+
+type walker struct {
+ fn func(path string, typ os.FileMode) error
+
+ donec chan struct{} // closed on fastWalk's return
+ workc chan walkItem // to workers
+ enqueuec chan walkItem // from workers
+ resc chan error // from workers
+}
+
+type walkItem struct {
+ dir string
+ callbackDone bool // callback already called; don't do it again
+}
+
+func (w *walker) enqueue(it walkItem) {
+ select {
+ case w.enqueuec <- it:
+ case <-w.donec:
+ }
+}
+
+func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
+ joined := dirName + string(os.PathSeparator) + baseName
+ if typ == os.ModeDir {
+ w.enqueue(walkItem{dir: joined})
+ return nil
+ }
+
+ err := w.fn(joined, typ)
+ if typ == os.ModeSymlink {
+ if err == TraverseLink {
+ // Set callbackDone so we don't call it twice for both the
+ // symlink-as-symlink and the symlink-as-directory later:
+ w.enqueue(walkItem{dir: joined, callbackDone: true})
+ return nil
+ }
+ if err == filepath.SkipDir {
+ // Permit SkipDir on symlinks too.
+ return nil
+ }
+ }
+ return err
+}
+
+func (w *walker) walk(root string, runUserCallback bool) error {
+ if runUserCallback {
+ err := w.fn(root, os.ModeDir)
+ if err == filepath.SkipDir {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return readDir(root, w.onDirEnt)
+}
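
A hedged sketch of driving Walk from a caller, following the contract documented above: walkFn runs on multiple goroutines, so shared state needs locking, and returning SkipFiles stops callbacks for the remaining files in a directory. The root path is illustrative, and since fastwalk is an internal package this only builds inside golang.org/x/tools itself:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	var mu sync.Mutex // walkFn is called from multiple goroutines
	var goDirs []string

	err := fastwalk.Walk("/tmp/src", func(path string, typ os.FileMode) error {
		if typ.IsRegular() && strings.HasSuffix(path, ".go") {
			mu.Lock()
			goDirs = append(goDirs, filepath.Dir(path))
			mu.Unlock()
			// One Go file marks the directory; skip its remaining files.
			return fastwalk.SkipFiles
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d directories containing Go files\n", len(goDirs))
}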
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
new file mode 100644
index 000000000..ccffec5ad
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd
+
+package fastwalk
+
+import "syscall"
+
+func direntInode(dirent *syscall.Dirent) uint64 {
+ return uint64(dirent.Fileno)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
new file mode 100644
index 000000000..ab7fbc0a9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin
+// +build !appengine
+
+package fastwalk
+
+import "syscall"
+
+func direntInode(dirent *syscall.Dirent) uint64 {
+ return uint64(dirent.Ino)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
new file mode 100644
index 000000000..a3b26a7ba
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd openbsd netbsd
+
+package fastwalk
+
+import "syscall"
+
+func direntNamlen(dirent *syscall.Dirent) uint64 {
+ return uint64(dirent.Namlen)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
new file mode 100644
index 000000000..e880d358b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build !appengine
+
+package fastwalk
+
+import (
+ "bytes"
+ "syscall"
+ "unsafe"
+)
+
+func direntNamlen(dirent *syscall.Dirent) uint64 {
+ const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
+ nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
+ const nameBufLen = uint16(len(nameBuf))
+ limit := dirent.Reclen - fixedHdr
+ if limit > nameBufLen {
+ limit = nameBufLen
+ }
+ nameLen := bytes.IndexByte(nameBuf[:limit], 0)
+ if nameLen < 0 {
+ panic("failed to find terminating 0 byte in dirent")
+ }
+ return uint64(nameLen)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
new file mode 100644
index 000000000..a906b8759
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
+
+package fastwalk
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// readDir calls fn for each directory entry in dirName.
+// It does not descend into directories or follow symlinks.
+// If fn returns a non-nil error, readDir returns with that error
+// immediately.
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
+ fis, err := ioutil.ReadDir(dirName)
+ if err != nil {
+ return err
+ }
+ skipFiles := false
+ for _, fi := range fis {
+ if fi.Mode().IsRegular() && skipFiles {
+ continue
+ }
+ if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
+ if err == SkipFiles {
+ skipFiles = true
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
new file mode 100644
index 000000000..3369b1a0b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd
+// +build !appengine
+
+package fastwalk
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const blockSize = 8 << 10
+
+// unknownFileMode is a sentinel (and bogus) os.FileMode
+// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
+const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
+
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
+ fd, err := syscall.Open(dirName, 0, 0)
+ if err != nil {
+ return &os.PathError{Op: "open", Path: dirName, Err: err}
+ }
+ defer syscall.Close(fd)
+
+ // The buffer must be at least a block long.
+ buf := make([]byte, blockSize) // stack-allocated; doesn't escape
+ bufp := 0 // starting read position in buf
+ nbuf := 0 // end valid data in buf
+ skipFiles := false
+ for {
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = syscall.ReadDirent(fd, buf)
+ if err != nil {
+ return os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ return nil
+ }
+ }
+ consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
+ bufp += consumed
+ if name == "" || name == "." || name == ".." {
+ continue
+ }
+ // Fallback for filesystems (like old XFS) that don't
+ // support Dirent.Type and have DT_UNKNOWN (0) there
+ // instead.
+ if typ == unknownFileMode {
+ fi, err := os.Lstat(dirName + "/" + name)
+ if err != nil {
+ // It got deleted in the meantime.
+ if os.IsNotExist(err) {
+ continue
+ }
+ return err
+ }
+ typ = fi.Mode() & os.ModeType
+ }
+ if skipFiles && typ.IsRegular() {
+ continue
+ }
+ if err := fn(dirName, name, typ); err != nil {
+ if err == SkipFiles {
+ skipFiles = true
+ continue
+ }
+ return err
+ }
+ }
+}
+
+func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
+ // golang.org/issue/15653
+ dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
+ panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
+ }
+ if len(buf) < int(dirent.Reclen) {
+ panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
+ }
+ consumed = int(dirent.Reclen)
+ if direntInode(dirent) == 0 { // File absent in directory.
+ return
+ }
+ switch dirent.Type {
+ case syscall.DT_REG:
+ typ = 0
+ case syscall.DT_DIR:
+ typ = os.ModeDir
+ case syscall.DT_LNK:
+ typ = os.ModeSymlink
+ case syscall.DT_BLK:
+ typ = os.ModeDevice
+ case syscall.DT_FIFO:
+ typ = os.ModeNamedPipe
+ case syscall.DT_SOCK:
+ typ = os.ModeSocket
+ case syscall.DT_UNKNOWN:
+ typ = unknownFileMode
+ default:
+ // Skip weird things.
+ // It's probably a DT_WHT (http://lwn.net/Articles/325369/)
+ // or something. Revisit if/when this package is moved outside
+ // of goimports. goimports only cares about regular files,
+ // symlinks, and directories.
+ return
+ }
+
+ nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
+ nameLen := direntNamlen(dirent)
+
+ // Special cases for common things:
+ if nameLen == 1 && nameBuf[0] == '.' {
+ name = "."
+ } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
+ name = ".."
+ } else {
+ name = string(nameBuf[:nameLen])
+ }
+ return
+}
diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
new file mode 100644
index 000000000..04bb96a36
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -0,0 +1,250 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gopathwalk is like filepath.Walk but specialized for finding Go
+// packages, particularly in $GOPATH and $GOROOT.
+package gopathwalk
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/tools/internal/fastwalk"
+)
+
+// Options controls the behavior of a Walk call.
+type Options struct {
+ Debug bool // Enable debug logging
+ ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
+}
+
+// RootType indicates the type of a Root.
+type RootType int
+
+const (
+ RootUnknown RootType = iota // an unclassified root
+ RootGOROOT // the $GOROOT source tree
+ RootGOPATH // the src directory of a $GOPATH entry
+ RootCurrentModule // the current module's directory
+ RootModuleCache // the module download cache
+ RootOther // any other root
+)
+
+// A Root is a starting point for a Walk.
+type Root struct {
+ Path string
+ Type RootType
+}
+
+// SrcDirsRoots returns the $GOROOT/src and $GOPATH/src roots from the given build context. Not modules-compatible.
+func SrcDirsRoots(ctx *build.Context) []Root {
+ var roots []Root
+ roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT})
+ for _, p := range filepath.SplitList(ctx.GOPATH) {
+ roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
+ }
+ return roots
+}
+
+// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
+// For each package found, add will be called with the absolute paths of the
+// containing source directory and the package directory. add may be called
+// concurrently, so it must be safe for concurrent use.
+func Walk(roots []Root, add func(root Root, dir string), opts Options) {
+ for _, root := range roots {
+ walkDir(root, add, opts)
+ }
+}
+
+func walkDir(root Root, add func(Root, string), opts Options) {
+ if _, err := os.Stat(root.Path); os.IsNotExist(err) {
+ if opts.Debug {
+ log.Printf("skipping nonexistant directory: %v", root.Path)
+ }
+ return
+ }
+ if opts.Debug {
+ log.Printf("scanning %s", root.Path)
+ }
+ w := &walker{
+ root: root,
+ add: add,
+ opts: opts,
+ }
+ w.init()
+ if err := fastwalk.Walk(root.Path, w.walk); err != nil {
+ log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
+ }
+
+ if opts.Debug {
+ log.Printf("scanned %s", root.Path)
+ }
+}
+
+// walker is the callback for fastwalk.Walk.
+type walker struct {
+ root Root // The source directory to scan.
+ add func(Root, string) // The callback that will be invoked for every possible Go package dir.
+ opts Options // Options passed to Walk by the user.
+
+ ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
+}
+
+// init initializes the walker based on its Options.
+func (w *walker) init() {
+ var ignoredPaths []string
+ if w.root.Type == RootModuleCache {
+ ignoredPaths = []string{"cache"}
+ }
+ if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
+ ignoredPaths = w.getIgnoredDirs(w.root.Path)
+ ignoredPaths = append(ignoredPaths, "v", "mod")
+ }
+
+ for _, p := range ignoredPaths {
+ full := filepath.Join(w.root.Path, p)
+ if fi, err := os.Stat(full); err == nil {
+ w.ignoredDirs = append(w.ignoredDirs, fi)
+ if w.opts.Debug {
+ log.Printf("Directory added to ignore list: %s", full)
+ }
+ } else if w.opts.Debug {
+ log.Printf("Error statting ignored directory: %v", err)
+ }
+ }
+}
+
+// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
+// of relative directories to ignore when scanning for go files.
+// The provided path is one of the $GOPATH entries with "src" appended.
+func (w *walker) getIgnoredDirs(path string) []string {
+ file := filepath.Join(path, ".goimportsignore")
+ slurp, err := ioutil.ReadFile(file)
+ if w.opts.Debug {
+ if err != nil {
+ log.Print(err)
+ } else {
+ log.Printf("Read %s", file)
+ }
+ }
+ if err != nil {
+ return nil
+ }
+
+ var ignoredDirs []string
+ bs := bufio.NewScanner(bytes.NewReader(slurp))
+ for bs.Scan() {
+ line := strings.TrimSpace(bs.Text())
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+ ignoredDirs = append(ignoredDirs, line)
+ }
+ return ignoredDirs
+}
+
+func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
+ for _, ignoredDir := range w.ignoredDirs {
+ if os.SameFile(fi, ignoredDir) {
+ return true
+ }
+ }
+ return false
+}
+
+func (w *walker) walk(path string, typ os.FileMode) error {
+ dir := filepath.Dir(path)
+ if typ.IsRegular() {
+ if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
+ // Doesn't make sense to have regular files
+ // directly in your $GOPATH/src or $GOROOT/src.
+ return fastwalk.SkipFiles
+ }
+ if !strings.HasSuffix(path, ".go") {
+ return nil
+ }
+
+ w.add(w.root, dir)
+ return fastwalk.SkipFiles
+ }
+ if typ == os.ModeDir {
+ base := filepath.Base(path)
+ if base == "" || base[0] == '.' || base[0] == '_' ||
+ base == "testdata" ||
+ (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
+ (!w.opts.ModulesEnabled && base == "node_modules") {
+ return filepath.SkipDir
+ }
+ fi, err := os.Lstat(path)
+ if err == nil && w.shouldSkipDir(fi) {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if typ == os.ModeSymlink {
+ base := filepath.Base(path)
+ if strings.HasPrefix(base, ".#") {
+ // Emacs noise.
+ return nil
+ }
+ fi, err := os.Lstat(path)
+ if err != nil {
+ // Just ignore it.
+ return nil
+ }
+ if w.shouldTraverse(dir, fi) {
+ return fastwalk.TraverseLink
+ }
+ }
+ return nil
+}
+
+// shouldTraverse reports whether the symlink fi, found in dir,
+// should be followed. It avoids symlink loops by checking whether the
+// link target is the same file as any ancestor directory of the link.
+func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
+ path := filepath.Join(dir, fi.Name())
+ target, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return false
+ }
+ ts, err := os.Stat(target)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return false
+ }
+ if !ts.IsDir() {
+ return false
+ }
+ if w.shouldSkipDir(ts) {
+ return false
+ }
+ // Check for symlink loops by statting each directory component
+ // and seeing if any are the same file as ts.
+ for {
+ parent := filepath.Dir(path)
+ if parent == path {
+ // Made it to the root without seeing a cycle.
+ // Use this symlink.
+ return true
+ }
+ parentInfo, err := os.Stat(parent)
+ if err != nil {
+ return false
+ }
+ if os.SameFile(ts, parentInfo) {
+ // Cycle. Don't traverse.
+ return false
+ }
+ path = parent
+ }
+
+}
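
A short sketch of the intended calling pattern, combining SrcDirsRoots and Walk much as goimports does; the add callback may run concurrently, so it synchronizes access to shared state (the internal-package caveat applies here too):

package main

import (
	"fmt"
	"go/build"
	"sync"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	var mu sync.Mutex // add is invoked concurrently
	pkgDirs := make(map[string]gopathwalk.RootType)

	roots := gopathwalk.SrcDirsRoots(&build.Default)
	gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
		mu.Lock()
		pkgDirs[dir] = root.Type
		mu.Unlock()
	}, gopathwalk.Options{})

	fmt.Printf("found %d candidate package directories\n", len(pkgDirs))
}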
diff --git a/vendor/golang.org/x/tools/internal/module/module.go b/vendor/golang.org/x/tools/internal/module/module.go
new file mode 100644
index 000000000..9a4edb9de
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/module/module.go
@@ -0,0 +1,540 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package module defines the module.Version type
+// along with support code.
+package module
+
+// IMPORTANT NOTE
+//
+// This file essentially defines the set of valid import paths for the go command.
+// There are many subtle considerations, including Unicode ambiguity,
+// security, network, and file system representations.
+//
+// This file also defines the set of valid module path and version combinations,
+// another topic with many subtle considerations.
+//
+// Changes to the semantics in this file require approval from rsc.
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/internal/semver"
+)
+
+// A Version is defined by a module path and version pair.
+type Version struct {
+ Path string
+
+ // Version is usually a semantic version in canonical form.
+ // There are two exceptions to this general rule.
+ // First, the top-level target of a build has no specific version
+ // and uses Version = "".
+ // Second, during MVS calculations the version "none" is used
+ // to represent the decision to take no version of a given module.
+ Version string `json:",omitempty"`
+}
+
+// Check checks that a given module path, version pair is valid.
+// In addition to the path being a valid module path
+// and the version being a valid semantic version,
+// the two must correspond.
+// For example, the path "yaml/v2" only corresponds to
+// semantic versions beginning with "v2.".
+func Check(path, version string) error {
+ if err := CheckPath(path); err != nil {
+ return err
+ }
+ if !semver.IsValid(version) {
+ return fmt.Errorf("malformed semantic version %v", version)
+ }
+ _, pathMajor, _ := SplitPathVersion(path)
+ if !MatchPathMajor(version, pathMajor) {
+ if pathMajor == "" {
+ pathMajor = "v0 or v1"
+ }
+ if pathMajor[0] == '.' { // .v1
+ pathMajor = pathMajor[1:]
+ }
+ return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
+ }
+ return nil
+}
+
+// firstPathOK reports whether r can appear in the first element of a module path.
+// The first element of the path must be an LDH domain name, at least for now.
+// To avoid case ambiguity, the domain name must be entirely lower case.
+func firstPathOK(r rune) bool {
+ return r == '-' || r == '.' ||
+ '0' <= r && r <= '9' ||
+ 'a' <= r && r <= 'z'
+}
+
+// pathOK reports whether r can appear in an import path element.
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
+// This matches what "go get" has historically recognized in import paths.
+// TODO(rsc): We would like to allow Unicode letters, but that requires additional
+// care in the safe encoding (see note below).
+func pathOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
+ '0' <= r && r <= '9' ||
+ 'A' <= r && r <= 'Z' ||
+ 'a' <= r && r <= 'z'
+ }
+ return false
+}
+
+// fileNameOK reports whether r can appear in a file name.
+// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
+// If we expand the set of allowed characters here, we have to
+// work harder at detecting potential case-folding and normalization collisions.
+// See note about "safe encoding" below.
+func fileNameOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ // Entire set of ASCII punctuation, from which we remove characters:
+ // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
+ // We disallow some shell special characters: " ' * < > ? ` |
+ // (Note that some of those are disallowed by the Windows file system as well.)
+ // We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
+ // We allow spaces (U+0020) in file names.
+ const allowed = "!#$%&()+,-.=@[]^_{}~ "
+ if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
+ return true
+ }
+ for i := 0; i < len(allowed); i++ {
+ if rune(allowed[i]) == r {
+ return true
+ }
+ }
+ return false
+ }
+ // It may be OK to add more ASCII punctuation here, but only carefully.
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
+ return unicode.IsLetter(r)
+}
+
+// CheckPath checks that a module path is valid.
+func CheckPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed module path %q: %v", path, err)
+ }
+ i := strings.Index(path, "/")
+ if i < 0 {
+ i = len(path)
+ }
+ if i == 0 {
+ return fmt.Errorf("malformed module path %q: leading slash", path)
+ }
+ if !strings.Contains(path[:i], ".") {
+ return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
+ }
+ if path[0] == '-' {
+ return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
+ }
+ for _, r := range path[:i] {
+ if !firstPathOK(r) {
+ return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
+ }
+ }
+ if _, _, ok := SplitPathVersion(path); !ok {
+ return fmt.Errorf("malformed module path %q: invalid version", path)
+ }
+ return nil
+}
+
+// CheckImportPath checks that an import path is valid.
+func CheckImportPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed import path %q: %v", path, err)
+ }
+ return nil
+}
+
+// checkPath checks that a general path is valid.
+// It returns an error describing why but not mentioning path.
+// Because these checks apply to both module paths and import paths,
+// the caller is expected to add the "malformed ___ path %q: " prefix.
+// fileName indicates whether the final element of the path is a file name
+// (as opposed to a directory name).
+func checkPath(path string, fileName bool) error {
+ if !utf8.ValidString(path) {
+ return fmt.Errorf("invalid UTF-8")
+ }
+ if path == "" {
+ return fmt.Errorf("empty string")
+ }
+ if strings.Contains(path, "..") {
+ return fmt.Errorf("double dot")
+ }
+ if strings.Contains(path, "//") {
+ return fmt.Errorf("double slash")
+ }
+ if path[len(path)-1] == '/' {
+ return fmt.Errorf("trailing slash")
+ }
+ elemStart := 0
+ for i, r := range path {
+ if r == '/' {
+ if err := checkElem(path[elemStart:i], fileName); err != nil {
+ return err
+ }
+ elemStart = i + 1
+ }
+ }
+ if err := checkElem(path[elemStart:], fileName); err != nil {
+ return err
+ }
+ return nil
+}
+
+// checkElem checks whether an individual path element is valid.
+// fileName indicates whether the element is a file name (not a directory name).
+func checkElem(elem string, fileName bool) error {
+ if elem == "" {
+ return fmt.Errorf("empty path element")
+ }
+ if strings.Count(elem, ".") == len(elem) {
+ return fmt.Errorf("invalid path element %q", elem)
+ }
+ if elem[0] == '.' && !fileName {
+ return fmt.Errorf("leading dot in path element")
+ }
+ if elem[len(elem)-1] == '.' {
+ return fmt.Errorf("trailing dot in path element")
+ }
+ charOK := pathOK
+ if fileName {
+ charOK = fileNameOK
+ }
+ for _, r := range elem {
+ if !charOK(r) {
+ return fmt.Errorf("invalid char %q", r)
+ }
+ }
+
+ // Windows disallows a bunch of path elements, sadly.
+ // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+ short := elem
+ if i := strings.Index(short, "."); i >= 0 {
+ short = short[:i]
+ }
+ for _, bad := range badWindowsNames {
+ if strings.EqualFold(bad, short) {
+ return fmt.Errorf("disallowed path element %q", elem)
+ }
+ }
+ return nil
+}
+
+// CheckFilePath checks whether a slash-separated file path is valid.
+func CheckFilePath(path string) error {
+ if err := checkPath(path, true); err != nil {
+ return fmt.Errorf("malformed file path %q: %v", path, err)
+ }
+ return nil
+}
+
+// badWindowsNames are the reserved file path elements on Windows.
+// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+var badWindowsNames = []string{
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+}
+
+// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
+// and pathMajor is either empty or "/vN" for N >= 2.
+// As a special case, gopkg.in paths are recognized directly;
+// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
+func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
+ if strings.HasPrefix(path, "gopkg.in/") {
+ return splitGopkgIn(path)
+ }
+
+ i := len(path)
+ dot := false
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
+ if path[i-1] == '.' {
+ dot = true
+ }
+ i--
+ }
+ if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
+ return path, "", true
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
+func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
+ if !strings.HasPrefix(path, "gopkg.in/") {
+ return path, "", false
+ }
+ i := len(path)
+ if strings.HasSuffix(path, "-unstable") {
+ i -= len("-unstable")
+ }
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
+ i--
+ }
+ if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
+ // All gopkg.in paths must end in vN for some N.
+ return path, "", false
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// MatchPathMajor reports whether the semantic version v
+// matches the path major version pathMajor.
+func MatchPathMajor(v, pathMajor string) bool {
+ if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+ pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+ }
+ if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
+ // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
+ // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
+ return true
+ }
+ m := semver.Major(v)
+ if pathMajor == "" {
+ return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
+ }
+ return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
+}
+
+// CanonicalVersion returns the canonical form of the version string v.
+// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
+func CanonicalVersion(v string) string {
+ cv := semver.Canonical(v)
+ if semver.Build(v) == "+incompatible" {
+ cv += "+incompatible"
+ }
+ return cv
+}
+
+// Sort sorts the list by Path, breaking ties by comparing Versions.
+func Sort(list []Version) {
+ sort.Slice(list, func(i, j int) bool {
+ mi := list[i]
+ mj := list[j]
+ if mi.Path != mj.Path {
+ return mi.Path < mj.Path
+ }
+ // To help go.sum formatting, allow version/file.
+ // Compare semver prefix by semver rules,
+ // file by string order.
+ vi := mi.Version
+ vj := mj.Version
+ var fi, fj string
+ if k := strings.Index(vi, "/"); k >= 0 {
+ vi, fi = vi[:k], vi[k:]
+ }
+ if k := strings.Index(vj, "/"); k >= 0 {
+ vj, fj = vj[:k], vj[k:]
+ }
+ if vi != vj {
+ return semver.Compare(vi, vj) < 0
+ }
+ return fi < fj
+ })
+}
+
+// Safe encodings
+//
+// Module paths appear as substrings of file system paths
+// (in the download cache) and of web server URLs in the proxy protocol.
+// In general we cannot rely on file systems to be case-sensitive,
+// nor can we rely on web servers, since they read from file systems.
+// That is, we cannot rely on the file system to keep rsc.io/QUOTE
+// and rsc.io/quote separate. Windows and macOS don't.
+// Instead, we must never require two different casings of a file path.
+// Because we want the download cache to match the proxy protocol,
+// and because we want the proxy protocol to be possible to serve
+// from a tree of static files (which might be stored on a case-insensitive
+// file system), the proxy protocol must never require two different casings
+// of a URL path either.
+//
+// One possibility would be to make the safe encoding be the lowercase
+// hexadecimal encoding of the actual path bytes. This would avoid ever
+// needing different casings of a file path, but it would be fairly illegible
+// to most programmers when those paths appeared in the file system
+// (including in file paths in compiler errors and stack traces)
+// in web server logs, and so on. Instead, we want a safe encoding that
+// leaves most paths unaltered.
+//
+// The safe encoding is this:
+// replace every uppercase letter with an exclamation mark
+// followed by the letter's lowercase equivalent.
+//
+// For example,
+// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
+// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus
+//
+// Import paths that avoid upper-case letters are left unchanged.
+// Note that because import paths are ASCII-only and avoid various
+// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
+// and avoids the same problematic punctuation.
+//
+// Import paths have never allowed exclamation marks, so there is no
+// need to define how to encode a literal !.
+//
+// Although paths are disallowed from using Unicode (see pathOK above),
+// the eventual plan is to allow Unicode letters as well, to assume that
+// file systems and URLs are Unicode-safe (storing UTF-8), and apply
+// the !-for-uppercase convention. Note however that not all runes that
+// are different but case-fold equivalent are an upper/lower pair.
+// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
+// are considered to case-fold to each other. When we do add Unicode
+// letters, we must not assume that upper/lower are the only case-equivalent pairs.
+// Perhaps the Kelvin symbol would be disallowed entirely, for example.
+// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
+//
+// Also, it would be nice to allow Unicode marks as well as letters,
+// but marks include combining marks, and then we must deal not
+// only with case folding but also normalization: both U+00E9 ('é')
+// and U+0065 U+0301 ('e' followed by combining acute accent)
+// look the same on the page and are treated by some file systems
+// as the same path. If we do allow Unicode marks in paths, there
+// must be some kind of normalization to allow only one canonical
+// encoding of any character used in an import path.
+
+// EncodePath returns the safe encoding of the given module path.
+// It fails if the module path is invalid.
+func EncodePath(path string) (encoding string, err error) {
+ if err := CheckPath(path); err != nil {
+ return "", err
+ }
+
+ return encodeString(path)
+}
+
+// EncodeVersion returns the safe encoding of the given module version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func EncodeVersion(v string) (encoding string, err error) {
+ if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
+ return "", fmt.Errorf("disallowed version string %q", v)
+ }
+ return encodeString(v)
+}
+
+func encodeString(s string) (encoding string, err error) {
+ haveUpper := false
+ for _, r := range s {
+ if r == '!' || r >= utf8.RuneSelf {
+ // This should be disallowed by CheckPath, but diagnose anyway.
+ // The correctness of the encoding loop below depends on it.
+ return "", fmt.Errorf("internal error: inconsistency in EncodePath")
+ }
+ if 'A' <= r && r <= 'Z' {
+ haveUpper = true
+ }
+ }
+
+ if !haveUpper {
+ return s, nil
+ }
+
+ var buf []byte
+ for _, r := range s {
+ if 'A' <= r && r <= 'Z' {
+ buf = append(buf, '!', byte(r+'a'-'A'))
+ } else {
+ buf = append(buf, byte(r))
+ }
+ }
+ return string(buf), nil
+}
+
+// DecodePath returns the module path of the given safe encoding.
+// It fails if the encoding is invalid or encodes an invalid path.
+func DecodePath(encoding string) (path string, err error) {
+ path, ok := decodeString(encoding)
+ if !ok {
+ return "", fmt.Errorf("invalid module path encoding %q", encoding)
+ }
+ if err := CheckPath(path); err != nil {
+ return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
+ }
+ return path, nil
+}
+
+// DecodeVersion returns the version string for the given safe encoding.
+// It fails if the encoding is invalid or encodes an invalid version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func DecodeVersion(encoding string) (v string, err error) {
+ v, ok := decodeString(encoding)
+ if !ok {
+ return "", fmt.Errorf("invalid version encoding %q", encoding)
+ }
+ if err := checkElem(v, true); err != nil {
+ return "", fmt.Errorf("disallowed version string %q", v)
+ }
+ return v, nil
+}
+
+func decodeString(encoding string) (string, bool) {
+ var buf []byte
+
+ bang := false
+ for _, r := range encoding {
+ if r >= utf8.RuneSelf {
+ return "", false
+ }
+ if bang {
+ bang = false
+ if r < 'a' || 'z' < r {
+ return "", false
+ }
+ buf = append(buf, byte(r+'A'-'a'))
+ continue
+ }
+ if r == '!' {
+ bang = true
+ continue
+ }
+ if 'A' <= r && r <= 'Z' {
+ return "", false
+ }
+ buf = append(buf, byte(r))
+ }
+ if bang {
+ return "", false
+ }
+ return string(buf), true
+}
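
A small sketch exercising the safe encoding described in the comment block above: EncodePath and DecodePath should round-trip any valid module path, with uppercase letters becoming !-escaped lowercase (again, the internal import path means this only builds inside x/tools):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/internal/module"
)

func main() {
	enc, err := module.EncodePath("github.com/Azure/azure-sdk-for-go")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(enc) // github.com/!azure/azure-sdk-for-go

	dec, err := module.DecodePath(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dec) // github.com/Azure/azure-sdk-for-go
}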
diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go
new file mode 100644
index 000000000..4af7118e5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/semver/semver.go
@@ -0,0 +1,388 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+// parsed returns the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+ err string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+ pv, ok1 := parse(v)
+ pw, ok2 := parse(w)
+ if !ok1 && !ok2 {
+ return 0
+ }
+ if !ok1 {
+ return -1
+ }
+ if !ok2 {
+ return +1
+ }
+ if c := compareInt(pv.major, pw.major); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.minor, pw.minor); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.patch, pw.patch); c != 0 {
+ return c
+ }
+ return comparePrerelease(pv.prerelease, pw.prerelease)
+}
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+func Max(v, w string) string {
+ v = Canonical(v)
+ w = Canonical(w)
+ if Compare(v, w) > 0 {
+ return v
+ }
+ return w
+}
+
+func parse(v string) (p parsed, ok bool) {
+ if v == "" || v[0] != 'v' {
+ p.err = "missing v prefix"
+ return
+ }
+ p.major, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad major version"
+ return
+ }
+ if v == "" {
+ p.minor = "0"
+ p.patch = "0"
+ p.short = ".0.0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad minor prefix"
+ ok = false
+ return
+ }
+ p.minor, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad minor version"
+ return
+ }
+ if v == "" {
+ p.patch = "0"
+ p.short = ".0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad patch prefix"
+ ok = false
+ return
+ }
+ p.patch, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad patch version"
+ return
+ }
+ if len(v) > 0 && v[0] == '-' {
+ p.prerelease, v, ok = parsePrerelease(v)
+ if !ok {
+ p.err = "bad prerelease"
+ return
+ }
+ }
+ if len(v) > 0 && v[0] == '+' {
+ p.build, v, ok = parseBuild(v)
+ if !ok {
+ p.err = "bad build"
+ return
+ }
+ }
+ if v != "" {
+ p.err = "junk on end"
+ ok = false
+ return
+ }
+ ok = true
+ return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+ if v == "" {
+ return
+ }
+ if v[0] < '0' || '9' < v[0] {
+ return
+ }
+ i := 1
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ if v[0] == '0' && i != 1 {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+ // "A pre-release version MAY be denoted by appending a hyphen and
+ // a series of dot separated identifiers immediately following the patch version.
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+ if v == "" || v[0] != '-' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) && v[i] != '+' {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+ if v == "" || v[0] != '+' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v)
+}
+
+func compareInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func comparePrerelease(x, y string) int {
+ // "When major, minor, and patch are equal, a pre-release version has
+ // lower precedence than a normal version.
+ // Example: 1.0.0-alpha < 1.0.0.
+ // Precedence for two pre-release versions with the same major, minor,
+ // and patch version MUST be determined by comparing each dot separated
+ // identifier from left to right until a difference is found as follows:
+ // identifiers consisting of only digits are compared numerically and
+ // identifiers with letters or hyphens are compared lexically in ASCII
+ // sort order. Numeric identifiers always have lower precedence than
+ // non-numeric identifiers. A larger set of pre-release fields has a
+ // higher precedence than a smaller set, if all of the preceding
+ // identifiers are equal.
+ // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+ // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+ if x == y {
+ return 0
+ }
+ if x == "" {
+ return +1
+ }
+ if y == "" {
+ return -1
+ }
+ for x != "" && y != "" {
+ x = x[1:] // skip - or .
+ y = y[1:] // skip - or .
+ var dx, dy string
+ dx, x = nextIdent(x)
+ dy, y = nextIdent(y)
+ if dx != dy {
+ ix := isNum(dx)
+ iy := isNum(dy)
+ if ix != iy {
+ if ix {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if ix {
+ if len(dx) < len(dy) {
+ return -1
+ }
+ if len(dx) > len(dy) {
+ return +1
+ }
+ }
+ if dx < dy {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ }
+ if x == "" {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func nextIdent(x string) (dx, rest string) {
+ i := 0
+ for i < len(x) && x[i] != '.' {
+ i++
+ }
+ return x[:i], x[i:]
+}
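
Because this copy of the package is vendored under `internal/`, it cannot be imported from outside x/tools; an in-package test is the natural way to exercise the rules documented above. The sketch below assumes only the functions defined in this file (the same surface later published as `golang.org/x/mod/semver`):

```Go
package semver

import "testing"

// TestPrecedence is a minimal sketch of the documented behavior: shorthand
// canonicalization, prerelease ordering, and build metadata being ignored.
func TestPrecedence(t *testing.T) {
	if !IsValid("v1.2.3-alpha.1+build.5") {
		t.Error("expected a valid semantic version")
	}
	// vMAJOR.MINOR is shorthand for vMAJOR.MINOR.0.
	if got := Canonical("v1.2"); got != "v1.2.0" {
		t.Errorf("Canonical(v1.2) = %q, want v1.2.0", got)
	}
	// A prerelease sorts before the release it precedes.
	if Compare("v1.0.0-alpha", "v1.0.0") != -1 {
		t.Error("expected v1.0.0-alpha < v1.0.0")
	}
	// Build metadata does not affect precedence.
	if Compare("v1.0.0+linux", "v1.0.0+darwin") != 0 {
		t.Error("expected build metadata to be ignored")
	}
}
```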
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 000000000..8da58fbf6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 000000000..866d74a7a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 000000000..b50c6e877
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
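The Compatibility section mentions anchor and merge-key support, which the example above does not exercise. The hedged sketch below (the host names and values are made up for illustration) merges a `&defaults` anchor into two mappings with `<<`, with an explicit key overriding a merged one:

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// defaults is anchored once and merged into each entry via the YAML
// merge key "<<"; an explicit key such as db's port overrides the
// merged value.
var hosts = `
defaults: &defaults
  port: 8080
  tls: true
web:
  <<: *defaults
  host: web.example.com
db:
  <<: *defaults
  port: 5432
  host: db.example.com
`

func main() {
	var m map[string]map[string]interface{}
	if err := yaml.Unmarshal([]byte(hosts), &m); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Println(m["web"]["port"], m["db"]["port"]) // 8080 5432
}
```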
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 000000000..1f7e87e67
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
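
The unexported setup routines in this file (`yaml_parser_set_input_reader`, `yaml_emitter_set_output_writer`, and friends) back the package's public streaming types. Below is a minimal round-trip sketch using only the documented `yaml.NewEncoder`/`yaml.NewDecoder` API, assuming nothing beyond the handlers defined above:

```Go
package main

import (
	"bytes"
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	// yaml.NewEncoder wires an io.Writer through the emitter's
	// write handler (yaml_emitter_set_output_writer).
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	if err := enc.Encode(map[string]int{"replicas": 3}); err != nil {
		log.Fatalf("error: %v", err)
	}
	if err := enc.Close(); err != nil {
		log.Fatalf("error: %v", err)
	}

	// yaml.NewDecoder reads back through the parser's read handler
	// (yaml_parser_set_input_reader).
	dec := yaml.NewDecoder(&buf)
	var out map[string]int
	if err := dec.Decode(&out); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Println(out["replicas"]) // 3
}
```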
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 000000000..e4e56e28e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,775 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't advance the line counter before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
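+// Illustrative sketch (not part of the upstream file): the parser above
+// turns the libyaml event stream into the node tree that the decoder
+// below consumes. Roughly:
+//
+//	p := newParser([]byte("a: 1"))
+//	defer p.destroy()
+//	doc := p.parse()        // documentNode
+//	root := doc.children[0] // mappingNode; children are the scalars "a" and "1"
+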
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ good = d.unmarshal(n.alias, out)
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ return true
+ }
+ if resolved != nil {
+ out.SetString(n.value)
+ return true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ return true
+ }
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
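+
+// Commentary (added for this listing; not upstream yaml.v2): decoding a
+// sequence into a typed slice sizes it to the node length, and elements that
+// fail to unmarshal are dropped, so the result can be shorter than the input:
+//
+//     var v []int
+//     yaml.Unmarshal([]byte("[1, 2, 3]"), &v) // v == []int{1, 2, 3}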
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ d.setMapIndex(n.children[i+1], out, k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
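+
+// Commentary (added for this listing; not upstream yaml.v2): under
+// UnmarshalStrict, a duplicate key such as
+//
+//     a: 1
+//     a: 2
+//
+// is recorded as `line 2: key "a" already set in map` and surfaces in the
+// returned *TypeError, instead of the last value silently winning.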
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
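+
+// Commentary (added for this listing; not upstream yaml.v2): decoding into
+// yaml.MapSlice routes through mappingSlice and preserves document order:
+//
+//     var ms yaml.MapSlice
+//     yaml.Unmarshal([]byte("b: 1\na: 2"), &ms)
+//     // ms == yaml.MapSlice{{Key: "b", Value: 1}, {Key: "a", Value: 2}}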
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
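+
+// Commentary (added for this listing; not upstream yaml.v2): merge resolves
+// YAML's "<<" key. For example,
+//
+//     base: &base {a: 1, b: 2}
+//     derived:
+//       <<: *base
+//       b: 3
+//
+// decodes derived as {a: 1, b: 3}: merged keys land first, so keys written
+// after the merge override them.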
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 000000000..a1c2cc526
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
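+
+// Commentary (added for this listing; not upstream yaml.v2): write copies a
+// whole UTF-8 sequence per call yet advances emitter.column by only one, so
+// a three-byte rune such as "€" consumes three buffer bytes but counts as a
+// single column for line-width decisions.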
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate these extra events:
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
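+
+// Commentary (added for this listing; not upstream yaml.v2): the lookahead
+// above lets the emitter pick an output style before writing anything. After
+// MAPPING-START, for example, it waits for up to three more events (or a
+// balanced subtree), enough for the empty-collection and simple-key checks
+// below to inspect what follows without emitting it.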
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] This port never treats the document content as an empty scalar.
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
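+
+// Commentary (added for this listing; not upstream yaml.v2): the 128-byte
+// limit matches the C libyaml emitter; anything longer is written with the
+// explicit "? " key form rather than as a simple key.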
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
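+
+// Commentary (added for this listing; not upstream yaml.v2): the style only
+// ever degrades toward heavier quoting here: plain falls back to
+// single-quoted when indicators or edge whitespace forbid it, and
+// single-quoted falls back to double-quoted when special characters force
+// escapes, so the selected style can always represent the value.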
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go] Why the 'z' in is_blankz? It can't be the end of the string, as the loop condition rules that out.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
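+
+// Commentary (added for this listing; not upstream yaml.v2): bytes outside
+// the URI-safe set above are percent-encoded, so a space in a tag suffix is
+// written as "%20" and each byte of a multibyte UTF-8 sequence becomes its
+// own "%XX" triplet.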
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
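+
+// Commentary (added for this listing; not upstream yaml.v2): the chomping
+// hint maps onto YAML block scalar headers: no trailing line break emits
+// "-" (strip), content followed by exactly one break emits no hint (clip),
+// and extra trailing breaks emit "+" (keep). The same logic serves both "|"
+// and ">" scalars.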
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 000000000..0ee738e11
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and string(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+ Float64() (float64, error)
+ Int64() (int64, error)
+ String() string
+}
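+
+// Commentary (added for this listing; not upstream yaml.v2):
+// encoding/json.Number satisfies this interface, so numbers decoded with
+// json.Decoder.UseNumber() re-encode to YAML as numbers whenever Int64 or
+// Float64 succeeds, falling back to the string form otherwise.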
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ // doneInit holds whether the initial stream_start_event has been
+ // emitted.
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch m := iface.(type) {
+ case jsonNumber:
+ integer, err := m.Int64()
+ if err == nil {
+ // In this case the json.Number is a valid int64
+ in = reflect.ValueOf(integer)
+ break
+ }
+ float, err := m.Float64()
+ if err == nil {
+ // In this case the json.Number is a valid float64
+ in = reflect.ValueOf(float)
+ break
+ }
+ // fallback case - no number could be obtained
+ in = reflect.ValueOf(m.String())
+ case time.Time, *time.Time:
+ // Although time.Time implements TextMarshaler,
+ // we don't want to treat it as a string for YAML
+ // purposes because YAML has special support for
+ // timestamps.
+ case Marshaler:
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ case encoding.TextMarshaler:
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.Type() == ptrTimeType {
+ e.timev(tag, in.Elem())
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ if in.Type() == timeType {
+ e.timev(tag, in)
+ } else {
+ e.structv(tag, in)
+ }
+ case reflect.Slice, reflect.Array:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
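+
+// Editor's note: a minimal illustrative sketch, not part of upstream
+// yaml.v2, showing which strings the check above flags so that stringv
+// quotes them rather than emitting plain scalars. The helper name is
+// hypothetical.
+func exampleIsBase60Float() map[string]bool {
+ return map[string]bool{
+ "1:20:30": isBase60Float("1:20:30"), // true: YAML 1.1 sexagesimal
+ "190:20:30.15": isBase60Float("190:20:30.15"), // true: fraction allowed
+ "12:61": isBase60Float("12:61"), // false: 61 is not a base 60 digit
+ "foo:1": isBase60Float("foo:1"), // false: must start with a sign or digit
+ }
+}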
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if it explicitly specifies a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
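+
+// Editor's note: a minimal illustrative sketch, not part of upstream
+// yaml.v2. The durationType special case in marshal above means a
+// time.Duration is emitted via its String() form rather than as a raw
+// nanosecond integer. The helper name is hypothetical.
+func exampleDurationScalar() string {
+ return (90 * time.Second).String() // marshalled as the scalar "1m30s"
+}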
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 000000000..1934e8769
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 000000000..81d05dfe5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
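+// Editor's note: a minimal illustrative sketch, not part of the upstream
+// port. For the document "a: [b, c]" the grammar above drives the state
+// machine through the event sequence below (using the yaml_event_type_t
+// constants defined elsewhere in this package); the helper name is
+// hypothetical.
+func exampleEventSequence() []yaml_event_type_t {
+ return []yaml_event_type_t{
+ yaml_STREAM_START_EVENT,
+ yaml_DOCUMENT_START_EVENT, // implicit document
+ yaml_MAPPING_START_EVENT, // block mapping
+ yaml_SCALAR_EVENT, // key "a"
+ yaml_SEQUENCE_START_EVENT, // flow sequence "[b, c]"
+ yaml_SCALAR_EVENT, // "b"
+ yaml_SCALAR_EVENT, // "c"
+ yaml_SEQUENCE_END_EVENT,
+ yaml_MAPPING_END_EVENT,
+ yaml_DOCUMENT_END_EVENT,
+ yaml_STREAM_END_EVENT,
+ }
+}
+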
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
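+
+// Editor's note: a minimal illustrative sketch, not part of the upstream
+// port, mirroring the handle-matching loop in yaml_parser_parse_node:
+// with the defaults above, a "!!str" tag expands to
+// "tag:yaml.org,2002:str". The helper name is hypothetical.
+func exampleExpandTag(handle, suffix []byte) []byte {
+ for i := range default_tag_directives {
+ if bytes.Equal(default_tag_directives[i].handle, handle) {
+ out := append([]byte(nil), default_tag_directives[i].prefix...)
+ return append(out, suffix...)
+ }
+ }
+ return nil // undefined handle; the parser reports an error instead
+}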
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 000000000..7c1f5fac3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
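+
+// Editor's note: a minimal illustrative sketch, not part of the upstream
+// port, tabulating the BOM detection above: the leading bytes select the
+// encoding and the BOM itself is consumed; with no BOM, UTF-8 is assumed.
+// The helper name is hypothetical.
+func exampleBOMEncodings() map[string]yaml_encoding_t {
+ return map[string]yaml_encoding_t{
+ "\xef\xbb\xbfa": yaml_UTF8_ENCODING, // UTF-8 BOM, 3 bytes skipped
+ "\xff\xfea\x00": yaml_UTF16LE_ENCODING, // FF FE: UTF-16 little-endian
+ "\xfe\xff\x00a": yaml_UTF16BE_ENCODING, // FE FF: UTF-16 big-endian
+ "a": yaml_UTF8_ENCODING, // no BOM: UTF-8 assumed
+ }
+}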
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above
+ // implies that it should be the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is, in Go, panicking or, in C, accessing invalid memory.
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is, in Go, panicking or, in C, accessing invalid memory.
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
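+
+// Editor's note: a minimal illustrative sketch, not part of the upstream
+// port, of the surrogate-pair arithmetic above: UTF-16 encodes U+1F600 as
+// the pair D83D DE00, and the decoder recombines the two 10-bit halves.
+// The helper name is hypothetical.
+func exampleSurrogatePair() rune {
+ const w1, w2 = 0xD83D, 0xDE00 // high and low surrogate halves
+ return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF) // U+1F600
+}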
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 000000000..6c151db6f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ }
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return yaml_STR_TAG, in
+}
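+
+// Editor's note: a minimal illustrative sketch, not part of upstream
+// yaml.v2, showing how resolve types a few plain (untagged) scalars:
+// "yes" -> !!bool true, "0x1F" -> !!int 31, "1_000" -> !!int 1000,
+// "6.8523e+5" -> !!float 685230, "2019-04-12" -> !!timestamp, "x" -> !!str.
+// The helper name is hypothetical.
+func exampleResolve() map[string]interface{} {
+ out := make(map[string]interface{})
+ for _, s := range []string{"yes", "0x1F", "1_000", "6.8523e+5", "2019-04-12", "x"} {
+ _, v := resolve("", s)
+ out[s] = v
+ }
+ return out
+}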
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
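+
+// Editor's note: a minimal illustrative usage sketch, not part of
+// upstream yaml.v2. The short date fields above mean unpadded values
+// parse as well as padded ones; the space-separated form without a time
+// zone is accepted, while the "-5" zone variant noted above is not. The
+// helper name is hypothetical.
+func exampleParseTimestamp() (time.Time, bool) {
+ return parseTimestamp("2001-12-14 21:59:43.10") // ok: space separated, no zone
+}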
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 000000000..077fd1dd2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// There are actually two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends
+// a block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
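+// trace is a debugging helper: it prints its arguments prefixed with "+++"
+// immediately, and returns a closure that prints them again prefixed with
+// "---", so it can be used as `defer trace("scan")()` to bracket a call.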
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
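+
+// Illustrative example (not part of upstream yaml.v2): for the input
+//
+// -item
+//
+// the '-' is not followed by a blank, so the block-entry check above does
+// not fire; the plain-scalar condition at the end matches instead, and the
+// dispatcher produces SCALAR("-item",plain) rather than BLOCK-ENTRY.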
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
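+
+// Illustrative example (not part of upstream yaml.v2): given
+//
+// this is
+// a key: value
+//
+// the potential simple key saved at "this" is opened on an earlier line than
+// the ':' indicator, so the function above marks it stale; a stale key that
+// was required (block context, at the indentation column) instead triggers
+// the "could not find expected ':'" error.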
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // If the current position may start a simple key, save it.
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// also append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
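+
+// Illustrative example (not part of upstream yaml.v2): for a mapping key "a"
+// at column 0 whose value is a sequence entry "- b" indented to column 2,
+// roll_indent first pushes -1 and sets indent to 0 (emitting
+// BLOCK-MAPPING-START), then pushes 0 and sets indent to 2 (emitting
+// BLOCK-SEQUENCE-START); at stream end, unroll_indent(-1) pops both levels,
+// appending one BLOCK-END token per pop.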
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily a simple one).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
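+
+// Illustrative example (not part of upstream yaml.v2): while scanning "a: 1",
+// a potential simple key is saved when "a" is read; on reaching ':', the
+// branch above retroactively inserts KEY at the saved token number (before
+// the already queued SCALAR("a",plain)), lets roll_indent add
+// BLOCK-MAPPING-START at the key's column, and only then appends the VALUE
+// token.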
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
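+
+// Illustrative example (not part of upstream yaml.v2): "%YAML 1.1" yields
+// major == 1 and minor == 1, while a component longer than max_number_length
+// digits (e.g. "%YAML 100.1") fails with "found extremely long version
+// number".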
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of a URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
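+
+// Illustrative example (not part of upstream yaml.v2): the sequence "%C3%A9"
+// decodes to the two octets 0xC3 0xA9, i.e. the UTF-8 encoding of 'é';
+// width(0xC3) == 2, so exactly one trailing octet is consumed and validated
+// against the 10xxxxxx pattern.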
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
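
The block- and flow-scalar scanners above are easiest to sanity-check through the package's public API. A minimal sketch (assuming the vendored gopkg.in/yaml.v2 is importable); the expected outputs follow from the chomping/folding logic and the escape switch shown above:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// '|' (literal) keeps line breaks, '>' (folded) joins lines with a
	// space, and the '-' chomping indicator strips the trailing break;
	// these are the cases yaml_parser_scan_block_scalar distinguishes.
	doc := []byte("lit: |\n  a\n  b\nfold: >\n  a\n  b\nstrip: |-\n  a\n")
	var out map[string]string
	if err := yaml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q %q\n", out["lit"], out["fold"], out["strip"])
	// "a\nb\n" "a b\n" "a"

	// Double-quoted scalars run through the escape switch in
	// yaml_parser_scan_flow_scalar ('\t', '\u00e9', ...).
	esc := []byte("esc: \"tab:\\t e:\\u00e9\"")
	if err := yaml.Unmarshal(esc, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out["esc"]) // "tab:\t e:é"
}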
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 000000000..4c45e660a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
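
keyList.Less gives map keys a natural ordering when encoding: numbers sort before strings, and digit runs inside strings compare numerically. A small sketch of the observable effect (again assuming the vendored package is importable):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// Without the digit-run comparison in keyList.Less, "item10" would
	// sort before "item2" lexicographically.
	m := map[string]int{"item10": 10, "item2": 2, "item1": 1}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 1
	// item2: 2
	// item10: 10
}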
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 000000000..a2dde608c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
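
yaml_emitter_flush only drains the working buffer through the write handler; at the public-API level that handler wraps the io.Writer passed to NewEncoder, and Encoder.Close performs the final flush. A short usage sketch:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	if err := enc.Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	// Close flushes any buffered output; dropping it risks a truncated
	// document.
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // a: 1
}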
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 000000000..de85aa4cd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be omitted if that method returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
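
The tag handling in getStructInfo and the strict mode described for UnmarshalStrict combine as follows. A hedged sketch; the Config type and its fields are invented for illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Config is a hypothetical type exercising the "yaml" tag flags parsed by
// getStructInfo: a custom key, omitempty, and an ,inline map.
type Config struct {
	Name  string            `yaml:"name"`
	Port  int               `yaml:"port,omitempty"`
	Extra map[string]string `yaml:",inline"`
}

func main() {
	data := []byte("name: demo\nsocket: /tmp/demo.sock\n")

	var c Config
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	// The unknown "socket" key lands in the inline map rather than being
	// dropped; with UnmarshalStrict and no inline map it would instead
	// surface as a *yaml.TypeError.
	fmt.Println(c.Extra["socket"]) // /tmp/demo.sock
}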
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 000000000..e25cee563
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,738 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// [in] buffer The buffer with bytes to be written.
+// [in] size The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position in the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 000000000..8110ce3c3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
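+
+// Editorial sketch (not part of the vendored upstream source): width lets a
+// scanner step through a buffer one rune at a time, assuming b holds valid
+// UTF-8 (width returns 0 for invalid leading bytes):
+//
+//	for i := 0; i < len(b); i += width(b[i]) {
+//		// b[i : i+width(b[i])] is exactly one encoded rune:
+//		// 1 byte for ASCII, up to 4 bytes for e.g. emoji.
+//	}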
diff --git a/vendor/honnef.co/go/tools/LICENSE b/vendor/honnef.co/go/tools/LICENSE
new file mode 100644
index 000000000..dfd031454
--- /dev/null
+++ b/vendor/honnef.co/go/tools/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 Dominik Honnef
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/honnef.co/go/tools/arg/arg.go b/vendor/honnef.co/go/tools/arg/arg.go
new file mode 100644
index 000000000..d9e42dbea
--- /dev/null
+++ b/vendor/honnef.co/go/tools/arg/arg.go
@@ -0,0 +1,39 @@
+package arg
+
+var args = map[string]int{
+ "(*sync.Pool).Put.x": 0,
+ "(*text/template.Template).Parse.text": 0,
+ "(io.Seeker).Seek.offset": 0,
+ "(time.Time).Sub.u": 0,
+ "append.elems": 1,
+ "append.slice": 0,
+ "bytes.Equal.a": 0,
+ "bytes.Equal.b": 1,
+ "encoding/binary.Write.data": 2,
+ "errors.New.text": 0,
+ "fmt.Printf.format": 0,
+ "fmt.Sprintf.a[0]": 1,
+ "fmt.Sprintf.format": 0,
+ "len.v": 0,
+ "make.size[0]": 1,
+ "make.size[1]": 2,
+ "make.t": 0,
+ "net/url.Parse.rawurl": 0,
+ "os.OpenFile.flag": 1,
+ "os/exec.Command.name": 0,
+ "os/signal.Notify.c": 0,
+ "regexp.Compile.expr": 0,
+ "runtime.SetFinalizer.finalizer": 1,
+ "runtime.SetFinalizer.obj": 0,
+ "sort.Sort.data": 0,
+ "time.Parse.layout": 0,
+ "time.Sleep.d": 0,
+}
+
+// Arg returns the positional index of the named argument, e.g.
+// Arg("fmt.Printf.format") == 0. It panics on unknown names.
+func Arg(name string) int {
+ n, ok := args[name]
+ if !ok {
+ panic("unknown argument " + name)
+ }
+ return n
+}
diff --git a/vendor/honnef.co/go/tools/callgraph/callgraph.go b/vendor/honnef.co/go/tools/callgraph/callgraph.go
new file mode 100644
index 000000000..d93a20a3a
--- /dev/null
+++ b/vendor/honnef.co/go/tools/callgraph/callgraph.go
@@ -0,0 +1,129 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package callgraph defines the call graph and various algorithms
+and utilities to operate on it.
+
+A call graph is a labelled directed graph whose nodes represent
+functions and whose edge labels represent syntactic function call
+sites. The presence of a labelled edge (caller, site, callee)
+indicates that caller may call callee at the specified call site.
+
+A call graph is a multigraph: it may contain multiple edges (caller,
+*, callee) connecting the same pair of nodes, so long as the edges
+differ by label; this occurs when one function calls another function
+from multiple call sites. Also, it may contain multiple edges
+(caller, site, *) that differ only by callee; this indicates a
+polymorphic call.
+
+A SOUND call graph is one that overapproximates the dynamic calling
+behaviors of the program in all possible executions. One call graph
+is more PRECISE than another if it is a smaller overapproximation of
+the dynamic behavior.
+
+All call graphs have a synthetic root node which is responsible for
+calling main() and init().
+
+Calls to built-in functions (e.g. panic, println) are not represented
+in the call graph; they are treated like built-in operators of the
+language.
+
+*/
+package callgraph // import "honnef.co/go/tools/callgraph"
+
+// TODO(adonovan): add a function to eliminate wrappers from the
+// callgraph, preserving topology.
+// More generally, we could eliminate "uninteresting" nodes such as
+// nodes from packages we don't care about.
+
+import (
+ "fmt"
+ "go/token"
+
+ "honnef.co/go/tools/ssa"
+)
+
+// A Graph represents a call graph.
+//
+// A graph may contain nodes that are not reachable from the root.
+// If the call graph is sound, such nodes indicate unreachable
+// functions.
+//
+type Graph struct {
+ Root *Node // the distinguished root node
+ Nodes map[*ssa.Function]*Node // all nodes by function
+}
+
+// New returns a new Graph with the specified root node.
+func New(root *ssa.Function) *Graph {
+ g := &Graph{Nodes: make(map[*ssa.Function]*Node)}
+ g.Root = g.CreateNode(root)
+ return g
+}
+
+// CreateNode returns the Node for fn, creating it if not present.
+func (g *Graph) CreateNode(fn *ssa.Function) *Node {
+ n, ok := g.Nodes[fn]
+ if !ok {
+ n = &Node{Func: fn, ID: len(g.Nodes)}
+ g.Nodes[fn] = n
+ }
+ return n
+}
+
+// A Node represents a node in a call graph.
+type Node struct {
+ Func *ssa.Function // the function this node represents
+ ID int // 0-based sequence number
+ In []*Edge // unordered set of incoming call edges (n.In[*].Callee == n)
+ Out []*Edge // unordered set of outgoing call edges (n.Out[*].Caller == n)
+}
+
+func (n *Node) String() string {
+ return fmt.Sprintf("n%d:%s", n.ID, n.Func)
+}
+
+// An Edge represents an edge in the call graph.
+//
+// Site is nil for edges originating in synthetic or intrinsic
+// functions, e.g. reflect.Call or the root of the call graph.
+type Edge struct {
+ Caller *Node
+ Site ssa.CallInstruction
+ Callee *Node
+}
+
+func (e Edge) String() string {
+ return fmt.Sprintf("%s --> %s", e.Caller, e.Callee)
+}
+
+func (e Edge) Description() string {
+ var prefix string
+ switch e.Site.(type) {
+ case nil:
+ return "synthetic call"
+ case *ssa.Go:
+ prefix = "concurrent "
+ case *ssa.Defer:
+ prefix = "deferred "
+ }
+ return prefix + e.Site.Common().Description()
+}
+
+func (e Edge) Pos() token.Pos {
+ if e.Site == nil {
+ return token.NoPos
+ }
+ return e.Site.Pos()
+}
+
+// AddEdge adds the edge (caller, site, callee) to the call graph.
+// Elimination of duplicate edges is the caller's responsibility.
+func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {
+ e := &Edge{caller, site, callee}
+ callee.In = append(callee.In, e)
+ caller.Out = append(caller.Out, e)
+}
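+
+// Editorial sketch (not part of the vendored upstream source): building a
+// tiny graph by hand, assuming rootFn, callerFn and calleeFn are
+// *ssa.Function values and site is an ssa.CallInstruction from callerFn:
+//
+//	g := New(rootFn)
+//	caller := g.CreateNode(callerFn)
+//	callee := g.CreateNode(calleeFn)
+//	AddEdge(caller, site, callee) // records caller --> callee at site
+//	_ = CalleesOf(caller)         // map with one entry: callee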
diff --git a/vendor/honnef.co/go/tools/callgraph/static/static.go b/vendor/honnef.co/go/tools/callgraph/static/static.go
new file mode 100644
index 000000000..5444e8411
--- /dev/null
+++ b/vendor/honnef.co/go/tools/callgraph/static/static.go
@@ -0,0 +1,35 @@
+// Package static computes the call graph of a Go program containing
+// only static call edges.
+package static // import "honnef.co/go/tools/callgraph/static"
+
+import (
+ "honnef.co/go/tools/callgraph"
+ "honnef.co/go/tools/ssa"
+ "honnef.co/go/tools/ssa/ssautil"
+)
+
+// CallGraph computes the call graph of the specified program
+// considering only static calls.
+//
+func CallGraph(prog *ssa.Program) *callgraph.Graph {
+ cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
+
+ // TODO(adonovan): opt: use only a single pass over the ssa.Program.
+ // TODO(adonovan): opt: this is slower than RTA (perhaps because
+ // the lower precision means so many edges are allocated)!
+ for f := range ssautil.AllFunctions(prog) {
+ fnode := cg.CreateNode(f)
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if site, ok := instr.(ssa.CallInstruction); ok {
+ if g := site.Common().StaticCallee(); g != nil {
+ gnode := cg.CreateNode(g)
+ callgraph.AddEdge(fnode, site, gnode)
+ }
+ }
+ }
+ }
+ }
+
+ return cg
+}
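+
+// Editorial sketch (not part of the vendored upstream source): typical use,
+// assuming prog is an *ssa.Program that has already been built and the fmt
+// package is imported:
+//
+//	cg := CallGraph(prog)
+//	cg.DeleteSyntheticNodes() // optional: inline wrappers, keep topology
+//	_ = callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
+//		fmt.Println(e) // "caller --> callee"
+//		return nil
+//	})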
diff --git a/vendor/honnef.co/go/tools/callgraph/util.go b/vendor/honnef.co/go/tools/callgraph/util.go
new file mode 100644
index 000000000..7aeda9641
--- /dev/null
+++ b/vendor/honnef.co/go/tools/callgraph/util.go
@@ -0,0 +1,181 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package callgraph
+
+import "honnef.co/go/tools/ssa"
+
+// This file provides various utilities over call graphs, such as
+// visitation and path search.
+
+// CalleesOf returns a new set containing all direct callees of the
+// caller node.
+//
+func CalleesOf(caller *Node) map[*Node]bool {
+ callees := make(map[*Node]bool)
+ for _, e := range caller.Out {
+ callees[e.Callee] = true
+ }
+ return callees
+}
+
+// GraphVisitEdges visits all the edges in graph g in depth-first order.
+// The edge function is called for each edge in postorder. If it
+// returns non-nil, visitation stops and GraphVisitEdges returns that
+// value.
+//
+func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
+ seen := make(map[*Node]bool)
+ var visit func(n *Node) error
+ visit = func(n *Node) error {
+ if !seen[n] {
+ seen[n] = true
+ for _, e := range n.Out {
+ if err := visit(e.Callee); err != nil {
+ return err
+ }
+ if err := edge(e); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ for _, n := range g.Nodes {
+ if err := visit(n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PathSearch finds an arbitrary path starting at node start and
+// ending at some node for which isEnd() returns true. On success,
+// PathSearch returns the path as an ordered list of edges; on
+// failure, it returns nil.
+//
+func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
+ stack := make([]*Edge, 0, 32)
+ seen := make(map[*Node]bool)
+ var search func(n *Node) []*Edge
+ search = func(n *Node) []*Edge {
+ if !seen[n] {
+ seen[n] = true
+ if isEnd(n) {
+ return stack
+ }
+ for _, e := range n.Out {
+ stack = append(stack, e) // push
+ if found := search(e.Callee); found != nil {
+ return found
+ }
+ stack = stack[:len(stack)-1] // pop
+ }
+ }
+ return nil
+ }
+ return search(start)
+}
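+
+// Editorial sketch (not part of the vendored upstream source): finding how
+// the root reaches a target function, assuming g is a *Graph, target is an
+// *ssa.Function, and fmt is imported:
+//
+//	path := PathSearch(g.Root, func(n *Node) bool { return n.Func == target })
+//	for _, e := range path {
+//		fmt.Println(e.Description(), "at", e.Pos())
+//	}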
+
+// DeleteSyntheticNodes removes from call graph g all nodes for
+// synthetic functions (except g.Root and package initializers),
+// preserving the topology. In effect, calls to synthetic wrappers
+// are "inlined".
+//
+func (g *Graph) DeleteSyntheticNodes() {
+ // Measurements on the standard library and go.tools show that
+ // resulting graph has ~15% fewer nodes and 4-8% fewer edges
+ // than the input.
+ //
+ // Inlining a wrapper of in-degree m, out-degree n adds m*n
+ // and removes m+n edges. Since most wrappers are monomorphic
+ // (n=1) this results in a slight reduction. Polymorphic
+ // wrappers (n>1), e.g. from embedding an interface value
+ // inside a struct to satisfy some interface, cause an
+ // increase in the graph, but they seem to be uncommon.
+
+ // Hash all existing edges to avoid creating duplicates.
+ edges := make(map[Edge]bool)
+ for _, cgn := range g.Nodes {
+ for _, e := range cgn.Out {
+ edges[*e] = true
+ }
+ }
+ for fn, cgn := range g.Nodes {
+ if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) {
+ continue // keep
+ }
+ for _, eIn := range cgn.In {
+ for _, eOut := range cgn.Out {
+ newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee}
+ if edges[newEdge] {
+ continue // don't add duplicate
+ }
+ AddEdge(eIn.Caller, eIn.Site, eOut.Callee)
+ edges[newEdge] = true
+ }
+ }
+ g.DeleteNode(cgn)
+ }
+}
+
+func isInit(fn *ssa.Function) bool {
+ return fn.Pkg != nil && fn.Pkg.Func("init") == fn
+}
+
+// DeleteNode removes node n and its edges from the graph g.
+// (NB: not efficient for batch deletion.)
+func (g *Graph) DeleteNode(n *Node) {
+ n.deleteIns()
+ n.deleteOuts()
+ delete(g.Nodes, n.Func)
+}
+
+// deleteIns deletes all incoming edges to n.
+func (n *Node) deleteIns() {
+ for _, e := range n.In {
+ removeOutEdge(e)
+ }
+ n.In = nil
+}
+
+// deleteOuts deletes all outgoing edges from n.
+func (n *Node) deleteOuts() {
+ for _, e := range n.Out {
+ removeInEdge(e)
+ }
+ n.Out = nil
+}
+
+// removeOutEdge removes edge.Caller's outgoing edge 'edge'.
+func removeOutEdge(edge *Edge) {
+ caller := edge.Caller
+ n := len(caller.Out)
+ for i, e := range caller.Out {
+ if e == edge {
+ // Replace it with the final element and shrink the slice.
+ caller.Out[i] = caller.Out[n-1]
+ caller.Out[n-1] = nil // aid GC
+ caller.Out = caller.Out[:n-1]
+ return
+ }
+ }
+ panic("edge not found: " + edge.String())
+}
+
+// removeInEdge removes edge.Callee's incoming edge 'edge'.
+func removeInEdge(edge *Edge) {
+ caller := edge.Callee
+ n := len(caller.In)
+ for i, e := range caller.In {
+ if e == edge {
+ // Replace it with the final element and shrink the slice.
+ caller.In[i] = caller.In[n-1]
+ caller.In[n-1] = nil // aid GC
+ caller.In = caller.In[:n-1]
+ return
+ }
+ }
+ panic("edge not found: " + edge.String())
+}
diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md
new file mode 100644
index 000000000..127d8edf3
--- /dev/null
+++ b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md
@@ -0,0 +1,15 @@
+# staticcheck
+
+_staticcheck_ offers extensive analysis of Go code, covering a myriad
+of categories. It will detect bugs, suggest code simplifications,
+point out dead code, and more.
+
+## Installation
+
+ go get honnef.co/go/tools/cmd/staticcheck
+
+## Documentation
+
+Detailed documentation can be found on
+[staticcheck.io](https://staticcheck.io/docs/).
+
diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
new file mode 100644
index 000000000..3c8d96475
--- /dev/null
+++ b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
@@ -0,0 +1,30 @@
+// staticcheck analyses Go code and makes it better.
+package main // import "honnef.co/go/tools/cmd/staticcheck"
+
+import (
+ "os"
+
+ "honnef.co/go/tools/lint"
+ "honnef.co/go/tools/lint/lintutil"
+ "honnef.co/go/tools/simple"
+ "honnef.co/go/tools/staticcheck"
+ "honnef.co/go/tools/stylecheck"
+ "honnef.co/go/tools/unused"
+)
+
+func main() {
+ fs := lintutil.FlagSet("staticcheck")
+ fs.Parse(os.Args[1:])
+
+ checkers := []lint.Checker{
+ simple.NewChecker(),
+ staticcheck.NewChecker(),
+ stylecheck.NewChecker(),
+ }
+
+ uc := unused.NewChecker(unused.CheckAll)
+ uc.ConsiderReflection = true
+ checkers = append(checkers, unused.NewLintChecker(uc))
+
+ lintutil.ProcessFlagSet(checkers, fs)
+}
diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go
new file mode 100644
index 000000000..112980b49
--- /dev/null
+++ b/vendor/honnef.co/go/tools/config/config.go
@@ -0,0 +1,162 @@
+package config
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/BurntSushi/toml"
+)
+
+// mergeLists concatenates b onto a fresh slice, expanding the special
+// element "inherit" into the contents of a.
+func mergeLists(a, b []string) []string {
+ out := make([]string, 0, len(a)+len(b))
+ for _, el := range b {
+ if el == "inherit" {
+ out = append(out, a...)
+ } else {
+ out = append(out, el)
+ }
+ }
+
+ return out
+}
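+
+// Editorial sketch (not part of the vendored upstream source): "inherit"
+// splices the parent list into the child list at that position, so
+//
+//	mergeLists([]string{"all", "-ST1000"}, []string{"inherit", "-ST1003"})
+//
+// yields []string{"all", "-ST1000", "-ST1003"}.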
+
+// normalizeList drops adjacent duplicate entries and panics if an
+// unresolved "inherit" marker remains.
+func normalizeList(list []string) []string {
+ if len(list) > 1 {
+ nlist := make([]string, 0, len(list))
+ nlist = append(nlist, list[0])
+ for i, el := range list[1:] {
+ if el != list[i] {
+ nlist = append(nlist, el)
+ }
+ }
+ list = nlist
+ }
+
+ for _, el := range list {
+ if el == "inherit" {
+ // This should never happen, because the default config
+ // should not use "inherit"
+ panic(`unresolved "inherit"`)
+ }
+ }
+
+ return list
+}
+
+func (cfg Config) Merge(ocfg Config) Config {
+ if ocfg.Checks != nil {
+ cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks)
+ }
+ if ocfg.Initialisms != nil {
+ cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms)
+ }
+ if ocfg.DotImportWhitelist != nil {
+ cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist)
+ }
+ if ocfg.HTTPStatusCodeWhitelist != nil {
+ cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist)
+ }
+ return cfg
+}
+
+type Config struct {
+ // TODO(dh): this implementation makes it impossible for external
+ // clients to add their own checkers with configuration. At the
+ // moment, we don't really care about that; we don't encourage
+ // that people use this package. In the future, we may. The
+ // obvious solution would be using map[string]interface{}, but
+ // that's obviously subpar.
+
+ Checks []string `toml:"checks"`
+ Initialisms []string `toml:"initialisms"`
+ DotImportWhitelist []string `toml:"dot_import_whitelist"`
+ HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
+}
+
+var defaultConfig = Config{
+ Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"},
+ Initialisms: []string{
+ "ACL", "API", "ASCII", "CPU", "CSS", "DNS",
+ "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
+ "IP", "JSON", "QPS", "RAM", "RPC", "SLA",
+ "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
+ "UDP", "UI", "GID", "UID", "UUID", "URI",
+ "URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
+ "XSS",
+ },
+ DotImportWhitelist: []string{},
+ HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
+}
+
+const configName = "staticcheck.conf"
+
+func parseConfigs(dir string) ([]Config, error) {
+ var out []Config
+
+ // TODO(dh): consider stopping at the GOPATH/module boundary
+ for dir != "" {
+ f, err := os.Open(filepath.Join(dir, configName))
+ if os.IsNotExist(err) {
+ ndir := filepath.Dir(dir)
+ if ndir == dir {
+ break
+ }
+ dir = ndir
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ var cfg Config
+ _, err = toml.DecodeReader(f, &cfg)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, cfg)
+ ndir := filepath.Dir(dir)
+ if ndir == dir {
+ break
+ }
+ dir = ndir
+ }
+ out = append(out, defaultConfig)
+ if len(out) < 2 {
+ return out, nil
+ }
+ // Reverse the slice so the default config comes first and configs
+ // from directories closer to dir are merged over it.
+ for i := 0; i < len(out)/2; i++ {
+ out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i]
+ }
+ return out, nil
+}
+
+func mergeConfigs(confs []Config) Config {
+ if len(confs) == 0 {
+ // This shouldn't happen because we always have at least a
+ // default config.
+ panic("trying to merge zero configs")
+ }
+ if len(confs) == 1 {
+ return confs[0]
+ }
+ conf := confs[0]
+ for _, oconf := range confs[1:] {
+ conf = conf.Merge(oconf)
+ }
+ return conf
+}
+
+func Load(dir string) (Config, error) {
+ confs, err := parseConfigs(dir)
+ if err != nil {
+ return Config{}, err
+ }
+ conf := mergeConfigs(confs)
+
+ conf.Checks = normalizeList(conf.Checks)
+ conf.Initialisms = normalizeList(conf.Initialisms)
+ conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist)
+ conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist)
+
+ return conf, nil
+}
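+
+// Editorial sketch (not part of the vendored upstream source): loading the
+// effective configuration for a package directory; staticcheck.conf files in
+// parent directories are merged, with nearer files overriding farther ones.
+// The path and the log/fmt imports are assumptions:
+//
+//	cfg, err := Load("/path/to/project/pkg")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(cfg.Checks) // e.g. [all -ST1000 -ST1003 -ST1016]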
diff --git a/vendor/honnef.co/go/tools/config/example.conf b/vendor/honnef.co/go/tools/config/example.conf
new file mode 100644
index 000000000..5ffc597f9
--- /dev/null
+++ b/vendor/honnef.co/go/tools/config/example.conf
@@ -0,0 +1,10 @@
+checks = ["all", "-ST1003", "-ST1014"]
+initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
+ "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
+ "IP", "JSON", "QPS", "RAM", "RPC", "SLA",
+ "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
+ "UDP", "UI", "GID", "UID", "UUID", "URI",
+ "URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
+ "XSS"]
+dot_import_whitelist = []
+http_status_code_whitelist = ["200", "400", "404", "500"]
diff --git a/vendor/honnef.co/go/tools/deprecated/stdlib.go b/vendor/honnef.co/go/tools/deprecated/stdlib.go
new file mode 100644
index 000000000..b6b217c3e
--- /dev/null
+++ b/vendor/honnef.co/go/tools/deprecated/stdlib.go
@@ -0,0 +1,54 @@
+package deprecated
+
+type Deprecation struct {
+ DeprecatedSince int
+ AlternativeAvailableSince int
+}
+
+var Stdlib = map[string]Deprecation{
+ "image/jpeg.Reader": {4, 0},
+ // FIXME(dh): AllowBinary isn't being detected as deprecated
+ // because the comment has a newline right after "Deprecated:"
+ "go/build.AllowBinary": {7, 7},
+ "(archive/zip.FileHeader).CompressedSize": {1, 1},
+ "(archive/zip.FileHeader).UncompressedSize": {1, 1},
+ "(go/doc.Package).Bugs": {1, 1},
+ "os.SEEK_SET": {7, 7},
+ "os.SEEK_CUR": {7, 7},
+ "os.SEEK_END": {7, 7},
+ "(net.Dialer).Cancel": {7, 7},
+ "runtime.CPUProfile": {9, 0},
+ "compress/flate.ReadError": {6, 6},
+ "compress/flate.WriteError": {6, 6},
+ "path/filepath.HasPrefix": {0, 0},
+ "(net/http.Transport).Dial": {7, 7},
+ "(*net/http.Transport).CancelRequest": {6, 5},
+ "net/http.ErrWriteAfterFlush": {7, 0},
+ "net/http.ErrHeaderTooLong": {8, 0},
+ "net/http.ErrShortBody": {8, 0},
+ "net/http.ErrMissingContentLength": {8, 0},
+ "net/http/httputil.ErrPersistEOF": {0, 0},
+ "net/http/httputil.ErrClosed": {0, 0},
+ "net/http/httputil.ErrPipeline": {0, 0},
+ "net/http/httputil.ServerConn": {0, 0},
+ "net/http/httputil.NewServerConn": {0, 0},
+ "net/http/httputil.ClientConn": {0, 0},
+ "net/http/httputil.NewClientConn": {0, 0},
+ "net/http/httputil.NewProxyClientConn": {0, 0},
+ "(net/http.Request).Cancel": {7, 7},
+ "(text/template/parse.PipeNode).Line": {1, 1},
+ "(text/template/parse.ActionNode).Line": {1, 1},
+ "(text/template/parse.BranchNode).Line": {1, 1},
+ "(text/template/parse.TemplateNode).Line": {1, 1},
+ "database/sql/driver.ColumnConverter": {9, 9},
+ "database/sql/driver.Execer": {8, 8},
+ "database/sql/driver.Queryer": {8, 8},
+ "(database/sql/driver.Conn).Begin": {8, 8},
+ "(database/sql/driver.Stmt).Exec": {8, 8},
+ "(database/sql/driver.Stmt).Query": {8, 8},
+ "syscall.StringByteSlice": {1, 1},
+ "syscall.StringBytePtr": {1, 1},
+ "syscall.StringSlicePtr": {1, 1},
+ "syscall.StringToUTF16": {1, 1},
+ "syscall.StringToUTF16Ptr": {1, 1},
+}
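+
+// Editorial note (not part of the vendored upstream source): both fields
+// appear to be minor Go versions, so {7, 7} reads "deprecated since Go 1.7,
+// alternative available since Go 1.7", and {4, 0} means the alternative has
+// always existed. For example:
+//
+//	dep := Stdlib["os.SEEK_SET"] // Deprecation{DeprecatedSince: 7, AlternativeAvailableSince: 7}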
diff --git a/vendor/honnef.co/go/tools/functions/concrete.go b/vendor/honnef.co/go/tools/functions/concrete.go
new file mode 100644
index 000000000..932acd03e
--- /dev/null
+++ b/vendor/honnef.co/go/tools/functions/concrete.go
@@ -0,0 +1,56 @@
+package functions
+
+import (
+ "go/token"
+ "go/types"
+
+ "honnef.co/go/tools/ssa"
+)
+
+func concreteReturnTypes(fn *ssa.Function) []*types.Tuple {
+ res := fn.Signature.Results()
+ if res == nil {
+ return nil
+ }
+ ifaces := make([]bool, res.Len())
+ any := false
+ for i := 0; i < res.Len(); i++ {
+ _, ifaces[i] = res.At(i).Type().Underlying().(*types.Interface)
+ any = any || ifaces[i]
+ }
+ if !any {
+ return []*types.Tuple{res}
+ }
+ var out []*types.Tuple
+ for _, block := range fn.Blocks {
+ if len(block.Instrs) == 0 {
+ continue
+ }
+ ret, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return)
+ if !ok {
+ continue
+ }
+ vars := make([]*types.Var, res.Len())
+ for i, v := range ret.Results {
+ var typ types.Type
+ if !ifaces[i] {
+ typ = res.At(i).Type()
+ } else if mi, ok := v.(*ssa.MakeInterface); ok {
+ // TODO(dh): if mi.X is a function call that returns
+ // an interface, call concreteReturnTypes on that
+ // function (or, really, go through Descriptions,
+ // avoid infinite recursion etc, just like nil error
+ // detection)
+
+ // TODO(dh): support Phi nodes
+ typ = mi.X.Type()
+ } else {
+ typ = res.At(i).Type()
+ }
+ vars[i] = types.NewParam(token.NoPos, nil, "", typ)
+ }
+ out = append(out, types.NewTuple(vars...))
+ }
+ // TODO(dh): deduplicate out
+ return out
+}
diff --git a/vendor/honnef.co/go/tools/functions/functions.go b/vendor/honnef.co/go/tools/functions/functions.go
new file mode 100644
index 000000000..839404129
--- /dev/null
+++ b/vendor/honnef.co/go/tools/functions/functions.go
@@ -0,0 +1,150 @@
+package functions
+
+import (
+ "go/types"
+ "sync"
+
+ "honnef.co/go/tools/callgraph"
+ "honnef.co/go/tools/callgraph/static"
+ "honnef.co/go/tools/ssa"
+ "honnef.co/go/tools/staticcheck/vrp"
+)
+
+var stdlibDescs = map[string]Description{
+ "errors.New": {Pure: true},
+
+ "fmt.Errorf": {Pure: true},
+ "fmt.Sprintf": {Pure: true},
+ "fmt.Sprint": {Pure: true},
+
+ "sort.Reverse": {Pure: true},
+
+ "strings.Map": {Pure: true},
+ "strings.Repeat": {Pure: true},
+ "strings.Replace": {Pure: true},
+ "strings.Title": {Pure: true},
+ "strings.ToLower": {Pure: true},
+ "strings.ToLowerSpecial": {Pure: true},
+ "strings.ToTitle": {Pure: true},
+ "strings.ToTitleSpecial": {Pure: true},
+ "strings.ToUpper": {Pure: true},
+ "strings.ToUpperSpecial": {Pure: true},
+ "strings.Trim": {Pure: true},
+ "strings.TrimFunc": {Pure: true},
+ "strings.TrimLeft": {Pure: true},
+ "strings.TrimLeftFunc": {Pure: true},
+ "strings.TrimPrefix": {Pure: true},
+ "strings.TrimRight": {Pure: true},
+ "strings.TrimRightFunc": {Pure: true},
+ "strings.TrimSpace": {Pure: true},
+ "strings.TrimSuffix": {Pure: true},
+
+ "(*net/http.Request).WithContext": {Pure: true},
+
+ "math/rand.Read": {NilError: true},
+ "(*math/rand.Rand).Read": {NilError: true},
+}
+
+type Description struct {
+ // The function is known to be pure
+ Pure bool
+ // The function is known to be a stub
+ Stub bool
+ // The function is known to never return (panics notwithstanding)
+ Infinite bool
+ // Variable ranges
+ Ranges vrp.Ranges
+ Loops []Loop
+ // The function returns an error as its last return value, but it
+ // is always nil
+ NilError bool
+ ConcreteReturnTypes []*types.Tuple
+}
+
+type descriptionEntry struct {
+ ready chan struct{}
+ result Description
+}
+
+type Descriptions struct {
+ CallGraph *callgraph.Graph
+ mu sync.Mutex
+ cache map[*ssa.Function]*descriptionEntry
+}
+
+func NewDescriptions(prog *ssa.Program) *Descriptions {
+ return &Descriptions{
+ CallGraph: static.CallGraph(prog),
+ cache: map[*ssa.Function]*descriptionEntry{},
+ }
+}
+
+func (d *Descriptions) Get(fn *ssa.Function) Description {
+ d.mu.Lock()
+ fd := d.cache[fn]
+ if fd == nil {
+ fd = &descriptionEntry{
+ ready: make(chan struct{}),
+ }
+ d.cache[fn] = fd
+ d.mu.Unlock()
+
+ {
+ fd.result = stdlibDescs[fn.RelString(nil)]
+ fd.result.Pure = fd.result.Pure || d.IsPure(fn)
+ fd.result.Stub = fd.result.Stub || d.IsStub(fn)
+ fd.result.Infinite = fd.result.Infinite || !terminates(fn)
+ fd.result.Ranges = vrp.BuildGraph(fn).Solve()
+ fd.result.Loops = findLoops(fn)
+ fd.result.NilError = fd.result.NilError || IsNilError(fn)
+ fd.result.ConcreteReturnTypes = concreteReturnTypes(fn)
+ }
+
+ close(fd.ready)
+ } else {
+ d.mu.Unlock()
+ <-fd.ready
+ }
+ return fd.result
+}
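+
+// Editorial sketch (not part of the vendored upstream source): Get memoizes
+// one Description per function; the ready channel blocks later callers until
+// the first caller has filled in the result. Assuming prog is an
+// *ssa.Program and fn one of its functions:
+//
+//	descs := NewDescriptions(prog)
+//	d := descs.Get(fn)
+//	if d.Pure && !d.Infinite {
+//		// calls to fn can be treated as side-effect free
+//	}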
+
+func IsNilError(fn *ssa.Function) bool {
+ // TODO(dh): This is very simplistic, as we only look for constant
+ // nil returns. A more advanced approach would work transitively.
+ // An even more advanced approach would be context-aware and
+ // determine nil errors based on inputs (e.g. io.WriteString to a
+ // bytes.Buffer will always return nil, but an io.WriteString to
+ // an os.File might not). Similarly, an os.File opened for reading
+ // won't error on Close, but other files will.
+ res := fn.Signature.Results()
+ if res.Len() == 0 {
+ return false
+ }
+ last := res.At(res.Len() - 1)
+ if types.TypeString(last.Type(), nil) != "error" {
+ return false
+ }
+
+ if fn.Blocks == nil {
+ return false
+ }
+ for _, block := range fn.Blocks {
+ if len(block.Instrs) == 0 {
+ continue
+ }
+ ins := block.Instrs[len(block.Instrs)-1]
+ ret, ok := ins.(*ssa.Return)
+ if !ok {
+ continue
+ }
+ v := ret.Results[len(ret.Results)-1]
+ c, ok := v.(*ssa.Const)
+ if !ok {
+ return false
+ }
+ if !c.IsNil() {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/honnef.co/go/tools/functions/loops.go b/vendor/honnef.co/go/tools/functions/loops.go
new file mode 100644
index 000000000..63011cf3e
--- /dev/null
+++ b/vendor/honnef.co/go/tools/functions/loops.go
@@ -0,0 +1,50 @@
+package functions
+
+import "honnef.co/go/tools/ssa"
+
+type Loop map[*ssa.BasicBlock]bool
+
+func findLoops(fn *ssa.Function) []Loop {
+ if fn.Blocks == nil {
+ return nil
+ }
+ tree := fn.DomPreorder()
+ var sets []Loop
+ for _, h := range tree {
+ for _, n := range h.Preds {
+ if !h.Dominates(n) {
+ continue
+ }
+ // n is a back-edge to h
+ // h is the loop header
+ if n == h {
+ sets = append(sets, Loop{n: true})
+ continue
+ }
+ set := Loop{h: true, n: true}
+ for _, b := range allPredsBut(n, h, nil) {
+ set[b] = true
+ }
+ sets = append(sets, set)
+ }
+ }
+ return sets
+}
+
+func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock {
+outer:
+ for _, pred := range b.Preds {
+ if pred == but {
+ continue
+ }
+ for _, p := range list {
+ // TODO improve big-o complexity of this function
+ if pred == p {
+ continue outer
+ }
+ }
+ list = append(list, pred)
+ list = allPredsBut(pred, but, list)
+ }
+ return list
+}
diff --git a/vendor/honnef.co/go/tools/functions/pure.go b/vendor/honnef.co/go/tools/functions/pure.go
new file mode 100644
index 000000000..7028eb8c6
--- /dev/null
+++ b/vendor/honnef.co/go/tools/functions/pure.go
@@ -0,0 +1,123 @@
+package functions
+
+import (
+ "go/token"
+ "go/types"
+
+ "honnef.co/go/tools/callgraph"
+ "honnef.co/go/tools/lint/lintdsl"
+ "honnef.co/go/tools/ssa"
+)
+
+// IsStub reports whether a function is a stub. A function is
+// considered a stub if it has no instructions or exactly one
+// instruction, which must be either returning only constant values or
+// a panic.
+func (d *Descriptions) IsStub(fn *ssa.Function) bool {
+ if len(fn.Blocks) == 0 {
+ return true
+ }
+ if len(fn.Blocks) > 1 {
+ return false
+ }
+ instrs := lintdsl.FilterDebug(fn.Blocks[0].Instrs)
+ if len(instrs) != 1 {
+ return false
+ }
+
+ switch instrs[0].(type) {
+ case *ssa.Return:
+ // Since this is the only instruction, the return value must
+ // be a constant. We consider all constants as stubs, not just
+ // the zero value. This does not, unfortunately, cover zero
+ // initialised structs, as these cause additional
+ // instructions.
+ return true
+ case *ssa.Panic:
+ return true
+ default:
+ return false
+ }
+}
+
+func (d *Descriptions) IsPure(fn *ssa.Function) bool {
+ if fn.Signature.Results().Len() == 0 {
+ // A function with no return values is empty or is doing some
+ // work we cannot see (for example because of build tags);
+ // don't consider it pure.
+ return false
+ }
+
+ for _, param := range fn.Params {
+ if _, ok := param.Type().Underlying().(*types.Basic); !ok {
+ return false
+ }
+ }
+
+ if fn.Blocks == nil {
+ return false
+ }
+ checkCall := func(common *ssa.CallCommon) bool {
+ if common.IsInvoke() {
+ return false
+ }
+ builtin, ok := common.Value.(*ssa.Builtin)
+ if !ok {
+ if common.StaticCallee() != fn {
+ if common.StaticCallee() == nil {
+ return false
+ }
+ // TODO(dh): ideally, IsPure wouldn't be responsible
+ // for avoiding infinite recursion, but
+ // FunctionDescriptions would be.
+ node := d.CallGraph.CreateNode(common.StaticCallee())
+ if callgraph.PathSearch(node, func(other *callgraph.Node) bool {
+ return other.Func == fn
+ }) != nil {
+ return false
+ }
+ if !d.Get(common.StaticCallee()).Pure {
+ return false
+ }
+ }
+ } else {
+ switch builtin.Name() {
+ case "len", "cap", "make", "new":
+ default:
+ return false
+ }
+ }
+ return true
+ }
+ for _, b := range fn.Blocks {
+ for _, ins := range b.Instrs {
+ switch ins := ins.(type) {
+ case *ssa.Call:
+ if !checkCall(ins.Common()) {
+ return false
+ }
+ case *ssa.Defer:
+ if !checkCall(&ins.Call) {
+ return false
+ }
+ case *ssa.Select:
+ return false
+ case *ssa.Send:
+ return false
+ case *ssa.Go:
+ return false
+ case *ssa.Panic:
+ return false
+ case *ssa.Store:
+ return false
+ case *ssa.FieldAddr:
+ return false
+ case *ssa.UnOp:
+ if ins.Op == token.MUL || ins.Op == token.AND {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
diff --git a/vendor/honnef.co/go/tools/functions/terminates.go b/vendor/honnef.co/go/tools/functions/terminates.go
new file mode 100644
index 000000000..65f9e16dc
--- /dev/null
+++ b/vendor/honnef.co/go/tools/functions/terminates.go
@@ -0,0 +1,24 @@
+package functions
+
+import "honnef.co/go/tools/ssa"
+
+// terminates reports whether fn is supposed to return, that is, whether it
+// has at least one theoretically possible path that returns from the function.
+// Explicit panics do not count as terminating.
+func terminates(fn *ssa.Function) bool {
+ if fn.Blocks == nil {
+ // assuming that a function terminates is the conservative
+ // choice
+ return true
+ }
+
+ for _, block := range fn.Blocks {
+ if len(block.Instrs) == 0 {
+ continue
+ }
+ if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/callee.go b/vendor/honnef.co/go/tools/go/types/typeutil/callee.go
new file mode 100644
index 000000000..38f596daf
--- /dev/null
+++ b/vendor/honnef.co/go/tools/go/types/typeutil/callee.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ var obj types.Object
+ switch fun := astutil.Unparen(call.Fun).(type) {
+ case *ast.Ident:
+ obj = info.Uses[fun] // type, var, builtin, or declared func
+ case *ast.SelectorExpr:
+ if sel, ok := info.Selections[fun]; ok {
+ obj = sel.Obj() // method or field
+ } else {
+ obj = info.Uses[fun.Sel] // qualified identifier?
+ }
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return nil // T(x) is a conversion, not a call
+ }
+ return obj
+}
+
+// StaticCallee returns the target (function or method) of a static
+// function call, if any. It returns nil for calls to builtins.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+ if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
+ return f
+ }
+ return nil
+}
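+
+// Editorial sketch (not part of the vendored upstream source): resolving a
+// call expression, assuming info is a populated *types.Info, call is an
+// *ast.CallExpr from the same type-checked package, and fmt is imported:
+//
+//	if fn := StaticCallee(info, call); fn != nil {
+//		fmt.Println(fn.FullName()) // e.g. "fmt.Println"
+//	}
+//	// nil is returned for builtins, conversions, and dynamic calls.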
+
+func interfaceMethod(f *types.Func) bool {
+ recv := f.Type().(*types.Signature).Recv()
+ return recv != nil && types.IsInterface(recv.Type())
+}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/identical.go b/vendor/honnef.co/go/tools/go/types/typeutil/identical.go
new file mode 100644
index 000000000..7eda29463
--- /dev/null
+++ b/vendor/honnef.co/go/tools/go/types/typeutil/identical.go
@@ -0,0 +1,29 @@
+package typeutil
+
+import (
+ "go/types"
+)
+
+// Identical reports whether x and y are identical types.
+// Unlike types.Identical, receivers of Signature types are not ignored.
+func Identical(x, y types.Type) (ret bool) {
+ if !types.Identical(x, y) {
+ return false
+ }
+ sigX, ok := x.(*types.Signature)
+ if !ok {
+ return true
+ }
+ sigY, ok := y.(*types.Signature)
+ if !ok {
+ // should be impossible
+ return true
+ }
+ if sigX.Recv() == sigY.Recv() {
+ return true
+ }
+ if sigX.Recv() == nil || sigY.Recv() == nil {
+ return false
+ }
+ return Identical(sigX.Recv().Type(), sigY.Recv().Type())
+}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/imports.go b/vendor/honnef.co/go/tools/go/types/typeutil/imports.go
new file mode 100644
index 000000000..9c441dba9
--- /dev/null
+++ b/vendor/honnef.co/go/tools/go/types/typeutil/imports.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+//
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/map.go b/vendor/honnef.co/go/tools/go/types/typeutil/map.go
new file mode 100644
index 000000000..db0b3bce7
--- /dev/null
+++ b/vendor/honnef.co/go/tools/go/types/typeutil/map.go
@@ -0,0 +1,315 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as Map,
+// a mapping from types.Type to interface{} values.
+package typeutil
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "reflect"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary interface{} values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Not thread-safe.
+//
+// This fork handles Signatures correctly, respecting method receivers.
+//
+type Map struct {
+ hasher Hasher // shared by many Maps
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value interface{}
+}
+
+// SetHasher sets the hasher used by Map.
+//
+// All Hashers are functionally equivalent but contain internal state
+// used to cache the results of hashing previously seen types.
+//
+// A single Hasher created by MakeHasher() may be shared among many
+// Maps. This is recommended if the instances have many keys in
+// common, as it will amortize the cost of hash computation.
+//
+// A Hasher may grow without bound as new types are seen. Even when a
+// type is deleted from the map, the Hasher never shrinks, since other
+// types in the map may reference the deleted type indirectly.
+//
+// Hashers are not thread-safe, and read-only operations such as
+// Map.At require updates to the hasher, so a full Mutex lock (not a
+// read-lock) is required around all Map operations if a shared
+// hasher is accessed from multiple threads.
+//
+// If SetHasher is not called, the Map will create a private hasher at
+// the first call to Insert.
+//
+func (m *Map) SetHasher(hasher Hasher) {
+ m.hasher = hasher
+}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+//
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+//
+func (m *Map) At(key types.Type) interface{} {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[m.hasher.Hash(key)] {
+ if e.key != nil && Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
+ if m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ if m.hasher.memo == nil {
+ m.hasher = MakeHasher()
+ }
+ hash := m.hasher.Hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
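+
+// Editorial sketch (not part of the vendored upstream source): structurally
+// identical types share one key even though the types.Type pointers differ;
+// assumes fmt is imported:
+//
+//	var m Map
+//	k1 := types.NewSlice(types.Typ[types.Int])
+//	k2 := types.NewSlice(types.Typ[types.Int]) // distinct pointer, identical type
+//	m.Set(k1, "a")
+//	fmt.Println(m.At(k2)) // "a"
+//	fmt.Println(m.Len())  // 1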
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+//
+func (m *Map) Iterate(f func(key types.Type, value interface{})) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ interface{}) {
+ keys = append(keys, key)
+ })
+ return keys
+}
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value interface{}) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+//
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+//
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// A Hasher maps each type to its hash value.
+// For efficiency, a hasher uses memoization; thus its memory
+// footprint grows monotonically over time.
+// Hashers are not thread-safe.
+// Hashers have reference semantics.
+// Call MakeHasher to create a Hasher.
+type Hasher struct {
+ memo map[types.Type]uint32
+}
+
+// MakeHasher returns a new Hasher instance.
+func MakeHasher() Hasher {
+ return Hasher{make(map[types.Type]uint32)}
+}
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ hash, ok := h.memo[t]
+ if !ok {
+ hash = h.hashFor(t)
+ h.memo[t] = hash
+ }
+ return hash
+}
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hashFor computes the hash of t.
+func (h Hasher) hashFor(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.Hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.Hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.Hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Interface:
+ var hash uint32 = 9103
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // See go/types.identicalMethods for rationale.
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+ }
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+
+ case *types.Named:
+ // Not safe with a copying GC; objects may move.
+ return uint32(reflect.ValueOf(t.Obj()).Pointer())
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+ panic(t)
+}
+
+func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ var hash uint32 = 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 3 * h.Hash(tuple.At(i).Type())
+ }
+ return hash
+}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go b/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 000000000..32084610f
--- /dev/null
+++ b/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+ "go/types"
+ "sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+ mu sync.Mutex
+ named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+ others map[types.Type]*types.MethodSet // all other types
+}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+//
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+ if cache == nil {
+ return types.NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := T.(type) {
+ case *types.Named:
+ return cache.lookupNamed(T).value
+
+ case *types.Pointer:
+ if N, ok := T.Elem().(*types.Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = types.NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[types.Type]*types.MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = types.NewMethodSet(named)
+ msets.pointer = types.NewMethodSet(types.NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
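+
+// Editorial sketch (not part of the vendored upstream source): one shared
+// cache makes repeated method-set queries cheap; typesToCheck is assumed to
+// be a []types.Type:
+//
+//	var cache MethodSetCache // the zero value is ready to use
+//	for _, T := range typesToCheck {
+//		mset := cache.MethodSet(T) // computed once per distinct type
+//		_ = mset
+//	}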
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/ui.go b/vendor/honnef.co/go/tools/go/types/typeutil/ui.go
new file mode 100644
index 000000000..9849c24ce
--- /dev/null
+++ b/vendor/honnef.co/go/tools/go/types/typeutil/ui.go
@@ -0,0 +1,52 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import "go/types"
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+//
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+ isPointerToConcrete := func(T types.Type) bool {
+ ptr, ok := T.(*types.Pointer)
+ return ok && !types.IsInterface(ptr.Elem())
+ }
+
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if types.IsInterface(T) || isPointerToConcrete(T) {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ // T is some other concrete type.
+ // Report methods of T and *T, preferring those of T.
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+
+ }
+ return result
+}
diff --git a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
new file mode 100644
index 000000000..cbbafbcdf
--- /dev/null
+++ b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
@@ -0,0 +1,68 @@
+package sharedcheck
+
+import (
+ "go/ast"
+ "go/types"
+
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+ "honnef.co/go/tools/ssa"
+)
+
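+// CheckRangeStringRunes flags loops that range over []rune(s) where
+// ranging over the string itself would yield the same runes. An
+// illustrative example of the reported pattern (s is a hypothetical
+// string variable):
+//
+//	for _, r := range []rune(s) { // should be: for _, r := range s
+//		_ = r
+//	}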
+func CheckRangeStringRunes(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ fn := func(node ast.Node) bool {
+ rng, ok := node.(*ast.RangeStmt)
+ if !ok || !IsBlank(rng.Key) {
+ return true
+ }
+
+ v, _ := ssafn.ValueForExpr(rng.X)
+
+ // Check that we're converting from string to []rune
+ val, _ := v.(*ssa.Convert)
+ if val == nil {
+ return true
+ }
+ Tsrc, ok := val.X.Type().(*types.Basic)
+ if !ok || Tsrc.Kind() != types.String {
+ return true
+ }
+ Tdst, ok := val.Type().(*types.Slice)
+ if !ok {
+ return true
+ }
+ TdstElem, ok := Tdst.Elem().(*types.Basic)
+ if !ok || TdstElem.Kind() != types.Int32 {
+ return true
+ }
+
+ // Check that the result of the conversion is only used to
+ // range over
+ refs := val.Referrers()
+ if refs == nil {
+ return true
+ }
+
+ // Expect two refs: one for obtaining the length of the slice,
+ // one for accessing the elements
+ if len(FilterDebug(*refs)) != 2 {
+ // TODO(dh): right now, we check that only one place
+ // refers to our slice. This will miss cases such as
+ // ranging over the slice twice. Ideally, we'd ensure that
+ // the slice is only used for ranging over (without
+ // accessing the key), but that is harder to do because in
+ // SSA form, ranging over a slice looks like an ordinary
+ // loop with index increments and slice accesses. We'd
+ // have to look at the associated AST node to check that
+ // it's a range statement.
+ return true
+ }
+
+ j.Errorf(rng, "should range over string, not []rune(string)")
+
+ return true
+ }
+ Inspect(ssafn.Syntax(), fn)
+ }
+}
diff --git a/vendor/honnef.co/go/tools/lint/LICENSE b/vendor/honnef.co/go/tools/lint/LICENSE
new file mode 100644
index 000000000..796130a12
--- /dev/null
+++ b/vendor/honnef.co/go/tools/lint/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+Copyright (c) 2016 Dominik Honnef. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/honnef.co/go/tools/lint/generated.go b/vendor/honnef.co/go/tools/lint/generated.go
new file mode 100644
index 000000000..58b23f68f
--- /dev/null
+++ b/vendor/honnef.co/go/tools/lint/generated.go
@@ -0,0 +1,38 @@
+package lint
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+)
+
+var (
+ // used by cgo before Go 1.11
+ oldCgo = []byte("// Created by cgo - DO NOT EDIT")
+ prefix = []byte("// Code generated ")
+ suffix = []byte(" DO NOT EDIT.")
+ nl = []byte("\n")
+ crnl = []byte("\r\n")
+)
+
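+// isGenerated reports whether the source read from r carries a code
+// generation marker. As an illustrative example, a file starting with
+// a line like
+//
+//	// Code generated by some-tool. DO NOT EDIT.
+//
+// (some-tool being a hypothetical generator name) matches the
+// prefix/suffix pair above, as does the pre-Go-1.11 cgo marker.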
+func isGenerated(r io.Reader) bool {
+ br := bufio.NewReader(r)
+ for {
+ s, err := br.ReadBytes('\n')
+ if err != nil && err != io.EOF {
+ return false
+ }
+ s = bytes.TrimSuffix(s, crnl)
+ s = bytes.TrimSuffix(s, nl)
+ if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
+ return true
+ }
+ if bytes.Equal(s, oldCgo) {
+ return true
+ }
+ if err == io.EOF {
+ break
+ }
+ }
+ return false
+}
diff --git a/vendor/honnef.co/go/tools/lint/lint.go b/vendor/honnef.co/go/tools/lint/lint.go
new file mode 100644
index 000000000..c81f6e826
--- /dev/null
+++ b/vendor/honnef.co/go/tools/lint/lint.go
@@ -0,0 +1,706 @@
+// Package lint provides the foundation for tools like staticcheck.
+package lint // import "honnef.co/go/tools/lint"
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "golang.org/x/tools/go/packages"
+ "honnef.co/go/tools/config"
+ "honnef.co/go/tools/ssa"
+ "honnef.co/go/tools/ssa/ssautil"
+)
+
+type Job struct {
+ Program *Program
+
+ checker string
+ check Check
+ problems []Problem
+
+ duration time.Duration
+}
+
+type Ignore interface {
+ Match(p Problem) bool
+}
+
+type LineIgnore struct {
+ File string
+ Line int
+ Checks []string
+ matched bool
+ pos token.Pos
+}
+
+func (li *LineIgnore) Match(p Problem) bool {
+ if p.Position.Filename != li.File || p.Position.Line != li.Line {
+ return false
+ }
+ for _, c := range li.Checks {
+ if m, _ := filepath.Match(c, p.Check); m {
+ li.matched = true
+ return true
+ }
+ }
+ return false
+}
+
+func (li *LineIgnore) String() string {
+ matched := "not matched"
+ if li.matched {
+ matched = "matched"
+ }
+ return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
+}
+
+type FileIgnore struct {
+ File string
+ Checks []string
+}
+
+func (fi *FileIgnore) Match(p Problem) bool {
+ if p.Position.Filename != fi.File {
+ return false
+ }
+ for _, c := range fi.Checks {
+ if m, _ := filepath.Match(c, p.Check); m {
+ return true
+ }
+ }
+ return false
+}
+
+type GlobIgnore struct {
+ Pattern string
+ Checks []string
+}
+
+func (gi *GlobIgnore) Match(p Problem) bool {
+ if gi.Pattern != "*" {
+ pkgpath := p.Package.Types.Path()
+ if strings.HasSuffix(pkgpath, "_test") {
+ pkgpath = pkgpath[:len(pkgpath)-len("_test")]
+ }
+ name := filepath.Join(pkgpath, filepath.Base(p.Position.Filename))
+ if m, _ := filepath.Match(gi.Pattern, name); !m {
+ return false
+ }
+ }
+ for _, c := range gi.Checks {
+ if m, _ := filepath.Match(c, p.Check); m {
+ return true
+ }
+ }
+ return false
+}
+
+type Program struct {
+ SSA *ssa.Program
+ InitialPackages []*Pkg
+ InitialFunctions []*ssa.Function
+ AllPackages []*packages.Package
+ AllFunctions []*ssa.Function
+ Files []*ast.File
+ GoVersion int
+
+ tokenFileMap map[*token.File]*ast.File
+ astFileMap map[*ast.File]*Pkg
+ packagesMap map[string]*packages.Package
+
+ genMu sync.RWMutex
+ generatedMap map[string]bool
+}
+
+func (prog *Program) Fset() *token.FileSet {
+ return prog.InitialPackages[0].Fset
+}
+
+type Func func(*Job)
+
+type Severity uint8
+
+const (
+ Error Severity = iota
+ Warning
+ Ignored
+)
+
+// Problem represents a problem in some source code.
+type Problem struct {
+ Position token.Position // position in source file
+ Text string // the prose that describes the problem
+ Check string
+ Checker string
+ Package *Pkg
+ Severity Severity
+}
+
+func (p *Problem) String() string {
+ if p.Check == "" {
+ return p.Text
+ }
+ return fmt.Sprintf("%s (%s)", p.Text, p.Check)
+}
+
+type Checker interface {
+ Name() string
+ Prefix() string
+ Init(*Program)
+ Checks() []Check
+}
+
+type Check struct {
+ Fn Func
+ ID string
+ FilterGenerated bool
+}
+
+// A Linter lints Go source code.
+type Linter struct {
+ Checkers []Checker
+ Ignores []Ignore
+ GoVersion int
+ ReturnIgnored bool
+ Config config.Config
+
+ MaxConcurrentJobs int
+ PrintStats bool
+
+ automaticIgnores []Ignore
+}
+
+func (l *Linter) ignore(p Problem) bool {
+ ignored := false
+ for _, ig := range l.automaticIgnores {
+ // We cannot short-circuit these, as we want to record, for
+ // each ignore, whether it matched or not.
+ if ig.Match(p) {
+ ignored = true
+ }
+ }
+ if ignored {
+ // no need to execute other ignores if we've already had a
+ // match.
+ return true
+ }
+ for _, ig := range l.Ignores {
+ // We can short-circuit here, as we aren't tracking any
+ // information.
+ if ig.Match(p) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (prog *Program) File(node Positioner) *ast.File {
+ return prog.tokenFileMap[prog.SSA.Fset.File(node.Pos())]
+}
+
+func (j *Job) File(node Positioner) *ast.File {
+ return j.Program.File(node)
+}
+
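+// parseDirective splits a //lint: comment into its command and
+// arguments. For example (illustrative only), the directive
+//
+//	//lint:ignore S1000 this loop is intentional
+//
+// yields cmd "ignore" and args {"S1000", "this", "loop", "is", "intentional"}.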
+func parseDirective(s string) (cmd string, args []string) {
+ if !strings.HasPrefix(s, "//lint:") {
+ return "", nil
+ }
+ s = strings.TrimPrefix(s, "//lint:")
+ fields := strings.Split(s, " ")
+ return fields[0], fields[1:]
+}
+
+type PerfStats struct {
+ PackageLoading time.Duration
+ SSABuild time.Duration
+ OtherInitWork time.Duration
+ CheckerInits map[string]time.Duration
+ Jobs []JobStat
+}
+
+type JobStat struct {
+ Job string
+ Duration time.Duration
+}
+
+func (stats *PerfStats) Print(w io.Writer) {
+ fmt.Fprintln(w, "Package loading:", stats.PackageLoading)
+ fmt.Fprintln(w, "SSA build:", stats.SSABuild)
+ fmt.Fprintln(w, "Other init work:", stats.OtherInitWork)
+
+ fmt.Fprintln(w, "Checker inits:")
+ for checker, d := range stats.CheckerInits {
+ fmt.Fprintf(w, "\t%s: %s\n", checker, d)
+ }
+ fmt.Fprintln(w)
+
+ fmt.Fprintln(w, "Jobs:")
+ sort.Slice(stats.Jobs, func(i, j int) bool {
+ return stats.Jobs[i].Duration < stats.Jobs[j].Duration
+ })
+ var total time.Duration
+ for _, job := range stats.Jobs {
+ fmt.Fprintf(w, "\t%s: %s\n", job.Job, job.Duration)
+ total += job.Duration
+ }
+ fmt.Fprintf(w, "\tTotal: %s\n", total)
+}
+
+func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem {
+ allPkgs := allPackages(initial)
+ t := time.Now()
+ ssaprog, _ := ssautil.Packages(allPkgs, ssa.GlobalDebug)
+ ssaprog.Build()
+ if stats != nil {
+ stats.SSABuild = time.Since(t)
+ }
+
+ t = time.Now()
+ pkgMap := map[*ssa.Package]*Pkg{}
+ var pkgs []*Pkg
+ for _, pkg := range initial {
+ ssapkg := ssaprog.Package(pkg.Types)
+ var cfg config.Config
+ if len(pkg.GoFiles) != 0 {
+ path := pkg.GoFiles[0]
+ dir := filepath.Dir(path)
+ var err error
+ // OPT(dh): we're rebuilding the entire config tree for
+		// each package. For example, if we check a/b/c and
+ // a/b/c/d, we'll process a, a/b, a/b/c, a, a/b, a/b/c,
+ // a/b/c/d – we should cache configs per package and only
+ // load the new levels.
+ cfg, err = config.Load(dir)
+ if err != nil {
+ // FIXME(dh): we couldn't load the config, what are we
+			// supposed to do? Probably tell the user somehow.
+ }
+ cfg = cfg.Merge(l.Config)
+ }
+
+ pkg := &Pkg{
+ SSA: ssapkg,
+ Package: pkg,
+ Config: cfg,
+ }
+ pkgMap[ssapkg] = pkg
+ pkgs = append(pkgs, pkg)
+ }
+
+ prog := &Program{
+ SSA: ssaprog,
+ InitialPackages: pkgs,
+ AllPackages: allPkgs,
+ GoVersion: l.GoVersion,
+ tokenFileMap: map[*token.File]*ast.File{},
+ astFileMap: map[*ast.File]*Pkg{},
+ generatedMap: map[string]bool{},
+ }
+ prog.packagesMap = map[string]*packages.Package{}
+ for _, pkg := range allPkgs {
+ prog.packagesMap[pkg.Types.Path()] = pkg
+ }
+
+ isInitial := map[*types.Package]struct{}{}
+ for _, pkg := range pkgs {
+ isInitial[pkg.Types] = struct{}{}
+ }
+ for fn := range ssautil.AllFunctions(ssaprog) {
+ if fn.Pkg == nil {
+ continue
+ }
+ prog.AllFunctions = append(prog.AllFunctions, fn)
+ if _, ok := isInitial[fn.Pkg.Pkg]; ok {
+ prog.InitialFunctions = append(prog.InitialFunctions, fn)
+ }
+ }
+ for _, pkg := range pkgs {
+ prog.Files = append(prog.Files, pkg.Syntax...)
+
+ ssapkg := ssaprog.Package(pkg.Types)
+ for _, f := range pkg.Syntax {
+ prog.astFileMap[f] = pkgMap[ssapkg]
+ }
+ }
+
+ for _, pkg := range allPkgs {
+ for _, f := range pkg.Syntax {
+ tf := pkg.Fset.File(f.Pos())
+ prog.tokenFileMap[tf] = f
+ }
+ }
+
+ var out []Problem
+ l.automaticIgnores = nil
+ for _, pkg := range initial {
+ for _, f := range pkg.Syntax {
+ cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
+ for node, cgs := range cm {
+ for _, cg := range cgs {
+ for _, c := range cg.List {
+ if !strings.HasPrefix(c.Text, "//lint:") {
+ continue
+ }
+ cmd, args := parseDirective(c.Text)
+ switch cmd {
+ case "ignore", "file-ignore":
+ if len(args) < 2 {
+ // FIXME(dh): this causes duplicated warnings when using megacheck
+ p := Problem{
+ Position: prog.DisplayPosition(c.Pos()),
+ Text: "malformed linter directive; missing the required reason field?",
+ Check: "",
+ Checker: "lint",
+ Package: nil,
+ }
+ out = append(out, p)
+ continue
+ }
+ default:
+ // unknown directive, ignore
+ continue
+ }
+ checks := strings.Split(args[0], ",")
+ pos := prog.DisplayPosition(node.Pos())
+ var ig Ignore
+ switch cmd {
+ case "ignore":
+ ig = &LineIgnore{
+ File: pos.Filename,
+ Line: pos.Line,
+ Checks: checks,
+ pos: c.Pos(),
+ }
+ case "file-ignore":
+ ig = &FileIgnore{
+ File: pos.Filename,
+ Checks: checks,
+ }
+ }
+ l.automaticIgnores = append(l.automaticIgnores, ig)
+ }
+ }
+ }
+ }
+ }
+
+ sizes := struct {
+ types int
+ defs int
+ uses int
+ implicits int
+ selections int
+ scopes int
+ }{}
+ for _, pkg := range pkgs {
+ sizes.types += len(pkg.TypesInfo.Types)
+ sizes.defs += len(pkg.TypesInfo.Defs)
+ sizes.uses += len(pkg.TypesInfo.Uses)
+ sizes.implicits += len(pkg.TypesInfo.Implicits)
+ sizes.selections += len(pkg.TypesInfo.Selections)
+ sizes.scopes += len(pkg.TypesInfo.Scopes)
+ }
+
+ if stats != nil {
+ stats.OtherInitWork = time.Since(t)
+ }
+
+ for _, checker := range l.Checkers {
+ t := time.Now()
+ checker.Init(prog)
+ if stats != nil {
+ stats.CheckerInits[checker.Name()] = time.Since(t)
+ }
+ }
+
+ var jobs []*Job
+ var allChecks []string
+
+ for _, checker := range l.Checkers {
+ checks := checker.Checks()
+ for _, check := range checks {
+ allChecks = append(allChecks, check.ID)
+ j := &Job{
+ Program: prog,
+ checker: checker.Name(),
+ check: check,
+ }
+ jobs = append(jobs, j)
+ }
+ }
+
+ max := len(jobs)
+ if l.MaxConcurrentJobs > 0 {
+ max = l.MaxConcurrentJobs
+ }
+
+ sem := make(chan struct{}, max)
+ wg := &sync.WaitGroup{}
+ for _, j := range jobs {
+ wg.Add(1)
+ go func(j *Job) {
+ defer wg.Done()
+ sem <- struct{}{}
+ defer func() { <-sem }()
+ fn := j.check.Fn
+ if fn == nil {
+ return
+ }
+ t := time.Now()
+ fn(j)
+ j.duration = time.Since(t)
+ }(j)
+ }
+ wg.Wait()
+
+ for _, j := range jobs {
+ if stats != nil {
+ stats.Jobs = append(stats.Jobs, JobStat{j.check.ID, j.duration})
+ }
+ for _, p := range j.problems {
+ allowedChecks := FilterChecks(allChecks, p.Package.Config.Checks)
+
+ if l.ignore(p) {
+ p.Severity = Ignored
+ }
+ // TODO(dh): support globs in check white/blacklist
+ // OPT(dh): this approach doesn't actually disable checks,
+ // it just discards their results. For the moment, that's
+ // fine. None of our checks are super expensive. In the
+ // future, we may want to provide opt-in expensive
+ // analysis, which shouldn't run at all. It may be easiest
+ // to implement this in the individual checks.
+ if (l.ReturnIgnored || p.Severity != Ignored) && allowedChecks[p.Check] {
+ out = append(out, p)
+ }
+ }
+ }
+
+ for _, ig := range l.automaticIgnores {
+ ig, ok := ig.(*LineIgnore)
+ if !ok {
+ continue
+ }
+ if ig.matched {
+ continue
+ }
+
+ couldveMatched := false
+ for f, pkg := range prog.astFileMap {
+ if prog.Fset().Position(f.Pos()).Filename != ig.File {
+ continue
+ }
+ allowedChecks := FilterChecks(allChecks, pkg.Config.Checks)
+ for _, c := range ig.Checks {
+ if !allowedChecks[c] {
+ continue
+ }
+ couldveMatched = true
+ break
+ }
+ break
+ }
+
+ if !couldveMatched {
+ // The ignored checks were disabled for the containing package.
+ // Don't flag the ignore for not having matched.
+ continue
+ }
+ p := Problem{
+ Position: prog.DisplayPosition(ig.pos),
+ Text: "this linter directive didn't match anything; should it be removed?",
+ Check: "",
+ Checker: "lint",
+ Package: nil,
+ }
+ out = append(out, p)
+ }
+
+ sort.Slice(out, func(i int, j int) bool {
+ pi, pj := out[i].Position, out[j].Position
+
+ if pi.Filename != pj.Filename {
+ return pi.Filename < pj.Filename
+ }
+ if pi.Line != pj.Line {
+ return pi.Line < pj.Line
+ }
+ if pi.Column != pj.Column {
+ return pi.Column < pj.Column
+ }
+
+ return out[i].Text < out[j].Text
+ })
+
+ if l.PrintStats && stats != nil {
+ stats.Print(os.Stderr)
+ }
+
+ if len(out) < 2 {
+ return out
+ }
+
+ uniq := make([]Problem, 0, len(out))
+ uniq = append(uniq, out[0])
+ prev := out[0]
+ for _, p := range out[1:] {
+ if prev.Position == p.Position && prev.Text == p.Text {
+ continue
+ }
+ prev = p
+ uniq = append(uniq, p)
+ }
+
+ return uniq
+}
+
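+// FilterChecks reports, for each check in allChecks, whether the given
+// check list enables it. Entries may be literal names, "all" or "*",
+// category globs such as "S*" (matching S1000 but not SA1000), or
+// prefix globs such as "S1*"; a leading '-' disables instead of
+// enables. For example (illustrative), checks = {"all", "-S1000"}
+// enables every check except S1000.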
+func FilterChecks(allChecks []string, checks []string) map[string]bool {
+ // OPT(dh): this entire computation could be cached per package
+ allowedChecks := map[string]bool{}
+
+ for _, check := range checks {
+ b := true
+ if len(check) > 1 && check[0] == '-' {
+ b = false
+ check = check[1:]
+ }
+ if check == "*" || check == "all" {
+ // Match all
+ for _, c := range allChecks {
+ allowedChecks[c] = b
+ }
+ } else if strings.HasSuffix(check, "*") {
+ // Glob
+ prefix := check[:len(check)-1]
+ isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
+
+ for _, c := range allChecks {
+ idx := strings.IndexFunc(c, func(r rune) bool { return unicode.IsNumber(r) })
+ if isCat {
+ // Glob is S*, which should match S1000 but not SA1000
+ cat := c[:idx]
+ if prefix == cat {
+ allowedChecks[c] = b
+ }
+ } else {
+ // Glob is S1*
+ if strings.HasPrefix(c, prefix) {
+ allowedChecks[c] = b
+ }
+ }
+ }
+ } else {
+ // Literal check name
+ allowedChecks[check] = b
+ }
+ }
+ return allowedChecks
+}
+
+func (prog *Program) Package(path string) *packages.Package {
+ return prog.packagesMap[path]
+}
+
+// Pkg represents a package being linted.
+type Pkg struct {
+ SSA *ssa.Package
+ *packages.Package
+ Config config.Config
+}
+
+type Positioner interface {
+ Pos() token.Pos
+}
+
+func (prog *Program) DisplayPosition(p token.Pos) token.Position {
+ // Only use the adjusted position if it points to another Go file.
+ // This means we'll point to the original file for cgo files, but
+ // we won't point to a YACC grammar file.
+
+ pos := prog.Fset().PositionFor(p, false)
+ adjPos := prog.Fset().PositionFor(p, true)
+
+ if filepath.Ext(adjPos.Filename) == ".go" {
+ return adjPos
+ }
+ return pos
+}
+
+func (prog *Program) isGenerated(path string) bool {
+ // This function isn't very efficient in terms of lock contention
+ // and lack of parallelism, but it really shouldn't matter.
+	// Projects consist of thousands of files and have hundreds of
+	// errors. That's not a lot of calls to isGenerated.
+
+ prog.genMu.RLock()
+ if b, ok := prog.generatedMap[path]; ok {
+ prog.genMu.RUnlock()
+ return b
+ }
+ prog.genMu.RUnlock()
+ prog.genMu.Lock()
+ defer prog.genMu.Unlock()
+ // recheck to avoid doing extra work in case of race
+ if b, ok := prog.generatedMap[path]; ok {
+ return b
+ }
+
+ f, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+ b := isGenerated(f)
+ prog.generatedMap[path] = b
+ return b
+}
+
+func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem {
+ tf := j.Program.SSA.Fset.File(n.Pos())
+ f := j.Program.tokenFileMap[tf]
+ pkg := j.Program.astFileMap[f]
+
+ pos := j.Program.DisplayPosition(n.Pos())
+ if j.Program.isGenerated(pos.Filename) && j.check.FilterGenerated {
+ return nil
+ }
+ problem := Problem{
+ Position: pos,
+ Text: fmt.Sprintf(format, args...),
+ Check: j.check.ID,
+ Checker: j.checker,
+ Package: pkg,
+ }
+ j.problems = append(j.problems, problem)
+ return &j.problems[len(j.problems)-1]
+}
+
+func (j *Job) NodePackage(node Positioner) *Pkg {
+ f := j.File(node)
+ return j.Program.astFileMap[f]
+}
+
+func allPackages(pkgs []*packages.Package) []*packages.Package {
+ var out []*packages.Package
+ packages.Visit(
+ pkgs,
+ func(pkg *packages.Package) bool {
+ out = append(out, pkg)
+ return true
+ },
+ nil,
+ )
+ return out
+}
diff --git a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
new file mode 100644
index 000000000..1bf567d9d
--- /dev/null
+++ b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
@@ -0,0 +1,323 @@
+// Package lintdsl provides helpers for implementing static analysis
+// checks. Dot-importing this package is encouraged.
+package lintdsl
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "honnef.co/go/tools/lint"
+ "honnef.co/go/tools/ssa"
+)
+
+type packager interface {
+ Package() *ssa.Package
+}
+
+func CallName(call *ssa.CallCommon) string {
+ if call.IsInvoke() {
+ return ""
+ }
+ switch v := call.Value.(type) {
+ case *ssa.Function:
+ fn, ok := v.Object().(*types.Func)
+ if !ok {
+ return ""
+ }
+ return fn.FullName()
+ case *ssa.Builtin:
+ return v.Name()
+ }
+ return ""
+}
+
+func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name }
+func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }
+
+func FilterDebug(instr []ssa.Instruction) []ssa.Instruction {
+ var out []ssa.Instruction
+ for _, ins := range instr {
+ if _, ok := ins.(*ssa.DebugRef); !ok {
+ out = append(out, ins)
+ }
+ }
+ return out
+}
+
+func IsExample(fn *ssa.Function) bool {
+ if !strings.HasPrefix(fn.Name(), "Example") {
+ return false
+ }
+ f := fn.Prog.Fset.File(fn.Pos())
+ if f == nil {
+ return false
+ }
+ return strings.HasSuffix(f.Name(), "_test.go")
+}
+
+func IsPointerLike(T types.Type) bool {
+ switch T := T.Underlying().(type) {
+ case *types.Interface, *types.Chan, *types.Map, *types.Pointer:
+ return true
+ case *types.Basic:
+ return T.Kind() == types.UnsafePointer
+ }
+ return false
+}
+
+func IsGenerated(f *ast.File) bool {
+ comments := f.Comments
+ if len(comments) > 0 {
+ comment := comments[0].Text()
+ return strings.Contains(comment, "Code generated by") ||
+ strings.Contains(comment, "DO NOT EDIT")
+ }
+ return false
+}
+
+func IsIdent(expr ast.Expr, ident string) bool {
+ id, ok := expr.(*ast.Ident)
+ return ok && id.Name == ident
+}
+
+// IsBlank reports whether id is the blank identifier "_".
+// If id == nil, the answer is false.
+func IsBlank(id ast.Expr) bool {
+ ident, _ := id.(*ast.Ident)
+ return ident != nil && ident.Name == "_"
+}
+
+func IsIntLiteral(expr ast.Expr, literal string) bool {
+ lit, ok := expr.(*ast.BasicLit)
+ return ok && lit.Kind == token.INT && lit.Value == literal
+}
+
+// Deprecated: use IsIntLiteral instead
+func IsZero(expr ast.Expr) bool {
+ return IsIntLiteral(expr, "0")
+}
+
+func TypeOf(j *lint.Job, expr ast.Expr) types.Type {
+ if expr == nil {
+ return nil
+ }
+ return j.NodePackage(expr).TypesInfo.TypeOf(expr)
+}
+
+func IsOfType(j *lint.Job, expr ast.Expr, name string) bool { return IsType(TypeOf(j, expr), name) }
+
+func ObjectOf(j *lint.Job, ident *ast.Ident) types.Object {
+ if ident == nil {
+ return nil
+ }
+ return j.NodePackage(ident).TypesInfo.ObjectOf(ident)
+}
+
+func IsInTest(j *lint.Job, node lint.Positioner) bool {
+ // FIXME(dh): this doesn't work for global variables with
+ // initializers
+ f := j.Program.SSA.Fset.File(node.Pos())
+ return f != nil && strings.HasSuffix(f.Name(), "_test.go")
+}
+
+func IsInMain(j *lint.Job, node lint.Positioner) bool {
+ if node, ok := node.(packager); ok {
+ return node.Package().Pkg.Name() == "main"
+ }
+ pkg := j.NodePackage(node)
+ if pkg == nil {
+ return false
+ }
+ return pkg.Types.Name() == "main"
+}
+
+func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string {
+ info := j.NodePackage(expr).TypesInfo
+ sel := info.Selections[expr]
+ if sel == nil {
+ if x, ok := expr.X.(*ast.Ident); ok {
+ pkg, ok := info.ObjectOf(x).(*types.PkgName)
+ if !ok {
+ // This shouldn't happen
+ return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
+ }
+ return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
+ }
+ panic(fmt.Sprintf("unsupported selector: %v", expr))
+ }
+ return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
+}
+
+func IsNil(j *lint.Job, expr ast.Expr) bool {
+ return j.NodePackage(expr).TypesInfo.Types[expr].IsNil()
+}
+
+func BoolConst(j *lint.Job, expr ast.Expr) bool {
+ val := j.NodePackage(expr).TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
+ return constant.BoolVal(val)
+}
+
+func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
+ // We explicitly don't support typed bools because more often than
+ // not, custom bool types are used as binary enums and the
+ // explicit comparison is desired.
+
+ ident, ok := expr.(*ast.Ident)
+ if !ok {
+ return false
+ }
+ obj := j.NodePackage(expr).TypesInfo.ObjectOf(ident)
+ c, ok := obj.(*types.Const)
+ if !ok {
+ return false
+ }
+ basic, ok := c.Type().(*types.Basic)
+ if !ok {
+ return false
+ }
+ if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
+ return false
+ }
+ return true
+}
+
+func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) {
+ tv := j.NodePackage(expr).TypesInfo.Types[expr]
+ if tv.Value == nil {
+ return 0, false
+ }
+ if tv.Value.Kind() != constant.Int {
+ return 0, false
+ }
+ return constant.Int64Val(tv.Value)
+}
+
+func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) {
+ val := j.NodePackage(expr).TypesInfo.Types[expr].Value
+ if val == nil {
+ return "", false
+ }
+ if val.Kind() != constant.String {
+ return "", false
+ }
+ return constant.StringVal(val), true
+}
+
+// Dereference returns a pointer's element type; otherwise it returns
+// T.
+func Dereference(T types.Type) types.Type {
+ if p, ok := T.Underlying().(*types.Pointer); ok {
+ return p.Elem()
+ }
+ return T
+}
+
+// DereferenceR returns a pointer's element type; otherwise it returns
+// T. If the element type is itself a pointer, DereferenceR will be
+// applied recursively.
+func DereferenceR(T types.Type) types.Type {
+ if p, ok := T.Underlying().(*types.Pointer); ok {
+ return DereferenceR(p.Elem())
+ }
+ return T
+}
+
+func IsGoVersion(j *lint.Job, minor int) bool {
+ return j.Program.GoVersion >= minor
+}
+
+func CallNameAST(j *lint.Job, call *ast.CallExpr) string {
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return ""
+ }
+ fn, ok := j.NodePackage(call).TypesInfo.ObjectOf(sel.Sel).(*types.Func)
+ if !ok {
+ return ""
+ }
+ return fn.FullName()
+}
+
+func IsCallToAST(j *lint.Job, node ast.Node, name string) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return false
+ }
+ return CallNameAST(j, call) == name
+}
+
+func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool {
+ for _, name := range names {
+ if IsCallToAST(j, node, name) {
+ return true
+ }
+ }
+ return false
+}
+
+func Render(j *lint.Job, x interface{}) string {
+ fset := j.Program.SSA.Fset
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, fset, x); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+func RenderArgs(j *lint.Job, args []ast.Expr) string {
+ var ss []string
+ for _, arg := range args {
+ ss = append(ss, Render(j, arg))
+ }
+ return strings.Join(ss, ", ")
+}
+
+func Preamble(f *ast.File) string {
+ cutoff := f.Package
+ if f.Doc != nil {
+ cutoff = f.Doc.Pos()
+ }
+ var out []string
+ for _, cmt := range f.Comments {
+ if cmt.Pos() >= cutoff {
+ break
+ }
+ out = append(out, cmt.Text())
+ }
+ return strings.Join(out, "\n")
+}
+
+func Inspect(node ast.Node, fn func(node ast.Node) bool) {
+ if node == nil {
+ return
+ }
+ ast.Inspect(node, fn)
+}
+
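+// GroupSpecs partitions specs into runs that appear on consecutive
+// lines, starting a new group whenever at least one blank line
+// separates two specs. As an illustrative sketch,
+//
+//	const (
+//		a = 1
+//		b = 2
+//
+//		c = 3
+//	)
+//
+// yields the groups {a, b} and {c}.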
+func GroupSpecs(j *lint.Job, specs []ast.Spec) [][]ast.Spec {
+ if len(specs) == 0 {
+ return nil
+ }
+ fset := j.Program.SSA.Fset
+ groups := make([][]ast.Spec, 1)
+ groups[0] = append(groups[0], specs[0])
+
+ for _, spec := range specs[1:] {
+ g := groups[len(groups)-1]
+ if fset.PositionFor(spec.Pos(), false).Line-1 !=
+ fset.PositionFor(g[len(g)-1].End(), false).Line {
+
+ groups = append(groups, nil)
+ }
+
+ groups[len(groups)-1] = append(groups[len(groups)-1], spec)
+ }
+
+ return groups
+}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go
new file mode 100644
index 000000000..23aa132de
--- /dev/null
+++ b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go
@@ -0,0 +1,128 @@
+// Package format provides formatters for linter problems.
+package format
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "text/tabwriter"
+
+ "honnef.co/go/tools/lint"
+)
+
+func shortPath(path string) string {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return path
+ }
+ if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
+ return rel
+ }
+ return path
+}
+
+func relativePositionString(pos token.Position) string {
+ s := shortPath(pos.Filename)
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+type Statter interface {
+ Stats(total, errors, warnings int)
+}
+
+type Formatter interface {
+ Format(p lint.Problem)
+}
+
+type Text struct {
+ W io.Writer
+}
+
+func (o Text) Format(p lint.Problem) {
+ fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Position), p.String())
+}
+
+type JSON struct {
+ W io.Writer
+}
+
+func severity(s lint.Severity) string {
+ switch s {
+ case lint.Error:
+ return "error"
+ case lint.Warning:
+ return "warning"
+ case lint.Ignored:
+ return "ignored"
+ }
+ return ""
+}
+
+func (o JSON) Format(p lint.Problem) {
+ type location struct {
+ File string `json:"file"`
+ Line int `json:"line"`
+ Column int `json:"column"`
+ }
+ jp := struct {
+ Code string `json:"code"`
+ Severity string `json:"severity,omitempty"`
+ Location location `json:"location"`
+ Message string `json:"message"`
+ }{
+ Code: p.Check,
+ Severity: severity(p.Severity),
+ Location: location{
+ File: p.Position.Filename,
+ Line: p.Position.Line,
+ Column: p.Position.Column,
+ },
+ Message: p.Text,
+ }
+ _ = json.NewEncoder(o.W).Encode(jp)
+}
+
+type Stylish struct {
+ W io.Writer
+
+ prevFile string
+ tw *tabwriter.Writer
+}
+
+func (o *Stylish) Format(p lint.Problem) {
+ if p.Position.Filename == "" {
+ p.Position.Filename = "-"
+ }
+
+ if p.Position.Filename != o.prevFile {
+ if o.prevFile != "" {
+ o.tw.Flush()
+ fmt.Fprintln(o.W)
+ }
+ fmt.Fprintln(o.W, p.Position.Filename)
+ o.prevFile = p.Position.Filename
+ o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
+ }
+ fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", p.Position.Line, p.Position.Column, p.Check, p.Text)
+}
+
+func (o *Stylish) Stats(total, errors, warnings int) {
+ if o.tw != nil {
+ o.tw.Flush()
+ fmt.Fprintln(o.W)
+ }
+ fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
+ total, errors, warnings)
+}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vendor/honnef.co/go/tools/lint/lintutil/util.go
new file mode 100644
index 000000000..1142aa04d
--- /dev/null
+++ b/vendor/honnef.co/go/tools/lint/lintutil/util.go
@@ -0,0 +1,362 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package lintutil provides helpers for writing linter command lines.
+package lintutil // import "honnef.co/go/tools/lint/lintutil"
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "go/token"
+ "log"
+ "os"
+ "regexp"
+ "runtime"
+ "runtime/pprof"
+ "strconv"
+ "strings"
+ "time"
+
+ "honnef.co/go/tools/config"
+ "honnef.co/go/tools/lint"
+ "honnef.co/go/tools/lint/lintutil/format"
+ "honnef.co/go/tools/version"
+
+ "golang.org/x/tools/go/packages"
+)
+
+func usage(name string, flags *flag.FlagSet) func() {
+ return func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
+ fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name)
+ fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name)
+ fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name)
+ fmt.Fprintf(os.Stderr, "\t%s [flags] files... # must be a single package\n", name)
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flags.PrintDefaults()
+ }
+}
+
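+// parseIgnore parses the deprecated -ignore flag, a whitespace-separated
+// list of "glob:check1,check2" entries. For example (illustrative only),
+//
+//	parseIgnore("example.com/pkg/*.go:S1000,SA4006")
+//
+// ignores S1000 and SA4006 in files matched by that glob.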
+func parseIgnore(s string) ([]lint.Ignore, error) {
+ var out []lint.Ignore
+ if len(s) == 0 {
+ return nil, nil
+ }
+ for _, part := range strings.Fields(s) {
+ p := strings.Split(part, ":")
+ if len(p) != 2 {
+ return nil, errors.New("malformed ignore string")
+ }
+ path := p[0]
+ checks := strings.Split(p[1], ",")
+ out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks})
+ }
+ return out, nil
+}
+
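+// versionFlag holds a Go release given as "1.x", storing the minor
+// version x. For example (illustrative), Set("1.11") stores 11, while
+// Set("2.0") is rejected as an invalid Go version.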
+type versionFlag int
+
+func (v *versionFlag) String() string {
+ return fmt.Sprintf("1.%d", *v)
+}
+
+func (v *versionFlag) Set(s string) error {
+ if len(s) < 3 {
+ return errors.New("invalid Go version")
+ }
+ if s[0] != '1' {
+ return errors.New("invalid Go version")
+ }
+ if s[1] != '.' {
+ return errors.New("invalid Go version")
+ }
+ i, err := strconv.Atoi(s[2:])
+ *v = versionFlag(i)
+ return err
+}
+
+func (v *versionFlag) Get() interface{} {
+ return int(*v)
+}
+
+type list []string
+
+func (list *list) String() string {
+ return `"` + strings.Join(*list, ",") + `"`
+}
+
+func (list *list) Set(s string) error {
+ if s == "" {
+ *list = nil
+ return nil
+ }
+
+ *list = strings.Split(s, ",")
+ return nil
+}
+
+func FlagSet(name string) *flag.FlagSet {
+ flags := flag.NewFlagSet("", flag.ExitOnError)
+ flags.Usage = usage(name, flags)
+ flags.String("tags", "", "List of `build tags`")
+ flags.String("ignore", "", "Deprecated: use linter directives instead")
+ flags.Bool("tests", true, "Include tests")
+ flags.Bool("version", false, "Print version and exit")
+ flags.Bool("show-ignored", false, "Don't filter ignored problems")
+ flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
+
+ flags.Int("debug.max-concurrent-jobs", 0, "Number of jobs to run concurrently")
+ flags.Bool("debug.print-stats", false, "Print debug statistics")
+ flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
+ flags.String("debug.memprofile", "", "Write memory profile to `file`")
+
+ checks := list{"inherit"}
+ fail := list{"all"}
+ flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
+ flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")
+
+ tags := build.Default.ReleaseTags
+ v := tags[len(tags)-1][2:]
+ version := new(versionFlag)
+ if err := version.Set(v); err != nil {
+ panic(fmt.Sprintf("internal error: %s", err))
+ }
+
+ flags.Var(version, "go", "Target Go `version` in the format '1.x'")
+ return flags
+}
+
+func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
+ tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
+ ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string)
+ tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
+ goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
+ formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
+ printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
+ showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
+
+ maxConcurrentJobs := fs.Lookup("debug.max-concurrent-jobs").Value.(flag.Getter).Get().(int)
+ printStats := fs.Lookup("debug.print-stats").Value.(flag.Getter).Get().(bool)
+ cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
+ memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
+
+ cfg := config.Config{}
+ cfg.Checks = *fs.Lookup("checks").Value.(*list)
+
+ exit := func(code int) {
+ if cpuProfile != "" {
+ pprof.StopCPUProfile()
+ }
+ if memProfile != "" {
+ f, err := os.Create(memProfile)
+ if err != nil {
+ panic(err)
+ }
+ runtime.GC()
+ pprof.WriteHeapProfile(f)
+ }
+ os.Exit(code)
+ }
+ if cpuProfile != "" {
+ f, err := os.Create(cpuProfile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ pprof.StartCPUProfile(f)
+ }
+
+ if printVersion {
+ version.Print()
+ exit(0)
+ }
+
+ ps, err := Lint(cs, fs.Args(), &Options{
+ Tags: strings.Fields(tags),
+ LintTests: tests,
+ Ignores: ignore,
+ GoVersion: goVersion,
+ ReturnIgnored: showIgnored,
+ Config: cfg,
+
+ MaxConcurrentJobs: maxConcurrentJobs,
+ PrintStats: printStats,
+ })
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ exit(1)
+ }
+
+ var f format.Formatter
+ switch formatter {
+ case "text":
+ f = format.Text{W: os.Stdout}
+ case "stylish":
+ f = &format.Stylish{W: os.Stdout}
+ case "json":
+ f = format.JSON{W: os.Stdout}
+ default:
+ fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
+ exit(2)
+ }
+
+ var (
+ total int
+ errors int
+ warnings int
+ )
+
+ fail := *fs.Lookup("fail").Value.(*list)
+ var allChecks []string
+ for _, p := range ps {
+ allChecks = append(allChecks, p.Check)
+ }
+
+ shouldExit := lint.FilterChecks(allChecks, fail)
+
+ total = len(ps)
+ for _, p := range ps {
+ if shouldExit[p.Check] {
+ errors++
+ } else {
+ p.Severity = lint.Warning
+ warnings++
+ }
+ f.Format(p)
+ }
+ if f, ok := f.(format.Statter); ok {
+ f.Stats(total, errors, warnings)
+ }
+ if errors > 0 {
+ exit(1)
+ }
+}
+
+type Options struct {
+ Config config.Config
+
+ Tags []string
+ LintTests bool
+ Ignores string
+ GoVersion int
+ ReturnIgnored bool
+
+ MaxConcurrentJobs int
+ PrintStats bool
+}
+
+func Lint(cs []lint.Checker, paths []string, opt *Options) ([]lint.Problem, error) {
+ stats := lint.PerfStats{
+ CheckerInits: map[string]time.Duration{},
+ }
+
+ if opt == nil {
+ opt = &Options{}
+ }
+ ignores, err := parseIgnore(opt.Ignores)
+ if err != nil {
+ return nil, err
+ }
+
+ conf := &packages.Config{
+ Mode: packages.LoadAllSyntax,
+ Tests: opt.LintTests,
+ BuildFlags: []string{
+ "-tags=" + strings.Join(opt.Tags, " "),
+ },
+ }
+
+ t := time.Now()
+ if len(paths) == 0 {
+ paths = []string{"."}
+ }
+ pkgs, err := packages.Load(conf, paths...)
+ if err != nil {
+ return nil, err
+ }
+ stats.PackageLoading = time.Since(t)
+
+ var problems []lint.Problem
+ workingPkgs := make([]*packages.Package, 0, len(pkgs))
+ for _, pkg := range pkgs {
+ if pkg.IllTyped {
+ problems = append(problems, compileErrors(pkg)...)
+ } else {
+ workingPkgs = append(workingPkgs, pkg)
+ }
+ }
+
+ if len(workingPkgs) == 0 {
+ return problems, nil
+ }
+
+ l := &lint.Linter{
+ Checkers: cs,
+ Ignores: ignores,
+ GoVersion: opt.GoVersion,
+ ReturnIgnored: opt.ReturnIgnored,
+ Config: opt.Config,
+
+ MaxConcurrentJobs: opt.MaxConcurrentJobs,
+ PrintStats: opt.PrintStats,
+ }
+ problems = append(problems, l.Lint(workingPkgs, &stats)...)
+
+ return problems, nil
+}
+
+var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
+
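+// parsePos parses a "file:line:col" position as reported by the
+// compiler. For example (illustrative), "foo.go:10:4" becomes
+// token.Position{Filename: "foo.go", Line: 10, Column: 4}.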
+func parsePos(pos string) token.Position {
+ if pos == "-" || pos == "" {
+ return token.Position{}
+ }
+ parts := posRe.FindStringSubmatch(pos)
+ if parts == nil {
+ panic(fmt.Sprintf("internal error: malformed position %q", pos))
+ }
+ file := parts[1]
+ line, _ := strconv.Atoi(parts[2])
+ col, _ := strconv.Atoi(parts[3])
+ return token.Position{
+ Filename: file,
+ Line: line,
+ Column: col,
+ }
+}
+
+func compileErrors(pkg *packages.Package) []lint.Problem {
+ if !pkg.IllTyped {
+ return nil
+ }
+ if len(pkg.Errors) == 0 {
+ // transitively ill-typed
+ var ps []lint.Problem
+ for _, imp := range pkg.Imports {
+ ps = append(ps, compileErrors(imp)...)
+ }
+ return ps
+ }
+ var ps []lint.Problem
+ for _, err := range pkg.Errors {
+ p := lint.Problem{
+ Position: parsePos(err.Pos),
+ Text: err.Msg,
+ Checker: "compiler",
+ Check: "compile",
+ }
+ ps = append(ps, p)
+ }
+ return ps
+}
+
+func ProcessArgs(name string, cs []lint.Checker, args []string) {
+ flags := FlagSet(name)
+ flags.Parse(args)
+
+ ProcessFlagSet(cs, flags)
+}
diff --git a/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md b/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md
new file mode 100644
index 000000000..c54c6c50a
--- /dev/null
+++ b/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to gosimple
+
+## Before filing an issue:
+
+### Are you having trouble building gosimple?
+
+Check that you have the latest version of its dependencies. Run
+```
+go get -u honnef.co/go/tools/simple
+```
+If you still have problems, consider searching for existing issues before filing a new issue.
+
+## Before sending a pull request:
+
+Have you understood the purpose of gosimple? Make sure to carefully read `README`.
diff --git a/vendor/honnef.co/go/tools/simple/lint.go b/vendor/honnef.co/go/tools/simple/lint.go
new file mode 100644
index 000000000..25de813b8
--- /dev/null
+++ b/vendor/honnef.co/go/tools/simple/lint.go
@@ -0,0 +1,1734 @@
+// Package simple contains a linter for Go source code.
+package simple // import "honnef.co/go/tools/simple"
+
+import (
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "reflect"
+ "strings"
+
+ . "honnef.co/go/tools/arg"
+ "honnef.co/go/tools/internal/sharedcheck"
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+type Checker struct {
+ CheckGenerated bool
+ MS *typeutil.MethodSetCache
+}
+
+func NewChecker() *Checker {
+ return &Checker{
+ MS: &typeutil.MethodSetCache{},
+ }
+}
+
+func (*Checker) Name() string { return "gosimple" }
+func (*Checker) Prefix() string { return "S" }
+
+func (c *Checker) Init(prog *lint.Program) {}
+
+func (c *Checker) Checks() []lint.Check {
+ return []lint.Check{
+ {ID: "S1000", FilterGenerated: true, Fn: c.LintSingleCaseSelect},
+ {ID: "S1001", FilterGenerated: true, Fn: c.LintLoopCopy},
+ {ID: "S1002", FilterGenerated: true, Fn: c.LintIfBoolCmp},
+ {ID: "S1003", FilterGenerated: true, Fn: c.LintStringsContains},
+ {ID: "S1004", FilterGenerated: true, Fn: c.LintBytesCompare},
+ {ID: "S1005", FilterGenerated: true, Fn: c.LintUnnecessaryBlank},
+ {ID: "S1006", FilterGenerated: true, Fn: c.LintForTrue},
+ {ID: "S1007", FilterGenerated: true, Fn: c.LintRegexpRaw},
+ {ID: "S1008", FilterGenerated: true, Fn: c.LintIfReturn},
+ {ID: "S1009", FilterGenerated: true, Fn: c.LintRedundantNilCheckWithLen},
+ {ID: "S1010", FilterGenerated: true, Fn: c.LintSlicing},
+ {ID: "S1011", FilterGenerated: true, Fn: c.LintLoopAppend},
+ {ID: "S1012", FilterGenerated: true, Fn: c.LintTimeSince},
+ {ID: "S1016", FilterGenerated: true, Fn: c.LintSimplerStructConversion},
+ {ID: "S1017", FilterGenerated: true, Fn: c.LintTrim},
+ {ID: "S1018", FilterGenerated: true, Fn: c.LintLoopSlide},
+ {ID: "S1019", FilterGenerated: true, Fn: c.LintMakeLenCap},
+ {ID: "S1020", FilterGenerated: true, Fn: c.LintAssertNotNil},
+ {ID: "S1021", FilterGenerated: true, Fn: c.LintDeclareAssign},
+ {ID: "S1023", FilterGenerated: true, Fn: c.LintRedundantBreak},
+ {ID: "S1024", FilterGenerated: true, Fn: c.LintTimeUntil},
+ {ID: "S1025", FilterGenerated: true, Fn: c.LintRedundantSprintf},
+ {ID: "S1028", FilterGenerated: true, Fn: c.LintErrorsNewSprintf},
+ {ID: "S1029", FilterGenerated: false, Fn: c.LintRangeStringRunes},
+ {ID: "S1030", FilterGenerated: true, Fn: c.LintBytesBufferConversions},
+ {ID: "S1031", FilterGenerated: true, Fn: c.LintNilCheckAroundRange},
+ {ID: "S1032", FilterGenerated: true, Fn: c.LintSortHelpers},
+ }
+}
+
+func (c *Checker) LintSingleCaseSelect(j *lint.Job) {
+ isSingleSelect := func(node ast.Node) bool {
+ v, ok := node.(*ast.SelectStmt)
+ if !ok {
+ return false
+ }
+ return len(v.Body.List) == 1
+ }
+
+ seen := map[ast.Node]struct{}{}
+ fn := func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.ForStmt:
+ if len(v.Body.List) != 1 {
+ return true
+ }
+ if !isSingleSelect(v.Body.List[0]) {
+ return true
+ }
+ if _, ok := v.Body.List[0].(*ast.SelectStmt).Body.List[0].(*ast.CommClause).Comm.(*ast.SendStmt); ok {
+ // Don't suggest using range for channel sends
+ return true
+ }
+ seen[v.Body.List[0]] = struct{}{}
+ j.Errorf(node, "should use for range instead of for { select {} }")
+ case *ast.SelectStmt:
+ if _, ok := seen[v]; ok {
+ return true
+ }
+ if !isSingleSelect(v) {
+ return true
+ }
+ j.Errorf(node, "should use a simple channel send/receive instead of select with a single case")
+ return true
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
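+// LintLoopCopy flags element-by-element copy loops. An illustrative
+// example of the reported pattern (src and dst are hypothetical slices
+// of the same type):
+//
+//	for i := range src {
+//		dst[i] = src[i]
+//	}
+//
+// which can be replaced by copy(dst, src).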
+func (c *Checker) LintLoopCopy(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return true
+ }
+
+ if loop.Key == nil {
+ return true
+ }
+ if len(loop.Body.List) != 1 {
+ return true
+ }
+ stmt, ok := loop.Body.List[0].(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if stmt.Tok != token.ASSIGN || len(stmt.Lhs) != 1 || len(stmt.Rhs) != 1 {
+ return true
+ }
+ lhs, ok := stmt.Lhs[0].(*ast.IndexExpr)
+ if !ok {
+ return true
+ }
+
+ if _, ok := TypeOf(j, lhs.X).(*types.Slice); !ok {
+ return true
+ }
+ lidx, ok := lhs.Index.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ key, ok := loop.Key.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if TypeOf(j, lhs) == nil || TypeOf(j, stmt.Rhs[0]) == nil {
+ return true
+ }
+ if ObjectOf(j, lidx) != ObjectOf(j, key) {
+ return true
+ }
+ if !types.Identical(TypeOf(j, lhs), TypeOf(j, stmt.Rhs[0])) {
+ return true
+ }
+ if _, ok := TypeOf(j, loop.X).(*types.Slice); !ok {
+ return true
+ }
+
+ if rhs, ok := stmt.Rhs[0].(*ast.IndexExpr); ok {
+			if _, ok := rhs.X.(*ast.Ident); !ok {
+				return true
+			}
+ ridx, ok := rhs.Index.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if ObjectOf(j, ridx) != ObjectOf(j, key) {
+ return true
+ }
+ } else if rhs, ok := stmt.Rhs[0].(*ast.Ident); ok {
+ value, ok := loop.Value.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if ObjectOf(j, rhs) != ObjectOf(j, value) {
+ return true
+ }
+ } else {
+ return true
+ }
+ j.Errorf(loop, "should use copy() instead of a loop")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintIfBoolCmp(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ expr, ok := node.(*ast.BinaryExpr)
+ if !ok || (expr.Op != token.EQL && expr.Op != token.NEQ) {
+ return true
+ }
+ x := IsBoolConst(j, expr.X)
+ y := IsBoolConst(j, expr.Y)
+ if !x && !y {
+ return true
+ }
+ var other ast.Expr
+ var val bool
+ if x {
+ val = BoolConst(j, expr.X)
+ other = expr.Y
+ } else {
+ val = BoolConst(j, expr.Y)
+ other = expr.X
+ }
+ basic, ok := TypeOf(j, other).Underlying().(*types.Basic)
+ if !ok || basic.Kind() != types.Bool {
+ return true
+ }
+ op := ""
+ if (expr.Op == token.EQL && !val) || (expr.Op == token.NEQ && val) {
+ op = "!"
+ }
+ r := op + Render(j, other)
+ l1 := len(r)
+ r = strings.TrimLeft(r, "!")
+ if (l1-len(r))%2 == 1 {
+ r = "!" + r
+ }
+ if IsInTest(j, node) {
+ return true
+ }
+ j.Errorf(expr, "should omit comparison to bool constant, can be simplified to %s", r)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintBytesBufferConversions(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok || len(call.Args) != 1 {
+ return true
+ }
+
+ argCall, ok := call.Args[0].(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sel, ok := argCall.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+
+ typ := TypeOf(j, call.Fun)
+ if typ == types.Universe.Lookup("string").Type() && IsCallToAST(j, call.Args[0], "(*bytes.Buffer).Bytes") {
+ j.Errorf(call, "should use %v.String() instead of %v", Render(j, sel.X), Render(j, call))
+ } else if typ, ok := typ.(*types.Slice); ok && typ.Elem() == types.Universe.Lookup("byte").Type() && IsCallToAST(j, call.Args[0], "(*bytes.Buffer).String") {
+ j.Errorf(call, "should use %v.Bytes() instead of %v", Render(j, sel.X), Render(j, call))
+ }
+
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
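+// LintStringsContains flags Index, IndexAny and IndexRune comparisons
+// that can be written with the Contains family. For example
+// (illustrative),
+//
+//	strings.Index(s, sub) >= 0
+//
+// is reported with the suggested replacement strings.Contains(s, sub).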
+func (c *Checker) LintStringsContains(j *lint.Job) {
+	// maps the constant compared against to the comparison operators
+	// allowed with it, and to whether the resulting expression asserts
+	// containment (false meaning its negation)
+ allowed := map[int64]map[token.Token]bool{
+ -1: {token.GTR: true, token.NEQ: true, token.EQL: false},
+ 0: {token.GEQ: true, token.LSS: false},
+ }
+ fn := func(node ast.Node) bool {
+ expr, ok := node.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ switch expr.Op {
+ case token.GEQ, token.GTR, token.NEQ, token.LSS, token.EQL:
+ default:
+ return true
+ }
+
+ value, ok := ExprToInt(j, expr.Y)
+ if !ok {
+ return true
+ }
+
+ allowedOps, ok := allowed[value]
+ if !ok {
+ return true
+ }
+ b, ok := allowedOps[expr.Op]
+ if !ok {
+ return true
+ }
+
+ call, ok := expr.X.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ pkgIdent, ok := sel.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ funIdent := sel.Sel
+ if pkgIdent.Name != "strings" && pkgIdent.Name != "bytes" {
+ return true
+ }
+ newFunc := ""
+ switch funIdent.Name {
+ case "IndexRune":
+ newFunc = "ContainsRune"
+ case "IndexAny":
+ newFunc = "ContainsAny"
+ case "Index":
+ newFunc = "Contains"
+ default:
+ return true
+ }
+
+ prefix := ""
+ if !b {
+ prefix = "!"
+ }
+ j.Errorf(node, "should use %s%s.%s(%s) instead", prefix, pkgIdent.Name, newFunc, RenderArgs(j, call.Args))
+
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintBytesCompare(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ expr, ok := node.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ if expr.Op != token.NEQ && expr.Op != token.EQL {
+ return true
+ }
+ call, ok := expr.X.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, call, "bytes.Compare") {
+ return true
+ }
+ value, ok := ExprToInt(j, expr.Y)
+ if !ok || value != 0 {
+ return true
+ }
+ args := RenderArgs(j, call.Args)
+ prefix := ""
+ if expr.Op == token.NEQ {
+ prefix = "!"
+ }
+ j.Errorf(node, "should use %sbytes.Equal(%s) instead", prefix, args)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintForTrue(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.ForStmt)
+ if !ok {
+ return true
+ }
+ if loop.Init != nil || loop.Post != nil {
+ return true
+ }
+ if !IsBoolConst(j, loop.Cond) || !BoolConst(j, loop.Cond) {
+ return true
+ }
+ j.Errorf(loop, "should use for {} instead of for true {}")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintRegexpRaw(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, call, "regexp.MustCompile") &&
+ !IsCallToAST(j, call, "regexp.Compile") {
+ return true
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ if len(call.Args) != 1 {
+ // invalid function call
+ return true
+ }
+ lit, ok := call.Args[Arg("regexp.Compile.expr")].(*ast.BasicLit)
+ if !ok {
+ // TODO(dominikh): support string concat, maybe support constants
+ return true
+ }
+ if lit.Kind != token.STRING {
+ // invalid function call
+ return true
+ }
+ if lit.Value[0] != '"' {
+ // already a raw string
+ return true
+ }
+ val := lit.Value
+ if !strings.Contains(val, `\\`) {
+ return true
+ }
+ if strings.Contains(val, "`") {
+ return true
+ }
+
+ bs := false
+ for _, c := range val {
+ if !bs && c == '\\' {
+ bs = true
+ continue
+ }
+ if bs && c == '\\' {
+ bs = false
+ continue
+ }
+ if bs {
+ // backslash followed by non-backslash -> escape sequence
+ return true
+ }
+ }
+
+ j.Errorf(call, "should use raw string (`...`) with regexp.%s to avoid having to escape twice", sel.Sel.Name)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintIfReturn(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ block, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return true
+ }
+ l := len(block.List)
+ if l < 2 {
+ return true
+ }
+ n1, n2 := block.List[l-2], block.List[l-1]
+
+ if len(block.List) >= 3 {
+ if _, ok := block.List[l-3].(*ast.IfStmt); ok {
+ // Do not flag a series of if statements
+ return true
+ }
+ }
+ // if statement with no init, no else, a single condition
+ // checking an identifier or function call and just a return
+ // statement in the body, that returns a boolean constant
+ ifs, ok := n1.(*ast.IfStmt)
+ if !ok {
+ return true
+ }
+ if ifs.Else != nil || ifs.Init != nil {
+ return true
+ }
+ if len(ifs.Body.List) != 1 {
+ return true
+ }
+ if op, ok := ifs.Cond.(*ast.BinaryExpr); ok {
+ switch op.Op {
+ case token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ:
+ default:
+ return true
+ }
+ }
+ ret1, ok := ifs.Body.List[0].(*ast.ReturnStmt)
+ if !ok {
+ return true
+ }
+ if len(ret1.Results) != 1 {
+ return true
+ }
+ if !IsBoolConst(j, ret1.Results[0]) {
+ return true
+ }
+
+ ret2, ok := n2.(*ast.ReturnStmt)
+ if !ok {
+ return true
+ }
+ if len(ret2.Results) != 1 {
+ return true
+ }
+ if !IsBoolConst(j, ret2.Results[0]) {
+ return true
+ }
+ j.Errorf(n1, "should use 'return <expr>' instead of 'if <expr> { return <bool> }; return <bool>'")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+// LintRedundantNilCheckWithLen checks for the following redundant nil-checks:
+//
+// if x == nil || len(x) == 0 {}
+// if x != nil && len(x) != 0 {}
+// if x != nil && len(x) == N {} (where N != 0)
+// if x != nil && len(x) > N {}
+// if x != nil && len(x) >= N {} (where N != 0)
+//
+func (c *Checker) LintRedundantNilCheckWithLen(j *lint.Job) {
+ isConstZero := func(expr ast.Expr) (isConst bool, isZero bool) {
+ _, ok := expr.(*ast.BasicLit)
+ if ok {
+ return true, IsZero(expr)
+ }
+ id, ok := expr.(*ast.Ident)
+ if !ok {
+ return false, false
+ }
+ c, ok := ObjectOf(j, id).(*types.Const)
+ if !ok {
+ return false, false
+ }
+ return true, c.Val().Kind() == constant.Int && c.Val().String() == "0"
+ }
+
+ fn := func(node ast.Node) bool {
+ // check that expr is "x || y" or "x && y"
+ expr, ok := node.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ if expr.Op != token.LOR && expr.Op != token.LAND {
+ return true
+ }
+ eqNil := expr.Op == token.LOR
+
+ // check that x is "xx == nil" or "xx != nil"
+ x, ok := expr.X.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ if eqNil && x.Op != token.EQL {
+ return true
+ }
+ if !eqNil && x.Op != token.NEQ {
+ return true
+ }
+ xx, ok := x.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if !IsNil(j, x.Y) {
+ return true
+ }
+
+ // check that y is "len(xx) == 0" or "len(xx) ... "
+ y, ok := expr.Y.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ if eqNil && y.Op != token.EQL { // must be len(xx) *==* 0
+ return false
+ }
+ yx, ok := y.X.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ yxFun, ok := yx.Fun.(*ast.Ident)
+ if !ok || yxFun.Name != "len" || len(yx.Args) != 1 {
+ return true
+ }
+ yxArg, ok := yx.Args[Arg("len.v")].(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if yxArg.Name != xx.Name {
+ return true
+ }
+
+ if eqNil && !IsZero(y.Y) { // must be len(x) == *0*
+ return true
+ }
+
+ if !eqNil {
+ isConst, isZero := isConstZero(y.Y)
+ if !isConst {
+ return true
+ }
+ switch y.Op {
+ case token.EQL:
+ // avoid false positive for "xx != nil && len(xx) == 0"
+ if isZero {
+ return true
+ }
+ case token.GEQ:
+ // avoid false positive for "xx != nil && len(xx) >= 0"
+ if isZero {
+ return true
+ }
+ case token.NEQ:
+ // avoid false positive for "xx != nil && len(xx) != <non-zero>"
+ if !isZero {
+ return true
+ }
+ case token.GTR:
+ // ok
+ default:
+ return true
+ }
+ }
+
+		// Finally, check that the type of xx is a slice, map or chan. This
+		// prevents a false positive when xx is a pointer to an array.
+ var nilType string
+ switch TypeOf(j, xx).(type) {
+ case *types.Slice:
+ nilType = "nil slices"
+ case *types.Map:
+ nilType = "nil maps"
+ case *types.Chan:
+ nilType = "nil channels"
+ default:
+ return true
+ }
+ j.Errorf(expr, "should omit nil check; len() for %s is defined as zero", nilType)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintSlicing(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ n, ok := node.(*ast.SliceExpr)
+ if !ok {
+ return true
+ }
+ if n.Max != nil {
+ return true
+ }
+ s, ok := n.X.(*ast.Ident)
+ if !ok || s.Obj == nil {
+ return true
+ }
+ call, ok := n.High.(*ast.CallExpr)
+ if !ok || len(call.Args) != 1 || call.Ellipsis.IsValid() {
+ return true
+ }
+ fun, ok := call.Fun.(*ast.Ident)
+ if !ok || fun.Name != "len" {
+ return true
+ }
+ if _, ok := ObjectOf(j, fun).(*types.Builtin); !ok {
+ return true
+ }
+ arg, ok := call.Args[Arg("len.v")].(*ast.Ident)
+ if !ok || arg.Obj != s.Obj {
+ return true
+ }
+ j.Errorf(n, "should omit second index in slice, s[a:len(s)] is identical to s[a:]")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func refersTo(j *lint.Job, expr ast.Expr, ident *ast.Ident) bool {
+ found := false
+ fn := func(node ast.Node) bool {
+ ident2, ok := node.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if ObjectOf(j, ident) == ObjectOf(j, ident2) {
+ found = true
+ return false
+ }
+ return true
+ }
+ ast.Inspect(expr, fn)
+ return found
+}
+
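+// LintLoopAppend flags range loops whose only statement appends the
+// loop value to another slice of the same type, e.g.
+//
+//	for _, v := range src {
+//		dst = append(dst, v)
+//	}
+//
+// which can be replaced by dst = append(dst, src...).
+//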
+func (c *Checker) LintLoopAppend(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return true
+ }
+ if !IsBlank(loop.Key) {
+ return true
+ }
+ val, ok := loop.Value.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if len(loop.Body.List) != 1 {
+ return true
+ }
+ stmt, ok := loop.Body.List[0].(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if stmt.Tok != token.ASSIGN || len(stmt.Lhs) != 1 || len(stmt.Rhs) != 1 {
+ return true
+ }
+ if refersTo(j, stmt.Lhs[0], val) {
+ return true
+ }
+ call, ok := stmt.Rhs[0].(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if len(call.Args) != 2 || call.Ellipsis.IsValid() {
+ return true
+ }
+ fun, ok := call.Fun.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ obj := ObjectOf(j, fun)
+ fn, ok := obj.(*types.Builtin)
+ if !ok || fn.Name() != "append" {
+ return true
+ }
+
+ src := TypeOf(j, loop.X)
+ dst := TypeOf(j, call.Args[Arg("append.slice")])
+ // TODO(dominikh) remove nil check once Go issue #15173 has
+ // been fixed
+ if src == nil {
+ return true
+ }
+ if !types.Identical(src, dst) {
+ return true
+ }
+
+ if Render(j, stmt.Lhs[0]) != Render(j, call.Args[Arg("append.slice")]) {
+ return true
+ }
+
+ el, ok := call.Args[Arg("append.elems")].(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if ObjectOf(j, val) != ObjectOf(j, el) {
+ return true
+ }
+ j.Errorf(loop, "should replace loop with %s = append(%s, %s...)",
+ Render(j, stmt.Lhs[0]), Render(j, call.Args[Arg("append.slice")]), Render(j, loop.X))
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
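+// LintTimeSince suggests time.Since for computing elapsed time, e.g.
+//
+//	d := time.Now().Sub(start) // should be: d := time.Since(start)
+//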
+func (c *Checker) LintTimeSince(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, sel.X, "time.Now") {
+ return true
+ }
+ if sel.Sel.Name != "Sub" {
+ return true
+ }
+ j.Errorf(call, "should use time.Since instead of time.Now().Sub")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
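+// LintTimeUntil suggests time.Until, available since Go 1.8, e.g.
+//
+//	d := deadline.Sub(time.Now()) // should be: d := time.Until(deadline)
+//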
+func (c *Checker) LintTimeUntil(j *lint.Job) {
+ if !IsGoVersion(j, 8) {
+ return
+ }
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, call, "(time.Time).Sub") {
+ return true
+ }
+ if !IsCallToAST(j, call.Args[Arg("(time.Time).Sub.u")], "time.Now") {
+ return true
+ }
+ j.Errorf(call, "should use time.Until instead of t.Sub(time.Now())")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
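+// LintUnnecessaryBlank flags blank identifiers that can simply be
+// dropped, e.g.
+//
+//	x, _ = m[k]         // should be: x = m[k]
+//	_ = <-ch            // should be: <-ch
+//	for x, _ := range s // should be: for x := range s
+//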
+func (c *Checker) LintUnnecessaryBlank(j *lint.Job) {
+ fn1 := func(node ast.Node) {
+ assign, ok := node.(*ast.AssignStmt)
+ if !ok {
+ return
+ }
+ if len(assign.Lhs) != 2 || len(assign.Rhs) != 1 {
+ return
+ }
+ if !IsBlank(assign.Lhs[1]) {
+ return
+ }
+ switch rhs := assign.Rhs[0].(type) {
+ case *ast.IndexExpr:
+ // The type-checker should make sure that it's a map, but
+ // let's be safe.
+ if _, ok := TypeOf(j, rhs.X).Underlying().(*types.Map); !ok {
+ return
+ }
+ case *ast.UnaryExpr:
+ if rhs.Op != token.ARROW {
+ return
+ }
+ default:
+ return
+ }
+ cp := *assign
+ cp.Lhs = cp.Lhs[0:1]
+ j.Errorf(assign, "should write %s instead of %s", Render(j, &cp), Render(j, assign))
+ }
+
+ fn2 := func(node ast.Node) {
+ stmt, ok := node.(*ast.AssignStmt)
+ if !ok {
+ return
+ }
+ if len(stmt.Lhs) != len(stmt.Rhs) {
+ return
+ }
+ for i, lh := range stmt.Lhs {
+ rh := stmt.Rhs[i]
+ if !IsBlank(lh) {
+ continue
+ }
+ expr, ok := rh.(*ast.UnaryExpr)
+ if !ok {
+ continue
+ }
+ if expr.Op != token.ARROW {
+ continue
+ }
+ j.Errorf(lh, "'_ = <-ch' can be simplified to '<-ch'")
+ }
+ }
+
+ fn3 := func(node ast.Node) {
+ rs, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return
+ }
+
+ // for x, _
+ if !IsBlank(rs.Key) && IsBlank(rs.Value) {
+ j.Errorf(rs.Value, "should omit value from range; this loop is equivalent to `for %s %s range ...`", Render(j, rs.Key), rs.Tok)
+ }
+ // for _, _ || for _
+ if IsBlank(rs.Key) && (IsBlank(rs.Value) || rs.Value == nil) {
+ j.Errorf(rs.Key, "should omit values from range; this loop is equivalent to `for range ...`")
+ }
+ }
+
+ fn := func(node ast.Node) bool {
+ fn1(node)
+ fn2(node)
+ if IsGoVersion(j, 4) {
+ fn3(node)
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
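+// LintSimplerStructConversion flags struct literals that copy every
+// field from a single value of a structurally identical type in the
+// same package, e.g.
+//
+//	t2 := T2{F1: t1.F1, F2: t1.F2} // should be: t2 := T2(t1)
+//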
+func (c *Checker) LintSimplerStructConversion(j *lint.Job) {
+ var skip ast.Node
+ fn := func(node ast.Node) bool {
+ // Do not suggest type conversion between pointers
+ if unary, ok := node.(*ast.UnaryExpr); ok && unary.Op == token.AND {
+ if lit, ok := unary.X.(*ast.CompositeLit); ok {
+ skip = lit
+ }
+ return true
+ }
+
+ if node == skip {
+ return true
+ }
+
+ lit, ok := node.(*ast.CompositeLit)
+ if !ok {
+ return true
+ }
+ typ1, _ := TypeOf(j, lit.Type).(*types.Named)
+ if typ1 == nil {
+ return true
+ }
+ s1, ok := typ1.Underlying().(*types.Struct)
+ if !ok {
+ return true
+ }
+
+ var typ2 *types.Named
+ var ident *ast.Ident
+ getSelType := func(expr ast.Expr) (types.Type, *ast.Ident, bool) {
+ sel, ok := expr.(*ast.SelectorExpr)
+ if !ok {
+ return nil, nil, false
+ }
+ ident, ok := sel.X.(*ast.Ident)
+ if !ok {
+ return nil, nil, false
+ }
+ typ := TypeOf(j, sel.X)
+ return typ, ident, typ != nil
+ }
+ if len(lit.Elts) == 0 {
+ return true
+ }
+ if s1.NumFields() != len(lit.Elts) {
+ return true
+ }
+ for i, elt := range lit.Elts {
+ var t types.Type
+ var id *ast.Ident
+ var ok bool
+ switch elt := elt.(type) {
+ case *ast.SelectorExpr:
+ t, id, ok = getSelType(elt)
+ if !ok {
+ return true
+ }
+ if i >= s1.NumFields() || s1.Field(i).Name() != elt.Sel.Name {
+ return true
+ }
+ case *ast.KeyValueExpr:
+ var sel *ast.SelectorExpr
+ sel, ok = elt.Value.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+
+ if elt.Key.(*ast.Ident).Name != sel.Sel.Name {
+ return true
+ }
+ t, id, ok = getSelType(elt.Value)
+ }
+ if !ok {
+ return true
+ }
+ // All fields must be initialized from the same object
+ if ident != nil && ident.Obj != id.Obj {
+ return true
+ }
+ typ2, _ = t.(*types.Named)
+ if typ2 == nil {
+ return true
+ }
+ ident = id
+ }
+
+ if typ2 == nil {
+ return true
+ }
+
+ if typ1.Obj().Pkg() != typ2.Obj().Pkg() {
+ // Do not suggest type conversions between different
+ // packages. Types in different packages might only match
+ // by coincidence. Furthermore, if the dependency ever
+ // adds more fields to its type, it could break the code
+ // that relies on the type conversion to work.
+ return true
+ }
+
+ s2, ok := typ2.Underlying().(*types.Struct)
+ if !ok {
+ return true
+ }
+ if typ1 == typ2 {
+ return true
+ }
+ if IsGoVersion(j, 8) {
+ if !types.IdenticalIgnoreTags(s1, s2) {
+ return true
+ }
+ } else {
+ if !types.Identical(s1, s2) {
+ return true
+ }
+ }
+ j.Errorf(node, "should convert %s (type %s) to %s instead of using struct literal",
+ ident.Name, typ2.Obj().Name(), typ1.Obj().Name())
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
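+// LintTrim flags manual prefix and suffix trimming that can use
+// strings.TrimPrefix or strings.TrimSuffix (or their bytes
+// equivalents), e.g.
+//
+//	if strings.HasPrefix(s, pre) {
+//		s = s[len(pre):]
+//	}
+//
+// which can be replaced by s = strings.TrimPrefix(s, pre).
+//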
+func (c *Checker) LintTrim(j *lint.Job) {
+ sameNonDynamic := func(node1, node2 ast.Node) bool {
+ if reflect.TypeOf(node1) != reflect.TypeOf(node2) {
+ return false
+ }
+
+ switch node1 := node1.(type) {
+ case *ast.Ident:
+ return node1.Obj == node2.(*ast.Ident).Obj
+ case *ast.SelectorExpr:
+ return Render(j, node1) == Render(j, node2)
+ case *ast.IndexExpr:
+ return Render(j, node1) == Render(j, node2)
+ }
+ return false
+ }
+
+ isLenOnIdent := func(fn ast.Expr, ident ast.Expr) bool {
+ call, ok := fn.(*ast.CallExpr)
+ if !ok {
+ return false
+ }
+ if fn, ok := call.Fun.(*ast.Ident); !ok || fn.Name != "len" {
+ return false
+ }
+ if len(call.Args) != 1 {
+ return false
+ }
+ return sameNonDynamic(call.Args[Arg("len.v")], ident)
+ }
+
+ fn := func(node ast.Node) bool {
+ var pkg string
+ var fun string
+
+ ifstmt, ok := node.(*ast.IfStmt)
+ if !ok {
+ return true
+ }
+ if ifstmt.Init != nil {
+ return true
+ }
+ if ifstmt.Else != nil {
+ return true
+ }
+ if len(ifstmt.Body.List) != 1 {
+ return true
+ }
+ condCall, ok := ifstmt.Cond.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ call, ok := condCall.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ if IsIdent(call.X, "strings") {
+ pkg = "strings"
+ } else if IsIdent(call.X, "bytes") {
+ pkg = "bytes"
+ } else {
+ return true
+ }
+ if IsIdent(call.Sel, "HasPrefix") {
+ fun = "HasPrefix"
+ } else if IsIdent(call.Sel, "HasSuffix") {
+ fun = "HasSuffix"
+ } else {
+ return true
+ }
+
+ assign, ok := ifstmt.Body.List[0].(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if assign.Tok != token.ASSIGN {
+ return true
+ }
+ if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
+ return true
+ }
+ if !sameNonDynamic(condCall.Args[0], assign.Lhs[0]) {
+ return true
+ }
+ slice, ok := assign.Rhs[0].(*ast.SliceExpr)
+ if !ok {
+ return true
+ }
+ if slice.Slice3 {
+ return true
+ }
+ if !sameNonDynamic(slice.X, condCall.Args[0]) {
+ return true
+ }
+ var index ast.Expr
+ switch fun {
+ case "HasPrefix":
+ // TODO(dh) We could detect a High that is len(s), but another
+ // rule will already flag that, anyway.
+ if slice.High != nil {
+ return true
+ }
+ index = slice.Low
+ case "HasSuffix":
+ if slice.Low != nil {
+ n, ok := ExprToInt(j, slice.Low)
+ if !ok || n != 0 {
+ return true
+ }
+ }
+ index = slice.High
+ }
+
+ switch index := index.(type) {
+ case *ast.CallExpr:
+ if fun != "HasPrefix" {
+ return true
+ }
+ if fn, ok := index.Fun.(*ast.Ident); !ok || fn.Name != "len" {
+ return true
+ }
+ if len(index.Args) != 1 {
+ return true
+ }
+ id3 := index.Args[Arg("len.v")]
+ switch oid3 := condCall.Args[1].(type) {
+ case *ast.BasicLit:
+ if pkg != "strings" {
+ return false
+ }
+ lit, ok := id3.(*ast.BasicLit)
+ if !ok {
+ return true
+ }
+ s1, ok1 := ExprToString(j, lit)
+ s2, ok2 := ExprToString(j, condCall.Args[1])
+ if !ok1 || !ok2 || s1 != s2 {
+ return true
+ }
+ default:
+ if !sameNonDynamic(id3, oid3) {
+ return true
+ }
+ }
+ case *ast.BasicLit, *ast.Ident:
+ if fun != "HasPrefix" {
+ return true
+ }
+ if pkg != "strings" {
+ return true
+ }
+			s, ok1 := ExprToString(j, condCall.Args[1])
+			n, ok2 := ExprToInt(j, slice.Low)
+			if !ok1 || !ok2 || n != int64(len(s)) {
+ return true
+ }
+ case *ast.BinaryExpr:
+ if fun != "HasSuffix" {
+ return true
+ }
+ if index.Op != token.SUB {
+ return true
+ }
+ if !isLenOnIdent(index.X, condCall.Args[0]) ||
+ !isLenOnIdent(index.Y, condCall.Args[1]) {
+ return true
+ }
+ default:
+ return true
+ }
+
+ var replacement string
+ switch fun {
+ case "HasPrefix":
+ replacement = "TrimPrefix"
+ case "HasSuffix":
+ replacement = "TrimSuffix"
+ }
+ j.Errorf(ifstmt, "should replace this if statement with an unconditional %s.%s", pkg, replacement)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) LintLoopSlide(j *lint.Job) {
+ // TODO(dh): detect bs[i+offset] in addition to bs[offset+i]
+ // TODO(dh): consider merging this function with LintLoopCopy
+ // TODO(dh): detect length that is an expression, not a variable name
+ // TODO(dh): support sliding to a different offset than the beginning of the slice
+
+ fn := func(node ast.Node) bool {
+ /*
+ for i := 0; i < n; i++ {
+ bs[i] = bs[offset+i]
+ }
+
+ ↓
+
+ copy(bs[:n], bs[offset:offset+n])
+ */
+
+ loop, ok := node.(*ast.ForStmt)
+ if !ok || len(loop.Body.List) != 1 || loop.Init == nil || loop.Cond == nil || loop.Post == nil {
+ return true
+ }
+ assign, ok := loop.Init.(*ast.AssignStmt)
+ if !ok || len(assign.Lhs) != 1 || len(assign.Rhs) != 1 || !IsZero(assign.Rhs[0]) {
+ return true
+ }
+ initvar, ok := assign.Lhs[0].(*ast.Ident)
+ if !ok {
+ return true
+ }
+ post, ok := loop.Post.(*ast.IncDecStmt)
+ if !ok || post.Tok != token.INC {
+ return true
+ }
+ postvar, ok := post.X.(*ast.Ident)
+ if !ok || ObjectOf(j, postvar) != ObjectOf(j, initvar) {
+ return true
+ }
+ bin, ok := loop.Cond.(*ast.BinaryExpr)
+ if !ok || bin.Op != token.LSS {
+ return true
+ }
+ binx, ok := bin.X.(*ast.Ident)
+ if !ok || ObjectOf(j, binx) != ObjectOf(j, initvar) {
+ return true
+ }
+ biny, ok := bin.Y.(*ast.Ident)
+ if !ok {
+ return true
+ }
+
+ assign, ok = loop.Body.List[0].(*ast.AssignStmt)
+ if !ok || len(assign.Lhs) != 1 || len(assign.Rhs) != 1 || assign.Tok != token.ASSIGN {
+ return true
+ }
+ lhs, ok := assign.Lhs[0].(*ast.IndexExpr)
+ if !ok {
+ return true
+ }
+ rhs, ok := assign.Rhs[0].(*ast.IndexExpr)
+ if !ok {
+ return true
+ }
+
+ bs1, ok := lhs.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ bs2, ok := rhs.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ obj1 := ObjectOf(j, bs1)
+ obj2 := ObjectOf(j, bs2)
+ if obj1 != obj2 {
+ return true
+ }
+ if _, ok := obj1.Type().Underlying().(*types.Slice); !ok {
+ return true
+ }
+
+ index1, ok := lhs.Index.(*ast.Ident)
+ if !ok || ObjectOf(j, index1) != ObjectOf(j, initvar) {
+ return true
+ }
+ index2, ok := rhs.Index.(*ast.BinaryExpr)
+ if !ok || index2.Op != token.ADD {
+ return true
+ }
+ add1, ok := index2.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ add2, ok := index2.Y.(*ast.Ident)
+ if !ok || ObjectOf(j, add2) != ObjectOf(j, initvar) {
+ return true
+ }
+
+ j.Errorf(loop, "should use copy(%s[:%s], %s[%s:]) instead", Render(j, bs1), Render(j, biny), Render(j, bs1), Render(j, add1))
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
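+// LintMakeLenCap flags redundant length and capacity arguments to
+// make, e.g.
+//
+//	make(map[int]string, 0) // should be: make(map[int]string)
+//	make([]int, n, n)       // should be: make([]int, n)
+//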
+func (c *Checker) LintMakeLenCap(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if fn, ok := call.Fun.(*ast.Ident); !ok || fn.Name != "make" {
+ // FIXME check whether make is indeed the built-in function
+ return true
+ }
+ switch len(call.Args) {
+ case 2:
+ // make(T, len)
+ if _, ok := TypeOf(j, call.Args[Arg("make.t")]).Underlying().(*types.Slice); ok {
+ break
+ }
+ if IsZero(call.Args[Arg("make.size[0]")]) {
+ j.Errorf(call.Args[Arg("make.size[0]")], "should use make(%s) instead", Render(j, call.Args[Arg("make.t")]))
+ }
+ case 3:
+ // make(T, len, cap)
+ if Render(j, call.Args[Arg("make.size[0]")]) == Render(j, call.Args[Arg("make.size[1]")]) {
+ j.Errorf(call.Args[Arg("make.size[0]")],
+ "should use make(%s, %s) instead",
+ Render(j, call.Args[Arg("make.t")]), Render(j, call.Args[Arg("make.size[0]")]))
+ }
+ }
+ return false
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
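+// LintAssertNotNil flags nil checks that are already implied by a
+// successful type assertion, e.g.
+//
+//	if _, ok := i.(T); ok && i != nil {} // when ok is true, i can't be nil
+//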
+func (c *Checker) LintAssertNotNil(j *lint.Job) {
+ isNilCheck := func(ident *ast.Ident, expr ast.Expr) bool {
+ xbinop, ok := expr.(*ast.BinaryExpr)
+ if !ok || xbinop.Op != token.NEQ {
+ return false
+ }
+ xident, ok := xbinop.X.(*ast.Ident)
+ if !ok || xident.Obj != ident.Obj {
+ return false
+ }
+ if !IsNil(j, xbinop.Y) {
+ return false
+ }
+ return true
+ }
+ isOKCheck := func(ident *ast.Ident, expr ast.Expr) bool {
+ yident, ok := expr.(*ast.Ident)
+ if !ok || yident.Obj != ident.Obj {
+ return false
+ }
+ return true
+ }
+ fn := func(node ast.Node) bool {
+ ifstmt, ok := node.(*ast.IfStmt)
+ if !ok {
+ return true
+ }
+ assign, ok := ifstmt.Init.(*ast.AssignStmt)
+ if !ok || len(assign.Lhs) != 2 || len(assign.Rhs) != 1 || !IsBlank(assign.Lhs[0]) {
+ return true
+ }
+ assert, ok := assign.Rhs[0].(*ast.TypeAssertExpr)
+ if !ok {
+ return true
+ }
+ binop, ok := ifstmt.Cond.(*ast.BinaryExpr)
+ if !ok || binop.Op != token.LAND {
+ return true
+ }
+ assertIdent, ok := assert.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ assignIdent, ok := assign.Lhs[1].(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if !(isNilCheck(assertIdent, binop.X) && isOKCheck(assignIdent, binop.Y)) &&
+ !(isNilCheck(assertIdent, binop.Y) && isOKCheck(assignIdent, binop.X)) {
+ return true
+ }
+ j.Errorf(ifstmt, "when %s is true, %s can't be nil", Render(j, assignIdent), Render(j, assertIdent))
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
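+// LintDeclareAssign flags a variable declaration immediately followed
+// by an assignment to that variable, e.g.
+//
+//	var x int
+//	x = f()
+//
+// which can be merged into a single declaration.
+//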
+func (c *Checker) LintDeclareAssign(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ block, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return true
+ }
+ if len(block.List) < 2 {
+ return true
+ }
+ for i, stmt := range block.List[:len(block.List)-1] {
+ decl, ok := stmt.(*ast.DeclStmt)
+ if !ok {
+ continue
+ }
+ gdecl, ok := decl.Decl.(*ast.GenDecl)
+ if !ok || gdecl.Tok != token.VAR || len(gdecl.Specs) != 1 {
+ continue
+ }
+ vspec, ok := gdecl.Specs[0].(*ast.ValueSpec)
+ if !ok || len(vspec.Names) != 1 || len(vspec.Values) != 0 {
+ continue
+ }
+
+ assign, ok := block.List[i+1].(*ast.AssignStmt)
+ if !ok || assign.Tok != token.ASSIGN {
+ continue
+ }
+ if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
+ continue
+ }
+ ident, ok := assign.Lhs[0].(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if vspec.Names[0].Obj != ident.Obj {
+ continue
+ }
+
+ if refersTo(j, assign.Rhs[0], ident) {
+ continue
+ }
+ j.Errorf(decl, "should merge variable declaration with assignment on next line")
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
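+// LintRedundantBreak flags a trailing break in a case clause (cases in
+// Go do not fall through) and a trailing return in a function without
+// result parameters, e.g.
+//
+//	case 1:
+//		foo()
+//		break // redundant
+//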
+func (c *Checker) LintRedundantBreak(j *lint.Job) {
+ fn1 := func(node ast.Node) {
+ clause, ok := node.(*ast.CaseClause)
+ if !ok {
+ return
+ }
+ if len(clause.Body) < 2 {
+ return
+ }
+ branch, ok := clause.Body[len(clause.Body)-1].(*ast.BranchStmt)
+ if !ok || branch.Tok != token.BREAK || branch.Label != nil {
+ return
+ }
+ j.Errorf(branch, "redundant break statement")
+ }
+ fn2 := func(node ast.Node) {
+ var ret *ast.FieldList
+ var body *ast.BlockStmt
+ switch x := node.(type) {
+ case *ast.FuncDecl:
+ ret = x.Type.Results
+ body = x.Body
+ case *ast.FuncLit:
+ ret = x.Type.Results
+ body = x.Body
+ default:
+ return
+ }
+ // if the func has results, a return can't be redundant.
+ // similarly, if there are no statements, there can be
+ // no return.
+ if ret != nil || body == nil || len(body.List) < 1 {
+ return
+ }
+ rst, ok := body.List[len(body.List)-1].(*ast.ReturnStmt)
+ if !ok {
+ return
+ }
+ // we don't need to check rst.Results as we already
+ // checked x.Type.Results to be nil.
+ j.Errorf(rst, "redundant return statement")
+ }
+ fn := func(node ast.Node) bool {
+ fn1(node)
+ fn2(node)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
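+// Implements reports whether typ implements the interface named by
+// iface, which is either unqualified for the universe scope (e.g.
+// "error") or package-qualified (e.g. "fmt.Stringer").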
+func (c *Checker) Implements(j *lint.Job, typ types.Type, iface string) bool {
+ // OPT(dh): we can cache the type lookup
+ idx := strings.IndexRune(iface, '.')
+ var scope *types.Scope
+ var ifaceName string
+ if idx == -1 {
+ scope = types.Universe
+ ifaceName = iface
+ } else {
+ pkgName := iface[:idx]
+ pkg := j.Program.Package(pkgName)
+ if pkg == nil {
+ return false
+ }
+ scope = pkg.Types.Scope()
+ ifaceName = iface[idx+1:]
+ }
+
+ obj := scope.Lookup(ifaceName)
+ if obj == nil {
+ return false
+ }
+ i, ok := obj.Type().Underlying().(*types.Interface)
+ if !ok {
+ return false
+ }
+ return types.Implements(typ, i)
+}
+
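+// LintRedundantSprintf flags fmt.Sprintf("%s", x) where x is already a
+// string, has a string underlying type, or implements fmt.Stringer,
+// e.g.
+//
+//	s := fmt.Sprintf("%s", x) // for a Stringer: s := x.String()
+//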
+func (c *Checker) LintRedundantSprintf(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, call, "fmt.Sprintf") {
+ return true
+ }
+ if len(call.Args) != 2 {
+ return true
+ }
+ if s, ok := ExprToString(j, call.Args[Arg("fmt.Sprintf.format")]); !ok || s != "%s" {
+ return true
+ }
+ arg := call.Args[Arg("fmt.Sprintf.a[0]")]
+ typ := TypeOf(j, arg)
+
+ if c.Implements(j, typ, "fmt.Stringer") {
+ j.Errorf(call, "should use String() instead of fmt.Sprintf")
+ return true
+ }
+
+ if typ.Underlying() == types.Universe.Lookup("string").Type() {
+ if typ == types.Universe.Lookup("string").Type() {
+ j.Errorf(call, "the argument is already a string, there's no need to use fmt.Sprintf")
+ } else {
+ j.Errorf(call, "the argument's underlying type is a string, should use a simple conversion instead of fmt.Sprintf")
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
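+// LintErrorsNewSprintf flags errors.New wrapped around fmt.Sprintf,
+// e.g.
+//
+//	errors.New(fmt.Sprintf("bad value %d", v)) // should be: fmt.Errorf("bad value %d", v)
+//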
+func (c *Checker) LintErrorsNewSprintf(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ if !IsCallToAST(j, node, "errors.New") {
+ return true
+ }
+ call := node.(*ast.CallExpr)
+ if !IsCallToAST(j, call.Args[Arg("errors.New.text")], "fmt.Sprintf") {
+ return true
+ }
+ j.Errorf(node, "should use fmt.Errorf(...) instead of errors.New(fmt.Sprintf(...))")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
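+// LintRangeStringRunes delegates to sharedcheck.CheckRangeStringRunes,
+// which flags ranging over []rune(s) where ranging over the string s
+// itself yields the same runes.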
+func (c *Checker) LintRangeStringRunes(j *lint.Job) {
+ sharedcheck.CheckRangeStringRunes(j)
+}
+
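+// LintNilCheckAroundRange flags nil checks whose body is only a range
+// statement, e.g.
+//
+//	if s != nil {
+//		for _, v := range s {}
+//	}
+//
+// Ranging over a nil slice or map performs zero iterations, so the
+// check is unnecessary.
+//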
+func (c *Checker) LintNilCheckAroundRange(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ ifstmt, ok := node.(*ast.IfStmt)
+ if !ok {
+ return true
+ }
+
+ cond, ok := ifstmt.Cond.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+
+ if cond.Op != token.NEQ || !IsNil(j, cond.Y) || len(ifstmt.Body.List) != 1 {
+ return true
+ }
+
+ loop, ok := ifstmt.Body.List[0].(*ast.RangeStmt)
+ if !ok {
+ return true
+ }
+ ifXIdent, ok := cond.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ rangeXIdent, ok := loop.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if ifXIdent.Obj != rangeXIdent.Obj {
+ return true
+ }
+ switch TypeOf(j, rangeXIdent).(type) {
+ case *types.Slice, *types.Map:
+ j.Errorf(node, "unnecessary nil check around range")
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func isPermissibleSort(j *lint.Job, node ast.Node) bool {
+ call := node.(*ast.CallExpr)
+ typeconv, ok := call.Args[0].(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+
+ sel, ok := typeconv.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ name := SelectorName(j, sel)
+ switch name {
+ case "sort.IntSlice", "sort.Float64Slice", "sort.StringSlice":
+ default:
+ return true
+ }
+
+ return false
+}
+
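+// LintSortHelpers suggests the sort.Ints, sort.Float64s and
+// sort.Strings helpers, e.g.
+//
+//	sort.Sort(sort.StringSlice(s)) // should be: sort.Strings(s)
+//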
+func (c *Checker) LintSortHelpers(j *lint.Job) {
+ fnFuncs := func(node ast.Node) bool {
+ var body *ast.BlockStmt
+ switch node := node.(type) {
+ case *ast.FuncLit:
+ body = node.Body
+ case *ast.FuncDecl:
+ body = node.Body
+ default:
+ return true
+ }
+ if body == nil {
+ return true
+ }
+
+ type Error struct {
+ node lint.Positioner
+ msg string
+ }
+ var errors []Error
+ permissible := false
+ fnSorts := func(node ast.Node) bool {
+ if permissible {
+ return false
+ }
+ if !IsCallToAST(j, node, "sort.Sort") {
+ return true
+ }
+ if isPermissibleSort(j, node) {
+ permissible = true
+ return false
+ }
+ call := node.(*ast.CallExpr)
+ typeconv := call.Args[Arg("sort.Sort.data")].(*ast.CallExpr)
+ sel := typeconv.Fun.(*ast.SelectorExpr)
+ name := SelectorName(j, sel)
+
+ switch name {
+ case "sort.IntSlice":
+ errors = append(errors, Error{node, "should use sort.Ints(...) instead of sort.Sort(sort.IntSlice(...))"})
+ case "sort.Float64Slice":
+ errors = append(errors, Error{node, "should use sort.Float64s(...) instead of sort.Sort(sort.Float64Slice(...))"})
+ case "sort.StringSlice":
+ errors = append(errors, Error{node, "should use sort.Strings(...) instead of sort.Sort(sort.StringSlice(...))"})
+ }
+ return true
+ }
+ ast.Inspect(body, fnSorts)
+
+ if permissible {
+ return false
+ }
+ for _, err := range errors {
+ j.Errorf(err.node, "%s", err.msg)
+ }
+ return false
+ }
+
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fnFuncs)
+ }
+}
diff --git a/vendor/honnef.co/go/tools/ssa/LICENSE b/vendor/honnef.co/go/tools/ssa/LICENSE
new file mode 100644
index 000000000..aee48041e
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright (c) 2016 Dominik Honnef. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/honnef.co/go/tools/ssa/blockopt.go b/vendor/honnef.co/go/tools/ssa/blockopt.go
new file mode 100644
index 000000000..22c9a4c0d
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/blockopt.go
@@ -0,0 +1,195 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Simple block optimizations to simplify the control flow graph.
+
+// TODO(adonovan): opt: instead of creating several "unreachable" blocks
+// per function in the Builder, reuse a single one (e.g. at Blocks[1])
+// to reduce garbage.
+
+import (
+ "fmt"
+ "os"
+)
+
+// If true, perform sanity checking and show progress at each
+// successive iteration of optimizeBlocks. Very verbose.
+const debugBlockOpt = false
+
+// markReachable sets Index=-1 for all blocks reachable from b.
+func markReachable(b *BasicBlock) {
+ b.Index = -1
+ for _, succ := range b.Succs {
+ if succ.Index == 0 {
+ markReachable(succ)
+ }
+ }
+}
+
+func DeleteUnreachableBlocks(f *Function) {
+ deleteUnreachableBlocks(f)
+}
+
+// deleteUnreachableBlocks marks all reachable blocks of f and
+// eliminates (nils) all others, including possibly cyclic subgraphs.
+//
+func deleteUnreachableBlocks(f *Function) {
+ const white, black = 0, -1
+ // We borrow b.Index temporarily as the mark bit.
+ for _, b := range f.Blocks {
+ b.Index = white
+ }
+ markReachable(f.Blocks[0])
+ if f.Recover != nil {
+ markReachable(f.Recover)
+ }
+ for i, b := range f.Blocks {
+ if b.Index == white {
+ for _, c := range b.Succs {
+ if c.Index == black {
+ c.removePred(b) // delete white->black edge
+ }
+ }
+ if debugBlockOpt {
+ fmt.Fprintln(os.Stderr, "unreachable", b)
+ }
+ f.Blocks[i] = nil // delete b
+ }
+ }
+ f.removeNilBlocks()
+}
+
+// jumpThreading attempts to apply simple jump-threading to block b,
+// in which a->b->c become a->c if b is just a Jump.
+// The result is true if the optimization was applied.
+//
+func jumpThreading(f *Function, b *BasicBlock) bool {
+ if b.Index == 0 {
+ return false // don't apply to entry block
+ }
+ if b.Instrs == nil {
+ return false
+ }
+ if _, ok := b.Instrs[0].(*Jump); !ok {
+ return false // not just a jump
+ }
+ c := b.Succs[0]
+ if c == b {
+ return false // don't apply to degenerate jump-to-self.
+ }
+ if c.hasPhi() {
+ return false // not sound without more effort
+ }
+ for j, a := range b.Preds {
+ a.replaceSucc(b, c)
+
+ // If a now has two edges to c, replace its degenerate If by Jump.
+ if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
+ jump := new(Jump)
+ jump.setBlock(a)
+ a.Instrs[len(a.Instrs)-1] = jump
+ a.Succs = a.Succs[:1]
+ c.removePred(b)
+ } else {
+ if j == 0 {
+ c.replacePred(b, a)
+ } else {
+ c.Preds = append(c.Preds, a)
+ }
+ }
+
+ if debugBlockOpt {
+ fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
+ }
+ }
+ f.Blocks[b.Index] = nil // delete b
+ return true
+}
+
+// fuseBlocks attempts to apply the block fusion optimization to block
+// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
+// The result is true if the optimization was applied.
+//
+func fuseBlocks(f *Function, a *BasicBlock) bool {
+ if len(a.Succs) != 1 {
+ return false
+ }
+ b := a.Succs[0]
+ if len(b.Preds) != 1 {
+ return false
+ }
+
+	// Degenerate &&/|| ops may result in a straight-line CFG
+	// containing φ-nodes. (Ideally we'd replace such φ-nodes with
+	// their sole operand, but that requires Referrers, built later.)
+ if b.hasPhi() {
+ return false // not sound without further effort
+ }
+
+ // Eliminate jump at end of A, then copy all of B across.
+ a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
+ for _, instr := range b.Instrs {
+ instr.setBlock(a)
+ }
+
+ // A inherits B's successors
+ a.Succs = append(a.succs2[:0], b.Succs...)
+
+ // Fix up Preds links of all successors of B.
+ for _, c := range b.Succs {
+ c.replacePred(b, a)
+ }
+
+ if debugBlockOpt {
+ fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
+ }
+
+ f.Blocks[b.Index] = nil // delete b
+ return true
+}
+
+func OptimizeBlocks(f *Function) {
+ optimizeBlocks(f)
+}
+
+// optimizeBlocks() performs some simple block optimizations on a
+// completed function: dead block elimination, block fusion, jump
+// threading.
+//
+func optimizeBlocks(f *Function) {
+ deleteUnreachableBlocks(f)
+
+ // Loop until no further progress.
+ changed := true
+ for changed {
+ changed = false
+
+ if debugBlockOpt {
+ f.WriteTo(os.Stderr)
+ mustSanityCheck(f, nil)
+ }
+
+ for _, b := range f.Blocks {
+ // f.Blocks will temporarily contain nils to indicate
+ // deleted blocks; we remove them at the end.
+ if b == nil {
+ continue
+ }
+
+ // Fuse blocks. b->c becomes bc.
+ if fuseBlocks(f, b) {
+ changed = true
+ }
+
+ // a->b->c becomes a->c if b contains only a Jump.
+ if jumpThreading(f, b) {
+ changed = true
+ continue // (b was disconnected)
+ }
+ }
+ }
+ f.removeNilBlocks()
+}
diff --git a/vendor/honnef.co/go/tools/ssa/builder.go b/vendor/honnef.co/go/tools/ssa/builder.go
new file mode 100644
index 000000000..032819a2a
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/builder.go
@@ -0,0 +1,2379 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the BUILD phase of SSA construction.
+//
+// SSA construction has two phases, CREATE and BUILD. In the CREATE phase
+// (create.go), all packages are constructed and type-checked and
+// definitions of all package members are created, method-sets are
+// computed, and wrapper methods are synthesized.
+// ssa.Packages are created in arbitrary order.
+//
+// In the BUILD phase (builder.go), the builder traverses the AST of
+// each Go source function and generates SSA instructions for the
+// function body. Initializer expressions for package-level variables
+// are emitted to the package's init() function in the order specified
+// by go/types.Info.InitOrder, then code for each function in the
+// package is generated in lexical order.
+// The BUILD phases for distinct packages are independent and are
+// executed in parallel.
+//
+// TODO(adonovan): indeed, building functions is now embarrassingly parallel.
+// Audit for concurrency then benchmark using more goroutines.
+//
+// The builder's and Program's indices (maps) are populated and
+// mutated during the CREATE phase, but during the BUILD phase they
+// remain constant. The sole exception is Prog.methodSets and its
+// related maps, which are protected by a dedicated mutex.
+
+import (
+ "fmt"
+ "go/ast"
+ exact "go/constant"
+ "go/token"
+ "go/types"
+ "os"
+ "sync"
+)
+
+type opaqueType struct {
+ types.Type
+ name string
+}
+
+func (t *opaqueType) String() string { return t.name }
+
+var (
+ varOk = newVar("ok", tBool)
+ varIndex = newVar("index", tInt)
+
+ // Type constants.
+ tBool = types.Typ[types.Bool]
+ tByte = types.Typ[types.Byte]
+ tInt = types.Typ[types.Int]
+ tInvalid = types.Typ[types.Invalid]
+ tString = types.Typ[types.String]
+ tUntypedNil = types.Typ[types.UntypedNil]
+ tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators
+ tEface = types.NewInterface(nil, nil).Complete()
+
+ // SSA Value constants.
+ vZero = intConst(0)
+ vOne = intConst(1)
+ vTrue = NewConst(exact.MakeBool(true), tBool)
+)
+
+// builder holds state associated with the package currently being built.
+// Its methods contain all the logic for AST-to-SSA conversion.
+type builder struct{}
+
+// cond emits to fn code to evaluate boolean condition e and jump
+// to t or f depending on its value, performing various simplifications.
+//
+// Postcondition: fn.currentBlock is nil.
+//
+func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) {
+ switch e := e.(type) {
+ case *ast.ParenExpr:
+ b.cond(fn, e.X, t, f)
+ return
+
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND:
+ ltrue := fn.newBasicBlock("cond.true")
+ b.cond(fn, e.X, ltrue, f)
+ fn.currentBlock = ltrue
+ b.cond(fn, e.Y, t, f)
+ return
+
+ case token.LOR:
+ lfalse := fn.newBasicBlock("cond.false")
+ b.cond(fn, e.X, t, lfalse)
+ fn.currentBlock = lfalse
+ b.cond(fn, e.Y, t, f)
+ return
+ }
+
+ case *ast.UnaryExpr:
+ if e.Op == token.NOT {
+ b.cond(fn, e.X, f, t)
+ return
+ }
+ }
+
+ // A traditional compiler would simplify "if false" (etc) here
+ // but we do not, for better fidelity to the source code.
+ //
+ // The value of a constant condition may be platform-specific,
+ // and may cause blocks that are reachable in some configuration
+ // to be hidden from subsequent analyses such as bug-finding tools.
+ emitIf(fn, b.expr(fn, e), t, f)
+}
+
+// logicalBinop emits code to fn to evaluate e, a &&- or
+// ||-expression whose reified boolean value is wanted.
+// The value is returned.
+//
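+// For example, for e.X && e.Y the short-circuit value is false: every
+// edge that jumps from the evaluation of e.X straight to the done
+// block carries false, the edge from e.Y carries the value of e.Y, and
+// a φ-node merges them.
+//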
+func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value {
+ rhs := fn.newBasicBlock("binop.rhs")
+ done := fn.newBasicBlock("binop.done")
+
+ // T(e) = T(e.X) = T(e.Y) after untyped constants have been
+ // eliminated.
+ // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool.
+ t := fn.Pkg.typeOf(e)
+
+ var short Value // value of the short-circuit path
+ switch e.Op {
+ case token.LAND:
+ b.cond(fn, e.X, rhs, done)
+ short = NewConst(exact.MakeBool(false), t)
+
+ case token.LOR:
+ b.cond(fn, e.X, done, rhs)
+ short = NewConst(exact.MakeBool(true), t)
+ }
+
+ // Is rhs unreachable?
+ if rhs.Preds == nil {
+ // Simplify false&&y to false, true||y to true.
+ fn.currentBlock = done
+ return short
+ }
+
+ // Is done unreachable?
+ if done.Preds == nil {
+ // Simplify true&&y (or false||y) to y.
+ fn.currentBlock = rhs
+ return b.expr(fn, e.Y)
+ }
+
+ // All edges from e.X to done carry the short-circuit value.
+ var edges []Value
+ for range done.Preds {
+ edges = append(edges, short)
+ }
+
+ // The edge from e.Y to done carries the value of e.Y.
+ fn.currentBlock = rhs
+ edges = append(edges, b.expr(fn, e.Y))
+ emitJump(fn, done)
+ fn.currentBlock = done
+
+ phi := &Phi{Edges: edges, Comment: e.Op.String()}
+ phi.pos = e.OpPos
+ phi.typ = t
+ return done.emit(phi)
+}
+
+// exprN lowers a multi-result expression e to SSA form, emitting code
+// to fn and returning a single Value whose type is a *types.Tuple.
+// The caller must access the components via Extract.
+//
+// Multi-result expressions include CallExprs in a multi-value
+// assignment or return statement, and "value,ok" uses of
+// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op
+// is token.ARROW).
+//
+func (b *builder) exprN(fn *Function, e ast.Expr) Value {
+ typ := fn.Pkg.typeOf(e).(*types.Tuple)
+ switch e := e.(type) {
+ case *ast.ParenExpr:
+ return b.exprN(fn, e.X)
+
+ case *ast.CallExpr:
+ // Currently, no built-in function nor type conversion
+ // has multiple results, so we can avoid some of the
+ // cases for single-valued CallExpr.
+ var c Call
+ b.setCall(fn, e, &c.Call)
+ c.typ = typ
+ return fn.emit(&c)
+
+ case *ast.IndexExpr:
+ mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
+ lookup := &Lookup{
+ X: b.expr(fn, e.X),
+ Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
+ CommaOk: true,
+ }
+ lookup.setType(typ)
+ lookup.setPos(e.Lbrack)
+ return fn.emit(lookup)
+
+ case *ast.TypeAssertExpr:
+ return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen)
+
+ case *ast.UnaryExpr: // must be receive <-
+ unop := &UnOp{
+ Op: token.ARROW,
+ X: b.expr(fn, e.X),
+ CommaOk: true,
+ }
+ unop.setType(typ)
+ unop.setPos(e.OpPos)
+ return fn.emit(unop)
+ }
+ panic(fmt.Sprintf("exprN(%T) in %s", e, fn))
+}
+
+// builtin emits to fn SSA instructions to implement a call to the
+// built-in function obj with the specified arguments
+// and return type. It returns the value defined by the result.
+//
+// The result is nil if no special handling was required; in this case
+// the caller should treat this like an ordinary library function
+// call.
+//
+func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value {
+ switch obj.Name() {
+ case "make":
+ switch typ.Underlying().(type) {
+ case *types.Slice:
+ n := b.expr(fn, args[1])
+ m := n
+ if len(args) == 3 {
+ m = b.expr(fn, args[2])
+ }
+ if m, ok := m.(*Const); ok {
+ // treat make([]T, n, m) as new([m]T)[:n]
+ cap := m.Int64()
+ at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap)
+ alloc := emitNew(fn, at, pos)
+ alloc.Comment = "makeslice"
+ v := &Slice{
+ X: alloc,
+ High: n,
+ }
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+ }
+ v := &MakeSlice{
+ Len: n,
+ Cap: m,
+ }
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+
+ case *types.Map:
+ var res Value
+ if len(args) == 2 {
+ res = b.expr(fn, args[1])
+ }
+ v := &MakeMap{Reserve: res}
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+
+ case *types.Chan:
+ var sz Value = vZero
+ if len(args) == 2 {
+ sz = b.expr(fn, args[1])
+ }
+ v := &MakeChan{Size: sz}
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+ }
+
+ case "new":
+ alloc := emitNew(fn, deref(typ), pos)
+ alloc.Comment = "new"
+ return alloc
+
+ case "len", "cap":
+ // Special case: len or cap of an array or *array is
+ // based on the type, not the value which may be nil.
+ // We must still evaluate the value, though. (If it
+ // was side-effect free, the whole call would have
+ // been constant-folded.)
+ t := deref(fn.Pkg.typeOf(args[0])).Underlying()
+ if at, ok := t.(*types.Array); ok {
+ b.expr(fn, args[0]) // for effects only
+ return intConst(at.Len())
+ }
+ // Otherwise treat as normal.
+
+ case "panic":
+ fn.emit(&Panic{
+ X: emitConv(fn, b.expr(fn, args[0]), tEface),
+ pos: pos,
+ })
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+ return vTrue // any non-nil Value will do
+ }
+ return nil // treat all others as a regular function call
+}
+
+// addr lowers a single-result addressable expression e to SSA form,
+// emitting code to fn and returning the location (an lvalue) defined
+// by the expression.
+//
+// If escaping is true, addr marks the base variable of the
+// addressable expression e as being a potentially escaping pointer
+// value. For example, in this code:
+//
+// a := A{
+// b: [1]B{B{c: 1}}
+// }
+// return &a.b[0].c
+//
+// the application of & causes a.b[0].c to have its address taken,
+// which means that ultimately the local variable a must be
+// heap-allocated. This is a simple but very conservative escape
+// analysis.
+//
+// Operations forming potentially escaping pointers include:
+// - &x, including when implicit in method call or composite literals.
+// - a[:] iff a is an array (not *array)
+// - references to variables in lexically enclosing functions.
+//
+func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
+ switch e := e.(type) {
+ case *ast.Ident:
+ if isBlankIdent(e) {
+ return blank{}
+ }
+ obj := fn.Pkg.objectOf(e)
+ v := fn.Prog.packageLevelValue(obj) // var (address)
+ if v == nil {
+ v = fn.lookup(obj, escaping)
+ }
+ return &address{addr: v, pos: e.Pos(), expr: e}
+
+ case *ast.CompositeLit:
+ t := deref(fn.Pkg.typeOf(e))
+ var v *Alloc
+ if escaping {
+ v = emitNew(fn, t, e.Lbrace)
+ } else {
+ v = fn.addLocal(t, e.Lbrace)
+ }
+ v.Comment = "complit"
+ var sb storebuf
+ b.compLit(fn, v, e, true, &sb)
+ sb.emit(fn)
+ return &address{addr: v, pos: e.Lbrace, expr: e}
+
+ case *ast.ParenExpr:
+ return b.addr(fn, e.X, escaping)
+
+ case *ast.SelectorExpr:
+ sel, ok := fn.Pkg.info.Selections[e]
+ if !ok {
+ // qualified identifier
+ return b.addr(fn, e.Sel, escaping)
+ }
+ if sel.Kind() != types.FieldVal {
+ panic(sel)
+ }
+ wantAddr := true
+ v := b.receiver(fn, e.X, wantAddr, escaping, sel)
+ last := len(sel.Index()) - 1
+ return &address{
+ addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel),
+ pos: e.Sel.Pos(),
+ expr: e.Sel,
+ }
+
+ case *ast.IndexExpr:
+ var x Value
+ var et types.Type
+ switch t := fn.Pkg.typeOf(e.X).Underlying().(type) {
+ case *types.Array:
+ x = b.addr(fn, e.X, escaping).address(fn)
+ et = types.NewPointer(t.Elem())
+ case *types.Pointer: // *array
+ x = b.expr(fn, e.X)
+ et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())
+ case *types.Slice:
+ x = b.expr(fn, e.X)
+ et = types.NewPointer(t.Elem())
+ case *types.Map:
+ return &element{
+ m: b.expr(fn, e.X),
+ k: emitConv(fn, b.expr(fn, e.Index), t.Key()),
+ t: t.Elem(),
+ pos: e.Lbrack,
+ }
+ default:
+ panic("unexpected container type in IndexExpr: " + t.String())
+ }
+ v := &IndexAddr{
+ X: x,
+ Index: emitConv(fn, b.expr(fn, e.Index), tInt),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(et)
+ return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e}
+
+ case *ast.StarExpr:
+ return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e}
+ }
+
+ panic(fmt.Sprintf("unexpected address expression: %T", e))
+}
+
+type store struct {
+ lhs lvalue
+ rhs Value
+}
+
+type storebuf struct{ stores []store }
+
+func (sb *storebuf) store(lhs lvalue, rhs Value) {
+ sb.stores = append(sb.stores, store{lhs, rhs})
+}
+
+func (sb *storebuf) emit(fn *Function) {
+ for _, s := range sb.stores {
+ s.lhs.store(fn, s.rhs)
+ }
+}
+
+// assign emits to fn code to initialize the lvalue loc with the value
+// of expression e. If isZero is true, assign assumes that loc holds
+// the zero value for its type.
+//
+// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate
+// better code in some cases, e.g., for composite literals in an
+// addressable location.
+//
+// If sb is not nil, assign generates code to evaluate expression e, but
+// not to update loc. Instead, the necessary stores are appended to the
+// storebuf sb so that they can be executed later. This allows correct
+// in-place update of existing variables when the RHS is a composite
+// literal that may reference parts of the LHS.
+//
+func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) {
+ // Can we initialize it in place?
+ if e, ok := unparen(e).(*ast.CompositeLit); ok {
+ // A CompositeLit never evaluates to a pointer,
+ // so if the type of the location is a pointer,
+ // an &-operation is implied.
+ if _, ok := loc.(blank); !ok { // avoid calling blank.typ()
+ if isPointer(loc.typ()) {
+ ptr := b.addr(fn, e, true).address(fn)
+ // copy address
+ if sb != nil {
+ sb.store(loc, ptr)
+ } else {
+ loc.store(fn, ptr)
+ }
+ return
+ }
+ }
+
+ if _, ok := loc.(*address); ok {
+ if isInterface(loc.typ()) {
+ // e.g. var x interface{} = T{...}
+ // Can't in-place initialize an interface value.
+ // Fall back to copying.
+ } else {
+ // x = T{...} or x := T{...}
+ addr := loc.address(fn)
+ if sb != nil {
+ b.compLit(fn, addr, e, isZero, sb)
+ } else {
+ var sb storebuf
+ b.compLit(fn, addr, e, isZero, &sb)
+ sb.emit(fn)
+ }
+
+ // Subtle: emit debug ref for aggregate types only;
+ // slice and map are handled by store ops in compLit.
+ switch loc.typ().Underlying().(type) {
+ case *types.Struct, *types.Array:
+ emitDebugRef(fn, e, addr, true)
+ }
+
+ return
+ }
+ }
+ }
+
+ // simple case: just copy
+ rhs := b.expr(fn, e)
+ if sb != nil {
+ sb.store(loc, rhs)
+ } else {
+ loc.store(fn, rhs)
+ }
+}
+
+// expr lowers a single-result expression e to SSA form, emitting code
+// to fn and returning the Value defined by the expression.
+//
+func (b *builder) expr(fn *Function, e ast.Expr) Value {
+ e = unparen(e)
+
+ tv := fn.Pkg.info.Types[e]
+
+ // Is expression a constant?
+ if tv.Value != nil {
+ return NewConst(tv.Value, tv.Type)
+ }
+
+ var v Value
+ if tv.Addressable() {
+ // Prefer pointer arithmetic ({Index,Field}Addr) followed
+ // by Load over subelement extraction (e.g. Index, Field),
+ // to avoid large copies.
+ v = b.addr(fn, e, false).load(fn)
+ } else {
+ v = b.expr0(fn, e, tv)
+ }
+ if fn.debugInfo() {
+ emitDebugRef(fn, e, v, false)
+ }
+ return v
+}
+
+func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
+ switch e := e.(type) {
+ case *ast.BasicLit:
+ panic("non-constant BasicLit") // unreachable
+
+ case *ast.FuncLit:
+ fn2 := &Function{
+ name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
+ Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature),
+ pos: e.Type.Func,
+ parent: fn,
+ Pkg: fn.Pkg,
+ Prog: fn.Prog,
+ syntax: e,
+ }
+ fn.AnonFuncs = append(fn.AnonFuncs, fn2)
+ b.buildFunction(fn2)
+ if fn2.FreeVars == nil {
+ return fn2
+ }
+ v := &MakeClosure{Fn: fn2}
+ v.setType(tv.Type)
+ for _, fv := range fn2.FreeVars {
+ v.Bindings = append(v.Bindings, fv.outer)
+ fv.outer = nil
+ }
+ return fn.emit(v)
+
+ case *ast.TypeAssertExpr: // single-result form only
+ return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen)
+
+ case *ast.CallExpr:
+ if fn.Pkg.info.Types[e.Fun].IsType() {
+ // Explicit type conversion, e.g. string(x) or big.Int(x)
+ x := b.expr(fn, e.Args[0])
+ y := emitConv(fn, x, tv.Type)
+ if y != x {
+ switch y := y.(type) {
+ case *Convert:
+ y.pos = e.Lparen
+ case *ChangeType:
+ y.pos = e.Lparen
+ case *MakeInterface:
+ y.pos = e.Lparen
+ }
+ }
+ return y
+ }
+ // Call to "intrinsic" built-ins, e.g. new, make, panic.
+ if id, ok := unparen(e.Fun).(*ast.Ident); ok {
+ if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok {
+ if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil {
+ return v
+ }
+ }
+ }
+ // Regular function call.
+ var v Call
+ b.setCall(fn, e, &v.Call)
+ v.setType(tv.Type)
+ return fn.emit(&v)
+
+ case *ast.UnaryExpr:
+ switch e.Op {
+ case token.AND: // &X --- potentially escaping.
+ addr := b.addr(fn, e.X, true)
+ if _, ok := unparen(e.X).(*ast.StarExpr); ok {
+ // &*p must panic if p is nil (http://golang.org/s/go12nil).
+ // For simplicity, we'll just (suboptimally) rely
+ // on the side effects of a load.
+ // TODO(adonovan): emit dedicated nilcheck.
+ addr.load(fn)
+ }
+ return addr.address(fn)
+ case token.ADD:
+ return b.expr(fn, e.X)
+ case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^
+ v := &UnOp{
+ Op: e.Op,
+ X: b.expr(fn, e.X),
+ }
+ v.setPos(e.OpPos)
+ v.setType(tv.Type)
+ return fn.emit(v)
+ default:
+ panic(e.Op)
+ }
+
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND, token.LOR:
+ return b.logicalBinop(fn, e)
+ case token.SHL, token.SHR:
+ fallthrough
+ case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
+ return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos)
+
+ case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ:
+ cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos)
+ // The type of x==y may be UntypedBool.
+ return emitConv(fn, cmp, DefaultType(tv.Type))
+ default:
+ panic("illegal op in BinaryExpr: " + e.Op.String())
+ }
+
+ case *ast.SliceExpr:
+ var low, high, max Value
+ var x Value
+ switch fn.Pkg.typeOf(e.X).Underlying().(type) {
+ case *types.Array:
+ // Potentially escaping.
+ x = b.addr(fn, e.X, true).address(fn)
+ case *types.Basic, *types.Slice, *types.Pointer: // *array
+ x = b.expr(fn, e.X)
+ default:
+ panic("unreachable")
+ }
+ if e.High != nil {
+ high = b.expr(fn, e.High)
+ }
+ if e.Low != nil {
+ low = b.expr(fn, e.Low)
+ }
+ if e.Slice3 {
+ max = b.expr(fn, e.Max)
+ }
+ v := &Slice{
+ X: x,
+ Low: low,
+ High: high,
+ Max: max,
+ }
+ v.setPos(e.Lbrack)
+ v.setType(tv.Type)
+ return fn.emit(v)
+
+ case *ast.Ident:
+ obj := fn.Pkg.info.Uses[e]
+ // Universal built-in or nil?
+ switch obj := obj.(type) {
+ case *types.Builtin:
+ return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)}
+ case *types.Nil:
+ return nilConst(tv.Type)
+ }
+ // Package-level func or var?
+ if v := fn.Prog.packageLevelValue(obj); v != nil {
+ if _, ok := obj.(*types.Var); ok {
+ return emitLoad(fn, v) // var (address)
+ }
+ return v // (func)
+ }
+ // Local var.
+ return emitLoad(fn, fn.lookup(obj, false)) // var (address)
+
+ case *ast.SelectorExpr:
+ sel, ok := fn.Pkg.info.Selections[e]
+ if !ok {
+ // qualified identifier
+ return b.expr(fn, e.Sel)
+ }
+ switch sel.Kind() {
+ case types.MethodExpr:
+ // (*T).f or T.f, the method f from the method-set of type T.
+ // The result is a "thunk".
+ return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type)
+
+ case types.MethodVal:
+ // e.f where e is an expression and f is a method.
+ // The result is a "bound".
+ obj := sel.Obj().(*types.Func)
+ rt := recvType(obj)
+ wantAddr := isPointer(rt)
+ escaping := true
+ v := b.receiver(fn, e.X, wantAddr, escaping, sel)
+ if isInterface(rt) {
+ // If v has interface type I,
+ // we must emit a check that v is non-nil.
+ // We use: typeassert v.(I).
+ emitTypeAssert(fn, v, rt, token.NoPos)
+ }
+ c := &MakeClosure{
+ Fn: makeBound(fn.Prog, obj),
+ Bindings: []Value{v},
+ }
+ c.setPos(e.Sel.Pos())
+ c.setType(tv.Type)
+ return fn.emit(c)
+
+ case types.FieldVal:
+ indices := sel.Index()
+ last := len(indices) - 1
+ v := b.expr(fn, e.X)
+ v = emitImplicitSelections(fn, v, indices[:last])
+ v = emitFieldSelection(fn, v, indices[last], false, e.Sel)
+ return v
+ }
+
+ panic("unexpected expression-relative selector")
+
+ case *ast.IndexExpr:
+ switch t := fn.Pkg.typeOf(e.X).Underlying().(type) {
+ case *types.Array:
+ // Non-addressable array (in a register).
+ v := &Index{
+ X: b.expr(fn, e.X),
+ Index: emitConv(fn, b.expr(fn, e.Index), tInt),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(t.Elem())
+ return fn.emit(v)
+
+ case *types.Map:
+ // Maps are not addressable.
+ mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
+ v := &Lookup{
+ X: b.expr(fn, e.X),
+ Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(mapt.Elem())
+ return fn.emit(v)
+
+ case *types.Basic: // => string
+ // Strings are not addressable.
+ v := &Lookup{
+ X: b.expr(fn, e.X),
+ Index: b.expr(fn, e.Index),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(tByte)
+ return fn.emit(v)
+
+ case *types.Slice, *types.Pointer: // *array
+ // Addressable slice/array; use IndexAddr and Load.
+ return b.addr(fn, e, false).load(fn)
+
+ default:
+ panic("unexpected container type in IndexExpr: " + t.String())
+ }
+
+ case *ast.CompositeLit, *ast.StarExpr:
+ // Addressable types (lvalues)
+ return b.addr(fn, e, false).load(fn)
+ }
+
+ panic(fmt.Sprintf("unexpected expr: %T", e))
+}
+
+// stmtList emits to fn code for all statements in list.
+func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
+ for _, s := range list {
+ b.stmt(fn, s)
+ }
+}
+
+// receiver emits to fn code for expression e in the "receiver"
+// position of selection e.f (where f may be a field or a method) and
+// returns the effective receiver after applying the implicit field
+// selections of sel.
+//
+// wantAddr requests that the result be an address. If
+// !sel.Indirect(), this may require that e be built in addr() mode; it
+// must thus be addressable.
+//
+// escaping is defined as per builder.addr().
+//
+func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value {
+ var v Value
+ if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) {
+ v = b.addr(fn, e, escaping).address(fn)
+ } else {
+ v = b.expr(fn, e)
+ }
+
+ last := len(sel.Index()) - 1
+ v = emitImplicitSelections(fn, v, sel.Index()[:last])
+ if !wantAddr && isPointer(v.Type()) {
+ v = emitLoad(fn, v)
+ }
+ return v
+}
+
+// setCallFunc populates the function parts of a CallCommon structure
+// (Func, Method, Recv, Args[0]) based on the kind of invocation
+// occurring in e.
+//
+func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
+ c.pos = e.Lparen
+
+ // Is this a method call?
+ if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok {
+ sel, ok := fn.Pkg.info.Selections[selector]
+ if ok && sel.Kind() == types.MethodVal {
+ obj := sel.Obj().(*types.Func)
+ recv := recvType(obj)
+ wantAddr := isPointer(recv)
+ escaping := true
+ v := b.receiver(fn, selector.X, wantAddr, escaping, sel)
+ if isInterface(recv) {
+ // Invoke-mode call.
+ c.Value = v
+ c.Method = obj
+ } else {
+ // "Call"-mode call.
+ c.Value = fn.Prog.declaredFunc(obj)
+ c.Args = append(c.Args, v)
+ }
+ return
+ }
+
+ // sel.Kind()==MethodExpr indicates T.f() or (*T).f():
+ // a statically dispatched call to the method f in the
+ // method-set of T or *T. T may be an interface.
+ //
+ // e.Fun would evaluate to a concrete method, interface
+ // wrapper function, or promotion wrapper.
+ //
+ // For now, we evaluate it in the usual way.
+ //
+ // TODO(adonovan): opt: inline expr() here, to make the
+ // call static and to avoid generation of wrappers.
+ // It's somewhat tricky as it may consume the first
+ // actual parameter if the call is "invoke" mode.
+ //
+ // Examples:
+ // type T struct{}; func (T) f() {} // "call" mode
+ // type T interface { f() } // "invoke" mode
+ //
+ // type S struct{ T }
+ //
+ // var s S
+ // S.f(s)
+ // (*S).f(&s)
+ //
+ // Suggested approach:
+ // - consume the first actual parameter expression
+ // and build it with b.expr().
+ // - apply implicit field selections.
+ // - use MethodVal logic to populate fields of c.
+ }
+
+ // Evaluate the function operand in the usual way.
+ c.Value = b.expr(fn, e.Fun)
+}
+
+// emitCallArgs emits to f code for the actual parameters of call e to
+// a (possibly built-in) function of effective type sig.
+// The argument values are appended to args, which is then returned.
+//
+func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value {
+ // f(x, y, z...): pass slice z straight through.
+ if e.Ellipsis != 0 {
+ for i, arg := range e.Args {
+ v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type())
+ args = append(args, v)
+ }
+ return args
+ }
+
+ offset := len(args) // 1 if call has receiver, 0 otherwise
+
+ // Evaluate actual parameter expressions.
+ //
+ // If this is a chained call of the form f(g()) where g has
+ // multiple return values (MRV), they are flattened out into
+ // args; a suffix of them may end up in a varargs slice.
+ for _, arg := range e.Args {
+ v := b.expr(fn, arg)
+ if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain
+ for i, n := 0, ttuple.Len(); i < n; i++ {
+ args = append(args, emitExtract(fn, v, i))
+ }
+ } else {
+ args = append(args, v)
+ }
+ }
+
+ // Actual->formal assignability conversions for normal parameters.
+ np := sig.Params().Len() // number of normal parameters
+ if sig.Variadic() {
+ np--
+ }
+ for i := 0; i < np; i++ {
+ args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type())
+ }
+
+ // Actual->formal assignability conversions for variadic parameter,
+ // and construction of slice.
+ if sig.Variadic() {
+ varargs := args[offset+np:]
+ st := sig.Params().At(np).Type().(*types.Slice)
+ vt := st.Elem()
+ if len(varargs) == 0 {
+ args = append(args, nilConst(st))
+ } else {
+ // Replace a suffix of args with a slice containing it.
+ at := types.NewArray(vt, int64(len(varargs)))
+ a := emitNew(fn, at, token.NoPos)
+ a.setPos(e.Rparen)
+ a.Comment = "varargs"
+ for i, arg := range varargs {
+ iaddr := &IndexAddr{
+ X: a,
+ Index: intConst(int64(i)),
+ }
+ iaddr.setType(types.NewPointer(vt))
+ fn.emit(iaddr)
+ emitStore(fn, iaddr, arg, arg.Pos())
+ }
+ s := &Slice{X: a}
+ s.setType(st)
+ args[offset+np] = fn.emit(s)
+ args = args[:offset+np+1]
+ }
+ }
+ return args
+}
+
+// setCall emits to fn code to evaluate all the parameters of a function
+// call e, and populates *c with those values.
+//
+func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
+ // First deal with the f(...) part and optional receiver.
+ b.setCallFunc(fn, e, c)
+
+ // Then append the other actual parameters.
+ sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature)
+ if sig == nil {
+ panic(fmt.Sprintf("no signature for call of %s", e.Fun))
+ }
+ c.Args = b.emitCallArgs(fn, sig, e, c.Args)
+}
+
+// assignOp emits to fn code to perform loc <op>= incr, e.g. loc += incr
+// or loc *= incr.
+func (b *builder) assignOp(fn *Function, loc lvalue, incr Value, op token.Token, pos token.Pos) {
+ oldv := loc.load(fn)
+ loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, incr, oldv.Type()), loc.typ(), pos))
+}
+
+// localValueSpec emits to fn code to define all of the vars in the
+// function-local ValueSpec, spec.
+//
+func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) {
+ switch {
+ case len(spec.Values) == len(spec.Names):
+ // e.g. var x, y = 0, 1
+ // 1:1 assignment
+ for i, id := range spec.Names {
+ if !isBlankIdent(id) {
+ fn.addLocalForIdent(id)
+ }
+ lval := b.addr(fn, id, false) // non-escaping
+ b.assign(fn, lval, spec.Values[i], true, nil)
+ }
+
+ case len(spec.Values) == 0:
+ // e.g. var x, y int
+ // Locals are implicitly zero-initialized.
+ for _, id := range spec.Names {
+ if !isBlankIdent(id) {
+ lhs := fn.addLocalForIdent(id)
+ if fn.debugInfo() {
+ emitDebugRef(fn, id, lhs, true)
+ }
+ }
+ }
+
+ default:
+ // e.g. var x, y = pos()
+ tuple := b.exprN(fn, spec.Values[0])
+ for i, id := range spec.Names {
+ if !isBlankIdent(id) {
+ fn.addLocalForIdent(id)
+ lhs := b.addr(fn, id, false) // non-escaping
+ lhs.store(fn, emitExtract(fn, tuple, i))
+ }
+ }
+ }
+}
+
+// assignStmt emits code to fn for a parallel assignment of rhss to lhss.
+// isDef is true if this is a short variable declaration (:=).
+//
+// Note the similarity with localValueSpec.
+//
+func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) {
+ // Side effects of all LHSs and RHSs must occur in left-to-right order.
+ lvals := make([]lvalue, len(lhss))
+ isZero := make([]bool, len(lhss))
+ for i, lhs := range lhss {
+ var lval lvalue = blank{}
+ if !isBlankIdent(lhs) {
+ if isDef {
+ if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil {
+ fn.addNamedLocal(obj)
+ isZero[i] = true
+ }
+ }
+ lval = b.addr(fn, lhs, false) // non-escaping
+ }
+ lvals[i] = lval
+ }
+ if len(lhss) == len(rhss) {
+ // Simple assignment: x = f() (!isDef)
+ // Parallel assignment: x, y = f(), g() (!isDef)
+ // or short var decl: x, y := f(), g() (isDef)
+ //
+ // In all cases, the RHSs may refer to the LHSs,
+ // so we need a storebuf.
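+		// For example, in x, y = y, x both right-hand
+		// operands must be read before either variable
+		// is updated.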
+ var sb storebuf
+ for i := range rhss {
+ b.assign(fn, lvals[i], rhss[i], isZero[i], &sb)
+ }
+ sb.emit(fn)
+ } else {
+ // e.g. x, y = pos()
+ tuple := b.exprN(fn, rhss[0])
+ emitDebugRef(fn, rhss[0], tuple, false)
+ for i, lval := range lvals {
+ lval.store(fn, emitExtract(fn, tuple, i))
+ }
+ }
+}
+
+// arrayLen returns the length of the array whose composite literal elements are elts.
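+// For example (illustrative), the elements of [...]int{0, 3: 9}
+// yield an array length of 4.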
+func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 {
+ var max int64 = -1
+ var i int64 = -1
+ for _, e := range elts {
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ i = b.expr(fn, kv.Key).(*Const).Int64()
+ } else {
+ i++
+ }
+ if i > max {
+ max = i
+ }
+ }
+ return max + 1
+}
+
+// compLit emits to fn code to initialize a composite literal e at
+// address addr with type typ.
+//
+// Nested composite literals are recursively initialized in place
+// where possible. If isZero is true, compLit assumes that addr
+// holds the zero value for typ.
+//
+// Because the elements of a composite literal may refer to the
+// variables being updated, as in the second line below,
+// x := T{a: 1}
+// x = T{a: x.a}
+// all the reads must occur before all the writes. Thus all stores to
+// loc are emitted to the storebuf sb for later execution.
+//
+// A CompositeLit may have pointer type only in the recursive (nested)
+// case when the type name is implicit. e.g. in []*T{{}}, the inner
+// literal has type *T and behaves like &T{}.
+// In that case, addr must hold a T, not a *T.
+//
+func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) {
+ typ := deref(fn.Pkg.typeOf(e))
+ switch t := typ.Underlying().(type) {
+ case *types.Struct:
+ if !isZero && len(e.Elts) != t.NumFields() {
+ // memclear
+ sb.store(&address{addr, e.Lbrace, nil},
+ zeroValue(fn, deref(addr.Type())))
+ isZero = true
+ }
+ for i, e := range e.Elts {
+ fieldIndex := i
+ pos := e.Pos()
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ fname := kv.Key.(*ast.Ident).Name
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ sf := t.Field(i)
+ if sf.Name() == fname {
+ fieldIndex = i
+ pos = kv.Colon
+ e = kv.Value
+ break
+ }
+ }
+ }
+ sf := t.Field(fieldIndex)
+ faddr := &FieldAddr{
+ X: addr,
+ Field: fieldIndex,
+ }
+ faddr.setType(types.NewPointer(sf.Type()))
+ fn.emit(faddr)
+ b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb)
+ }
+
+ case *types.Array, *types.Slice:
+ var at *types.Array
+ var array Value
+ switch t := t.(type) {
+ case *types.Slice:
+ at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts))
+ alloc := emitNew(fn, at, e.Lbrace)
+ alloc.Comment = "slicelit"
+ array = alloc
+ case *types.Array:
+ at = t
+ array = addr
+
+ if !isZero && int64(len(e.Elts)) != at.Len() {
+ // memclear
+ sb.store(&address{array, e.Lbrace, nil},
+ zeroValue(fn, deref(array.Type())))
+ }
+ }
+
+ var idx *Const
+ for _, e := range e.Elts {
+ pos := e.Pos()
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ idx = b.expr(fn, kv.Key).(*Const)
+ pos = kv.Colon
+ e = kv.Value
+ } else {
+ var idxval int64
+ if idx != nil {
+ idxval = idx.Int64() + 1
+ }
+ idx = intConst(idxval)
+ }
+ iaddr := &IndexAddr{
+ X: array,
+ Index: idx,
+ }
+ iaddr.setType(types.NewPointer(at.Elem()))
+ fn.emit(iaddr)
+ if t != at { // slice
+ // backing array is unaliased => storebuf not needed.
+ b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil)
+ } else {
+ b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb)
+ }
+ }
+
+ if t != at { // slice
+ s := &Slice{X: array}
+ s.setPos(e.Lbrace)
+ s.setType(typ)
+ sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s))
+ }
+
+ case *types.Map:
+ m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))}
+ m.setPos(e.Lbrace)
+ m.setType(typ)
+ fn.emit(m)
+ for _, e := range e.Elts {
+ e := e.(*ast.KeyValueExpr)
+
+ // If a key expression in a map literal is itself a
+ // composite literal, the type may be omitted.
+ // For example:
+ // map[*struct{}]bool{{}: true}
+ // An &-operation may be implied:
+ // map[*struct{}]bool{&struct{}{}: true}
+ var key Value
+ if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) {
+ // A CompositeLit never evaluates to a pointer,
+ // so if the type of the location is a pointer,
+ // an &-operation is implied.
+ key = b.addr(fn, e.Key, true).address(fn)
+ } else {
+ key = b.expr(fn, e.Key)
+ }
+
+ loc := element{
+ m: m,
+ k: emitConv(fn, key, t.Key()),
+ t: t.Elem(),
+ pos: e.Colon,
+ }
+
+ // We call assign() only because it takes care
+ // of any &-operation required in the recursive
+ // case, e.g.,
+ // map[int]*struct{}{0: {}} implies &struct{}{}.
+ // In-place update is of course impossible,
+ // and no storebuf is needed.
+ b.assign(fn, &loc, e.Value, true, nil)
+ }
+ sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m)
+
+ default:
+ panic("unexpected CompositeLit type: " + t.String())
+ }
+}
+
+// switchStmt emits to fn code for the switch statement s, optionally
+// labelled by label.
+//
+func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) {
+ // We treat SwitchStmt like a sequential if-else chain.
+ // Multiway dispatch can be recovered later by ssautil.Switches()
+	// for those cases that are free of side effects.
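+	//
+	// An illustrative sketch only (the real lowering pulls the
+	// default case, if any, down to the end):
+	//
+	//	switch tag { case a: A; default: D; case b, c: B }
+	//
+	// becomes roughly:
+	//
+	//	t := eval tag
+	//	if t == a goto .A else goto .next1
+	// .next1:
+	//	if t == b goto .B else goto .next2
+	// .next2:
+	//	if t == c goto .B else goto .default
+	// .A: ...A...; goto .done
+	// .B: ...B...; goto .done
+	// .default: ...D...; goto .done
+	// .done: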
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+ var tag Value = vTrue
+ if s.Tag != nil {
+ tag = b.expr(fn, s.Tag)
+ }
+ done := fn.newBasicBlock("switch.done")
+ if label != nil {
+ label._break = done
+ }
+ // We pull the default case (if present) down to the end.
+ // But each fallthrough label must point to the next
+ // body block in source order, so we preallocate a
+ // body block (fallthru) for the next case.
+ // Unfortunately this makes for a confusing block order.
+ var dfltBody *[]ast.Stmt
+ var dfltFallthrough *BasicBlock
+ var fallthru, dfltBlock *BasicBlock
+ ncases := len(s.Body.List)
+ for i, clause := range s.Body.List {
+ body := fallthru
+ if body == nil {
+ body = fn.newBasicBlock("switch.body") // first case only
+ }
+
+ // Preallocate body block for the next case.
+ fallthru = done
+ if i+1 < ncases {
+ fallthru = fn.newBasicBlock("switch.body")
+ }
+
+ cc := clause.(*ast.CaseClause)
+ if cc.List == nil {
+ // Default case.
+ dfltBody = &cc.Body
+ dfltFallthrough = fallthru
+ dfltBlock = body
+ continue
+ }
+
+ var nextCond *BasicBlock
+ for _, cond := range cc.List {
+ nextCond = fn.newBasicBlock("switch.next")
+ // TODO(adonovan): opt: when tag==vTrue, we'd
+ // get better code if we use b.cond(cond)
+ // instead of BinOp(EQL, tag, b.expr(cond))
+ // followed by If. Don't forget conversions
+ // though.
+ cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), cond.Pos())
+ emitIf(fn, cond, body, nextCond)
+ fn.currentBlock = nextCond
+ }
+ fn.currentBlock = body
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _fallthrough: fallthru,
+ }
+ b.stmtList(fn, cc.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+ fn.currentBlock = nextCond
+ }
+ if dfltBlock != nil {
+ emitJump(fn, dfltBlock)
+ fn.currentBlock = dfltBlock
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _fallthrough: dfltFallthrough,
+ }
+ b.stmtList(fn, *dfltBody)
+ fn.targets = fn.targets.tail
+ }
+ emitJump(fn, done)
+ fn.currentBlock = done
+}
+
+// typeSwitchStmt emits to fn code for the type switch statement s, optionally
+// labelled by label.
+//
+func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) {
+ // We treat TypeSwitchStmt like a sequential if-else chain.
+ // Multiway dispatch can be recovered later by ssautil.Switches().
+
+ // Typeswitch lowering:
+ //
+ // var x X
+ // switch y := x.(type) {
+ // case T1, T2: S1 // >1 (y := x)
+ // case nil: SN // nil (y := x)
+ // default: SD // 0 types (y := x)
+ // case T3: S3 // 1 type (y := x.(T3))
+ // }
+ //
+ // ...s.Init...
+ // x := eval x
+ // .caseT1:
+ // t1, ok1 := typeswitch,ok x <T1>
+ // if ok1 then goto S1 else goto .caseT2
+ // .caseT2:
+ // t2, ok2 := typeswitch,ok x <T2>
+ // if ok2 then goto S1 else goto .caseNil
+ // .S1:
+ // y := x
+ // ...S1...
+ // goto done
+ // .caseNil:
+ // if x == nil then goto SN else goto .caseT3
+ // .SN:
+ // y := x
+ // ...SN...
+ // goto done
+ // .caseT3:
+ // t3, ok3 := typeswitch,ok x <T3>
+ // if ok3 then goto S3 else goto default
+ // .S3:
+ // y := t3
+ // ...S3...
+ // goto done
+ // .default:
+ // y := x
+ // ...SD...
+ // goto done
+ // .done:
+
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+
+ var x Value
+ switch ass := s.Assign.(type) {
+ case *ast.ExprStmt: // x.(type)
+ x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X)
+ case *ast.AssignStmt: // y := x.(type)
+ x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X)
+ }
+
+ done := fn.newBasicBlock("typeswitch.done")
+ if label != nil {
+ label._break = done
+ }
+ var default_ *ast.CaseClause
+ for _, clause := range s.Body.List {
+ cc := clause.(*ast.CaseClause)
+ if cc.List == nil {
+ default_ = cc
+ continue
+ }
+ body := fn.newBasicBlock("typeswitch.body")
+ var next *BasicBlock
+ var casetype types.Type
+ var ti Value // ti, ok := typeassert,ok x <Ti>
+ for _, cond := range cc.List {
+ next = fn.newBasicBlock("typeswitch.next")
+ casetype = fn.Pkg.typeOf(cond)
+ var condv Value
+ if casetype == tUntypedNil {
+ condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos)
+ ti = x
+ } else {
+ yok := emitTypeTest(fn, x, casetype, cc.Case)
+ ti = emitExtract(fn, yok, 0)
+ condv = emitExtract(fn, yok, 1)
+ }
+ emitIf(fn, condv, body, next)
+ fn.currentBlock = next
+ }
+ if len(cc.List) != 1 {
+ ti = x
+ }
+ fn.currentBlock = body
+ b.typeCaseBody(fn, cc, ti, done)
+ fn.currentBlock = next
+ }
+ if default_ != nil {
+ b.typeCaseBody(fn, default_, x, done)
+ } else {
+ emitJump(fn, done)
+ }
+ fn.currentBlock = done
+}
+
+func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) {
+ if obj := fn.Pkg.info.Implicits[cc]; obj != nil {
+ // In a switch y := x.(type), each case clause
+ // implicitly declares a distinct object y.
+ // In a single-type case, y has that type.
+ // In multi-type cases, 'case nil' and default,
+ // y has the same type as the interface operand.
+ emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos())
+ }
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ b.stmtList(fn, cc.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+}
+
+// selectStmt emits to fn code for the select statement s, optionally
+// labelled by label.
+//
+func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
+ // A blocking select of a single case degenerates to a
+ // simple send or receive.
+ // TODO(adonovan): opt: is this optimization worth its weight?
+ if len(s.Body.List) == 1 {
+ clause := s.Body.List[0].(*ast.CommClause)
+ if clause.Comm != nil {
+ b.stmt(fn, clause.Comm)
+ done := fn.newBasicBlock("select.done")
+ if label != nil {
+ label._break = done
+ }
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ b.stmtList(fn, clause.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+ fn.currentBlock = done
+ return
+ }
+ }
+
+ // First evaluate all channels in all cases, and find
+ // the directions of each state.
+ var states []*SelectState
+ blocking := true
+ debugInfo := fn.debugInfo()
+ for _, clause := range s.Body.List {
+ var st *SelectState
+ switch comm := clause.(*ast.CommClause).Comm.(type) {
+ case nil: // default case
+ blocking = false
+ continue
+
+ case *ast.SendStmt: // ch<- i
+ ch := b.expr(fn, comm.Chan)
+ st = &SelectState{
+ Dir: types.SendOnly,
+ Chan: ch,
+ Send: emitConv(fn, b.expr(fn, comm.Value),
+ ch.Type().Underlying().(*types.Chan).Elem()),
+ Pos: comm.Arrow,
+ }
+ if debugInfo {
+ st.DebugNode = comm
+ }
+
+ case *ast.AssignStmt: // x := <-ch
+ recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr)
+ st = &SelectState{
+ Dir: types.RecvOnly,
+ Chan: b.expr(fn, recv.X),
+ Pos: recv.OpPos,
+ }
+ if debugInfo {
+ st.DebugNode = recv
+ }
+
+ case *ast.ExprStmt: // <-ch
+ recv := unparen(comm.X).(*ast.UnaryExpr)
+ st = &SelectState{
+ Dir: types.RecvOnly,
+ Chan: b.expr(fn, recv.X),
+ Pos: recv.OpPos,
+ }
+ if debugInfo {
+ st.DebugNode = recv
+ }
+ }
+ states = append(states, st)
+ }
+
+ // We dispatch on the (fair) result of Select using a
+ // sequential if-else chain, in effect:
+ //
+ // idx, recvOk, r0...r_n-1 := select(...)
+ // if idx == 0 { // receive on channel 0 (first receive => r0)
+ // x, ok := r0, recvOk
+ // ...state0...
+	// } else if idx == 1 { // send on channel 1
+ // ...state1...
+ // } else {
+ // ...default...
+ // }
+ sel := &Select{
+ States: states,
+ Blocking: blocking,
+ }
+ sel.setPos(s.Select)
+ var vars []*types.Var
+ vars = append(vars, varIndex, varOk)
+ for _, st := range states {
+ if st.Dir == types.RecvOnly {
+ tElem := st.Chan.Type().Underlying().(*types.Chan).Elem()
+ vars = append(vars, anonVar(tElem))
+ }
+ }
+ sel.setType(types.NewTuple(vars...))
+
+ fn.emit(sel)
+ idx := emitExtract(fn, sel, 0)
+
+ done := fn.newBasicBlock("select.done")
+ if label != nil {
+ label._break = done
+ }
+
+ var defaultBody *[]ast.Stmt
+ state := 0
+	r := 2 // index in 'sel' tuple of value; increments if st.Dir==RecvOnly
+ for _, cc := range s.Body.List {
+ clause := cc.(*ast.CommClause)
+ if clause.Comm == nil {
+ defaultBody = &clause.Body
+ continue
+ }
+ body := fn.newBasicBlock("select.body")
+ next := fn.newBasicBlock("select.next")
+ emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next)
+ fn.currentBlock = body
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ switch comm := clause.Comm.(type) {
+ case *ast.ExprStmt: // <-ch
+ if debugInfo {
+ v := emitExtract(fn, sel, r)
+ emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false)
+ }
+ r++
+
+ case *ast.AssignStmt: // x := <-states[state].Chan
+ if comm.Tok == token.DEFINE {
+ fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident))
+ }
+ x := b.addr(fn, comm.Lhs[0], false) // non-escaping
+ v := emitExtract(fn, sel, r)
+ if debugInfo {
+ emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false)
+ }
+ x.store(fn, v)
+
+ if len(comm.Lhs) == 2 { // x, ok := ...
+ if comm.Tok == token.DEFINE {
+ fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident))
+ }
+ ok := b.addr(fn, comm.Lhs[1], false) // non-escaping
+ ok.store(fn, emitExtract(fn, sel, 1))
+ }
+ r++
+ }
+ b.stmtList(fn, clause.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+ fn.currentBlock = next
+ state++
+ }
+ if defaultBody != nil {
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ b.stmtList(fn, *defaultBody)
+ fn.targets = fn.targets.tail
+ } else {
+ // A blocking select must match some case.
+ // (This should really be a runtime.errorString, not a string.)
+ fn.emit(&Panic{
+ X: emitConv(fn, stringConst("blocking select matched no case"), tEface),
+ })
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+ }
+ emitJump(fn, done)
+ fn.currentBlock = done
+}
+
+// forStmt emits to fn code for the for statement s, optionally
+// labelled by label.
+//
+func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
+ // ...init...
+ // jump loop
+ // loop:
+ // if cond goto body else done
+ // body:
+ // ...body...
+ // jump post
+ // post: (target of continue)
+ // ...post...
+ // jump loop
+ // done: (target of break)
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+ body := fn.newBasicBlock("for.body")
+ done := fn.newBasicBlock("for.done") // target of 'break'
+ loop := body // target of back-edge
+ if s.Cond != nil {
+ loop = fn.newBasicBlock("for.loop")
+ }
+ cont := loop // target of 'continue'
+ if s.Post != nil {
+ cont = fn.newBasicBlock("for.post")
+ }
+ if label != nil {
+ label._break = done
+ label._continue = cont
+ }
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+ if loop != body {
+ b.cond(fn, s.Cond, body, done)
+ fn.currentBlock = body
+ }
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _continue: cont,
+ }
+ b.stmt(fn, s.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, cont)
+
+ if s.Post != nil {
+ fn.currentBlock = cont
+ b.stmt(fn, s.Post)
+ emitJump(fn, loop) // back-edge
+ }
+ fn.currentBlock = done
+}
+
+// rangeIndexed emits to fn the header for an integer-indexed loop
+// over array, *array or slice value x.
+// The v result is defined only if tv is non-nil.
+// pos is the position of the "for" token.
+//
+func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) {
+ //
+ // length = len(x)
+ // index = -1
+ // loop: (target of continue)
+ // index++
+ // if index < length goto body else done
+ // body:
+ // k = index
+ // v = x[index]
+ // ...body...
+ // jump loop
+ // done: (target of break)
+
+ // Determine number of iterations.
+ var length Value
+ if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok {
+ // For array or *array, the number of iterations is
+ // known statically thanks to the type. We avoid a
+ // data dependence upon x, permitting later dead-code
+ // elimination if x is pure, static unrolling, etc.
+ // Ranging over a nil *array may have >0 iterations.
+ // We still generate code for x, in case it has effects.
+ length = intConst(arr.Len())
+ } else {
+ // length = len(x).
+ var c Call
+ c.Call.Value = makeLen(x.Type())
+ c.Call.Args = []Value{x}
+ c.setType(tInt)
+ length = fn.emit(&c)
+ }
+
+ index := fn.addLocal(tInt, token.NoPos)
+ emitStore(fn, index, intConst(-1), pos)
+
+ loop = fn.newBasicBlock("rangeindex.loop")
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+
+ incr := &BinOp{
+ Op: token.ADD,
+ X: emitLoad(fn, index),
+ Y: vOne,
+ }
+ incr.setType(tInt)
+ emitStore(fn, index, fn.emit(incr), pos)
+
+ body := fn.newBasicBlock("rangeindex.body")
+ done = fn.newBasicBlock("rangeindex.done")
+ emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done)
+ fn.currentBlock = body
+
+ k = emitLoad(fn, index)
+ if tv != nil {
+ switch t := x.Type().Underlying().(type) {
+ case *types.Array:
+ instr := &Index{
+ X: x,
+ Index: k,
+ }
+ instr.setType(t.Elem())
+ v = fn.emit(instr)
+
+ case *types.Pointer: // *array
+ instr := &IndexAddr{
+ X: x,
+ Index: k,
+ }
+ instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()))
+ v = emitLoad(fn, fn.emit(instr))
+
+ case *types.Slice:
+ instr := &IndexAddr{
+ X: x,
+ Index: k,
+ }
+ instr.setType(types.NewPointer(t.Elem()))
+ v = emitLoad(fn, fn.emit(instr))
+
+ default:
+ panic("rangeIndexed x:" + t.String())
+ }
+ }
+ return
+}
+
+// rangeIter emits to fn the header for a loop using
+// Range/Next/Extract to iterate over map or string value x.
+// tk and tv are the types of the key/value results k and v, or nil
+// if the respective component is not wanted.
+//
+func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) {
+ //
+ // it = range x
+ // loop: (target of continue)
+ // okv = next it (ok, key, value)
+ // ok = extract okv #0
+ // if ok goto body else done
+ // body:
+ // k = extract okv #1
+ // v = extract okv #2
+ // ...body...
+ // jump loop
+ // done: (target of break)
+ //
+
+ if tk == nil {
+ tk = tInvalid
+ }
+ if tv == nil {
+ tv = tInvalid
+ }
+
+ rng := &Range{X: x}
+ rng.setPos(pos)
+ rng.setType(tRangeIter)
+ it := fn.emit(rng)
+
+ loop = fn.newBasicBlock("rangeiter.loop")
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+
+ _, isString := x.Type().Underlying().(*types.Basic)
+
+ okv := &Next{
+ Iter: it,
+ IsString: isString,
+ }
+ okv.setType(types.NewTuple(
+ varOk,
+ newVar("k", tk),
+ newVar("v", tv),
+ ))
+ fn.emit(okv)
+
+ body := fn.newBasicBlock("rangeiter.body")
+ done = fn.newBasicBlock("rangeiter.done")
+ emitIf(fn, emitExtract(fn, okv, 0), body, done)
+ fn.currentBlock = body
+
+ if tk != tInvalid {
+ k = emitExtract(fn, okv, 1)
+ }
+ if tv != tInvalid {
+ v = emitExtract(fn, okv, 2)
+ }
+ return
+}
+
+// rangeChan emits to fn the header for a loop that receives from
+// channel x until it fails.
+// tk is the channel's element type, or nil if the k result is
+// not wanted.
+// pos is the position of the '=' or ':=' token.
+//
+func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) {
+ //
+ // loop: (target of continue)
+ // ko = <-x (key, ok)
+ // ok = extract ko #1
+ // if ok goto body else done
+ // body:
+ // k = extract ko #0
+ // ...
+ // goto loop
+ // done: (target of break)
+
+ loop = fn.newBasicBlock("rangechan.loop")
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+ recv := &UnOp{
+ Op: token.ARROW,
+ X: x,
+ CommaOk: true,
+ }
+ recv.setPos(pos)
+ recv.setType(types.NewTuple(
+ newVar("k", x.Type().Underlying().(*types.Chan).Elem()),
+ varOk,
+ ))
+ ko := fn.emit(recv)
+ body := fn.newBasicBlock("rangechan.body")
+ done = fn.newBasicBlock("rangechan.done")
+ emitIf(fn, emitExtract(fn, ko, 1), body, done)
+ fn.currentBlock = body
+ if tk != nil {
+ k = emitExtract(fn, ko, 0)
+ }
+ return
+}
+
+// rangeStmt emits to fn code for the range statement s, optionally
+// labelled by label.
+//
+func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
+ var tk, tv types.Type
+ if s.Key != nil && !isBlankIdent(s.Key) {
+ tk = fn.Pkg.typeOf(s.Key)
+ }
+ if s.Value != nil && !isBlankIdent(s.Value) {
+ tv = fn.Pkg.typeOf(s.Value)
+ }
+
+ // If iteration variables are defined (:=), this
+ // occurs once outside the loop.
+ //
+ // Unlike a short variable declaration, a RangeStmt
+ // using := never redeclares an existing variable; it
+ // always creates a new one.
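+	//
+	// (In this lowering, closures created in the loop body
+	// therefore capture one variable per loop, not one per
+	// iteration.)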
+ if s.Tok == token.DEFINE {
+ if tk != nil {
+ fn.addLocalForIdent(s.Key.(*ast.Ident))
+ }
+ if tv != nil {
+ fn.addLocalForIdent(s.Value.(*ast.Ident))
+ }
+ }
+
+ x := b.expr(fn, s.X)
+
+ var k, v Value
+ var loop, done *BasicBlock
+ switch rt := x.Type().Underlying().(type) {
+ case *types.Slice, *types.Array, *types.Pointer: // *array
+ k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For)
+
+ case *types.Chan:
+ k, loop, done = b.rangeChan(fn, x, tk, s.For)
+
+ case *types.Map, *types.Basic: // string
+ k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For)
+
+ default:
+ panic("Cannot range over: " + rt.String())
+ }
+
+ // Evaluate both LHS expressions before we update either.
+ var kl, vl lvalue
+ if tk != nil {
+ kl = b.addr(fn, s.Key, false) // non-escaping
+ }
+ if tv != nil {
+ vl = b.addr(fn, s.Value, false) // non-escaping
+ }
+ if tk != nil {
+ kl.store(fn, k)
+ }
+ if tv != nil {
+ vl.store(fn, v)
+ }
+
+ if label != nil {
+ label._break = done
+ label._continue = loop
+ }
+
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _continue: loop,
+ }
+ b.stmt(fn, s.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, loop) // back-edge
+ fn.currentBlock = done
+}
+
+// stmt lowers statement s to SSA form, emitting code to fn.
+func (b *builder) stmt(fn *Function, _s ast.Stmt) {
+ // The label of the current statement. If non-nil, its _goto
+ // target is always set; its _break and _continue are set only
+ // within the body of switch/typeswitch/select/for/range.
+ // It is effectively an additional default-nil parameter of stmt().
+ var label *lblock
+start:
+ switch s := _s.(type) {
+ case *ast.EmptyStmt:
+ // ignore. (Usually removed by gofmt.)
+
+ case *ast.DeclStmt: // Con, Var or Typ
+ d := s.Decl.(*ast.GenDecl)
+ if d.Tok == token.VAR {
+ for _, spec := range d.Specs {
+ if vs, ok := spec.(*ast.ValueSpec); ok {
+ b.localValueSpec(fn, vs)
+ }
+ }
+ }
+
+ case *ast.LabeledStmt:
+ label = fn.labelledBlock(s.Label)
+ emitJump(fn, label._goto)
+ fn.currentBlock = label._goto
+ _s = s.Stmt
+ goto start // effectively: tailcall stmt(fn, s.Stmt, label)
+
+ case *ast.ExprStmt:
+ b.expr(fn, s.X)
+
+ case *ast.SendStmt:
+ fn.emit(&Send{
+ Chan: b.expr(fn, s.Chan),
+ X: emitConv(fn, b.expr(fn, s.Value),
+ fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()),
+ pos: s.Arrow,
+ })
+
+ case *ast.IncDecStmt:
+ op := token.ADD
+ if s.Tok == token.DEC {
+ op = token.SUB
+ }
+ loc := b.addr(fn, s.X, false)
+ b.assignOp(fn, loc, NewConst(exact.MakeInt64(1), loc.typ()), op, s.Pos())
+
+ case *ast.AssignStmt:
+ switch s.Tok {
+ case token.ASSIGN, token.DEFINE:
+ b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE)
+
+ default: // +=, etc.
+ op := s.Tok + token.ADD - token.ADD_ASSIGN
+ b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s.Pos())
+ }
+
+ case *ast.GoStmt:
+ // The "intrinsics" new/make/len/cap are forbidden here.
+ // panic is treated like an ordinary function call.
+ v := Go{pos: s.Go}
+ b.setCall(fn, s.Call, &v.Call)
+ fn.emit(&v)
+
+ case *ast.DeferStmt:
+ // The "intrinsics" new/make/len/cap are forbidden here.
+ // panic is treated like an ordinary function call.
+ v := Defer{pos: s.Defer}
+ b.setCall(fn, s.Call, &v.Call)
+ fn.emit(&v)
+
+ // A deferred call can cause recovery from panic,
+ // and control resumes at the Recover block.
+ createRecoverBlock(fn)
+
+ case *ast.ReturnStmt:
+ var results []Value
+ if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 {
+ // Return of one expression in a multi-valued function.
+ tuple := b.exprN(fn, s.Results[0])
+ ttuple := tuple.Type().(*types.Tuple)
+ for i, n := 0, ttuple.Len(); i < n; i++ {
+ results = append(results,
+ emitConv(fn, emitExtract(fn, tuple, i),
+ fn.Signature.Results().At(i).Type()))
+ }
+ } else {
+ // 1:1 return, or no-arg return in non-void function.
+ for i, r := range s.Results {
+ v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type())
+ results = append(results, v)
+ }
+ }
+ if fn.namedResults != nil {
+ // Function has named result parameters (NRPs).
+ // Perform parallel assignment of return operands to NRPs.
+ for i, r := range results {
+ emitStore(fn, fn.namedResults[i], r, s.Return)
+ }
+ }
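+		// A deferred call may observe or update the NRPs
+		// before the function returns, e.g. (illustrative):
+		//
+		//	func f() (x int) { defer func() { x++ }(); return 1 } // returns 2
+		//
+		// hence the NRPs are reloaded after RunDefers below.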
+ // Run function calls deferred in this
+ // function when explicitly returning from it.
+ fn.emit(new(RunDefers))
+ if fn.namedResults != nil {
+ // Reload NRPs to form the result tuple.
+ results = results[:0]
+ for _, r := range fn.namedResults {
+ results = append(results, emitLoad(fn, r))
+ }
+ }
+ fn.emit(&Return{Results: results, pos: s.Return})
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+
+ case *ast.BranchStmt:
+ var block *BasicBlock
+ switch s.Tok {
+ case token.BREAK:
+ if s.Label != nil {
+ block = fn.labelledBlock(s.Label)._break
+ } else {
+ for t := fn.targets; t != nil && block == nil; t = t.tail {
+ block = t._break
+ }
+ }
+
+ case token.CONTINUE:
+ if s.Label != nil {
+ block = fn.labelledBlock(s.Label)._continue
+ } else {
+ for t := fn.targets; t != nil && block == nil; t = t.tail {
+ block = t._continue
+ }
+ }
+
+ case token.FALLTHROUGH:
+ for t := fn.targets; t != nil && block == nil; t = t.tail {
+ block = t._fallthrough
+ }
+
+ case token.GOTO:
+ block = fn.labelledBlock(s.Label)._goto
+ }
+ emitJump(fn, block)
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+
+ case *ast.BlockStmt:
+ b.stmtList(fn, s.List)
+
+ case *ast.IfStmt:
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+ then := fn.newBasicBlock("if.then")
+ done := fn.newBasicBlock("if.done")
+ els := done
+ if s.Else != nil {
+ els = fn.newBasicBlock("if.else")
+ }
+ b.cond(fn, s.Cond, then, els)
+ fn.currentBlock = then
+ b.stmt(fn, s.Body)
+ emitJump(fn, done)
+
+ if s.Else != nil {
+ fn.currentBlock = els
+ b.stmt(fn, s.Else)
+ emitJump(fn, done)
+ }
+
+ fn.currentBlock = done
+
+ case *ast.SwitchStmt:
+ b.switchStmt(fn, s, label)
+
+ case *ast.TypeSwitchStmt:
+ b.typeSwitchStmt(fn, s, label)
+
+ case *ast.SelectStmt:
+ b.selectStmt(fn, s, label)
+
+ case *ast.ForStmt:
+ b.forStmt(fn, s, label)
+
+ case *ast.RangeStmt:
+ b.rangeStmt(fn, s, label)
+
+ default:
+ panic(fmt.Sprintf("unexpected statement kind: %T", s))
+ }
+}
+
+// buildFunction builds SSA code for the body of function fn. Idempotent.
+func (b *builder) buildFunction(fn *Function) {
+ if fn.Blocks != nil {
+ return // building already started
+ }
+
+ var recvField *ast.FieldList
+ var body *ast.BlockStmt
+ var functype *ast.FuncType
+ switch n := fn.syntax.(type) {
+ case nil:
+ return // not a Go source function. (Synthetic, or from object file.)
+ case *ast.FuncDecl:
+ functype = n.Type
+ recvField = n.Recv
+ body = n.Body
+ case *ast.FuncLit:
+ functype = n.Type
+ body = n.Body
+ default:
+ panic(n)
+ }
+
+ if body == nil {
+ // External function.
+ if fn.Params == nil {
+ // This condition ensures we add a non-empty
+ // params list once only, but we may attempt
+ // the degenerate empty case repeatedly.
+ // TODO(adonovan): opt: don't do that.
+
+ // We set Function.Params even though there is no body
+ // code to reference them. This simplifies clients.
+ if recv := fn.Signature.Recv(); recv != nil {
+ fn.addParamObj(recv)
+ }
+ params := fn.Signature.Params()
+ for i, n := 0, params.Len(); i < n; i++ {
+ fn.addParamObj(params.At(i))
+ }
+ }
+ return
+ }
+ if fn.Prog.mode&LogSource != 0 {
+ defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))()
+ }
+ fn.startBody()
+ fn.createSyntacticParams(recvField, functype)
+ b.stmt(fn, body)
+ if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) {
+ // Control fell off the end of the function's body block.
+ //
+ // Block optimizations eliminate the current block, if
+ // unreachable. It is a builder invariant that
+ // if this no-arg return is ill-typed for
+ // fn.Signature.Results, this block must be
+ // unreachable. The sanity checker checks this.
+ fn.emit(new(RunDefers))
+ fn.emit(new(Return))
+ }
+ fn.finishBody()
+}
+
+// buildFuncDecl builds SSA code for the function or method declared
+// by decl in package pkg.
+//
+func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) {
+ id := decl.Name
+ if isBlankIdent(id) {
+ return // discard
+ }
+ fn := pkg.values[pkg.info.Defs[id]].(*Function)
+ if decl.Recv == nil && id.Name == "init" {
+ var v Call
+ v.Call.Value = fn
+ v.setType(types.NewTuple())
+ pkg.init.emit(&v)
+ }
+ b.buildFunction(fn)
+}
+
+// Build calls Package.Build for each package in prog.
+// Building occurs in parallel unless the BuildSerially mode flag was set.
+//
+// Build is intended for whole-program analysis; a typical compiler
+// need only build a single package.
+//
+// Build is idempotent and thread-safe.
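+//
+// A typical whole-program client looks roughly like this
+// (an illustrative sketch; type-checking and the CreatePackage
+// call for each package are elided):
+//
+//	prog := ssa.NewProgram(fset, ssa.SanityCheckFunctions)
+//	// ... prog.CreatePackage(pkg, files, info, true) for each package ...
+//	prog.Build()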
+//
+func (prog *Program) Build() {
+ var wg sync.WaitGroup
+ for _, p := range prog.packages {
+ if prog.mode&BuildSerially != 0 {
+ p.Build()
+ } else {
+ wg.Add(1)
+ go func(p *Package) {
+ p.Build()
+ wg.Done()
+ }(p)
+ }
+ }
+ wg.Wait()
+}
+
+// Build builds SSA code for all functions and vars in package p.
+//
+// Precondition: CreatePackage must have been called for all of p's
+// direct imports (and hence its direct imports must have been
+// error-free).
+//
+// Build is idempotent and thread-safe.
+//
+func (p *Package) Build() { p.buildOnce.Do(p.build) }
+
+func (p *Package) build() {
+ if p.info == nil {
+ return // synthetic package, e.g. "testmain"
+ }
+
+ // Ensure we have runtime type info for all exported members.
+ // TODO(adonovan): ideally belongs in memberFromObject, but
+ // that would require package creation in topological order.
+ for name, mem := range p.Members {
+ if ast.IsExported(name) {
+ p.Prog.needMethodsOf(mem.Type())
+ }
+ }
+ if p.Prog.mode&LogSource != 0 {
+ defer logStack("build %s", p)()
+ }
+ init := p.init
+ init.startBody()
+
+ var done *BasicBlock
+
+ if p.Prog.mode&BareInits == 0 {
+ // Make init() skip if package is already initialized.
+ initguard := p.Var("init$guard")
+ doinit := init.newBasicBlock("init.start")
+ done = init.newBasicBlock("init.done")
+ emitIf(init, emitLoad(init, initguard), done, doinit)
+ init.currentBlock = doinit
+ emitStore(init, initguard, vTrue, token.NoPos)
+
+ // Call the init() function of each package we import.
+ for _, pkg := range p.Pkg.Imports() {
+ prereq := p.Prog.packages[pkg]
+ if prereq == nil {
+ panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path()))
+ }
+ var v Call
+ v.Call.Value = prereq.init
+ v.Call.pos = init.pos
+ v.setType(types.NewTuple())
+ init.emit(&v)
+ }
+ }
+
+ var b builder
+
+ // Initialize package-level vars in correct order.
+ for _, varinit := range p.info.InitOrder {
+ if init.Prog.mode&LogSource != 0 {
+ fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n",
+ varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos()))
+ }
+ if len(varinit.Lhs) == 1 {
+ // 1:1 initialization: var x, y = a(), b()
+ var lval lvalue
+ if v := varinit.Lhs[0]; v.Name() != "_" {
+ lval = &address{addr: p.values[v].(*Global), pos: v.Pos()}
+ } else {
+ lval = blank{}
+ }
+ b.assign(init, lval, varinit.Rhs, true, nil)
+ } else {
+			// n:1 initialization: var x, y = f()
+ tuple := b.exprN(init, varinit.Rhs)
+ for i, v := range varinit.Lhs {
+ if v.Name() == "_" {
+ continue
+ }
+ emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos())
+ }
+ }
+ }
+
+ // Build all package-level functions, init functions
+ // and methods, including unreachable/blank ones.
+ // We build them in source order, but it's not significant.
+ for _, file := range p.files {
+ for _, decl := range file.Decls {
+ if decl, ok := decl.(*ast.FuncDecl); ok {
+ b.buildFuncDecl(p, decl)
+ }
+ }
+ }
+
+ // Finish up init().
+ if p.Prog.mode&BareInits == 0 {
+ emitJump(init, done)
+ init.currentBlock = done
+ }
+ init.emit(new(Return))
+ init.finishBody()
+
+ p.info = nil // We no longer need ASTs or go/types deductions.
+
+ if p.Prog.mode&SanityCheckFunctions != 0 {
+ sanityCheckPackage(p)
+ }
+}
+
+// Like ObjectOf, but panics instead of returning nil.
+// Only valid during p's create and build phases.
+func (p *Package) objectOf(id *ast.Ident) types.Object {
+ if o := p.info.ObjectOf(id); o != nil {
+ return o
+ }
+ panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s",
+ id.Name, p.Prog.Fset.Position(id.Pos())))
+}
+
+// Like TypeOf, but panics instead of returning nil.
+// Only valid during p's create and build phases.
+func (p *Package) typeOf(e ast.Expr) types.Type {
+ if T := p.info.TypeOf(e); T != nil {
+ return T
+ }
+ panic(fmt.Sprintf("no type for %T @ %s",
+ e, p.Prog.Fset.Position(e.Pos())))
+}
diff --git a/vendor/honnef.co/go/tools/ssa/const.go b/vendor/honnef.co/go/tools/ssa/const.go
new file mode 100644
index 000000000..ca99adc3f
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/const.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the Const SSA value type.
+
+import (
+ "fmt"
+ exact "go/constant"
+ "go/token"
+ "go/types"
+ "strconv"
+)
+
+// NewConst returns a new constant of the specified value and type.
+// val must be valid according to the specification of Const.Value.
+//
+func NewConst(val exact.Value, typ types.Type) *Const {
+ return &Const{typ, val}
+}
+
+// intConst returns an 'int' constant that evaluates to i.
+// (i is an int64 in case the host is narrower than the target.)
+func intConst(i int64) *Const {
+ return NewConst(exact.MakeInt64(i), tInt)
+}
+
+// nilConst returns a nil constant of the specified type, which may
+// be any reference type, including interfaces.
+//
+func nilConst(typ types.Type) *Const {
+ return NewConst(nil, typ)
+}
+
+// stringConst returns a 'string' constant that evaluates to s.
+func stringConst(s string) *Const {
+ return NewConst(exact.MakeString(s), tString)
+}
+
+// zeroConst returns a new "zero" constant of the specified type,
+// which must not be an array or struct type: the zero values of
+// aggregates are well-defined but cannot be represented by Const.
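+//
+// For example (illustrative): zeroConst(types.Typ[types.Int]) yields
+// 0:int, while pointer, slice, map, channel, function and interface
+// types yield a nil constant of that type.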
+//
+func zeroConst(t types.Type) *Const {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return NewConst(exact.MakeBool(false), t)
+ case t.Info()&types.IsNumeric != 0:
+ return NewConst(exact.MakeInt64(0), t)
+ case t.Info()&types.IsString != 0:
+ return NewConst(exact.MakeString(""), t)
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return nilConst(t)
+ default:
+ panic(fmt.Sprint("zeroConst for unexpected type:", t))
+ }
+ case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+ return nilConst(t)
+ case *types.Named:
+ return NewConst(zeroConst(t.Underlying()).Value, t)
+ case *types.Array, *types.Struct, *types.Tuple:
+ panic(fmt.Sprint("zeroConst applied to aggregate:", t))
+ }
+ panic(fmt.Sprint("zeroConst: unexpected ", t))
+}
+
+func (c *Const) RelString(from *types.Package) string {
+ var s string
+ if c.Value == nil {
+ s = "nil"
+ } else if c.Value.Kind() == exact.String {
+ s = exact.StringVal(c.Value)
+ const max = 20
+ // TODO(adonovan): don't cut a rune in half.
+ if len(s) > max {
+ s = s[:max-3] + "..." // abbreviate
+ }
+ s = strconv.Quote(s)
+ } else {
+ s = c.Value.String()
+ }
+ return s + ":" + relType(c.Type(), from)
+}
+
+func (c *Const) Name() string {
+ return c.RelString(nil)
+}
+
+func (c *Const) String() string {
+ return c.Name()
+}
+
+func (c *Const) Type() types.Type {
+ return c.typ
+}
+
+func (c *Const) Referrers() *[]Instruction {
+ return nil
+}
+
+func (c *Const) Parent() *Function { return nil }
+
+func (c *Const) Pos() token.Pos {
+ return token.NoPos
+}
+
+// IsNil returns true if this constant represents a typed or untyped nil value.
+func (c *Const) IsNil() bool {
+ return c.Value == nil
+}
+
+// TODO(adonovan): move everything below into honnef.co/go/tools/ssa/interp.
+
+// Int64 returns the numeric value of this constant truncated to fit
+// a signed 64-bit integer.
+//
+func (c *Const) Int64() int64 {
+ switch x := exact.ToInt(c.Value); x.Kind() {
+ case exact.Int:
+ if i, ok := exact.Int64Val(x); ok {
+ return i
+ }
+ return 0
+ case exact.Float:
+ f, _ := exact.Float64Val(x)
+ return int64(f)
+ }
+ panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
+}
+
+// Uint64 returns the numeric value of this constant truncated to fit
+// an unsigned 64-bit integer.
+//
+func (c *Const) Uint64() uint64 {
+ switch x := exact.ToInt(c.Value); x.Kind() {
+ case exact.Int:
+ if u, ok := exact.Uint64Val(x); ok {
+ return u
+ }
+ return 0
+ case exact.Float:
+ f, _ := exact.Float64Val(x)
+ return uint64(f)
+ }
+ panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
+}
+
+// Float64 returns the numeric value of this constant truncated to fit
+// a float64.
+//
+func (c *Const) Float64() float64 {
+ f, _ := exact.Float64Val(c.Value)
+ return f
+}
+
+// Complex128 returns the complex value of this constant truncated to
+// fit a complex128.
+//
+func (c *Const) Complex128() complex128 {
+ re, _ := exact.Float64Val(exact.Real(c.Value))
+ im, _ := exact.Float64Val(exact.Imag(c.Value))
+ return complex(re, im)
+}
diff --git a/vendor/honnef.co/go/tools/ssa/create.go b/vendor/honnef.co/go/tools/ssa/create.go
new file mode 100644
index 000000000..69ac12b1b
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/create.go
@@ -0,0 +1,263 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the CREATE phase of SSA construction.
+// See builder.go for explanation.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "os"
+ "sync"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// NewProgram returns a new SSA Program.
+//
+// mode controls diagnostics and checking during SSA construction.
+//
+func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
+ prog := &Program{
+ Fset: fset,
+ imported: make(map[string]*Package),
+ packages: make(map[*types.Package]*Package),
+ thunks: make(map[selectionKey]*Function),
+ bounds: make(map[*types.Func]*Function),
+ mode: mode,
+ }
+
+ h := typeutil.MakeHasher() // protected by methodsMu, in effect
+ prog.methodSets.SetHasher(h)
+ prog.canon.SetHasher(h)
+
+ return prog
+}
+
+// memberFromObject populates package pkg with a member for the
+// typechecker object obj.
+//
+// For objects from Go source code, syntax is the associated syntax
+// tree (for funcs and vars only); it will be used during the build
+// phase.
+//
+func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
+ name := obj.Name()
+ switch obj := obj.(type) {
+ case *types.Builtin:
+ if pkg.Pkg != types.Unsafe {
+ panic("unexpected builtin object: " + obj.String())
+ }
+
+ case *types.TypeName:
+ pkg.Members[name] = &Type{
+ object: obj,
+ pkg: pkg,
+ }
+
+ case *types.Const:
+ c := &NamedConst{
+ object: obj,
+ Value: NewConst(obj.Val(), obj.Type()),
+ pkg: pkg,
+ }
+ pkg.values[obj] = c.Value
+ pkg.Members[name] = c
+
+ case *types.Var:
+ g := &Global{
+ Pkg: pkg,
+ name: name,
+ object: obj,
+ typ: types.NewPointer(obj.Type()), // address
+ pos: obj.Pos(),
+ }
+ pkg.values[obj] = g
+ pkg.Members[name] = g
+
+ case *types.Func:
+ sig := obj.Type().(*types.Signature)
+ if sig.Recv() == nil && name == "init" {
+ pkg.ninit++
+ name = fmt.Sprintf("init#%d", pkg.ninit)
+ }
+ fn := &Function{
+ name: name,
+ object: obj,
+ Signature: sig,
+ syntax: syntax,
+ pos: obj.Pos(),
+ Pkg: pkg,
+ Prog: pkg.Prog,
+ }
+ if syntax == nil {
+ fn.Synthetic = "loaded from gc object file"
+ }
+
+ pkg.values[obj] = fn
+ if sig.Recv() == nil {
+ pkg.Members[name] = fn // package-level function
+ }
+
+ default: // (incl. *types.Package)
+ panic("unexpected Object type: " + obj.String())
+ }
+}
+
+// membersFromDecl populates package pkg with members for each
+// typechecker object (var, func, const or type) associated with the
+// specified decl.
+//
+func membersFromDecl(pkg *Package, decl ast.Decl) {
+ switch decl := decl.(type) {
+ case *ast.GenDecl: // import, const, type or var
+ switch decl.Tok {
+ case token.CONST:
+ for _, spec := range decl.Specs {
+ for _, id := range spec.(*ast.ValueSpec).Names {
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], nil)
+ }
+ }
+ }
+
+ case token.VAR:
+ for _, spec := range decl.Specs {
+ for _, id := range spec.(*ast.ValueSpec).Names {
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], spec)
+ }
+ }
+ }
+
+ case token.TYPE:
+ for _, spec := range decl.Specs {
+ id := spec.(*ast.TypeSpec).Name
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], nil)
+ }
+ }
+ }
+
+ case *ast.FuncDecl:
+ id := decl.Name
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], decl)
+ }
+ }
+}
+
+// CreatePackage constructs and returns an SSA Package from the
+// specified type-checked, error-free file ASTs, and populates its
+// Members mapping.
+//
+// importable determines whether this package should be returned by a
+// subsequent call to ImportedPackage(pkg.Path()).
+//
+// The real work of building SSA form for each function is not done
+// until a subsequent call to Package.Build().
+//
+func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
+ p := &Package{
+ Prog: prog,
+ Members: make(map[string]Member),
+ values: make(map[types.Object]Value),
+ Pkg: pkg,
+ info: info, // transient (CREATE and BUILD phases)
+ files: files, // transient (CREATE and BUILD phases)
+ }
+
+ // Add init() function.
+ p.init = &Function{
+ name: "init",
+ Signature: new(types.Signature),
+ Synthetic: "package initializer",
+ Pkg: p,
+ Prog: prog,
+ }
+ p.Members[p.init.name] = p.init
+
+ // CREATE phase.
+ // Allocate all package members: vars, funcs, consts and types.
+ if len(files) > 0 {
+ // Go source package.
+ for _, file := range files {
+ for _, decl := range file.Decls {
+ membersFromDecl(p, decl)
+ }
+ }
+ } else {
+ // GC-compiled binary package (or "unsafe")
+ // No code.
+ // No position information.
+ scope := p.Pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ memberFromObject(p, obj, nil)
+ if obj, ok := obj.(*types.TypeName); ok {
+ if named, ok := obj.Type().(*types.Named); ok {
+ for i, n := 0, named.NumMethods(); i < n; i++ {
+ memberFromObject(p, named.Method(i), nil)
+ }
+ }
+ }
+ }
+ }
+
+ if prog.mode&BareInits == 0 {
+ // Add initializer guard variable.
+ initguard := &Global{
+ Pkg: p,
+ name: "init$guard",
+ typ: types.NewPointer(tBool),
+ }
+ p.Members[initguard.Name()] = initguard
+ }
+
+ if prog.mode&GlobalDebug != 0 {
+ p.SetDebugMode(true)
+ }
+
+ if prog.mode&PrintPackages != 0 {
+ printMu.Lock()
+ p.WriteTo(os.Stdout)
+ printMu.Unlock()
+ }
+
+ if importable {
+ prog.imported[p.Pkg.Path()] = p
+ }
+ prog.packages[p.Pkg] = p
+
+ return p
+}
+
+// printMu serializes printing of Packages/Functions to stdout.
+var printMu sync.Mutex
+
+// AllPackages returns a new slice containing all packages in the
+// program prog in unspecified order.
+//
+func (prog *Program) AllPackages() []*Package {
+ pkgs := make([]*Package, 0, len(prog.packages))
+ for _, pkg := range prog.packages {
+ pkgs = append(pkgs, pkg)
+ }
+ return pkgs
+}
+
+// ImportedPackage returns the importable SSA Package whose import
+// path is path, or nil if no such SSA package has been created.
+//
+// Not all packages are importable. For example, no import
+// declaration can resolve to the x_test package created by 'go test'
+// or the ad-hoc main package created by 'go build foo.go'.
+//
+func (prog *Program) ImportedPackage(path string) *Package {
+ return prog.imported[path]
+}
diff --git a/vendor/honnef.co/go/tools/ssa/doc.go b/vendor/honnef.co/go/tools/ssa/doc.go
new file mode 100644
index 000000000..57474dd20
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/doc.go
@@ -0,0 +1,123 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ssa defines a representation of the elements of Go programs
+// (packages, types, functions, variables and constants) using a
+// static single-assignment (SSA) form intermediate representation
+// (IR) for the bodies of functions.
+//
+// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
+//
+// For an introduction to SSA form, see
+// http://en.wikipedia.org/wiki/Static_single_assignment_form.
+// This page provides a broader reading list:
+// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
+//
+// The level of abstraction of the SSA form is intentionally close to
+// the source language to facilitate construction of source analysis
+// tools. It is not intended for machine code generation.
+//
+// All looping, branching and switching constructs are replaced with
+// unstructured control flow. Higher-level control flow constructs
+// such as multi-way branch can be reconstructed as needed; see
+// ssautil.Switches() for an example.
+//
+// To construct an SSA-form program, call ssautil.CreateProgram on a
+// loader.Program, a set of type-checked packages created from
+// parsed Go source files. The resulting ssa.Program contains all the
+// packages and their members, but SSA code is not created for
+// function bodies until a subsequent call to (*Package).Build.
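+//
+// An illustrative sketch (error handling elided; loader is
+// golang.org/x/tools/go/loader):
+//
+//	var conf loader.Config
+//	conf.CreateFromFilenames("example", "main.go")
+//	lprog, _ := conf.Load()
+//	prog := ssautil.CreateProgram(lprog, ssa.SanityCheckFunctions)
+//	prog.Build()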
+//
+// The builder initially builds a naive SSA form in which all local
+// variables are addresses of stack locations with explicit loads and
+// stores. Registerisation of eligible locals and φ-node insertion
+// using dominance and dataflow are then performed as a second pass
+// called "lifting" to improve the accuracy and performance of
+// subsequent analyses; this pass can be skipped by setting the
+// NaiveForm builder flag.
+//
+// The primary interfaces of this package are:
+//
+// - Member: a named member of a Go package.
+// - Value: an expression that yields a value.
+// - Instruction: a statement that consumes values and performs computation.
+// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
+//
+// A computation that yields a result implements both the Value and
+// Instruction interfaces. The following table shows for each
+// concrete type which of these interfaces it implements.
+//
+// Value? Instruction? Member?
+// *Alloc ✔ ✔
+// *BinOp ✔ ✔
+// *Builtin ✔
+// *Call ✔ ✔
+// *ChangeInterface ✔ ✔
+// *ChangeType ✔ ✔
+// *Const ✔
+// *Convert ✔ ✔
+// *DebugRef ✔
+// *Defer ✔
+// *Extract ✔ ✔
+// *Field ✔ ✔
+// *FieldAddr ✔ ✔
+// *FreeVar ✔
+// *Function ✔ ✔ (func)
+// *Global ✔ ✔ (var)
+// *Go ✔
+// *If ✔
+// *Index ✔ ✔
+// *IndexAddr ✔ ✔
+// *Jump ✔
+// *Lookup ✔ ✔
+// *MakeChan ✔ ✔
+// *MakeClosure ✔ ✔
+// *MakeInterface ✔ ✔
+// *MakeMap ✔ ✔
+// *MakeSlice ✔ ✔
+// *MapUpdate ✔
+// *NamedConst ✔ (const)
+// *Next ✔ ✔
+// *Panic ✔
+// *Parameter ✔
+// *Phi ✔ ✔
+// *Range ✔ ✔
+// *Return ✔
+// *RunDefers ✔
+// *Select ✔ ✔
+// *Send ✔
+// *Slice ✔ ✔
+// *Store ✔
+// *Type ✔ (type)
+// *TypeAssert ✔ ✔
+// *UnOp ✔ ✔
+//
+// Other key types in this package include: Program, Package, Function
+// and BasicBlock.
+//
+// The program representation constructed by this package is fully
+// resolved internally, i.e. it does not rely on the names of Values,
+// Packages, Functions, Types or BasicBlocks for the correct
+// interpretation of the program. Only the identities of objects and
+// the topology of the SSA and type graphs are semantically
+// significant. (There is one exception: Ids, used to identify field
+// and method names, contain strings.) Avoidance of name-based
+// operations simplifies the implementation of subsequent passes and
+// can make them very efficient. Many objects are nonetheless named
+// to aid in debugging, but it is not essential that the names be
+// either accurate or unambiguous. The public API exposes a number of
+// name-based maps for client convenience.
+//
+// The ssa/ssautil package provides various utilities that depend only
+// on the public API of this package.
+//
+// TODO(adonovan): Consider the exceptional control-flow implications
+// of defer and recover().
+//
+// TODO(adonovan): write a how-to document for all the various cases
+// of trying to determine corresponding elements across the four
+// domains of source locations, ast.Nodes, types.Objects,
+// ssa.Values/Instructions.
+//
+package ssa // import "honnef.co/go/tools/ssa"
diff --git a/vendor/honnef.co/go/tools/ssa/dom.go b/vendor/honnef.co/go/tools/ssa/dom.go
new file mode 100644
index 000000000..12ef4308f
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/dom.go
@@ -0,0 +1,341 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines algorithms related to dominance.
+
+// Dominator tree construction ----------------------------------------
+//
+// We use the algorithm described in Lengauer & Tarjan. 1979. A fast
+// algorithm for finding dominators in a flowgraph.
+// http://doi.acm.org/10.1145/357062.357071
+//
+// We also apply the optimizations to SLT described in Georgiadis et
+// al, Finding Dominators in Practice, JGAA 2006,
+// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf
+// to avoid the need for buckets of size > 1.
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "os"
+ "sort"
+)
+
+// Idom returns the block that immediately dominates b:
+// its parent in the dominator tree, if any.
+// Neither the entry node (b.Index==0) nor the recover node
+// (b==b.Parent().Recover()) has a parent.
+//
+func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }
+
+// Dominees returns the list of blocks that b immediately dominates:
+// its children in the dominator tree.
+//
+func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }
+
+// Dominates reports whether b dominates c.
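+// It runs in constant time, comparing the pre/post numbers assigned
+// by a depth-first traversal of the dominator tree: b dominates c
+// iff c's interval is nested within b's.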
+func (b *BasicBlock) Dominates(c *BasicBlock) bool {
+ return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post
+}
+
+type byDomPreorder []*BasicBlock
+
+func (a byDomPreorder) Len() int { return len(a) }
+func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
+
+// DomPreorder returns a new slice containing the blocks of f in
+// dominator tree preorder.
+//
+func (f *Function) DomPreorder() []*BasicBlock {
+ n := len(f.Blocks)
+ order := make(byDomPreorder, n, n)
+ copy(order, f.Blocks)
+ sort.Sort(order)
+ return order
+}
+
+// domInfo contains a BasicBlock's dominance information.
+type domInfo struct {
+ idom *BasicBlock // immediate dominator (parent in domtree)
+ children []*BasicBlock // nodes immediately dominated by this one
+ pre, post int32 // pre- and post-order numbering within domtree
+}
+
+// ltState holds the working state for the Lengauer-Tarjan algorithm
+// (during which domInfo.pre is repurposed for CFG DFS preorder number).
+type ltState struct {
+ // Each slice is indexed by b.Index.
+ sdom []*BasicBlock // b's semidominator
+ parent []*BasicBlock // b's parent in DFS traversal of CFG
+ ancestor []*BasicBlock // b's ancestor with least sdom
+}
+
+// dfs implements the depth-first search part of the LT algorithm.
+func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 {
+ preorder[i] = v
+ v.dom.pre = i // For now: DFS preorder of spanning tree of CFG
+ i++
+ lt.sdom[v.Index] = v
+ lt.link(nil, v)
+ for _, w := range v.Succs {
+ if lt.sdom[w.Index] == nil {
+ lt.parent[w.Index] = v
+ i = lt.dfs(w, i, preorder)
+ }
+ }
+ return i
+}
+
+// eval implements the EVAL part of the LT algorithm.
+func (lt *ltState) eval(v *BasicBlock) *BasicBlock {
+ // TODO(adonovan): opt: do path compression per simple LT.
+ u := v
+ for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] {
+ if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre {
+ u = v
+ }
+ }
+ return u
+}
+
+// link implements the LINK part of the LT algorithm.
+func (lt *ltState) link(v, w *BasicBlock) {
+ lt.ancestor[w.Index] = v
+}
+
+// buildDomTree computes the dominator tree of f using the LT algorithm.
+// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
+//
+func buildDomTree(f *Function) {
+ // The step numbers refer to the original LT paper; the
+ // reordering is due to Georgiadis.
+
+ // Clear any previous domInfo.
+ for _, b := range f.Blocks {
+ b.dom = domInfo{}
+ }
+
+ n := len(f.Blocks)
+ // Allocate space for 5 contiguous [n]*BasicBlock arrays:
+ // sdom, parent, ancestor, preorder, buckets.
+ space := make([]*BasicBlock, 5*n, 5*n)
+ lt := ltState{
+ sdom: space[0:n],
+ parent: space[n : 2*n],
+ ancestor: space[2*n : 3*n],
+ }
+
+ // Step 1. Number vertices by depth-first preorder.
+ preorder := space[3*n : 4*n]
+ root := f.Blocks[0]
+ prenum := lt.dfs(root, 0, preorder)
+ recover := f.Recover
+ if recover != nil {
+ lt.dfs(recover, prenum, preorder)
+ }
+
+ buckets := space[4*n : 5*n]
+ copy(buckets, preorder)
+
+ // In reverse preorder...
+ for i := int32(n) - 1; i > 0; i-- {
+ w := preorder[i]
+
+ // Step 3. Implicitly define the immediate dominator of each node.
+ for v := buckets[i]; v != w; v = buckets[v.dom.pre] {
+ u := lt.eval(v)
+ if lt.sdom[u.Index].dom.pre < i {
+ v.dom.idom = u
+ } else {
+ v.dom.idom = w
+ }
+ }
+
+ // Step 2. Compute the semidominators of all nodes.
+ lt.sdom[w.Index] = lt.parent[w.Index]
+ for _, v := range w.Preds {
+ u := lt.eval(v)
+ if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre {
+ lt.sdom[w.Index] = lt.sdom[u.Index]
+ }
+ }
+
+ lt.link(lt.parent[w.Index], w)
+
+ if lt.parent[w.Index] == lt.sdom[w.Index] {
+ w.dom.idom = lt.parent[w.Index]
+ } else {
+ buckets[i] = buckets[lt.sdom[w.Index].dom.pre]
+ buckets[lt.sdom[w.Index].dom.pre] = w
+ }
+ }
+
+ // The final 'Step 3' is now outside the loop.
+ for v := buckets[0]; v != root; v = buckets[v.dom.pre] {
+ v.dom.idom = root
+ }
+
+ // Step 4. Explicitly define the immediate dominator of each
+ // node, in preorder.
+ for _, w := range preorder[1:] {
+ if w == root || w == recover {
+ w.dom.idom = nil
+ } else {
+ if w.dom.idom != lt.sdom[w.Index] {
+ w.dom.idom = w.dom.idom.dom.idom
+ }
+ // Calculate Children relation as inverse of Idom.
+ w.dom.idom.dom.children = append(w.dom.idom.dom.children, w)
+ }
+ }
+
+ pre, post := numberDomTree(root, 0, 0)
+ if recover != nil {
+ numberDomTree(recover, pre, post)
+ }
+
+ // printDomTreeDot(os.Stderr, f) // debugging
+ // printDomTreeText(os.Stderr, root, 0) // debugging
+
+ if f.Prog.mode&SanityCheckFunctions != 0 {
+ sanityCheckDomTree(f)
+ }
+}
+
+// numberDomTree sets the pre- and post-order numbers of a depth-first
+// traversal of the dominator tree rooted at v. These are used to
+// answer dominance queries in constant time.
+//
+func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
+ v.dom.pre = pre
+ pre++
+ for _, child := range v.dom.children {
+ pre, post = numberDomTree(child, pre, post)
+ }
+ v.dom.post = post
+ post++
+ return pre, post
+}
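+
+// For example, for a dominator tree root→{x, y} with x→z
+// (hypothetical names), the traversal assigns:
+//
+//	        pre  post
+//	root      0     3
+//	x         1     1
+//	z         2     0
+//	y         3     2
+//
+// so x.Dominates(z) holds (1 <= 2 && 0 <= 1) but y.Dominates(z)
+// does not (3 > 2).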
+
+// Testing utilities ----------------------------------------
+
+// sanityCheckDomTree checks the correctness of the dominator tree
+// computed by the LT algorithm by comparing against the dominance
+// relation computed by a naive Kildall-style forward dataflow
+// analysis (Algorithm 10.16 from the "Dragon" book).
+//
+func sanityCheckDomTree(f *Function) {
+ n := len(f.Blocks)
+
+ // D[i] is the set of blocks that dominate f.Blocks[i],
+ // represented as a bit-set of block indices.
+ D := make([]big.Int, n)
+
+ one := big.NewInt(1)
+
+ // all is the set of all blocks; constant.
+ var all big.Int
+ all.Set(one).Lsh(&all, uint(n)).Sub(&all, one)
+
+ // Initialization.
+ for i, b := range f.Blocks {
+ if i == 0 || b == f.Recover {
+ // A root is dominated only by itself.
+ D[i].SetBit(&D[i], i, 1)
+ } else {
+ // All other blocks are (initially) dominated
+ // by every block.
+ D[i].Set(&all)
+ }
+ }
+
+ // Iteration until fixed point.
+ for changed := true; changed; {
+ changed = false
+ for i, b := range f.Blocks {
+ if i == 0 || b == f.Recover {
+ continue
+ }
+ // Compute intersection across predecessors.
+ var x big.Int
+ x.Set(&all)
+ for _, pred := range b.Preds {
+ x.And(&x, &D[pred.Index])
+ }
+ x.SetBit(&x, i, 1) // a block always dominates itself.
+ if D[i].Cmp(&x) != 0 {
+ D[i].Set(&x)
+ changed = true
+ }
+ }
+ }
+
+ // Check the entire relation. O(n^2).
+ // The Recover block (if any) must be treated specially, so we skip it.
+ ok := true
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ b, c := f.Blocks[i], f.Blocks[j]
+ if c == f.Recover {
+ continue
+ }
+ actual := b.Dominates(c)
+ expected := D[j].Bit(i) == 1
+ if actual != expected {
+ fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected)
+ ok = false
+ }
+ }
+ }
+
+ preorder := f.DomPreorder()
+ for _, b := range f.Blocks {
+ if got := preorder[b.dom.pre]; got != b {
+ fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b)
+ ok = false
+ }
+ }
+
+ if !ok {
+ panic("sanityCheckDomTree failed for " + f.String())
+ }
+}
+
+// Printing functions ----------------------------------------
+
+// printDomTree prints the dominator tree as text, using indentation.
+func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
+ fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
+ for _, child := range v.dom.children {
+ printDomTreeText(buf, child, indent+1)
+ }
+}
+
+// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
+// (.dot) format.
+func printDomTreeDot(buf *bytes.Buffer, f *Function) {
+ fmt.Fprintln(buf, "//", f)
+ fmt.Fprintln(buf, "digraph domtree {")
+ for i, b := range f.Blocks {
+ v := b.dom
+ fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
+ // TODO(adonovan): improve appearance of edges
+ // belonging to both dominator tree and CFG.
+
+ // Dominator tree edge.
+ if i != 0 {
+ fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre)
+ }
+ // CFG edges.
+ for _, pred := range b.Preds {
+ fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre)
+ }
+ }
+ fmt.Fprintln(buf, "}")
+}
diff --git a/vendor/honnef.co/go/tools/ssa/emit.go b/vendor/honnef.co/go/tools/ssa/emit.go
new file mode 100644
index 000000000..1036988ad
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/emit.go
@@ -0,0 +1,468 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Helpers for emitting SSA instructions.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// emitNew emits to f a new (heap Alloc) instruction allocating an
+// object of type typ. pos is the optional source location.
+//
+func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
+ v := &Alloc{Heap: true}
+ v.setType(types.NewPointer(typ))
+ v.setPos(pos)
+ f.emit(v)
+ return v
+}
+
+// emitLoad emits to f an instruction to load the address addr into a
+// new temporary, and returns the value so defined.
+//
+func emitLoad(f *Function, addr Value) *UnOp {
+ v := &UnOp{Op: token.MUL, X: addr}
+ v.setType(deref(addr.Type()))
+ f.emit(v)
+ return v
+}
+
+// emitDebugRef emits to f a DebugRef pseudo-instruction associating
+// expression e with value v.
+//
+func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
+ if !f.debugInfo() {
+ return // debugging not enabled
+ }
+ if v == nil || e == nil {
+ panic("nil")
+ }
+ var obj types.Object
+ e = unparen(e)
+ if id, ok := e.(*ast.Ident); ok {
+ if isBlankIdent(id) {
+ return
+ }
+ obj = f.Pkg.objectOf(id)
+ switch obj.(type) {
+ case *types.Nil, *types.Const, *types.Builtin:
+ return
+ }
+ }
+ f.emit(&DebugRef{
+ X: v,
+ Expr: e,
+ IsAddr: isAddr,
+ object: obj,
+ })
+}
+
+// emitArith emits to f code to compute the binary operation op(x, y)
+// where op is an eager shift, logical or arithmetic operation.
+// (Use emitCompare() for comparisons and Builder.logicalBinop() for
+// non-eager operations.)
+//
+func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value {
+ switch op {
+ case token.SHL, token.SHR:
+ x = emitConv(f, x, t)
+ // y may be signed or an 'untyped' constant.
+ // TODO(adonovan): whence signed values?
+ if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 {
+ y = emitConv(f, y, types.Typ[types.Uint64])
+ }
+
+ case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
+ x = emitConv(f, x, t)
+ y = emitConv(f, y, t)
+
+ default:
+ panic("illegal op in emitArith: " + op.String())
+
+ }
+ v := &BinOp{
+ Op: op,
+ X: x,
+ Y: y,
+ }
+ v.setPos(pos)
+ v.setType(t)
+ return f.emit(v)
+}
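+
+// For example, emitArith(f, token.ADD, x, y, t, pos) converts both
+// operands to t and appends a single BinOp, which the disassembly
+// renders as (register names illustrative):
+//
+//	t2 = t0 + t1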
+
+// emitCompare emits to f code to compute the boolean result of the
+// comparison 'x op y'.
+//
+func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
+ xt := x.Type().Underlying()
+ yt := y.Type().Underlying()
+
+ // Special case to optimise a tagless SwitchStmt so that
+ // these are equivalent
+ // switch { case e: ...}
+ // switch true { case e: ... }
+ // if e==true { ... }
+ // even in the case when e's type is an interface.
+ // TODO(adonovan): opt: generalise to x==true, false!=y, etc.
+ if x == vTrue && op == token.EQL {
+ if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 {
+ return y
+ }
+ }
+
+ if types.Identical(xt, yt) {
+ // no conversion necessary
+ } else if _, ok := xt.(*types.Interface); ok {
+ y = emitConv(f, y, x.Type())
+ } else if _, ok := yt.(*types.Interface); ok {
+ x = emitConv(f, x, y.Type())
+ } else if _, ok := x.(*Const); ok {
+ x = emitConv(f, x, y.Type())
+ } else if _, ok := y.(*Const); ok {
+ y = emitConv(f, y, x.Type())
+ } else {
+ // other cases, e.g. channels. No-op.
+ }
+
+ v := &BinOp{
+ Op: op,
+ X: x,
+ Y: y,
+ }
+ v.setPos(pos)
+ v.setType(tBool)
+ return f.emit(v)
+}
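+
+// For example, comparing an interface-typed x against a concrete y
+// first converts y to x's interface type, so x == y lowers to roughly
+// (illustrative disassembly):
+//
+//	t0 = make interface{} <- int (y)
+//	t1 = x == t0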
+
+// isValuePreserving returns true if a conversion from ut_src to
+// ut_dst is value-preserving, i.e. just a change of type.
+// Precondition: neither argument is a named type.
+//
+func isValuePreserving(ut_src, ut_dst types.Type) bool {
+ // Identical underlying types?
+ if structTypesIdentical(ut_dst, ut_src) {
+ return true
+ }
+
+ switch ut_dst.(type) {
+ case *types.Chan:
+ // Conversion between channel types?
+ _, ok := ut_src.(*types.Chan)
+ return ok
+
+ case *types.Pointer:
+ // Conversion between pointers with identical base types?
+ _, ok := ut_src.(*types.Pointer)
+ return ok
+ }
+ return false
+}
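+
+// For example (hypothetical types), a conversion between two named
+// types with the same underlying type is value-preserving, so emitConv
+// emits a ChangeType rather than a Convert:
+//
+//	type Celsius float64
+//	type Fahrenheit float64
+//	// Fahrenheit(c) changes the type but not the representation.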
+
+// emitConv emits to f code to convert Value val to exactly type typ,
+// and returns the converted value. Implicit conversions are required
+// by language assignability rules in assignments, parameter passing,
+// etc. Conversions cannot fail dynamically.
+//
+func emitConv(f *Function, val Value, typ types.Type) Value {
+ t_src := val.Type()
+
+ // Identical types? Conversion is a no-op.
+ if types.Identical(t_src, typ) {
+ return val
+ }
+
+ ut_dst := typ.Underlying()
+ ut_src := t_src.Underlying()
+
+ // Just a change of type, but not value or representation?
+ if isValuePreserving(ut_src, ut_dst) {
+ c := &ChangeType{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
+ // Conversion to, or construction of a value of, an interface type?
+ if _, ok := ut_dst.(*types.Interface); ok {
+ // Assignment from one interface type to another?
+ if _, ok := ut_src.(*types.Interface); ok {
+ c := &ChangeInterface{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
+ // Untyped nil constant? Return interface-typed nil constant.
+ if ut_src == tUntypedNil {
+ return nilConst(typ)
+ }
+
+ // Convert (non-nil) "untyped" literals to their default type.
+ if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 {
+ val = emitConv(f, val, DefaultType(ut_src))
+ }
+
+ f.Pkg.Prog.needMethodsOf(val.Type())
+ mi := &MakeInterface{X: val}
+ mi.setType(typ)
+ return f.emit(mi)
+ }
+
+ // Conversion of a compile-time constant value?
+ if c, ok := val.(*Const); ok {
+ if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
+ // Conversion of a compile-time constant to
+ // another constant type results in a new
+ // constant of the destination type and
+ // (initially) the same abstract value.
+ // We don't truncate the value yet.
+ return NewConst(c.Value, typ)
+ }
+
+ // We're converting from constant to non-constant type,
+ // e.g. string -> []byte/[]rune.
+ }
+
+ // A representation-changing conversion?
+ // At least one of {ut_src,ut_dst} must be *Basic.
+ // (The other may be []byte or []rune.)
+ _, ok1 := ut_src.(*types.Basic)
+ _, ok2 := ut_dst.(*types.Basic)
+ if ok1 || ok2 {
+ c := &Convert{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
+ panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
+}
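+
+// In summary, emitConv dispatches roughly as follows (a guide to the
+// cases above, not an exhaustive specification):
+//
+//	identical types        -> no instruction
+//	same representation    -> ChangeType
+//	iface -> iface         -> ChangeInterface
+//	concrete -> iface      -> MakeInterface
+//	const -> basic or nil  -> new Const, no instruction
+//	anything else          -> Convert (e.g. string -> []byte)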
+
+// emitStore emits to f an instruction to store value val at location
+// addr, applying implicit conversions as required by assignability rules.
+//
+func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
+ s := &Store{
+ Addr: addr,
+ Val: emitConv(f, val, deref(addr.Type())),
+ pos: pos,
+ }
+ f.emit(s)
+ return s
+}
+
+// emitJump emits to f a jump to target, and updates the control-flow graph.
+// Postcondition: f.currentBlock is nil.
+//
+func emitJump(f *Function, target *BasicBlock) {
+ b := f.currentBlock
+ b.emit(new(Jump))
+ addEdge(b, target)
+ f.currentBlock = nil
+}
+
+// emitIf emits to f a conditional jump to tblock or fblock based on
+// cond, and updates the control-flow graph.
+// Postcondition: f.currentBlock is nil.
+//
+func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) {
+ b := f.currentBlock
+ b.emit(&If{Cond: cond})
+ addEdge(b, tblock)
+ addEdge(b, fblock)
+ f.currentBlock = nil
+}
+
+// emitExtract emits to f an instruction to extract the index'th
+// component of tuple. It returns the extracted value.
+//
+func emitExtract(f *Function, tuple Value, index int) Value {
+ e := &Extract{Tuple: tuple, Index: index}
+ e.setType(tuple.Type().(*types.Tuple).At(index).Type())
+ return f.emit(e)
+}
+
+// emitTypeAssert emits to f a type assertion value := x.(t) and
+// returns the value. x.Type() must be an interface.
+//
+func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value {
+ a := &TypeAssert{X: x, AssertedType: t}
+ a.setPos(pos)
+ a.setType(t)
+ return f.emit(a)
+}
+
+// emitTypeTest emits to f a type test value,ok := x.(t) and returns
+// a (value, ok) tuple. x.Type() must be an interface.
+//
+func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value {
+ a := &TypeAssert{
+ X: x,
+ AssertedType: t,
+ CommaOk: true,
+ }
+ a.setPos(pos)
+ a.setType(types.NewTuple(
+ newVar("value", t),
+ varOk,
+ ))
+ return f.emit(a)
+}
+
+// emitTailCall emits to f a function call in tail position. The
+// caller is responsible for all fields of 'call' except its type.
+// Intended for wrapper methods.
+// Precondition: f does/will not use deferred procedure calls.
+// Postcondition: f.currentBlock is nil.
+//
+func emitTailCall(f *Function, call *Call) {
+ tresults := f.Signature.Results()
+ nr := tresults.Len()
+ if nr == 1 {
+ call.typ = tresults.At(0).Type()
+ } else {
+ call.typ = tresults
+ }
+ tuple := f.emit(call)
+ var ret Return
+ switch nr {
+ case 0:
+ // no-op
+ case 1:
+ ret.Results = []Value{tuple}
+ default:
+ for i := 0; i < nr; i++ {
+ v := emitExtract(f, tuple, i)
+ // TODO(adonovan): in principle, this is required:
+ // v = emitConv(f, o.Type, f.Signature.Results[i].Type)
+ // but in practice emitTailCall is only used when
+ // the types exactly match.
+ ret.Results = append(ret.Results, v)
+ }
+ }
+ f.emit(&ret)
+ f.currentBlock = nil
+}
+
+// emitImplicitSelections emits to f code to apply the sequence of
+// implicit field selections specified by indices to base value v, and
+// returns the selected value.
+//
+// If v is the address of a struct, the result will be the address of
+// a field; if it is the value of a struct, the result will be the
+// value of a field.
+//
+func emitImplicitSelections(f *Function, v Value, indices []int) Value {
+ for _, index := range indices {
+ fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+
+ if isPointer(v.Type()) {
+ instr := &FieldAddr{
+ X: v,
+ Field: index,
+ }
+ instr.setType(types.NewPointer(fld.Type()))
+ v = f.emit(instr)
+ // Load the field's value iff indirectly embedded.
+ if isPointer(fld.Type()) {
+ v = emitLoad(f, v)
+ }
+ } else {
+ instr := &Field{
+ X: v,
+ Field: index,
+ }
+ instr.setType(fld.Type())
+ v = f.emit(instr)
+ }
+ }
+ return v
+}
+
+// emitFieldSelection emits to f code to select the index'th field of v.
+//
+// If wantAddr, the input must be a pointer-to-struct and the result
+// will be the field's address; otherwise the result will be the
+// field's value.
+// Ident id is used for position and debug info.
+//
+func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
+ fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+ if isPointer(v.Type()) {
+ instr := &FieldAddr{
+ X: v,
+ Field: index,
+ }
+ instr.setPos(id.Pos())
+ instr.setType(types.NewPointer(fld.Type()))
+ v = f.emit(instr)
+ // Load the field's value iff we don't want its address.
+ if !wantAddr {
+ v = emitLoad(f, v)
+ }
+ } else {
+ instr := &Field{
+ X: v,
+ Field: index,
+ }
+ instr.setPos(id.Pos())
+ instr.setType(fld.Type())
+ v = f.emit(instr)
+ }
+ emitDebugRef(f, id, v, wantAddr)
+ return v
+}
+
+// zeroValue emits to f code to produce a zero value of type t,
+// and returns it.
+//
+func zeroValue(f *Function, t types.Type) Value {
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return emitLoad(f, f.addLocal(t, token.NoPos))
+ default:
+ return zeroConst(t)
+ }
+}
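+
+// For example, zeroValue yields the constant 0:int directly, but for a
+// struct or array type T it emits a fresh, implicitly zeroed local and
+// a load (illustrative disassembly):
+//
+//	t0 = local T
+//	t1 = *t0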
+
+// createRecoverBlock emits to f a block of code to return after a
+// recovered panic, and sets f.Recover to it.
+//
+// If f's result parameters are named, the code loads and returns
+// their current values, otherwise it returns the zero values of their
+// type.
+//
+// Idempotent.
+//
+func createRecoverBlock(f *Function) {
+ if f.Recover != nil {
+ return // already created
+ }
+ saved := f.currentBlock
+
+ f.Recover = f.newBasicBlock("recover")
+ f.currentBlock = f.Recover
+
+ var results []Value
+ if f.namedResults != nil {
+ // Reload NRPs to form value tuple.
+ for _, r := range f.namedResults {
+ results = append(results, emitLoad(f, r))
+ }
+ } else {
+ R := f.Signature.Results()
+ for i, n := 0, R.Len(); i < n; i++ {
+ T := R.At(i).Type()
+
+ // Return zero value of each result type.
+ results = append(results, zeroValue(f, T))
+ }
+ }
+ f.emit(&Return{Results: results})
+
+ f.currentBlock = saved
+}
diff --git a/vendor/honnef.co/go/tools/ssa/func.go b/vendor/honnef.co/go/tools/ssa/func.go
new file mode 100644
index 000000000..53635ba01
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/func.go
@@ -0,0 +1,701 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the Function and BasicBlock types.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "strings"
+)
+
+// addEdge adds a control-flow graph edge from from to to.
+func addEdge(from, to *BasicBlock) {
+ from.Succs = append(from.Succs, to)
+ to.Preds = append(to.Preds, from)
+}
+
+// Parent returns the function that contains block b.
+func (b *BasicBlock) Parent() *Function { return b.parent }
+
+// String returns a human-readable label of this block.
+// It is not guaranteed unique within the function.
+//
+func (b *BasicBlock) String() string {
+ return fmt.Sprintf("%d", b.Index)
+}
+
+// emit appends an instruction to the current basic block.
+// If the instruction defines a Value, it is returned.
+//
+func (b *BasicBlock) emit(i Instruction) Value {
+ i.setBlock(b)
+ b.Instrs = append(b.Instrs, i)
+ v, _ := i.(Value)
+ return v
+}
+
+// predIndex returns the i such that b.Preds[i] == c or panics if
+// there is none.
+func (b *BasicBlock) predIndex(c *BasicBlock) int {
+ for i, pred := range b.Preds {
+ if pred == c {
+ return i
+ }
+ }
+ panic(fmt.Sprintf("no edge %s -> %s", c, b))
+}
+
+// hasPhi returns true if b.Instrs contains φ-nodes.
+func (b *BasicBlock) hasPhi() bool {
+ _, ok := b.Instrs[0].(*Phi)
+ return ok
+}
+
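+// Phis returns the prefix of b.Instrs containing all the block's φ-nodes.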
+func (b *BasicBlock) Phis() []Instruction {
+ return b.phis()
+}
+
+// phis returns the prefix of b.Instrs containing all the block's φ-nodes.
+func (b *BasicBlock) phis() []Instruction {
+ for i, instr := range b.Instrs {
+ if _, ok := instr.(*Phi); !ok {
+ return b.Instrs[:i]
+ }
+ }
+ return nil // unreachable in well-formed blocks
+}
+
+// replacePred replaces all occurrences of p in b's predecessor list with q.
+// Ordinarily there should be at most one.
+//
+func (b *BasicBlock) replacePred(p, q *BasicBlock) {
+ for i, pred := range b.Preds {
+ if pred == p {
+ b.Preds[i] = q
+ }
+ }
+}
+
+// replaceSucc replaces all occurrences of p in b's successor list with q.
+// Ordinarily there should be at most one.
+//
+func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
+ for i, succ := range b.Succs {
+ if succ == p {
+ b.Succs[i] = q
+ }
+ }
+}
+
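+// RemovePred removes all occurrences of p in b's predecessor list and
+// φ-nodes. Ordinarily there should be at most one.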
+func (b *BasicBlock) RemovePred(p *BasicBlock) {
+ b.removePred(p)
+}
+
+// removePred removes all occurrences of p in b's
+// predecessor list and φ-nodes.
+// Ordinarily there should be at most one.
+//
+func (b *BasicBlock) removePred(p *BasicBlock) {
+ phis := b.phis()
+
+ // We must preserve edge order for φ-nodes.
+ j := 0
+ for i, pred := range b.Preds {
+ if pred != p {
+ b.Preds[j] = b.Preds[i]
+ // Strike out φ-edge too.
+ for _, instr := range phis {
+ phi := instr.(*Phi)
+ phi.Edges[j] = phi.Edges[i]
+ }
+ j++
+ }
+ }
+ // Nil out b.Preds[j:] and φ-edges[j:] to aid GC.
+ for i := j; i < len(b.Preds); i++ {
+ b.Preds[i] = nil
+ for _, instr := range phis {
+ instr.(*Phi).Edges[i] = nil
+ }
+ }
+ b.Preds = b.Preds[:j]
+ for _, instr := range phis {
+ phi := instr.(*Phi)
+ phi.Edges = phi.Edges[:j]
+ }
+}
+
+// Destinations associated with unlabelled for/switch/select stmts.
+// We push/pop one of these as we enter/leave each construct and for
+// each BranchStmt we scan for the innermost target of the right type.
+//
+type targets struct {
+ tail *targets // rest of stack
+ _break *BasicBlock
+ _continue *BasicBlock
+ _fallthrough *BasicBlock
+}
+
+// Destinations associated with a labelled block.
+// We populate these as labels are encountered in forward gotos or
+// labelled statements.
+//
+type lblock struct {
+ _goto *BasicBlock
+ _break *BasicBlock
+ _continue *BasicBlock
+}
+
+// labelledBlock returns the branch target associated with the
+// specified label, creating it if needed.
+//
+func (f *Function) labelledBlock(label *ast.Ident) *lblock {
+ lb := f.lblocks[label.Obj]
+ if lb == nil {
+ lb = &lblock{_goto: f.newBasicBlock(label.Name)}
+ if f.lblocks == nil {
+ f.lblocks = make(map[*ast.Object]*lblock)
+ }
+ f.lblocks[label.Obj] = lb
+ }
+ return lb
+}
+
+// addParam adds a (non-escaping) parameter to f.Params of the
+// specified name, type and source position.
+//
+func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter {
+ v := &Parameter{
+ name: name,
+ typ: typ,
+ pos: pos,
+ parent: f,
+ }
+ f.Params = append(f.Params, v)
+ return v
+}
+
+func (f *Function) addParamObj(obj types.Object) *Parameter {
+ name := obj.Name()
+ if name == "" {
+ name = fmt.Sprintf("arg%d", len(f.Params))
+ }
+ param := f.addParam(name, obj.Type(), obj.Pos())
+ param.object = obj
+ return param
+}
+
+// addSpilledParam declares a parameter that is pre-spilled to the
+// stack; the function body will load/store the spilled location.
+// Subsequent lifting will eliminate spills where possible.
+//
+func (f *Function) addSpilledParam(obj types.Object) {
+ param := f.addParamObj(obj)
+ spill := &Alloc{Comment: obj.Name()}
+ spill.setType(types.NewPointer(obj.Type()))
+ spill.setPos(obj.Pos())
+ f.objects[obj] = spill
+ f.Locals = append(f.Locals, spill)
+ f.emit(spill)
+ f.emit(&Store{Addr: spill, Val: param})
+}
+
+// startBody initializes the function prior to generating SSA code for its body.
+// Precondition: f.Type() already set.
+//
+func (f *Function) startBody() {
+ f.currentBlock = f.newBasicBlock("entry")
+ f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init
+}
+
+// createSyntacticParams populates f.Params and generates code (spills
+// and named result locals) for all the parameters declared in the
+// syntax. In addition it populates the f.objects mapping.
+//
+// Preconditions:
+// f.startBody() was called.
+// Postcondition:
+// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0)
+//
+func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) {
+ // Receiver (at most one inner iteration).
+ if recv != nil {
+ for _, field := range recv.List {
+ for _, n := range field.Names {
+ f.addSpilledParam(f.Pkg.info.Defs[n])
+ }
+ // Anonymous receiver? No need to spill.
+ if field.Names == nil {
+ f.addParamObj(f.Signature.Recv())
+ }
+ }
+ }
+
+ // Parameters.
+ if functype.Params != nil {
+ n := len(f.Params) // 1 if has recv, 0 otherwise
+ for _, field := range functype.Params.List {
+ for _, n := range field.Names {
+ f.addSpilledParam(f.Pkg.info.Defs[n])
+ }
+ // Anonymous parameter? No need to spill.
+ if field.Names == nil {
+ f.addParamObj(f.Signature.Params().At(len(f.Params) - n))
+ }
+ }
+ }
+
+ // Named results.
+ if functype.Results != nil {
+ for _, field := range functype.Results.List {
+ // Implicit "var" decl of locals for named results.
+ for _, n := range field.Names {
+ f.namedResults = append(f.namedResults, f.addLocalForIdent(n))
+ }
+ }
+ }
+}
+
+// numberRegisters assigns numbers to all SSA registers
+// (value-defining Instructions) in f, to aid debugging.
+// (Non-Instruction Values are named at construction.)
+//
+func numberRegisters(f *Function) {
+ v := 0
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ switch instr.(type) {
+ case Value:
+ instr.(interface {
+ setNum(int)
+ }).setNum(v)
+ v++
+ }
+ }
+ }
+}
+
+// buildReferrers populates the def/use information in all non-nil
+// Value.Referrers slices.
+// Precondition: all such slices are initially empty.
+func buildReferrers(f *Function) {
+ var rands []*Value
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ rands = instr.Operands(rands[:0]) // recycle storage
+ for _, rand := range rands {
+ if r := *rand; r != nil {
+ if ref := r.Referrers(); ref != nil {
+ *ref = append(*ref, instr)
+ }
+ }
+ }
+ }
+ }
+}
+
+// finishBody() finalizes the function after SSA code generation of its body.
+func (f *Function) finishBody() {
+ f.objects = nil
+ f.currentBlock = nil
+ f.lblocks = nil
+
+ // Don't pin the AST in memory (except in debug mode).
+ if n := f.syntax; n != nil && !f.debugInfo() {
+ f.syntax = extentNode{n.Pos(), n.End()}
+ }
+
+ // Remove from f.Locals any Allocs that escape to the heap.
+ j := 0
+ for _, l := range f.Locals {
+ if !l.Heap {
+ f.Locals[j] = l
+ j++
+ }
+ }
+ // Nil out f.Locals[j:] to aid GC.
+ for i := j; i < len(f.Locals); i++ {
+ f.Locals[i] = nil
+ }
+ f.Locals = f.Locals[:j]
+
+ optimizeBlocks(f)
+
+ buildReferrers(f)
+
+ buildDomTree(f)
+
+ if f.Prog.mode&NaiveForm == 0 {
+ // For debugging pre-state of lifting pass:
+ // numberRegisters(f)
+ // f.WriteTo(os.Stderr)
+ lift(f)
+ }
+
+ f.namedResults = nil // (used by lifting)
+
+ numberRegisters(f)
+
+ if f.Prog.mode&PrintFunctions != 0 {
+ printMu.Lock()
+ f.WriteTo(os.Stdout)
+ printMu.Unlock()
+ }
+
+ if f.Prog.mode&SanityCheckFunctions != 0 {
+ mustSanityCheck(f, nil)
+ }
+}
+
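+// RemoveNilBlocks eliminates nils from f.Blocks and updates each
+// BasicBlock.Index. Use this after any pass that may delete blocks.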
+func (f *Function) RemoveNilBlocks() {
+ f.removeNilBlocks()
+}
+
+// removeNilBlocks eliminates nils from f.Blocks and updates each
+// BasicBlock.Index. Use this after any pass that may delete blocks.
+//
+func (f *Function) removeNilBlocks() {
+ j := 0
+ for _, b := range f.Blocks {
+ if b != nil {
+ b.Index = j
+ f.Blocks[j] = b
+ j++
+ }
+ }
+ // Nil out f.Blocks[j:] to aid GC.
+ for i := j; i < len(f.Blocks); i++ {
+ f.Blocks[i] = nil
+ }
+ f.Blocks = f.Blocks[:j]
+}
+
+// SetDebugMode sets the debug mode for package pkg. If true, all its
+// functions will include full debug info. This greatly increases the
+// size of the instruction stream, and causes Functions to depend upon
+// the ASTs, potentially keeping them live in memory for longer.
+//
+func (pkg *Package) SetDebugMode(debug bool) {
+ // TODO(adonovan): do we want ast.File granularity?
+ pkg.debug = debug
+}
+
+// debugInfo reports whether debug info is wanted for this function.
+func (f *Function) debugInfo() bool {
+ return f.Pkg != nil && f.Pkg.debug
+}
+
+// addNamedLocal creates a local variable, adds it to function f and
+// returns it. Its name and type are taken from obj. Subsequent
+// calls to f.lookup(obj) will return the same local.
+//
+func (f *Function) addNamedLocal(obj types.Object) *Alloc {
+ l := f.addLocal(obj.Type(), obj.Pos())
+ l.Comment = obj.Name()
+ f.objects[obj] = l
+ return l
+}
+
+func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc {
+ return f.addNamedLocal(f.Pkg.info.Defs[id])
+}
+
+// addLocal creates an anonymous local variable of type typ, adds it
+// to function f and returns it. pos is the optional source location.
+//
+func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc {
+ v := &Alloc{}
+ v.setType(types.NewPointer(typ))
+ v.setPos(pos)
+ f.Locals = append(f.Locals, v)
+ f.emit(v)
+ return v
+}
+
+// lookup returns the address of the named variable identified by obj
+// that is local to function f or one of its enclosing functions.
+// If escaping, the reference comes from a potentially escaping pointer
+// expression and the referent must be heap-allocated.
+//
+func (f *Function) lookup(obj types.Object, escaping bool) Value {
+ if v, ok := f.objects[obj]; ok {
+ if alloc, ok := v.(*Alloc); ok && escaping {
+ alloc.Heap = true
+ }
+ return v // function-local var (address)
+ }
+
+ // Definition must be in an enclosing function;
+ // plumb it through intervening closures.
+ if f.parent == nil {
+ panic("no ssa.Value for " + obj.String())
+ }
+ outer := f.parent.lookup(obj, true) // escaping
+ v := &FreeVar{
+ name: obj.Name(),
+ typ: outer.Type(),
+ pos: outer.Pos(),
+ outer: outer,
+ parent: f,
+ }
+ f.objects[obj] = v
+ f.FreeVars = append(f.FreeVars, v)
+ return v
+}
+
+// emit emits the specified instruction to function f.
+func (f *Function) emit(instr Instruction) Value {
+ return f.currentBlock.emit(instr)
+}
+
+// RelString returns the full name of this function, qualified by
+// package name, receiver type, etc.
+//
+// The specific formatting rules are not guaranteed and may change.
+//
+// Examples:
+// "math.IsNaN" // a package-level function
+// "(*bytes.Buffer).Bytes" // a declared method or a wrapper
+// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
+// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
+// "main.main$1" // an anonymous function in main
+// "main.init#1" // a declared init function
+// "main.init" // the synthesized package initializer
+//
+// When these functions are referred to from within the same package
+// (i.e. from == f.Pkg.Pkg), they are rendered without the package path.
+// For example: "IsNaN", "(*Buffer).Bytes", etc.
+//
+// All non-synthetic functions have distinct package-qualified names.
+// (But two methods may have the same name "(T).f" if one is a synthetic
+// wrapper promoting a non-exported method "f" from another package; in
+// that case, the strings are equal but the identifiers "f" are distinct.)
+//
+func (f *Function) RelString(from *types.Package) string {
+ // Anonymous?
+ if f.parent != nil {
+ // An anonymous function's Name() looks like "parentName$1",
+ // but its String() should include the type/package/etc.
+ parent := f.parent.RelString(from)
+ for i, anon := range f.parent.AnonFuncs {
+ if anon == f {
+ return fmt.Sprintf("%s$%d", parent, 1+i)
+ }
+ }
+
+ return f.name // should never happen
+ }
+
+ // Method (declared or wrapper)?
+ if recv := f.Signature.Recv(); recv != nil {
+ return f.relMethod(from, recv.Type())
+ }
+
+ // Thunk?
+ if f.method != nil {
+ return f.relMethod(from, f.method.Recv())
+ }
+
+ // Bound?
+ if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") {
+ return f.relMethod(from, f.FreeVars[0].Type())
+ }
+
+ // Package-level function?
+ // Prefix with package name for cross-package references only.
+ if p := f.pkg(); p != nil && p != from {
+ return fmt.Sprintf("%s.%s", p.Path(), f.name)
+ }
+
+ // Unknown.
+ return f.name
+}
+
+func (f *Function) relMethod(from *types.Package, recv types.Type) string {
+ return fmt.Sprintf("(%s).%s", relType(recv, from), f.name)
+}
+
+// writeSignature writes to buf the signature sig in declaration syntax.
+func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) {
+ buf.WriteString("func ")
+ if recv := sig.Recv(); recv != nil {
+ buf.WriteString("(")
+ if n := params[0].Name(); n != "" {
+ buf.WriteString(n)
+ buf.WriteString(" ")
+ }
+ types.WriteType(buf, params[0].Type(), types.RelativeTo(from))
+ buf.WriteString(") ")
+ }
+ buf.WriteString(name)
+ types.WriteSignature(buf, sig, types.RelativeTo(from))
+}
+
+func (f *Function) pkg() *types.Package {
+ if f.Pkg != nil {
+ return f.Pkg.Pkg
+ }
+ return nil
+}
+
+var _ io.WriterTo = (*Function)(nil) // *Function implements io.WriterTo
+
+func (f *Function) WriteTo(w io.Writer) (int64, error) {
+ var buf bytes.Buffer
+ WriteFunction(&buf, f)
+ n, err := w.Write(buf.Bytes())
+ return int64(n), err
+}
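+
+// A typical use is simply (for any *Function f):
+//
+//	f.WriteTo(os.Stdout) // print f's SSA disassembly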
+
+// WriteFunction writes to buf a human-readable "disassembly" of f.
+func WriteFunction(buf *bytes.Buffer, f *Function) {
+ fmt.Fprintf(buf, "# Name: %s\n", f.String())
+ if f.Pkg != nil {
+ fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path())
+ }
+ if syn := f.Synthetic; syn != "" {
+ fmt.Fprintln(buf, "# Synthetic:", syn)
+ }
+ if pos := f.Pos(); pos.IsValid() {
+ fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos))
+ }
+
+ if f.parent != nil {
+ fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name())
+ }
+
+ if f.Recover != nil {
+ fmt.Fprintf(buf, "# Recover: %s\n", f.Recover)
+ }
+
+ from := f.pkg()
+
+ if f.FreeVars != nil {
+ buf.WriteString("# Free variables:\n")
+ for i, fv := range f.FreeVars {
+ fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from))
+ }
+ }
+
+ if len(f.Locals) > 0 {
+ buf.WriteString("# Locals:\n")
+ for i, l := range f.Locals {
+ fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from))
+ }
+ }
+ writeSignature(buf, from, f.Name(), f.Signature, f.Params)
+ buf.WriteString(":\n")
+
+ if f.Blocks == nil {
+ buf.WriteString("\t(external)\n")
+ }
+
+ // NB. column calculations are confused by non-ASCII
+ // characters and assume 8-space tabs.
+ const punchcard = 80 // for old time's sake.
+ const tabwidth = 8
+ for _, b := range f.Blocks {
+ if b == nil {
+ // Corrupt CFG.
+ fmt.Fprintf(buf, ".nil:\n")
+ continue
+ }
+ n, _ := fmt.Fprintf(buf, "%d:", b.Index)
+ bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs))
+ fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg)
+
+ if false { // CFG debugging
+ fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs)
+ }
+ for _, instr := range b.Instrs {
+ buf.WriteString("\t")
+ switch v := instr.(type) {
+ case Value:
+ l := punchcard - tabwidth
+ // Left-align the instruction.
+ if name := v.Name(); name != "" {
+ n, _ := fmt.Fprintf(buf, "%s = ", name)
+ l -= n
+ }
+ n, _ := buf.WriteString(instr.String())
+ l -= n
+ // Right-align the type if there's space.
+ if t := v.Type(); t != nil {
+ buf.WriteByte(' ')
+ ts := relType(t, from)
+ l -= len(ts) + len(" ") // (spaces before and after type)
+ if l > 0 {
+ fmt.Fprintf(buf, "%*s", l, "")
+ }
+ buf.WriteString(ts)
+ }
+ case nil:
+ // Be robust against bad transforms.
+ buf.WriteString("<deleted>")
+ default:
+ buf.WriteString(instr.String())
+ }
+ buf.WriteString("\n")
+ }
+ }
+ fmt.Fprintf(buf, "\n")
+}
+
+// newBasicBlock adds to f a new basic block and returns it. It does
+// not automatically become the current block for subsequent calls to emit.
+// comment is an optional string for more readable debugging output.
+//
+func (f *Function) newBasicBlock(comment string) *BasicBlock {
+ b := &BasicBlock{
+ Index: len(f.Blocks),
+ Comment: comment,
+ parent: f,
+ }
+ b.Succs = b.succs2[:0]
+ f.Blocks = append(f.Blocks, b)
+ return b
+}
+
+// NewFunction returns a new synthetic Function instance belonging to
+// prog, with its name and signature fields set as specified.
+//
+// The caller is responsible for initializing the remaining fields of
+// the function object, e.g. Pkg, Params, Blocks.
+//
+// It is practically impossible for clients to construct well-formed
+// SSA functions/packages/programs directly, so we assume this is the
+// job of the Builder alone. NewFunction exists to provide clients a
+// little flexibility. For example, analysis tools may wish to
+// construct fake Functions for the root of the callgraph, a fake
+// "reflect" package, etc.
+//
+// TODO(adonovan): think harder about the API here.
+//
+func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
+ return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
+}
+
+type extentNode [2]token.Pos
+
+func (n extentNode) Pos() token.Pos { return n[0] }
+func (n extentNode) End() token.Pos { return n[1] }
+
+// Syntax returns an ast.Node whose Pos/End methods provide the
+// lexical extent of the function if it was defined by Go source code
+// (f.Synthetic==""), or nil otherwise.
+//
+// If f was built with debug information (see Package.SetDebugMode),
+// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
+// function. Otherwise, it is an opaque Node providing only position
+// information; this avoids pinning the AST in memory.
+//
+func (f *Function) Syntax() ast.Node { return f.syntax }
diff --git a/vendor/honnef.co/go/tools/ssa/identical.go b/vendor/honnef.co/go/tools/ssa/identical.go
new file mode 100644
index 000000000..53cbee107
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/identical.go
@@ -0,0 +1,7 @@
+// +build go1.8
+
+package ssa
+
+import "go/types"
+
+var structTypesIdentical = types.IdenticalIgnoreTags
diff --git a/vendor/honnef.co/go/tools/ssa/identical_17.go b/vendor/honnef.co/go/tools/ssa/identical_17.go
new file mode 100644
index 000000000..da89d3339
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/identical_17.go
@@ -0,0 +1,7 @@
+// +build !go1.8
+
+package ssa
+
+import "go/types"
+
+var structTypesIdentical = types.Identical
diff --git a/vendor/honnef.co/go/tools/ssa/lift.go b/vendor/honnef.co/go/tools/ssa/lift.go
new file mode 100644
index 000000000..048e9b032
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/lift.go
@@ -0,0 +1,653 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the lifting pass which tries to "lift" Alloc
+// cells (new/local variables) into SSA registers, replacing loads
+// with the dominating stored value, eliminating loads and stores, and
+// inserting φ-nodes as needed.
+
+// Cited papers and resources:
+//
+// Ron Cytron et al. 1991. Efficiently computing SSA form...
+// http://doi.acm.org/10.1145/115372.115320
+//
+// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm.
+// Software Practice and Experience 2001, 4:1-10.
+// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+//
+// Daniel Berlin, llvmdev mailing list, 2012.
+// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html
+// (Be sure to expand the whole thread.)
+
+// TODO(adonovan): opt: there are many optimizations worth evaluating, and
+// the conventional wisdom for SSA construction is that a simple
+// algorithm well engineered often beats those of better asymptotic
+// complexity on all but the most egregious inputs.
+//
+// Danny Berlin suggests that the Cooper et al. algorithm for
+// computing the dominance frontier is superior to Cytron et al.
+// Furthermore he recommends that rather than computing the DF for the
+// whole function then renaming all alloc cells, it may be cheaper to
+// compute the DF for each alloc cell separately and throw it away.
+//
+// Consider exploiting liveness information to avoid creating dead
+// φ-nodes which we then immediately remove.
+//
+// Also see many other "TODO: opt" suggestions in the code.
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "math/big"
+ "os"
+)
+
+// If true, show diagnostic information at each step of lifting.
+// Very verbose.
+const debugLifting = false
+
+// domFrontier maps each block to the set of blocks in its dominance
+// frontier. The outer slice is conceptually a map keyed by
+// Block.Index. The inner slice is conceptually a set, possibly
+// containing duplicates.
+//
+// TODO(adonovan): opt: measure impact of dups; consider a packed bit
+// representation, e.g. big.Int, and bitwise parallel operations for
+// the union step in the Children loop.
+//
+// domFrontier's methods mutate the slice's elements but not its
+// length, so their receivers needn't be pointers.
+//
+type domFrontier [][]*BasicBlock
+
+func (df domFrontier) add(u, v *BasicBlock) {
+ p := &df[u.Index]
+ *p = append(*p, v)
+}
+
+// build builds the dominance frontier df for the dominator (sub)tree
+// rooted at u, using the Cytron et al. algorithm.
+//
+// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA
+// by pruning the entire IDF computation, rather than merely pruning
+// the DF -> IDF step.
+func (df domFrontier) build(u *BasicBlock) {
+ // Encounter each node u in postorder of dom tree.
+ for _, child := range u.dom.children {
+ df.build(child)
+ }
+ for _, vb := range u.Succs {
+ if v := vb.dom; v.idom != u {
+ df.add(u, vb)
+ }
+ }
+ for _, w := range u.dom.children {
+ for _, vb := range df[w.Index] {
+ // TODO(adonovan): opt: use word-parallel bitwise union.
+ if v := vb.dom; v.idom != u {
+ df.add(u, vb)
+ }
+ }
+ }
+}
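+
+// For example, in the diamond CFG {a→b, a→c, b→d, c→d}
+// (hypothetical names), idom(d) = a, so build computes:
+//
+//	df[a] = {}   df[b] = {d}   df[c] = {d}   df[d] = {}
+//
+// i.e. d is exactly where φ-nodes for cells stored in b or c belong.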
+
+func buildDomFrontier(fn *Function) domFrontier {
+ df := make(domFrontier, len(fn.Blocks))
+ df.build(fn.Blocks[0])
+ if fn.Recover != nil {
+ df.build(fn.Recover)
+ }
+ return df
+}
+
+func removeInstr(refs []Instruction, instr Instruction) []Instruction {
+ i := 0
+ for _, ref := range refs {
+ if ref == instr {
+ continue
+ }
+ refs[i] = ref
+ i++
+ }
+ for j := i; j != len(refs); j++ {
+ refs[j] = nil // aid GC
+ }
+ return refs[:i]
+}
+
+// lift replaces local and new Allocs accessed only with
+// load/store by SSA registers, inserting φ-nodes where necessary.
+// The result is a program in classical pruned SSA form.
+//
+// Preconditions:
+// - fn has no dead blocks (blockopt has run).
+// - Def/use info (Operands and Referrers) is up-to-date.
+// - The dominator tree is up-to-date.
+//
+func lift(fn *Function) {
+ // TODO(adonovan): opt: lots of little optimizations may be
+ // worthwhile here, especially if they cause us to avoid
+ // buildDomFrontier. For example:
+ //
+ // - Alloc never loaded? Eliminate.
+ // - Alloc never stored? Replace all loads with a zero constant.
+ // - Alloc stored once? Replace loads with dominating store;
+ // don't forget that an Alloc is itself an effective store
+ // of zero.
+ // - Alloc used only within a single block?
+ // Use degenerate algorithm avoiding φ-nodes.
+ // - Consider synergy with scalar replacement of aggregates (SRA).
+ // e.g. *(&x.f) where x is an Alloc.
+ // Perhaps we'd get better results if we generated this as x.f
+ // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)).
+ // Unclear.
+ //
+ // But we will start with the simplest correct code.
+ df := buildDomFrontier(fn)
+
+ if debugLifting {
+ title := false
+ for i, blocks := range df {
+ if blocks != nil {
+ if !title {
+ fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn)
+ title = true
+ }
+ fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks)
+ }
+ }
+ }
+
+ newPhis := make(newPhiMap)
+
+ // During this pass we will replace some BasicBlock.Instrs
+ // (allocs, loads and stores) with nil, keeping a count in
+ // BasicBlock.gaps. At the end we will reset Instrs to the
+ // concatenation of all non-dead newPhis and non-nil Instrs
+ // for the block, reusing the original array if space permits.
+
+ // While we're here, we also eliminate 'rundefers'
+ // instructions in functions that contain no 'defer'
+ // instructions.
+ usesDefer := false
+
+ // A counter used to generate ~unique ids for Phi nodes, as an
+ // aid to debugging. We use large numbers to make them highly
+ // visible. All nodes are renumbered later.
+ fresh := 1000
+
+ // Determine which allocs we can lift and number them densely.
+ // The renaming phase uses this numbering for compact maps.
+ numAllocs := 0
+ for _, b := range fn.Blocks {
+ b.gaps = 0
+ b.rundefers = 0
+ for _, instr := range b.Instrs {
+ switch instr := instr.(type) {
+ case *Alloc:
+ index := -1
+ if liftAlloc(df, instr, newPhis, &fresh) {
+ index = numAllocs
+ numAllocs++
+ }
+ instr.index = index
+ case *Defer:
+ usesDefer = true
+ case *RunDefers:
+ b.rundefers++
+ }
+ }
+ }
+
+ // renaming maps an alloc (keyed by index) to its replacement
+ // value. Initially the renaming contains nil, signifying the
+ // zero constant of the appropriate type; we construct the
+ // Const lazily at most once on each path through the domtree.
+ // TODO(adonovan): opt: cache per-function not per subtree.
+ renaming := make([]Value, numAllocs)
+
+ // Renaming.
+ rename(fn.Blocks[0], renaming, newPhis)
+
+ // Eliminate dead φ-nodes.
+ removeDeadPhis(fn.Blocks, newPhis)
+
+ // Prepend remaining live φ-nodes to each block.
+ for _, b := range fn.Blocks {
+ nps := newPhis[b]
+ j := len(nps)
+
+ rundefersToKill := b.rundefers
+ if usesDefer {
+ rundefersToKill = 0
+ }
+
+ if j+b.gaps+rundefersToKill == 0 {
+ continue // fast path: no new phis or gaps
+ }
+
+ // Compact nps + non-nil Instrs into a new slice.
+ // TODO(adonovan): opt: compact in situ (rightwards)
+ // if Instrs has sufficient space or slack.
+ dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill)
+ for i, np := range nps {
+ dst[i] = np.phi
+ }
+ for _, instr := range b.Instrs {
+ if instr == nil {
+ continue
+ }
+ if !usesDefer {
+ if _, ok := instr.(*RunDefers); ok {
+ continue
+ }
+ }
+ dst[j] = instr
+ j++
+ }
+ b.Instrs = dst
+ }
+
+ // Remove any fn.Locals that were lifted.
+ j := 0
+ for _, l := range fn.Locals {
+ if l.index < 0 {
+ fn.Locals[j] = l
+ j++
+ }
+ }
+ // Nil out fn.Locals[j:] to aid GC.
+ for i := j; i < len(fn.Locals); i++ {
+ fn.Locals[i] = nil
+ }
+ fn.Locals = fn.Locals[:j]
+}
+
+// removeDeadPhis removes φ-nodes not transitively needed by a
+// non-Phi, non-DebugRef instruction.
+func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) {
+ // First pass: find the set of "live" φ-nodes: those reachable
+ // from some non-Phi instruction.
+ //
+ // We compute reachability in reverse, starting from each φ,
+ // rather than forwards, starting from each live non-Phi
+ // instruction, because this way visits much less of the
+ // Value graph.
+ livePhis := make(map[*Phi]bool)
+ for _, npList := range newPhis {
+ for _, np := range npList {
+ phi := np.phi
+ if !livePhis[phi] && phiHasDirectReferrer(phi) {
+ markLivePhi(livePhis, phi)
+ }
+ }
+ }
+
+ // Existing φ-nodes due to && and || operators
+ // are all considered live (see Go issue 19622).
+ for _, b := range blocks {
+ for _, phi := range b.phis() {
+ markLivePhi(livePhis, phi.(*Phi))
+ }
+ }
+
+ // Second pass: eliminate unused phis from newPhis.
+ for block, npList := range newPhis {
+ j := 0
+ for _, np := range npList {
+ if livePhis[np.phi] {
+ npList[j] = np
+ j++
+ } else {
+ // discard it, first removing it from referrers
+ for _, val := range np.phi.Edges {
+ if refs := val.Referrers(); refs != nil {
+ *refs = removeInstr(*refs, np.phi)
+ }
+ }
+ np.phi.block = nil
+ }
+ }
+ newPhis[block] = npList[:j]
+ }
+}
+
+// markLivePhi marks phi, and all φ-nodes transitively reachable via
+// its Operands, live.
+func markLivePhi(livePhis map[*Phi]bool, phi *Phi) {
+ livePhis[phi] = true
+ for _, rand := range phi.Operands(nil) {
+ if q, ok := (*rand).(*Phi); ok {
+ if !livePhis[q] {
+ markLivePhi(livePhis, q)
+ }
+ }
+ }
+}
+
+// phiHasDirectReferrer reports whether phi is directly referred to by
+// a non-Phi instruction. Such instructions are the
+// roots of the liveness traversal.
+func phiHasDirectReferrer(phi *Phi) bool {
+ for _, instr := range *phi.Referrers() {
+ if _, ok := instr.(*Phi); !ok {
+ return true
+ }
+ }
+ return false
+}
+
+type blockSet struct{ big.Int } // (inherit methods from Int)
+
+// add adds b to the set and returns true if the set changed.
+func (s *blockSet) add(b *BasicBlock) bool {
+ i := b.Index
+ if s.Bit(i) != 0 {
+ return false
+ }
+ s.SetBit(&s.Int, i, 1)
+ return true
+}
+
+// take removes an arbitrary element from a set s and
+// returns its index, or returns -1 if empty.
+func (s *blockSet) take() int {
+ l := s.BitLen()
+ for i := 0; i < l; i++ {
+ if s.Bit(i) == 1 {
+ s.SetBit(&s.Int, i, 0)
+ return i
+ }
+ }
+ return -1
+}
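+
+// Together, add and take implement a simple worklist over block
+// indices (a sketch; fn and visit are hypothetical):
+//
+//	var w blockSet
+//	w.add(fn.Blocks[0])
+//	for i := w.take(); i != -1; i = w.take() {
+//		visit(fn.Blocks[i])
+//	}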
+
+// newPhi is a pair of a newly introduced φ-node and the lifted Alloc
+// it replaces.
+type newPhi struct {
+ phi *Phi
+ alloc *Alloc
+}
+
+// newPhiMap records for each basic block, the set of newPhis that
+// must be prepended to the block.
+type newPhiMap map[*BasicBlock][]newPhi
+
+// liftAlloc determines whether alloc can be lifted into registers,
+// and if so, it populates newPhis with all the φ-nodes it may require
+// and returns true.
+//
+// fresh is a source of fresh ids for phi nodes.
+//
+func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool {
+ // Don't lift aggregates into registers, because we don't have
+ // a way to express their zero-constants.
+ switch deref(alloc.Type()).Underlying().(type) {
+ case *types.Array, *types.Struct:
+ return false
+ }
+
+ // Don't lift named return values in functions that defer
+ // calls that may recover from panic.
+ if fn := alloc.Parent(); fn.Recover != nil {
+ for _, nr := range fn.namedResults {
+ if nr == alloc {
+ return false
+ }
+ }
+ }
+
+ // Compute defblocks, the set of blocks containing a
+ // definition of the alloc cell.
+ var defblocks blockSet
+ for _, instr := range *alloc.Referrers() {
+ // Bail out if we discover the alloc is not liftable;
+ // the only operations permitted to use the alloc are
+ // loads/stores into the cell, and DebugRef.
+ switch instr := instr.(type) {
+ case *Store:
+ if instr.Val == alloc {
+ return false // address used as value
+ }
+ if instr.Addr != alloc {
+ panic("Alloc.Referrers is inconsistent")
+ }
+ defblocks.add(instr.Block())
+ case *UnOp:
+ if instr.Op != token.MUL {
+ return false // not a load
+ }
+ if instr.X != alloc {
+ panic("Alloc.Referrers is inconsistent")
+ }
+ case *DebugRef:
+ // ok
+ default:
+ return false // some other instruction
+ }
+ }
+ // The Alloc itself counts as a (zero) definition of the cell.
+ defblocks.add(alloc.Block())
+
+ if debugLifting {
+ fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name())
+ }
+
+ fn := alloc.Parent()
+
+ // Φ-insertion.
+ //
+ // What follows is the body of the main loop of the insert-φ
+ // function described by Cytron et al, but instead of using
+ // counter tricks, we just reset the 'hasAlready' and 'work'
+ // sets each iteration. These are bitmaps so it's pretty cheap.
+ //
+ // TODO(adonovan): opt: recycle slice storage for W,
+ // hasAlready, defBlocks across liftAlloc calls.
+ var hasAlready blockSet
+
+ // Initialize W and work to defblocks.
+ var work blockSet = defblocks // blocks seen
+ var W blockSet // blocks to do
+ W.Set(&defblocks.Int)
+
+ // Traverse iterated dominance frontier, inserting φ-nodes.
+ for i := W.take(); i != -1; i = W.take() {
+ u := fn.Blocks[i]
+ for _, v := range df[u.Index] {
+ if hasAlready.add(v) {
+ // Create φ-node.
+ // It will be prepended to v.Instrs later, if needed.
+ phi := &Phi{
+ Edges: make([]Value, len(v.Preds)),
+ Comment: alloc.Comment,
+ }
+ // This is merely a debugging aid:
+ phi.setNum(*fresh)
+ *fresh++
+
+ phi.pos = alloc.Pos()
+ phi.setType(deref(alloc.Type()))
+ phi.block = v
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v)
+ }
+ newPhis[v] = append(newPhis[v], newPhi{phi, alloc})
+
+ if work.add(v) {
+ W.add(v)
+ }
+ }
+ }
+ }
+
+ return true
+}
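+
+// For example (hypothetical source), in
+//
+//	func g() *int { x := 0; return &x }
+//
+// the Alloc for x is referred to by a non-load/store instruction (the
+// Return uses its address as a value), so liftAlloc returns false and
+// x stays in memory; a variable used only via loads and stores is
+// lifted into SSA registers instead.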
+
+// replaceAll replaces all intraprocedural uses of x with y,
+// updating x.Referrers and y.Referrers.
+// Precondition: x.Referrers() != nil, i.e. x must be local to some function.
+//
+func replaceAll(x, y Value) {
+ var rands []*Value
+ pxrefs := x.Referrers()
+ pyrefs := y.Referrers()
+ for _, instr := range *pxrefs {
+ rands = instr.Operands(rands[:0]) // recycle storage
+ for _, rand := range rands {
+ if *rand != nil {
+ if *rand == x {
+ *rand = y
+ }
+ }
+ }
+ if pyrefs != nil {
+ *pyrefs = append(*pyrefs, instr) // dups ok
+ }
+ }
+ *pxrefs = nil // x is now unreferenced
+}
+
+// renamed returns the value to which alloc is being renamed,
+// constructing it lazily if it's the implicit zero initialization.
+//
+func renamed(renaming []Value, alloc *Alloc) Value {
+ v := renaming[alloc.index]
+ if v == nil {
+ v = zeroConst(deref(alloc.Type()))
+ renaming[alloc.index] = v
+ }
+ return v
+}
+
+// rename implements the (Cytron et al) SSA renaming algorithm, a
+// preorder traversal of the dominator tree replacing all loads of
+// Alloc cells with the value stored to that cell by the dominating
+// store instruction. For lifting, we need only consider loads,
+// stores and φ-nodes.
+//
+// renaming is a map from *Alloc (keyed by index number) to its
+// dominating stored value; newPhis[x] is the set of new φ-nodes to be
+// prepended to block x.
+//
+func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) {
+ // Each φ-node becomes the new name for its associated Alloc.
+ for _, np := range newPhis[u] {
+ phi := np.phi
+ alloc := np.alloc
+ renaming[alloc.index] = phi
+ }
+
+ // Rename loads and stores of allocs.
+ for i, instr := range u.Instrs {
+ switch instr := instr.(type) {
+ case *Alloc:
+ if instr.index >= 0 { // store of zero to Alloc cell
+ // Replace dominated loads by the zero value.
+ renaming[instr.index] = nil
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr)
+ }
+ // Delete the Alloc.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+
+ case *Store:
+ if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell
+ // Replace dominated loads by the stored value.
+ renaming[alloc.index] = instr.Val
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n",
+ instr, instr.Val.Name())
+ }
+ // Remove the store from the referrer list of the stored value.
+ if refs := instr.Val.Referrers(); refs != nil {
+ *refs = removeInstr(*refs, instr)
+ }
+ // Delete the Store.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+
+ case *UnOp:
+ if instr.Op == token.MUL {
+ if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell
+ newval := renamed(renaming, alloc)
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n",
+ instr.Name(), instr, newval.Name())
+ }
+ // Replace all references to
+ // the loaded value by the
+ // dominating stored value.
+ replaceAll(instr, newval)
+ // Delete the Load.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+ }
+
+ case *DebugRef:
+ if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell
+ if instr.IsAddr {
+ instr.X = renamed(renaming, alloc)
+ instr.IsAddr = false
+
+ // Add DebugRef to instr.X's referrers.
+ if refs := instr.X.Referrers(); refs != nil {
+ *refs = append(*refs, instr)
+ }
+ } else {
+ // A source expression denotes the address
+ // of an Alloc that was optimized away.
+ instr.X = nil
+
+ // Delete the DebugRef.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+ }
+ }
+ }
+
+ // For each φ-node in a CFG successor, rename the edge.
+ for _, v := range u.Succs {
+ phis := newPhis[v]
+ if len(phis) == 0 {
+ continue
+ }
+ i := v.predIndex(u)
+ for _, np := range phis {
+ phi := np.phi
+ alloc := np.alloc
+ newval := renamed(renaming, alloc)
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n",
+ phi.Name(), u, v, i, alloc.Name(), newval.Name())
+ }
+ phi.Edges[i] = newval
+ if prefs := newval.Referrers(); prefs != nil {
+ *prefs = append(*prefs, phi)
+ }
+ }
+ }
+
+ // Continue depth-first recursion over domtree, pushing a
+ // fresh copy of the renaming map for each subtree.
+ for i, v := range u.dom.children {
+ r := renaming
+ if i < len(u.dom.children)-1 {
+ // On all but the final iteration, we must make
+ // a copy to avoid destructive update.
+ r = make([]Value, len(renaming))
+ copy(r, renaming)
+ }
+ rename(v, r, newPhis)
+ }
+}
diff --git a/vendor/honnef.co/go/tools/ssa/lvalue.go b/vendor/honnef.co/go/tools/ssa/lvalue.go
new file mode 100644
index 000000000..eb5d71e18
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/lvalue.go
@@ -0,0 +1,123 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// lvalues are the union of addressable expressions and map-index
+// expressions.
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// An lvalue represents an assignable location that may appear on the
+// left-hand side of an assignment. This is a generalization of a
+// pointer to permit updates to elements of maps.
+//
+type lvalue interface {
+ store(fn *Function, v Value) // stores v into the location
+ load(fn *Function) Value // loads the contents of the location
+ address(fn *Function) Value // address of the location
+ typ() types.Type // returns the type of the location
+}
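+
+// As a rough usage sketch (the assign helper is hypothetical, not
+// part of this package), the builder can compile any assignment
+// through this interface without knowing whether the target is a
+// variable, a map element, or the blank identifier:
+//
+//	func assign(fn *Function, loc lvalue, v Value) {
+//		loc.store(fn, v) // emits Store, MapUpdate or BlankStore as appropriate
+//	}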
+
+// An address is an lvalue represented by a true pointer.
+type address struct {
+ addr Value
+ pos token.Pos // source position
+ expr ast.Expr // source syntax of the value (not address) [debug mode]
+}
+
+func (a *address) load(fn *Function) Value {
+ load := emitLoad(fn, a.addr)
+ load.pos = a.pos
+ return load
+}
+
+func (a *address) store(fn *Function, v Value) {
+ store := emitStore(fn, a.addr, v, a.pos)
+ if a.expr != nil {
+ // store.Val is v, converted for assignability.
+ emitDebugRef(fn, a.expr, store.Val, false)
+ }
+}
+
+func (a *address) address(fn *Function) Value {
+ if a.expr != nil {
+ emitDebugRef(fn, a.expr, a.addr, true)
+ }
+ return a.addr
+}
+
+func (a *address) typ() types.Type {
+ return deref(a.addr.Type())
+}
+
+// An element is an lvalue represented by m[k], the location of an
+// element of a map or string. These locations are not addressable
+// since pointers cannot be formed from them, but they do support
+// load(), and in the case of maps, store().
+//
+type element struct {
+	m, k Value       // the map (or string) and its key (or index)
+ t types.Type // map element type or string byte type
+ pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v)
+}
+
+func (e *element) load(fn *Function) Value {
+ l := &Lookup{
+ X: e.m,
+ Index: e.k,
+ }
+ l.setPos(e.pos)
+ l.setType(e.t)
+ return fn.emit(l)
+}
+
+func (e *element) store(fn *Function, v Value) {
+ up := &MapUpdate{
+ Map: e.m,
+ Key: e.k,
+ Value: emitConv(fn, v, e.t),
+ }
+ up.pos = e.pos
+ fn.emit(up)
+}
+
+func (e *element) address(fn *Function) Value {
+ panic("map/string elements are not addressable")
+}
+
+func (e *element) typ() types.Type {
+ return e.t
+}
+
+// A blank is a dummy variable whose name is "_".
+// It is not reified: loads are illegal and stores are ignored.
+//
+type blank struct{}
+
+func (bl blank) load(fn *Function) Value {
+ panic("blank.load is illegal")
+}
+
+func (bl blank) store(fn *Function, v Value) {
+ s := &BlankStore{
+ Val: v,
+ }
+ fn.emit(s)
+}
+
+func (bl blank) address(fn *Function) Value {
+ panic("blank var is not addressable")
+}
+
+func (bl blank) typ() types.Type {
+ // This should be the type of the blank Ident; the typechecker
+ // doesn't provide this yet, but fortunately, we don't need it
+ // yet either.
+ panic("blank.typ is unimplemented")
+}
diff --git a/vendor/honnef.co/go/tools/ssa/methods.go b/vendor/honnef.co/go/tools/ssa/methods.go
new file mode 100644
index 000000000..080dca968
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/methods.go
@@ -0,0 +1,239 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines utilities for populating method sets.
+
+import (
+ "fmt"
+ "go/types"
+)
+
+// MethodValue returns the Function implementing method sel, building
+// wrapper methods on demand. It returns nil if sel denotes an
+// abstract (interface) method.
+//
+// Precondition: sel.Kind() == MethodVal.
+//
+// Thread-safe.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func (prog *Program) MethodValue(sel *types.Selection) *Function {
+ if sel.Kind() != types.MethodVal {
+		panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
+ }
+ T := sel.Recv()
+ if isInterface(T) {
+ return nil // abstract method
+ }
+ if prog.mode&LogSource != 0 {
+ defer logStack("Method %s %v", T, sel)()
+ }
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ return prog.addMethod(prog.createMethodSet(T), sel)
+}
+
+// LookupMethod returns the implementation of the method of type T
+// identified by (pkg, name). It returns nil if the method exists but
+// is abstract, and panics if T has no such method.
+//
+func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
+ sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
+ if sel == nil {
+ panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name)))
+ }
+ return prog.MethodValue(sel)
+}
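+
+// For example, a client could obtain the String method of a concrete
+// type T like this (a sketch; T is assumed to be a non-interface type
+// in prog that has such a method, and a nil pkg suffices for exported
+// names):
+//
+//	fn := prog.LookupMethod(T, nil, "String")
+//	fmt.Println(fn.Signature)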
+
+// methodSet contains the (concrete) methods of a non-interface type.
+type methodSet struct {
+ mapping map[string]*Function // populated lazily
+ complete bool // mapping contains all methods
+}
+
+// Precondition: !isInterface(T).
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+func (prog *Program) createMethodSet(T types.Type) *methodSet {
+ mset, ok := prog.methodSets.At(T).(*methodSet)
+ if !ok {
+ mset = &methodSet{mapping: make(map[string]*Function)}
+ prog.methodSets.Set(T, mset)
+ }
+ return mset
+}
+
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
+ if sel.Kind() == types.MethodExpr {
+ panic(sel)
+ }
+ id := sel.Obj().Id()
+ fn := mset.mapping[id]
+ if fn == nil {
+ obj := sel.Obj().(*types.Func)
+
+ needsPromotion := len(sel.Index()) > 1
+ needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
+ if needsPromotion || needsIndirection {
+ fn = makeWrapper(prog, sel)
+ } else {
+ fn = prog.declaredFunc(obj)
+ }
+ if fn.Signature.Recv() == nil {
+ panic(fn) // missing receiver
+ }
+ mset.mapping[id] = fn
+ }
+ return fn
+}
+
+// RuntimeTypes returns a new unordered slice containing all
+// concrete types in the program for which a complete (non-empty)
+// method set is required at run-time.
+//
+// Thread-safe.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func (prog *Program) RuntimeTypes() []types.Type {
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ var res []types.Type
+ prog.methodSets.Iterate(func(T types.Type, v interface{}) {
+ if v.(*methodSet).complete {
+ res = append(res, T)
+ }
+ })
+ return res
+}
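+
+// For example, after the program has been built, a client could
+// enumerate these types (a sketch):
+//
+//	for _, T := range prog.RuntimeTypes() {
+//		fmt.Println(T)
+//	}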
+
+// declaredFunc returns the concrete function/method denoted by obj.
+// Panic ensues if there is none.
+//
+func (prog *Program) declaredFunc(obj *types.Func) *Function {
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Function)
+ }
+ panic("no concrete method: " + obj.String())
+}
+
+// needMethodsOf ensures that runtime type information (including the
+// complete method set) is available for the specified type T and all
+// its subcomponents.
+//
+// needMethodsOf must be called for at least every type that is an
+// operand of some MakeInterface instruction, and for the type of
+// every exported package member.
+//
+// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+//
+// Thread-safe. (Called via emitConv from multiple builder goroutines.)
+//
+// TODO(adonovan): make this faster. It accounts for 20% of SSA build time.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func (prog *Program) needMethodsOf(T types.Type) {
+ prog.methodsMu.Lock()
+ prog.needMethods(T, false)
+ prog.methodsMu.Unlock()
+}
+
+// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+// Recursive case: skip => don't create methods for T.
+//
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+//
+func (prog *Program) needMethods(T types.Type, skip bool) {
+ // Each package maintains its own set of types it has visited.
+ if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
+ // needMethods(T) was previously called
+ if !prevSkip || skip {
+ return // already seen, with same or false 'skip' value
+ }
+ }
+ prog.runtimeTypes.Set(T, skip)
+
+ tmset := prog.MethodSets.MethodSet(T)
+
+ if !skip && !isInterface(T) && tmset.Len() > 0 {
+ // Create methods of T.
+ mset := prog.createMethodSet(T)
+ if !mset.complete {
+ mset.complete = true
+ n := tmset.Len()
+ for i := 0; i < n; i++ {
+ prog.addMethod(mset, tmset.At(i))
+ }
+ }
+ }
+
+ // Recursion over signatures of each method.
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ prog.needMethods(sig.Params(), false)
+ prog.needMethods(sig.Results(), false)
+ }
+
+ switch t := T.(type) {
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Slice:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Chan:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Map:
+ prog.needMethods(t.Key(), false)
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Signature:
+ if t.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
+ }
+ prog.needMethods(t.Params(), false)
+ prog.needMethods(t.Results(), false)
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ prog.needMethods(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ prog.needMethods(t.Underlying(), true)
+
+ case *types.Array:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ prog.needMethods(t.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, t.Len(); i < n; i++ {
+ prog.needMethods(t.At(i).Type(), false)
+ }
+
+ default:
+ panic(T)
+ }
+}
diff --git a/vendor/honnef.co/go/tools/ssa/mode.go b/vendor/honnef.co/go/tools/ssa/mode.go
new file mode 100644
index 000000000..d2a269893
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/mode.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the BuilderMode type and its command-line flag.
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// BuilderMode is a bitmask of options for diagnostics and checking.
+//
+// *BuilderMode satisfies the flag.Value interface. Example:
+//
+// var mode = ssa.BuilderMode(0)
+// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) }
+//
+type BuilderMode uint
+
+const (
+ PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout
+ PrintFunctions // Print function SSA code to stdout
+ LogSource // Log source locations as SSA builder progresses
+ SanityCheckFunctions // Perform sanity checking of function bodies
+ NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers
+ BuildSerially // Build packages serially, not in parallel.
+ GlobalDebug // Enable debug info for all packages
+ BareInits // Build init functions without guards or calls to dependent inits
+)
+
+const BuilderModeDoc = `Options controlling the SSA builder.
+The value is a sequence of zero or more of these letters:
+C perform sanity [C]hecking of the SSA form.
+D include [D]ebug info for every function.
+P print [P]ackage inventory.
+F print [F]unction SSA code.
+S log [S]ource locations as SSA builder progresses.
+L build distinct packages seria[L]ly instead of in parallel.
+N build [N]aive SSA form: don't replace local loads/stores with registers.
+I build bare [I]nit functions: no init guards or calls to dependent inits.
+`
+
+func (m BuilderMode) String() string {
+ var buf bytes.Buffer
+ if m&GlobalDebug != 0 {
+ buf.WriteByte('D')
+ }
+ if m&PrintPackages != 0 {
+ buf.WriteByte('P')
+ }
+ if m&PrintFunctions != 0 {
+ buf.WriteByte('F')
+ }
+ if m&LogSource != 0 {
+ buf.WriteByte('S')
+ }
+ if m&SanityCheckFunctions != 0 {
+ buf.WriteByte('C')
+ }
+ if m&NaiveForm != 0 {
+ buf.WriteByte('N')
+ }
+	if m&BuildSerially != 0 {
+		buf.WriteByte('L')
+	}
+	if m&BareInits != 0 {
+		buf.WriteByte('I')
+	}
+	return buf.String()
+}
+
+// Set parses the flag characters in s and updates *m.
+func (m *BuilderMode) Set(s string) error {
+ var mode BuilderMode
+ for _, c := range s {
+ switch c {
+ case 'D':
+ mode |= GlobalDebug
+ case 'P':
+ mode |= PrintPackages
+ case 'F':
+ mode |= PrintFunctions
+ case 'S':
+ mode |= LogSource | BuildSerially
+ case 'C':
+ mode |= SanityCheckFunctions
+ case 'N':
+ mode |= NaiveForm
+		case 'L':
+			mode |= BuildSerially
+		case 'I':
+			mode |= BareInits
+ default:
+ return fmt.Errorf("unknown BuilderMode option: %q", c)
+ }
+ }
+ *m = mode
+ return nil
+}
+
+// Get returns m.
+func (m BuilderMode) Get() interface{} { return m }
diff --git a/vendor/honnef.co/go/tools/ssa/print.go b/vendor/honnef.co/go/tools/ssa/print.go
new file mode 100644
index 000000000..6fd277277
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/print.go
@@ -0,0 +1,435 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the String() methods for all Value and
+// Instruction types.
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "io"
+ "reflect"
+ "sort"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// relName returns the name of v relative to i.
+// In most cases, this is identical to v.Name(), but references to
+// Functions (including methods) and Globals use RelString and
+// all types are displayed with relType, so that only cross-package
+// references are package-qualified.
+//
+func relName(v Value, i Instruction) string {
+ var from *types.Package
+ if i != nil {
+ from = i.Parent().pkg()
+ }
+ switch v := v.(type) {
+ case Member: // *Function or *Global
+ return v.RelString(from)
+ case *Const:
+ return v.RelString(from)
+ }
+ return v.Name()
+}
+
+func relType(t types.Type, from *types.Package) string {
+ return types.TypeString(t, types.RelativeTo(from))
+}
+
+func relString(m Member, from *types.Package) string {
+	// NB: not all globals have an Object (e.g. init$guard),
+	// so use Package().Pkg, not Object().Pkg().
+ if pkg := m.Package().Pkg; pkg != nil && pkg != from {
+ return fmt.Sprintf("%s.%s", pkg.Path(), m.Name())
+ }
+ return m.Name()
+}
+
+// Value.String()
+//
+// This method is provided only for debugging.
+// It never appears in disassembly, which uses Value.Name().
+
+func (v *Parameter) String() string {
+ from := v.Parent().pkg()
+ return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from))
+}
+
+func (v *FreeVar) String() string {
+ from := v.Parent().pkg()
+ return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from))
+}
+
+func (v *Builtin) String() string {
+ return fmt.Sprintf("builtin %s", v.Name())
+}
+
+// Instruction.String()
+
+func (v *Alloc) String() string {
+ op := "local"
+ if v.Heap {
+ op = "new"
+ }
+ from := v.Parent().pkg()
+ return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment)
+}
+
+func (v *Phi) String() string {
+ var b bytes.Buffer
+ b.WriteString("phi [")
+ for i, edge := range v.Edges {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ // Be robust against malformed CFG.
+ if v.block == nil {
+ b.WriteString("??")
+ continue
+ }
+ block := -1
+ if i < len(v.block.Preds) {
+ block = v.block.Preds[i].Index
+ }
+ fmt.Fprintf(&b, "%d: ", block)
+ edgeVal := "<nil>" // be robust
+ if edge != nil {
+ edgeVal = relName(edge, v)
+ }
+ b.WriteString(edgeVal)
+ }
+ b.WriteString("]")
+ if v.Comment != "" {
+ b.WriteString(" #")
+ b.WriteString(v.Comment)
+ }
+ return b.String()
+}
+
+func printCall(v *CallCommon, prefix string, instr Instruction) string {
+ var b bytes.Buffer
+ b.WriteString(prefix)
+ if !v.IsInvoke() {
+ b.WriteString(relName(v.Value, instr))
+ } else {
+ fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name())
+ }
+ b.WriteString("(")
+ for i, arg := range v.Args {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(relName(arg, instr))
+ }
+ if v.Signature().Variadic() {
+ b.WriteString("...")
+ }
+ b.WriteString(")")
+ return b.String()
+}
+
+func (c *CallCommon) String() string {
+ return printCall(c, "", nil)
+}
+
+func (v *Call) String() string {
+ return printCall(&v.Call, "", v)
+}
+
+func (v *BinOp) String() string {
+ return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v))
+}
+
+func (v *UnOp) String() string {
+ return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk))
+}
+
+func printConv(prefix string, v, x Value) string {
+ from := v.Parent().pkg()
+ return fmt.Sprintf("%s %s <- %s (%s)",
+ prefix,
+ relType(v.Type(), from),
+ relType(x.Type(), from),
+ relName(x, v.(Instruction)))
+}
+
+func (v *ChangeType) String() string { return printConv("changetype", v, v.X) }
+func (v *Convert) String() string { return printConv("convert", v, v.X) }
+func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) }
+func (v *MakeInterface) String() string { return printConv("make", v, v.X) }
+
+func (v *MakeClosure) String() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v))
+ if v.Bindings != nil {
+ b.WriteString(" [")
+ for i, c := range v.Bindings {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(relName(c, v))
+ }
+ b.WriteString("]")
+ }
+ return b.String()
+}
+
+func (v *MakeSlice) String() string {
+ from := v.Parent().pkg()
+ return fmt.Sprintf("make %s %s %s",
+ relType(v.Type(), from),
+ relName(v.Len, v),
+ relName(v.Cap, v))
+}
+
+func (v *Slice) String() string {
+ var b bytes.Buffer
+ b.WriteString("slice ")
+ b.WriteString(relName(v.X, v))
+ b.WriteString("[")
+ if v.Low != nil {
+ b.WriteString(relName(v.Low, v))
+ }
+ b.WriteString(":")
+ if v.High != nil {
+ b.WriteString(relName(v.High, v))
+ }
+ if v.Max != nil {
+ b.WriteString(":")
+ b.WriteString(relName(v.Max, v))
+ }
+ b.WriteString("]")
+ return b.String()
+}
+
+func (v *MakeMap) String() string {
+ res := ""
+ if v.Reserve != nil {
+ res = relName(v.Reserve, v)
+ }
+ from := v.Parent().pkg()
+ return fmt.Sprintf("make %s %s", relType(v.Type(), from), res)
+}
+
+func (v *MakeChan) String() string {
+ from := v.Parent().pkg()
+ return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v))
+}
+
+func (v *FieldAddr) String() string {
+ st := deref(v.X.Type()).Underlying().(*types.Struct)
+ // Be robust against a bad index.
+ name := "?"
+ if 0 <= v.Field && v.Field < st.NumFields() {
+ name = st.Field(v.Field).Name()
+ }
+ return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field)
+}
+
+func (v *Field) String() string {
+ st := v.X.Type().Underlying().(*types.Struct)
+ // Be robust against a bad index.
+ name := "?"
+ if 0 <= v.Field && v.Field < st.NumFields() {
+ name = st.Field(v.Field).Name()
+ }
+ return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field)
+}
+
+func (v *IndexAddr) String() string {
+ return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v))
+}
+
+func (v *Index) String() string {
+ return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v))
+}
+
+func (v *Lookup) String() string {
+ return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk))
+}
+
+func (v *Range) String() string {
+ return "range " + relName(v.X, v)
+}
+
+func (v *Next) String() string {
+ return "next " + relName(v.Iter, v)
+}
+
+func (v *TypeAssert) String() string {
+ from := v.Parent().pkg()
+ return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from))
+}
+
+func (v *Extract) String() string {
+ return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index)
+}
+
+func (s *Jump) String() string {
+ // Be robust against malformed CFG.
+ block := -1
+ if s.block != nil && len(s.block.Succs) == 1 {
+ block = s.block.Succs[0].Index
+ }
+ return fmt.Sprintf("jump %d", block)
+}
+
+func (s *If) String() string {
+ // Be robust against malformed CFG.
+ tblock, fblock := -1, -1
+ if s.block != nil && len(s.block.Succs) == 2 {
+ tblock = s.block.Succs[0].Index
+ fblock = s.block.Succs[1].Index
+ }
+ return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock)
+}
+
+func (s *Go) String() string {
+ return printCall(&s.Call, "go ", s)
+}
+
+func (s *Panic) String() string {
+ return "panic " + relName(s.X, s)
+}
+
+func (s *Return) String() string {
+ var b bytes.Buffer
+ b.WriteString("return")
+ for i, r := range s.Results {
+ if i == 0 {
+ b.WriteString(" ")
+ } else {
+ b.WriteString(", ")
+ }
+ b.WriteString(relName(r, s))
+ }
+ return b.String()
+}
+
+func (*RunDefers) String() string {
+ return "rundefers"
+}
+
+func (s *Send) String() string {
+ return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s))
+}
+
+func (s *Defer) String() string {
+ return printCall(&s.Call, "defer ", s)
+}
+
+func (s *Select) String() string {
+ var b bytes.Buffer
+ for i, st := range s.States {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ if st.Dir == types.RecvOnly {
+ b.WriteString("<-")
+ b.WriteString(relName(st.Chan, s))
+ } else {
+ b.WriteString(relName(st.Chan, s))
+ b.WriteString("<-")
+ b.WriteString(relName(st.Send, s))
+ }
+ }
+ non := ""
+ if !s.Blocking {
+ non = "non"
+ }
+ return fmt.Sprintf("select %sblocking [%s]", non, b.String())
+}
+
+func (s *Store) String() string {
+ return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s))
+}
+
+func (s *BlankStore) String() string {
+ return fmt.Sprintf("_ = %s", relName(s.Val, s))
+}
+
+func (s *MapUpdate) String() string {
+ return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s))
+}
+
+func (s *DebugRef) String() string {
+ p := s.Parent().Prog.Fset.Position(s.Pos())
+ var descr interface{}
+ if s.object != nil {
+ descr = s.object // e.g. "var x int"
+ } else {
+ descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr"
+ }
+ var addr string
+ if s.IsAddr {
+ addr = "address of "
+ }
+ return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name())
+}
+
+func (p *Package) String() string {
+ return "package " + p.Pkg.Path()
+}
+
+var _ io.WriterTo = (*Package)(nil) // *Package implements io.WriterTo
+
+func (p *Package) WriteTo(w io.Writer) (int64, error) {
+ var buf bytes.Buffer
+ WritePackage(&buf, p)
+ n, err := w.Write(buf.Bytes())
+ return int64(n), err
+}
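+
+// For example, to dump a package's members to standard output
+// (a sketch):
+//
+//	if _, err := pkg.WriteTo(os.Stdout); err != nil {
+//		log.Fatal(err)
+//	}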
+
+// WritePackage writes to buf a human-readable summary of p.
+func WritePackage(buf *bytes.Buffer, p *Package) {
+ fmt.Fprintf(buf, "%s:\n", p)
+
+ var names []string
+ maxname := 0
+ for name := range p.Members {
+ if l := len(name); l > maxname {
+ maxname = l
+ }
+ names = append(names, name)
+ }
+
+ from := p.Pkg
+ sort.Strings(names)
+ for _, name := range names {
+ switch mem := p.Members[name].(type) {
+ case *NamedConst:
+ fmt.Fprintf(buf, " const %-*s %s = %s\n",
+ maxname, name, mem.Name(), mem.Value.RelString(from))
+
+ case *Function:
+ fmt.Fprintf(buf, " func %-*s %s\n",
+ maxname, name, relType(mem.Type(), from))
+
+ case *Type:
+ fmt.Fprintf(buf, " type %-*s %s\n",
+ maxname, name, relType(mem.Type().Underlying(), from))
+ for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) {
+ fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from)))
+ }
+
+ case *Global:
+ fmt.Fprintf(buf, " var %-*s %s\n",
+ maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from))
+ }
+ }
+
+ fmt.Fprintf(buf, "\n")
+}
+
+func commaOk(x bool) string {
+ if x {
+ return ",ok"
+ }
+ return ""
+}
diff --git a/vendor/honnef.co/go/tools/ssa/sanity.go b/vendor/honnef.co/go/tools/ssa/sanity.go
new file mode 100644
index 000000000..c56b2682c
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/sanity.go
@@ -0,0 +1,523 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// An optional pass for sanity-checking invariants of the SSA representation.
+// Currently it checks CFG invariants but does little checking at the instruction level.
+
+import (
+ "fmt"
+ "go/types"
+ "io"
+ "os"
+ "strings"
+)
+
+type sanity struct {
+ reporter io.Writer
+ fn *Function
+ block *BasicBlock
+ instrs map[Instruction]struct{}
+ insane bool
+}
+
+// sanityCheck performs integrity checking of the SSA representation
+// of the function fn and returns true if it was valid. Diagnostics
+// are written to reporter if non-nil, os.Stderr otherwise. Some
+// diagnostics are only warnings and do not imply a negative result.
+//
+// Sanity-checking is intended to facilitate the debugging of code
+// transformation passes.
+//
+func sanityCheck(fn *Function, reporter io.Writer) bool {
+ if reporter == nil {
+ reporter = os.Stderr
+ }
+ return (&sanity{reporter: reporter}).checkFunction(fn)
+}
+
+// mustSanityCheck is like sanityCheck but panics instead of returning
+// a negative result.
+//
+func mustSanityCheck(fn *Function, reporter io.Writer) {
+ if !sanityCheck(fn, reporter) {
+ fn.WriteTo(os.Stderr)
+ panic("SanityCheck failed")
+ }
+}
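+
+// A typical use is to guard a transformation pass (a sketch; the
+// transform function is hypothetical):
+//
+//	transform(fn)            // some rewrite of fn's SSA form
+//	mustSanityCheck(fn, nil) // panics, printing fn, if the rewrite broke an invariant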
+
+func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
+ fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
+ if s.block != nil {
+ fmt.Fprintf(s.reporter, ", block %s", s.block)
+ }
+ io.WriteString(s.reporter, ": ")
+ fmt.Fprintf(s.reporter, format, args...)
+ io.WriteString(s.reporter, "\n")
+}
+
+func (s *sanity) errorf(format string, args ...interface{}) {
+ s.insane = true
+ s.diagnostic("Error", format, args...)
+}
+
+func (s *sanity) warnf(format string, args ...interface{}) {
+ s.diagnostic("Warning", format, args...)
+}
+
+// findDuplicate returns an arbitrary basic block that appeared more
+// than once in blocks, or nil if all were unique.
+func findDuplicate(blocks []*BasicBlock) *BasicBlock {
+ if len(blocks) < 2 {
+ return nil
+ }
+ if blocks[0] == blocks[1] {
+ return blocks[0]
+ }
+ // Slow path:
+ m := make(map[*BasicBlock]bool)
+ for _, b := range blocks {
+ if m[b] {
+ return b
+ }
+ m[b] = true
+ }
+ return nil
+}
+
+func (s *sanity) checkInstr(idx int, instr Instruction) {
+ switch instr := instr.(type) {
+ case *If, *Jump, *Return, *Panic:
+ s.errorf("control flow instruction not at end of block")
+ case *Phi:
+ if idx == 0 {
+ // It suffices to apply this check to just the first phi node.
+ if dup := findDuplicate(s.block.Preds); dup != nil {
+ s.errorf("phi node in block with duplicate predecessor %s", dup)
+ }
+ } else {
+ prev := s.block.Instrs[idx-1]
+ if _, ok := prev.(*Phi); !ok {
+ s.errorf("Phi instruction follows a non-Phi: %T", prev)
+ }
+ }
+ if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
+ s.errorf("phi node has %d edges but %d predecessors", ne, np)
+
+ } else {
+ for i, e := range instr.Edges {
+ if e == nil {
+ s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i])
+ }
+ }
+ }
+
+ case *Alloc:
+ if !instr.Heap {
+ found := false
+ for _, l := range s.fn.Locals {
+ if l == instr {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
+ }
+ }
+
+ case *BinOp:
+ case *Call:
+ case *ChangeInterface:
+ case *ChangeType:
+ case *Convert:
+ if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
+ if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
+ s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
+ }
+ }
+
+ case *Defer:
+ case *Extract:
+ case *Field:
+ case *FieldAddr:
+ case *Go:
+ case *Index:
+ case *IndexAddr:
+ case *Lookup:
+ case *MakeChan:
+ case *MakeClosure:
+ numFree := len(instr.Fn.(*Function).FreeVars)
+ numBind := len(instr.Bindings)
+ if numFree != numBind {
+ s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
+ numBind, instr.Fn, numFree)
+
+ }
+ if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
+ s.errorf("MakeClosure's type includes receiver %s", recv.Type())
+ }
+
+ case *MakeInterface:
+ case *MakeMap:
+ case *MakeSlice:
+ case *MapUpdate:
+ case *Next:
+ case *Range:
+ case *RunDefers:
+ case *Select:
+ case *Send:
+ case *Slice:
+ case *Store:
+ case *TypeAssert:
+ case *UnOp:
+ case *DebugRef:
+ case *BlankStore:
+ case *Sigma:
+ // TODO(adonovan): implement checks.
+ default:
+ panic(fmt.Sprintf("Unknown instruction type: %T", instr))
+ }
+
+ if call, ok := instr.(CallInstruction); ok {
+ if call.Common().Signature() == nil {
+ s.errorf("nil signature: %s", call)
+ }
+ }
+
+ // Check that value-defining instructions have valid types
+ // and a valid referrer list.
+ if v, ok := instr.(Value); ok {
+ t := v.Type()
+ if t == nil {
+ s.errorf("no type: %s = %s", v.Name(), v)
+ } else if t == tRangeIter {
+ // not a proper type; ignore.
+ } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
+ s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
+ }
+ s.checkReferrerList(v)
+ }
+
+ // Untyped constants are legal as instruction Operands(),
+ // for example:
+ // _ = "foo"[0]
+ // or:
+ // if wordsize==64 {...}
+
+ // All other non-Instruction Values can be found via their
+ // enclosing Function or Package.
+}
+
+func (s *sanity) checkFinalInstr(instr Instruction) {
+ switch instr := instr.(type) {
+ case *If:
+ if nsuccs := len(s.block.Succs); nsuccs != 2 {
+ s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
+ return
+ }
+ if s.block.Succs[0] == s.block.Succs[1] {
+ s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
+ return
+ }
+
+ case *Jump:
+ if nsuccs := len(s.block.Succs); nsuccs != 1 {
+ s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
+ return
+ }
+
+ case *Return:
+ if nsuccs := len(s.block.Succs); nsuccs != 0 {
+ s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
+ return
+ }
+ if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na {
+ s.errorf("%d-ary return in %d-ary function", na, nf)
+ }
+
+ case *Panic:
+ if nsuccs := len(s.block.Succs); nsuccs != 0 {
+ s.errorf("Panic-terminated block has %d successors; expected none", nsuccs)
+ return
+ }
+
+ default:
+ s.errorf("non-control flow instruction at end of block")
+ }
+}
+
+func (s *sanity) checkBlock(b *BasicBlock, index int) {
+ s.block = b
+
+ if b.Index != index {
+ s.errorf("block has incorrect Index %d", b.Index)
+ }
+ if b.parent != s.fn {
+ s.errorf("block has incorrect parent %s", b.parent)
+ }
+
+ // Check all blocks are reachable.
+ // (The entry block is always implicitly reachable,
+ // as is the Recover block, if any.)
+ if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 {
+ s.warnf("unreachable block")
+ if b.Instrs == nil {
+ // Since this block is about to be pruned,
+ // tolerating transient problems in it
+ // simplifies other optimizations.
+ return
+ }
+ }
+
+ // Check predecessor and successor relations are dual,
+ // and that all blocks in CFG belong to same function.
+ for _, a := range b.Preds {
+ found := false
+ for _, bb := range a.Succs {
+ if bb == b {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
+ }
+ if a.parent != s.fn {
+ s.errorf("predecessor %s belongs to different function %s", a, a.parent)
+ }
+ }
+ for _, c := range b.Succs {
+ found := false
+ for _, bb := range c.Preds {
+ if bb == b {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
+ }
+ if c.parent != s.fn {
+ s.errorf("successor %s belongs to different function %s", c, c.parent)
+ }
+ }
+
+ // Check each instruction is sane.
+ n := len(b.Instrs)
+ if n == 0 {
+ s.errorf("basic block contains no instructions")
+ }
+ var rands [10]*Value // reuse storage
+ for j, instr := range b.Instrs {
+ if instr == nil {
+ s.errorf("nil instruction at index %d", j)
+ continue
+ }
+ if b2 := instr.Block(); b2 == nil {
+ s.errorf("nil Block() for instruction at index %d", j)
+ continue
+ } else if b2 != b {
+			s.errorf("wrong Block() (%s) for instruction at index %d", b2, j)
+ continue
+ }
+ if j < n-1 {
+ s.checkInstr(j, instr)
+ } else {
+ s.checkFinalInstr(instr)
+ }
+
+ // Check Instruction.Operands.
+ operands:
+ for i, op := range instr.Operands(rands[:0]) {
+ if op == nil {
+ s.errorf("nil operand pointer %d of %s", i, instr)
+ continue
+ }
+ val := *op
+ if val == nil {
+ continue // a nil operand is ok
+ }
+
+ // Check that "untyped" types only appear on constant operands.
+ if _, ok := (*op).(*Const); !ok {
+ if basic, ok := (*op).Type().(*types.Basic); ok {
+ if basic.Info()&types.IsUntyped != 0 {
+ s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
+ }
+ }
+ }
+
+ // Check that Operands that are also Instructions belong to same function.
+ // TODO(adonovan): also check their block dominates block b.
+ if val, ok := val.(Instruction); ok {
+ if val.Block() == nil {
+ s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val)
+ } else if val.Parent() != s.fn {
+ s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
+ }
+ }
+
+ // Check that each function-local operand of
+ // instr refers back to instr. (NB: quadratic)
+ switch val := val.(type) {
+ case *Const, *Global, *Builtin:
+ continue // not local
+ case *Function:
+ if val.parent == nil {
+ continue // only anon functions are local
+ }
+ }
+
+ // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined.
+
+ if refs := val.Referrers(); refs != nil {
+ for _, ref := range *refs {
+ if ref == instr {
+ continue operands
+ }
+ }
+ s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
+ } else {
+ s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
+ }
+ }
+ }
+}
+
+func (s *sanity) checkReferrerList(v Value) {
+ refs := v.Referrers()
+ if refs == nil {
+ s.errorf("%s has missing referrer list", v.Name())
+ return
+ }
+ for i, ref := range *refs {
+ if _, ok := s.instrs[ref]; !ok {
+ s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref)
+ }
+ }
+}
+
+func (s *sanity) checkFunction(fn *Function) bool {
+ // TODO(adonovan): check Function invariants:
+ // - check params match signature
+ // - check transient fields are nil
+ // - warn if any fn.Locals do not appear among block instructions.
+ s.fn = fn
+ if fn.Prog == nil {
+ s.errorf("nil Prog")
+ }
+
+ fn.String() // must not crash
+ fn.RelString(fn.pkg()) // must not crash
+
+ // All functions have a package, except delegates (which are
+ // shared across packages, or duplicated as weak symbols in a
+ // separate-compilation model), and error.Error.
+ if fn.Pkg == nil {
+ if strings.HasPrefix(fn.Synthetic, "wrapper ") ||
+ strings.HasPrefix(fn.Synthetic, "bound ") ||
+ strings.HasPrefix(fn.Synthetic, "thunk ") ||
+ strings.HasSuffix(fn.name, "Error") {
+ // ok
+ } else {
+ s.errorf("nil Pkg")
+ }
+ }
+ if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
+ s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
+ }
+ for i, l := range fn.Locals {
+ if l.Parent() != fn {
+ s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
+ }
+ if l.Heap {
+ s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
+ }
+ }
+ // Build the set of valid referrers.
+ s.instrs = make(map[Instruction]struct{})
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ s.instrs[instr] = struct{}{}
+ }
+ }
+ for i, p := range fn.Params {
+ if p.Parent() != fn {
+ s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
+ }
+ s.checkReferrerList(p)
+ }
+ for i, fv := range fn.FreeVars {
+ if fv.Parent() != fn {
+ s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
+ }
+ s.checkReferrerList(fv)
+ }
+
+ if fn.Blocks != nil && len(fn.Blocks) == 0 {
+ // Function _had_ blocks (so it's not external) but
+ // they were "optimized" away, even the entry block.
+ s.errorf("Blocks slice is non-nil but empty")
+ }
+ for i, b := range fn.Blocks {
+ if b == nil {
+ s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
+ continue
+ }
+ s.checkBlock(b, i)
+ }
+ if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover {
+ s.errorf("Recover block is not in Blocks slice")
+ }
+
+ s.block = nil
+ for i, anon := range fn.AnonFuncs {
+ if anon.Parent() != fn {
+ s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
+ }
+ }
+ s.fn = nil
+ return !s.insane
+}
+
+// sanityCheckPackage checks invariants of packages upon creation.
+// It does not require that the package is built.
+// Unlike sanityCheck (for functions), it just panics at the first error.
+func sanityCheckPackage(pkg *Package) {
+ if pkg.Pkg == nil {
+ panic(fmt.Sprintf("Package %s has no Object", pkg))
+ }
+ pkg.String() // must not crash
+
+ for name, mem := range pkg.Members {
+ if name != mem.Name() {
+ panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
+ pkg.Pkg.Path(), mem, mem.Name(), name))
+ }
+ obj := mem.Object()
+ if obj == nil {
+ // This check is sound because fields
+ // {Global,Function}.object have type
+ // types.Object. (If they were declared as
+ // *types.{Var,Func}, we'd have a non-empty
+ // interface containing a nil pointer.)
+
+ continue // not all members have typechecker objects
+ }
+ if obj.Name() != name {
+ if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") {
+ // Ok. The name of a declared init function varies between
+ // its types.Func ("init") and its ssa.Function ("init#%d").
+ } else {
+ panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
+ pkg.Pkg.Path(), mem, obj.Name(), name))
+ }
+ }
+ if obj.Pos() != mem.Pos() {
+ panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos()))
+ }
+ }
+}
diff --git a/vendor/honnef.co/go/tools/ssa/source.go b/vendor/honnef.co/go/tools/ssa/source.go
new file mode 100644
index 000000000..6d2223eda
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/source.go
@@ -0,0 +1,293 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines utilities for working with source positions
+// or source-level named entities ("objects").
+
+// TODO(adonovan): test that {Value,Instruction}.Pos() positions match
+// the originating syntax, as specified.
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// EnclosingFunction returns the function that contains the syntax
+// node denoted by path.
+//
+// Syntax associated with package-level variable specifications is
+// enclosed by the package's init() function.
+//
+// Returns nil if not found; reasons might include:
+// - the node is not enclosed by any function.
+// - the node is within an anonymous function (FuncLit) and
+// its SSA function has not been created yet
+// (pkg.Build() has not yet been called).
+//
+func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
+ // Start with package-level function...
+ fn := findEnclosingPackageLevelFunction(pkg, path)
+ if fn == nil {
+ return nil // not in any function
+ }
+
+ // ...then walk down the nested anonymous functions.
+ n := len(path)
+outer:
+ for i := range path {
+ if lit, ok := path[n-1-i].(*ast.FuncLit); ok {
+ for _, anon := range fn.AnonFuncs {
+ if anon.Pos() == lit.Type.Func {
+ fn = anon
+ continue outer
+ }
+ }
+ // SSA function not found:
+ // - package not yet built, or maybe
+ // - builder skipped FuncLit in dead block
+ // (in principle; but currently the Builder
+ // generates even dead FuncLits).
+ return nil
+ }
+ }
+ return fn
+}
+
+// HasEnclosingFunction returns true if the AST node denoted by path
+// is contained within the declaration of some function or
+// package-level variable.
+//
+// Unlike EnclosingFunction, the behaviour of this function does not
+// depend on whether SSA code for pkg has been built, so it can be
+// used to quickly reject inputs that would cause EnclosingFunction
+// to fail, prior to SSA building.
+//
+func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
+ return findEnclosingPackageLevelFunction(pkg, path) != nil
+}
+
+// findEnclosingPackageLevelFunction returns the Function
+// corresponding to the package-level function enclosing path.
+//
+func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
+ if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
+ switch decl := path[n-2].(type) {
+ case *ast.GenDecl:
+ if decl.Tok == token.VAR && n >= 3 {
+ // Package-level 'var' initializer.
+ return pkg.init
+ }
+
+ case *ast.FuncDecl:
+ if decl.Recv == nil && decl.Name.Name == "init" {
+ // Explicit init() function.
+ for _, b := range pkg.init.Blocks {
+ for _, instr := range b.Instrs {
+ if instr, ok := instr.(*Call); ok {
+ if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos {
+ return callee
+ }
+ }
+ }
+ }
+ // Hack: return non-nil when SSA is not yet
+ // built so that HasEnclosingFunction works.
+ return pkg.init
+ }
+ // Declared function/method.
+ return findNamedFunc(pkg, decl.Name.NamePos)
+ }
+ }
+ return nil // not in any function
+}
+
+// findNamedFunc returns the named function whose FuncDecl.Ident is at
+// position pos.
+//
+func findNamedFunc(pkg *Package, pos token.Pos) *Function {
+ // Look at all package members and method sets of named types.
+ // Not very efficient.
+ for _, mem := range pkg.Members {
+ switch mem := mem.(type) {
+ case *Function:
+ if mem.Pos() == pos {
+ return mem
+ }
+ case *Type:
+ mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type()))
+ for i, n := 0, mset.Len(); i < n; i++ {
+			// Don't call Program.MethodValue: avoid creating wrappers.
+ obj := mset.At(i).Obj().(*types.Func)
+ if obj.Pos() == pos {
+ return pkg.values[obj].(*Function)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// ValueForExpr returns the SSA Value that corresponds to non-constant
+// expression e.
+//
+// It returns nil if no value was found, e.g.
+//   - the expression is not lexically contained within f;
+//   - f was not built with debug information;
+//   - e is a constant expression (for efficiency, no debug
+//     information is stored for constants; use
+//     go/types.Info.Types[e].Value instead);
+//   - e is a reference to nil or a built-in function; or
+//   - the value was optimized away.
+//
+// If e is an addressable expression used in an lvalue context,
+// value is the address denoted by e, and isAddr is true.
+//
+// The types of e (or &e, if isAddr) and the result are equal
+// (modulo "untyped" bools resulting from comparisons).
+//
+// (Tip: to find the ssa.Value given a source position, use
+// importer.PathEnclosingInterval to locate the ast.Node, then
+// EnclosingFunction to locate the Function, then ValueForExpr to find
+// the ssa.Value.)
+//
+func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
+ if f.debugInfo() { // (opt)
+ e = unparen(e)
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if ref, ok := instr.(*DebugRef); ok {
+ if ref.Expr == e {
+ return ref.X, ref.IsAddr
+ }
+ }
+ }
+ }
+ }
+ return
+}
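+
+// Putting the tip above into code (a sketch; pkg, path and e are
+// assumed to come from the importer and PathEnclosingInterval):
+//
+//	if fn := EnclosingFunction(pkg, path); fn != nil {
+//		if v, isAddr := fn.ValueForExpr(e); v != nil {
+//			use(v, isAddr) // hypothetical consumer of the result
+//		}
+//	}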
+
+// --- Lookup functions for source-level named entities (types.Objects) ---
+
+// Package returns the SSA Package corresponding to the specified
+// type-checker package object.
+// It returns nil if no such SSA package has been created.
+//
+func (prog *Program) Package(obj *types.Package) *Package {
+ return prog.packages[obj]
+}
+
+// packageLevelValue returns the package-level value corresponding to
+// the specified named object, which may be a package-level const
+// (*Const), var (*Global) or func (*Function) of some package in
+// prog. It returns nil if the object is not found.
+//
+func (prog *Program) packageLevelValue(obj types.Object) Value {
+ if pkg, ok := prog.packages[obj.Pkg()]; ok {
+ return pkg.values[obj]
+ }
+ return nil
+}
+
+// FuncValue returns the concrete Function denoted by the source-level
+// named function obj, or nil if obj denotes an interface method.
+//
+// TODO(adonovan): check the invariant that obj.Type() matches the
+// result's Signature, both in the params/results and in the receiver.
+//
+func (prog *Program) FuncValue(obj *types.Func) *Function {
+ fn, _ := prog.packageLevelValue(obj).(*Function)
+ return fn
+}
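+
+// For example (a sketch; obj is assumed to come from the
+// type-checker's Uses or Defs map):
+//
+//	if fn := prog.FuncValue(obj); fn != nil {
+//		fmt.Println(fn.Name(), fn.Signature)
+//	}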
+
+// ConstValue returns the SSA Value denoted by the source-level named
+// constant obj.
+//
+func (prog *Program) ConstValue(obj *types.Const) *Const {
+ // TODO(adonovan): opt: share (don't reallocate)
+ // Consts for const objects and constant ast.Exprs.
+
+ // Universal constant? {true,false,nil}
+ if obj.Parent() == types.Universe {
+ return NewConst(obj.Val(), obj.Type())
+ }
+ // Package-level named constant?
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Const)
+ }
+ return NewConst(obj.Val(), obj.Type())
+}
+
+// VarValue returns the SSA Value that corresponds to a specific
+// identifier denoting the source-level named variable obj.
+//
+// VarValue returns nil if a local variable was not found, perhaps
+// because its package was not built, the debug information was not
+// requested during SSA construction, or the value was optimized away.
+//
+// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
+// and that ident must resolve to obj.
+//
+// pkg is the package enclosing the reference. (A reference to a var
+// always occurs within a function, so we need to know where to find it.)
+//
+// If the identifier is a field selector and its base expression is
+// non-addressable, then VarValue returns the value of that field.
+// For example:
+// func f() struct {x int}
+// f().x // VarValue(x) returns a *Field instruction of type int
+//
+// All other identifiers denote addressable locations (variables).
+// For them, VarValue may return either the variable's address or its
+// value, even when the expression is evaluated only for its value; the
+// situation is reported by isAddr, the second component of the result.
+//
+// If !isAddr, the returned value is the one associated with the
+// specific identifier. For example,
+// var x int // VarValue(x) returns Const 0 here
+// x = 1 // VarValue(x) returns Const 1 here
+//
+// It is not specified whether the value or the address is returned in
+// any particular case, as it may depend upon optimizations performed
+// during SSA code generation, such as registerization, constant
+// folding, avoidance of materialization of subexpressions, etc.
+//
+func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
+ // All references to a var are local to some function, possibly init.
+ fn := EnclosingFunction(pkg, ref)
+ if fn == nil {
+ return // e.g. def of struct field; SSA not built?
+ }
+
+ id := ref[0].(*ast.Ident)
+
+ // Defining ident of a parameter?
+ if id.Pos() == obj.Pos() {
+ for _, param := range fn.Params {
+ if param.Object() == obj {
+ return param, false
+ }
+ }
+ }
+
+ // Other ident?
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ if dr, ok := instr.(*DebugRef); ok {
+ if dr.Pos() == id.Pos() {
+ return dr.X, dr.IsAddr
+ }
+ }
+ }
+ }
+
+ // Defining ident of package-level var?
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Global), true
+ }
+
+ return // e.g. debug info not requested, or var optimized away
+}
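+
+// For example (a sketch; obj and ref are assumed to come from the
+// type-checker's Uses map and PathEnclosingInterval respectively):
+//
+//	if v, isAddr := prog.VarValue(obj, pkg, ref); v != nil {
+//		use(v, isAddr) // hypothetical consumer of the result
+//	}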
diff --git a/vendor/honnef.co/go/tools/ssa/ssa.go b/vendor/honnef.co/go/tools/ssa/ssa.go
new file mode 100644
index 000000000..8825e7b59
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/ssa.go
@@ -0,0 +1,1745 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This package defines a high-level intermediate representation for
+// Go programs using static single-assignment (SSA) form.
+
+import (
+ "fmt"
+ "go/ast"
+ exact "go/constant"
+ "go/token"
+ "go/types"
+ "sync"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// A Program is a partial or complete Go program converted to SSA form.
+type Program struct {
+ Fset *token.FileSet // position information for the files of this Program
+ imported map[string]*Package // all importable Packages, keyed by import path
+ packages map[*types.Package]*Package // all loaded Packages, keyed by object
+ mode BuilderMode // set of mode bits for SSA construction
+ MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets
+
+ methodsMu sync.Mutex // guards the following maps:
+ methodSets typeutil.Map // maps type to its concrete methodSet
+ runtimeTypes typeutil.Map // types for which rtypes are needed
+ canon typeutil.Map // type canonicalization map
+ bounds map[*types.Func]*Function // bounds for curried x.Method closures
+ thunks map[selectionKey]*Function // thunks for T.Method expressions
+}
+
+// A Package is a single analyzed Go package containing Members for
+// all package-level functions, variables, constants and types it
+// declares. These may be accessed directly via Members, or via the
+// type-specific accessor methods Func, Type, Var and Const.
+//
+// Members also contains entries for "init" (the synthetic package
+// initializer) and "init#%d", the nth declared init function,
+// as well as other unspecified entries.
+//
+type Package struct {
+ Prog *Program // the owning program
+ Pkg *types.Package // the corresponding go/types.Package
+ Members map[string]Member // all package members keyed by name (incl. init and init#%d)
+ values map[types.Object]Value // package members (incl. types and methods), keyed by object
+ init *Function // Func("init"); the package's init function
+ debug bool // include full debug info in this package
+
+ // The following fields are set transiently, then cleared
+ // after building.
+ buildOnce sync.Once // ensures package building occurs once
+ ninit int32 // number of init functions
+ info *types.Info // package type information
+ files []*ast.File // package ASTs
+}
+
+// A Member is a member of a Go package, implemented by *NamedConst,
+// *Global, *Function, or *Type; they are created by package-level
+// const, var, func and type declarations respectively.
+//
+type Member interface {
+ Name() string // declared name of the package member
+ String() string // package-qualified name of the package member
+ RelString(*types.Package) string // like String, but relative refs are unqualified
+ Object() types.Object // typechecker's object for this member, if any
+ Pos() token.Pos // position of member's declaration, if known
+ Type() types.Type // type of the package member
+ Token() token.Token // token.{VAR,FUNC,CONST,TYPE}
+ Package() *Package // the containing package
+}
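+
+// For example, to walk every member of a package (a sketch):
+//
+//	for name, mem := range pkg.Members {
+//		fmt.Println(name, mem.Token(), mem.Type())
+//	}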
+
+// A Type is a Member of a Package representing a package-level named type.
+type Type struct {
+ object *types.TypeName
+ pkg *Package
+}
+
+// A NamedConst is a Member of a Package representing a package-level
+// named constant.
+//
+// Pos() returns the position of the declaring ast.ValueSpec.Names[*]
+// identifier.
+//
+// NB: a NamedConst is not a Value; it contains a constant Value, which
+// it augments with the name and position of its 'const' declaration.
+//
+type NamedConst struct {
+ object *types.Const
+ Value *Const
+ pkg *Package
+}
+
+// A Value is an SSA value that can be referenced by an instruction.
+type Value interface {
+ // Name returns the name of this value, and determines how
+ // this Value appears when used as an operand of an
+ // Instruction.
+ //
+ // This is the same as the source name for Parameters,
+ // Builtins, Functions, FreeVars, Globals.
+ // For constants, it is a representation of the constant's value
+ // and type. For all other Values this is the name of the
+ // virtual register defined by the instruction.
+ //
+ // The name of an SSA Value is not semantically significant,
+ // and may not even be unique within a function.
+ Name() string
+
+ // If this value is an Instruction, String returns its
+ // disassembled form; otherwise it returns unspecified
+ // human-readable information about the Value, such as its
+ // kind, name and type.
+ String() string
+
+ // Type returns the type of this value. Many instructions
+ // (e.g. IndexAddr) change their behaviour depending on the
+ // types of their operands.
+ Type() types.Type
+
+ // Parent returns the function to which this Value belongs.
+ // It returns nil for named Functions, Builtin, Const and Global.
+ Parent() *Function
+
+ // Referrers returns the list of instructions that have this
+ // value as one of their operands; it may contain duplicates
+ // if an instruction has a repeated operand.
+ //
+ // Referrers actually returns a pointer through which the
+ // caller may perform mutations to the object's state.
+ //
+ // Referrers is currently only defined if Parent()!=nil,
+ // i.e. for the function-local values FreeVar, Parameter,
+ // Functions (iff anonymous) and all value-defining instructions.
+ // It returns nil for named Functions, Builtin, Const and Global.
+ //
+ // Instruction.Operands contains the inverse of this relation.
+ Referrers() *[]Instruction
+
+ // Pos returns the location of the AST token most closely
+ // associated with the operation that gave rise to this value,
+ // or token.NoPos if it was not explicit in the source.
+ //
+ // For each ast.Node type, a particular token is designated as
+ // the closest location for the expression, e.g. the Lparen
+ // for an *ast.CallExpr. This permits a compact but
+ // approximate mapping from Values to source positions for use
+ // in diagnostic messages, for example.
+ //
+ // (Do not use this position to determine which Value
+ // corresponds to an ast.Expr; use Function.ValueForExpr
+ // instead. NB: it requires that the function was built with
+ // debug information.)
+ Pos() token.Pos
+}
+
+// An Instruction is an SSA instruction that computes a new Value or
+// has some effect.
+//
+// An Instruction that defines a value (e.g. BinOp) also implements
+// the Value interface; an Instruction that only has an effect (e.g. Store)
+// does not.
+//
+type Instruction interface {
+ // String returns the disassembled form of this value.
+ //
+ // Examples of Instructions that are Values:
+ // "x + y" (BinOp)
+ // "len([])" (Call)
+ // Note that the name of the Value is not printed.
+ //
+ // Examples of Instructions that are not Values:
+ // "return x" (Return)
+ // "*y = x" (Store)
+ //
+	// (The separation of Value.Name() from Value.String() is useful
+ // for some analyses which distinguish the operation from the
+ // value it defines, e.g., 'y = local int' is both an allocation
+ // of memory 'local int' and a definition of a pointer y.)
+ String() string
+
+ // Parent returns the function to which this instruction
+ // belongs.
+ Parent() *Function
+
+ // Block returns the basic block to which this instruction
+ // belongs.
+ Block() *BasicBlock
+
+ // setBlock sets the basic block to which this instruction belongs.
+ setBlock(*BasicBlock)
+
+ // Operands returns the operands of this instruction: the
+ // set of Values it references.
+ //
+ // Specifically, it appends their addresses to rands, a
+ // user-provided slice, and returns the resulting slice,
+ // permitting avoidance of memory allocation.
+ //
+ // The operands are appended in undefined order, but the order
+ // is consistent for a given Instruction; the addresses are
+ // always non-nil but may point to a nil Value. Clients may
+ // store through the pointers, e.g. to effect a value
+ // renaming.
+ //
+ // Value.Referrers is a subset of the inverse of this
+ // relation. (Referrers are not tracked for all types of
+ // Values.)
+ Operands(rands []*Value) []*Value
+
+ // Pos returns the location of the AST token most closely
+ // associated with the operation that gave rise to this
+ // instruction, or token.NoPos if it was not explicit in the
+ // source.
+ //
+ // For each ast.Node type, a particular token is designated as
+ // the closest location for the expression, e.g. the Go token
+ // for an *ast.GoStmt. This permits a compact but approximate
+ // mapping from Instructions to source positions for use in
+ // diagnostic messages, for example.
+ //
+ // (Do not use this position to determine which Instruction
+ // corresponds to an ast.Expr; see the notes for Value.Pos.
+ // This position may be used to determine which non-Value
+ // Instruction corresponds to some ast.Stmts, but not all: If
+ // and Jump instructions have no Pos(), for example.)
+ Pos() token.Pos
+}
+
+// A Node is a node in the SSA value graph. Every concrete type that
+// implements Node is also either a Value, an Instruction, or both.
+//
+// Node contains the methods common to Value and Instruction, plus the
+// Operands and Referrers methods generalized to return nil for
+// non-Instructions and non-Values, respectively.
+//
+// Node is provided to simplify SSA graph algorithms. Clients should
+// use the more specific and informative Value or Instruction
+// interfaces where appropriate.
+//
+type Node interface {
+ // Common methods:
+ String() string
+ Pos() token.Pos
+ Parent() *Function
+
+ // Partial methods:
+ Operands(rands []*Value) []*Value // nil for non-Instructions
+ Referrers() *[]Instruction // nil for non-Values
+}
+
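+// The sketch below is illustrative only and not part of the upstream
+// API: it shows one way a client might enumerate every operand of
+// every instruction in a function, reusing a single scratch slice as
+// the Operands documentation above suggests. The name walkOperands is
+// hypothetical.
+func walkOperands(fn *Function) {
+ var rands []*Value // scratch buffer, reused across instructions
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ rands = instr.Operands(rands[:0]) // addresses of operand Values
+ for _, rand := range rands {
+ _ = *rand // each address is non-nil, but *rand may be nil
+ }
+ }
+ }
+}
+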
+// Function represents the parameters, results, and code of a function
+// or method.
+//
+// If Blocks is nil, this indicates an external function for which no
+// Go source code is available. In this case, FreeVars and Locals
+// are nil too. Clients performing whole-program analysis must
+// handle external functions specially.
+//
+// Blocks contains the function's control-flow graph (CFG).
+// Blocks[0] is the function entry point; block order is not otherwise
+// semantically significant, though it may affect the readability of
+// the disassembly.
+// To iterate over the blocks in dominance order, use DomPreorder().
+//
+// Recover is an optional second entry point to which control resumes
+// after a recovered panic. The Recover block may contain only a return
+// statement, preceded by a load of the function's named return
+// parameters, if any.
+//
+// A nested function (Parent()!=nil) that refers to one or more
+// lexically enclosing local variables ("free variables") has FreeVars.
+// Such functions cannot be called directly but require a
+// value created by MakeClosure which, via its Bindings, supplies
+// values for these parameters.
+//
+// If the function is a method (Signature.Recv() != nil) then the first
+// element of Params is the receiver parameter.
+//
+// A Go package may declare many functions called "init".
+// For each one, Object().Name() returns "init" but Name() returns
+// "init#1", etc, in declaration order.
+//
+// Pos() returns the declaring ast.FuncLit.Type.Func or the position
+// of the ast.FuncDecl.Name, if the function was explicit in the
+// source. Synthetic wrappers, for which Synthetic != "", may share
+// the same position as the function they wrap.
+// Syntax.Pos() always returns the position of the declaring "func" token.
+//
+// Type() returns the function's Signature.
+//
+type Function struct {
+ name string
+ object types.Object // a declared *types.Func or one of its wrappers
+ method *types.Selection // info about provenance of synthetic methods
+ Signature *types.Signature
+ pos token.Pos
+
+ Synthetic string // provenance of synthetic function; "" for true source functions
+ syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode
+ parent *Function // enclosing function if anon; nil if global
+ Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error)
+ Prog *Program // enclosing program
+ Params []*Parameter // function parameters; for methods, includes receiver
+ FreeVars []*FreeVar // free variables whose values must be supplied by closure
+ Locals []*Alloc // local variables of this function
+ Blocks []*BasicBlock // basic blocks of the function; nil => external
+ Recover *BasicBlock // optional; control transfers here after recovered panic
+ AnonFuncs []*Function // anonymous functions directly beneath this one
+ referrers []Instruction // referring instructions (iff Parent() != nil)
+
+ // The following fields are set transiently during building,
+ // then cleared.
+ currentBlock *BasicBlock // where to emit code
+ objects map[types.Object]Value // addresses of local variables
+ namedResults []*Alloc // tuple of named results
+ targets *targets // linked stack of branch targets
+ lblocks map[*ast.Object]*lblock // labelled blocks
+}
+
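+// An illustrative sketch (not upstream API): visiting a function's
+// blocks in dominance order, first handling the external-function
+// case (Blocks == nil) described above. The name dumpBlocks is
+// hypothetical.
+func dumpBlocks(fn *Function) {
+ if fn.Blocks == nil {
+ return // external function: no Go source, hence no CFG
+ }
+ for _, b := range fn.DomPreorder() {
+ fmt.Printf("%s: block %d (%q): %d instructions\n",
+ fn.Name(), b.Index, b.Comment, len(b.Instrs))
+ }
+}
+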
+// BasicBlock represents an SSA basic block.
+//
+// The final element of Instrs is always an explicit transfer of
+// control (If, Jump, Return, or Panic).
+//
+// A block may contain no Instructions only if it is unreachable,
+// i.e., Preds is nil. Empty blocks are typically pruned.
+//
+// BasicBlocks and their Preds/Succs relation form a (possibly cyclic)
+// graph independent of the SSA Value graph: the control-flow graph or
+// CFG. It is illegal for multiple edges to exist between the same
+// pair of blocks.
+//
+// Each BasicBlock is also a node in the dominator tree of the CFG.
+// The tree may be navigated using Idom()/Dominees() and queried using
+// Dominates().
+//
+// The order of Preds and Succs is significant (to Phi and If
+// instructions, respectively).
+//
+type BasicBlock struct {
+ Index int // index of this block within Parent().Blocks
+ Comment string // optional label; no semantic significance
+ parent *Function // parent function
+ Instrs []Instruction // instructions in order
+ Preds, Succs []*BasicBlock // predecessors and successors
+ succs2 [2]*BasicBlock // initial space for Succs
+ dom domInfo // dominator tree info
+ gaps int // number of nil Instrs (transient)
+ rundefers int // number of rundefers (transient)
+}
+
+// Pure values ----------------------------------------
+
+// A FreeVar represents a free variable of the function to which it
+// belongs.
+//
+// FreeVars are used to implement anonymous functions, whose free
+// variables are lexically captured in a closure formed by
+// MakeClosure. The value of such a free var is an Alloc or another
+// FreeVar and is considered a potentially escaping heap address, with
+// pointer type.
+//
+// FreeVars are also used to implement bound method closures. Such a
+// free var represents the receiver value and may be of any type that
+// has concrete methods.
+//
+// Pos() returns the position of the value that was captured, which
+// belongs to an enclosing function.
+//
+type FreeVar struct {
+ name string
+ typ types.Type
+ pos token.Pos
+ parent *Function
+ referrers []Instruction
+
+ // Transiently needed during building.
+ outer Value // the Value captured from the enclosing context.
+}
+
+// A Parameter represents an input parameter of a function.
+//
+type Parameter struct {
+ name string
+ object types.Object // a *types.Var; nil for non-source locals
+ typ types.Type
+ pos token.Pos
+ parent *Function
+ referrers []Instruction
+}
+
+// A Const represents the value of a constant expression.
+//
+// The underlying type of a constant may be any boolean, numeric, or
+// string type. In addition, a Const may represent the nil value of
+// any reference type---interface, map, channel, pointer, slice, or
+// function---but not "untyped nil".
+//
+// All source-level constant expressions are represented by a Const
+// of the same type and value.
+//
+// Value holds the exact value of the constant, independent of its
+// Type(), using the same representation as package go/exact uses for
+// constants, or nil for a typed nil value.
+//
+// Pos() returns token.NoPos.
+//
+// Example printed form:
+// 42:int
+// "hello":untyped string
+// 3+4i:MyComplex
+//
+type Const struct {
+ typ types.Type
+ Value exact.Value
+}
+
+// A Global is a named Value holding the address of a package-level
+// variable.
+//
+// Pos() returns the position of the ast.ValueSpec.Names[*]
+// identifier.
+//
+type Global struct {
+ name string
+ object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard
+ typ types.Type
+ pos token.Pos
+
+ Pkg *Package
+}
+
+// A Builtin represents a specific use of a built-in function, e.g. len.
+//
+// Builtins are immutable values. Builtins do not have addresses.
+// Builtins can only appear in CallCommon.Func.
+//
+// Name() indicates the function: one of the built-in functions from the
+// Go spec (excluding "make" and "new") or one of these ssa-defined
+// intrinsics:
+//
+// // wrapnilchk returns ptr if non-nil, panics otherwise.
+// // (For use in indirection wrappers.)
+// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T
+//
+// Object() returns a *types.Builtin for built-ins defined by the spec,
+// nil for others.
+//
+// Type() returns a *types.Signature representing the effective
+// signature of the built-in for this call.
+//
+type Builtin struct {
+ name string
+ sig *types.Signature
+}
+
+// Value-defining instructions ----------------------------------------
+
+// The Alloc instruction reserves space for a variable of the given type,
+// zero-initializes it, and yields its address.
+//
+// Alloc values are always addresses, and have pointer types, so the
+// type of the allocated variable is actually
+// Type().Underlying().(*types.Pointer).Elem().
+//
+// If Heap is false, Alloc allocates space in the function's
+// activation record (frame); we refer to an Alloc(Heap=false) as a
+// "local" alloc. Each local Alloc returns the same address each time
+// it is executed within the same activation; the space is
+// re-initialized to zero.
+//
+// If Heap is true, Alloc allocates space in the heap; we
+// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc
+// returns a different address each time it is executed.
+//
+// When Alloc is applied to a channel, map or slice type, it returns
+// the address of an uninitialized (nil) reference of that kind; store
+// the result of MakeSlice, MakeMap or MakeChan in that location to
+// instantiate these types.
+//
+// Pos() returns the ast.CompositeLit.Lbrace for a composite literal,
+// or the ast.CallExpr.Rparen for a call to new() or for a call that
+// allocates a varargs slice.
+//
+// Example printed form:
+// t0 = local int
+// t1 = new int
+//
+type Alloc struct {
+ register
+ Comment string
+ Heap bool
+ index int // dense numbering; for lifting
+}
+
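+// An illustrative sketch (not upstream API): classifying Allocs by the
+// Heap flag documented above. The name countAllocs is hypothetical.
+func countAllocs(fn *Function) (locals, news int) {
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ if a, ok := instr.(*Alloc); ok {
+ if a.Heap {
+ news++ // "new" alloc: fresh address on each execution
+ } else {
+ locals++ // frame alloc: same address within an activation
+ }
+ }
+ }
+ }
+ return
+}
+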
+var _ Instruction = (*Sigma)(nil)
+var _ Value = (*Sigma)(nil)
+
+// A Sigma instruction represents a σ-node: it renames the value X on
+// a single control-flow edge out of a conditional branch, with Branch
+// recording whether the edge is the true or the false successor of the
+// If. Sigma nodes are an extension of plain SSA form used by this
+// package's analyses.
+//
+type Sigma struct {
+ register
+ X Value
+ Branch bool
+}
+
+// Value returns the value underlying this σ-node, unwrapping any
+// chain of nested Sigmas.
+func (p *Sigma) Value() Value {
+ v := p.X
+ for {
+ sigma, ok := v.(*Sigma)
+ if !ok {
+ break
+ }
+ v = sigma.X // step past this σ-node to its operand
+ }
+ return v
+}
+
+func (p *Sigma) String() string {
+ return fmt.Sprintf("σ [%s.%t]", relName(p.X, p), p.Branch)
+}
+
+// The Phi instruction represents an SSA φ-node, which combines values
+// that differ across incoming control-flow edges and yields a new
+// value. Within a block, all φ-nodes must appear before all non-φ
+// nodes.
+//
+// Pos() returns the position of the && or || for short-circuit
+// control-flow joins, or that of the *Alloc for φ-nodes inserted
+// during SSA renaming.
+//
+// Example printed form:
+// t2 = phi [0: t0, 1: t1]
+//
+type Phi struct {
+ register
+ Comment string // a hint as to its purpose
+ Edges []Value // Edges[i] is value for Block().Preds[i]
+}
+
+// The Call instruction represents a function or method call.
+//
+// The Call instruction yields the function result if there is exactly
+// one. Otherwise it returns a tuple, the components of which are
+// accessed via Extract.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.CallExpr.Lparen, if explicit in the source.
+//
+// Example printed form:
+// t2 = println(t0, t1)
+// t4 = t3()
+// t7 = invoke t5.Println(...t6)
+//
+type Call struct {
+ register
+ Call CallCommon
+}
+
+// The BinOp instruction yields the result of binary operation X Op Y.
+//
+// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source.
+//
+// Example printed form:
+// t1 = t0 + 1:int
+//
+type BinOp struct {
+ register
+ // One of:
+ // ADD SUB MUL QUO REM + - * / %
+ // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^
+ // EQL NEQ LSS LEQ GTR GEQ == != < <= > >=
+ Op token.Token
+ X, Y Value
+}
+
+// The UnOp instruction yields the result of Op X.
+// ARROW is channel receive.
+// MUL is pointer indirection (load).
+// XOR is bitwise complement.
+// SUB is negation.
+// NOT is logical negation.
+//
+// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above
+// and a boolean indicating the success of the receive. The
+// components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source.
+// For receive operations (ARROW) implicit in ranging over a channel,
+// Pos() returns the ast.RangeStmt.For.
+// For implicit memory loads (MUL), Pos() returns the position of the
+// most closely associated source-level construct; the details are not
+// specified.
+//
+// Example printed form:
+// t0 = *x
+// t2 = <-t1,ok
+//
+type UnOp struct {
+ register
+ Op token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^
+ X Value
+ CommaOk bool
+}
+
+// The ChangeType instruction applies to X a value-preserving type
+// change to Type().
+//
+// Type changes are permitted:
+// - between a named type and its underlying type.
+// - between two named types of the same underlying type.
+// - between (possibly named) pointers to identical base types.
+// - from a bidirectional channel to a read- or write-channel,
+// optionally adding/removing a name.
+//
+// This operation cannot fail dynamically.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// t1 = changetype *int <- IntPtr (t0)
+//
+type ChangeType struct {
+ register
+ X Value
+}
+
+// The Convert instruction yields the conversion of value X to type
+// Type(). One or both of those types is basic (but possibly named).
+//
+// A conversion may change the value and representation of its operand.
+// Conversions are permitted:
+// - between real numeric types.
+// - between complex numeric types.
+// - between string and []byte or []rune.
+// - between pointers and unsafe.Pointer.
+// - between unsafe.Pointer and uintptr.
+// - from (Unicode) integer to (UTF-8) string.
+// A conversion may imply a type name change also.
+//
+// This operation cannot fail dynamically.
+//
+// Conversions of untyped string/number/bool constants to a specific
+// representation are eliminated during SSA construction.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// t1 = convert []byte <- string (t0)
+//
+type Convert struct {
+ register
+ X Value
+}
+
+// ChangeInterface constructs a value of one interface type from a
+// value of another interface type known to be assignable to it.
+// This operation cannot fail.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or token.NoPos
+// otherwise.
+//
+// Example printed form:
+// t1 = change interface interface{} <- I (t0)
+//
+type ChangeInterface struct {
+ register
+ X Value
+}
+
+// MakeInterface constructs an instance of an interface type from a
+// value of a concrete type.
+//
+// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set
+// of X, and Program.Method(m) to find the implementation of a method.
+//
+// To construct the zero value of an interface type T, use:
+// NewConst(exact.MakeNil(), T, pos)
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// t1 = make interface{} <- int (42:int)
+// t2 = make Stringer <- t0
+//
+type MakeInterface struct {
+ register
+ X Value
+}
+
+// The MakeClosure instruction yields a closure value whose code is
+// Fn and whose free variables' values are supplied by Bindings.
+//
+// Type() returns a (possibly named) *types.Signature.
+//
+// Pos() returns the ast.FuncLit.Type.Func for a function literal
+// closure or the ast.SelectorExpr.Sel for a bound method closure.
+//
+// Example printed form:
+// t0 = make closure anon@1.2 [x y z]
+// t1 = make closure bound$(main.I).add [i]
+//
+type MakeClosure struct {
+ register
+ Fn Value // always a *Function
+ Bindings []Value // values for each free variable in Fn.FreeVars
+}
+
+// The MakeMap instruction creates a new hash-table-based map object
+// and yields a value of kind map.
+//
+// Type() returns a (possibly named) *types.Map.
+//
+// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or
+// the ast.CompositeLit.Lbrace if created by a literal.
+//
+// Example printed form:
+// t1 = make map[string]int t0
+// t1 = make StringIntMap t0
+//
+type MakeMap struct {
+ register
+ Reserve Value // initial space reservation; nil => default
+}
+
+// The MakeChan instruction creates a new channel object and yields a
+// value of kind chan.
+//
+// Type() returns a (possibly named) *types.Chan.
+//
+// Pos() returns the ast.CallExpr.Lparen for the make(chan) that
+// created it.
+//
+// Example printed form:
+// t0 = make chan int 0
+// t0 = make IntChan 0
+//
+type MakeChan struct {
+ register
+ Size Value // int; size of buffer; zero => synchronous.
+}
+
+// The MakeSlice instruction yields a slice of length Len backed by a
+// newly allocated array of length Cap.
+//
+// Both Len and Cap must be non-nil Values of integer type.
+//
+// (Alloc(types.Array) followed by Slice will not suffice because
+// Alloc can only create arrays of constant length.)
+//
+// Type() returns a (possibly named) *types.Slice.
+//
+// Pos() returns the ast.CallExpr.Lparen for the make([]T) that
+// created it.
+//
+// Example printed form:
+// t1 = make []string 1:int t0
+// t1 = make StringSlice 1:int t0
+//
+type MakeSlice struct {
+ register
+ Len Value
+ Cap Value
+}
+
+// The Slice instruction yields a slice of an existing string, slice
+// or *array X between optional integer bounds Low and High.
+//
+// Dynamically, this instruction panics if X evaluates to a nil *array
+// pointer.
+//
+// Type() returns string if the type of X was string, otherwise a
+// *types.Slice with the same element type as X.
+//
+// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice
+// operation, the ast.CompositeLit.Lbrace if created by a literal, or
+// NoPos if not explicit in the source (e.g. a variadic argument slice).
+//
+// Example printed form:
+// t1 = slice t0[1:]
+//
+type Slice struct {
+ register
+ X Value // slice, string, or *array
+ Low, High, Max Value // each may be nil
+}
+
+// The FieldAddr instruction yields the address of Field of *struct X.
+//
+// The field is identified by its index within the field list of the
+// struct type of X.
+//
+// Dynamically, this instruction panics if X evaluates to a nil
+// pointer.
+//
+// Type() returns a (possibly named) *types.Pointer.
+//
+// Pos() returns the position of the ast.SelectorExpr.Sel for the
+// field, if explicit in the source.
+//
+// Example printed form:
+// t1 = &t0.name [#1]
+//
+type FieldAddr struct {
+ register
+ X Value // *struct
+ Field int // index into X.Type().Deref().(*types.Struct).Fields
+}
+
+// The Field instruction yields the Field of struct X.
+//
+// The field is identified by its index within the field list of the
+// struct type of X; by using numeric indices we avoid ambiguity of
+// package-local identifiers and permit compact representations.
+//
+// Pos() returns the position of the ast.SelectorExpr.Sel for the
+// field, if explicit in the source.
+//
+// Example printed form:
+// t1 = t0.name [#1]
+//
+type Field struct {
+ register
+ X Value // struct
+ Field int // index into X.Type().(*types.Struct).Fields
+}
+
+// The IndexAddr instruction yields the address of the element at
+// index Index of collection X. Index is an integer expression.
+//
+// The elements of maps and strings are not addressable; use Lookup or
+// MapUpdate instead.
+//
+// Dynamically, this instruction panics if X evaluates to a nil *array
+// pointer.
+//
+// Type() returns a (possibly named) *types.Pointer.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+// t2 = &t0[t1]
+//
+type IndexAddr struct {
+ register
+ X Value // slice or *array
+ Index Value // numeric index
+}
+
+// The Index instruction yields element Index of array X.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+// t2 = t0[t1]
+//
+type Index struct {
+ register
+ X Value // array
+ Index Value // integer index
+}
+
+// The Lookup instruction yields element Index of collection X, a map
+// or string. Index is an integer expression if X is a string or the
+// appropriate key type if X is a map.
+//
+// If CommaOk, the result is a 2-tuple of the value above and a
+// boolean indicating the result of a map membership test for the key.
+// The components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
+//
+// Example printed form:
+// t2 = t0[t1]
+// t5 = t3[t4],ok
+//
+type Lookup struct {
+ register
+ X Value // string or map
+ Index Value // numeric or key-typed index
+ CommaOk bool // return a value,ok pair
+}
+
+// SelectState is a helper for Select.
+// It represents one goal state and its corresponding communication.
+//
+type SelectState struct {
+ Dir types.ChanDir // direction of case (SendOnly or RecvOnly)
+ Chan Value // channel to use (for send or receive)
+ Send Value // value to send (for send)
+ Pos token.Pos // position of token.ARROW
+ DebugNode ast.Node // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
+}
+
+// The Select instruction tests whether (or blocks until) one
+// of the specified send or receive states is entered.
+//
+// Let n be the number of States for which Dir==RecvOnly and T_i (0<=i<n)
+// be the element type of each such state's Chan.
+// Select returns an n+2-tuple
+// (index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+// The tuple's components, described below, must be accessed via the
+// Extract instruction.
+//
+// If Blocking, select waits until exactly one state holds, i.e. a
+// channel becomes ready for the designated operation of sending or
+// receiving; select chooses one among the ready states
+// pseudorandomly, performs the send or receive operation, and sets
+// 'index' to the index of the chosen channel.
+//
+// If !Blocking, select doesn't block if no states hold; instead it
+// returns immediately with index equal to -1.
+//
+// If the chosen channel was used for a receive, the r_i component is
+// set to the received value, where i is the index of that state among
+// all n receive states; otherwise r_i has the zero value of type T_i.
+// Note that the receive index i is not the same as the state index
+// 'index', which ranges over all states, not just receives.
+//
+// The second component of the tuple, recvOk, is a boolean whose value
+// is true iff the selected operation was a receive and the receive
+// successfully yielded a value.
+//
+// Pos() returns the ast.SelectStmt.Select.
+//
+// Example printed form:
+// t3 = select nonblocking [<-t0, t1<-t2]
+// t4 = select blocking []
+//
+type Select struct {
+ register
+ States []*SelectState
+ Blocking bool
+}
+
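+// An illustrative sketch (not upstream API): locating the Extract
+// instructions that decode the first two components of a Select's
+// result tuple, per the layout documented above. The name
+// selectResults is hypothetical.
+func selectResults(sel *Select) (index, recvOk *Extract) {
+ refs := sel.Referrers()
+ if refs == nil {
+ return
+ }
+ for _, instr := range *refs {
+ if ext, ok := instr.(*Extract); ok {
+ switch ext.Index {
+ case 0:
+ index = ext // index of the chosen state
+ case 1:
+ recvOk = ext // whether a receive yielded a value
+ }
+ }
+ }
+ return
+}
+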
+// The Range instruction yields an iterator over the domain and range
+// of X, which must be a string or map.
+//
+// Elements are accessed via Next.
+//
+// Type() returns an opaque and degenerate "rangeIter" type.
+//
+// Pos() returns the ast.RangeStmt.For.
+//
+// Example printed form:
+// t0 = range "hello":string
+//
+type Range struct {
+ register
+ X Value // string or map
+}
+
+// The Next instruction reads and advances the (map or string)
+// iterator Iter and returns a 3-tuple value (ok, k, v). If the
+// iterator is not exhausted, ok is true and k and v are the next
+// elements of the domain and range, respectively. Otherwise ok is
+// false and k and v are undefined.
+//
+// Components of the tuple are accessed using Extract.
+//
+// The IsString field distinguishes iterators over strings from those
+// over maps, as the Type() alone is insufficient: consider
+// map[int]rune.
+//
+// Type() returns a *types.Tuple for the triple (ok, k, v).
+// The types of k and/or v may be types.Invalid.
+//
+// Example printed form:
+// t1 = next t0
+//
+type Next struct {
+ register
+ Iter Value
+ IsString bool // true => string iterator; false => map iterator.
+}
+
+// The TypeAssert instruction tests whether interface value X has type
+// AssertedType.
+//
+// If !CommaOk, on success it returns v, the result of the conversion
+// (defined below); on failure it panics.
+//
+// If CommaOk: on success it returns a pair (v, true) where v is the
+// result of the conversion; on failure it returns (z, false) where z
+// is AssertedType's zero value. The components of the pair must be
+// accessed using the Extract instruction.
+//
+// If AssertedType is a concrete type, TypeAssert checks whether the
+// dynamic type in interface X is equal to it, and if so, the result
+// of the conversion is a copy of the value in the interface.
+//
+// If AssertedType is an interface, TypeAssert checks whether the
+// dynamic type of the interface is assignable to it, and if so, the
+// result of the conversion is a copy of the interface value X.
+// If AssertedType is a superinterface of X.Type(), the operation will
+// fail iff the operand is nil. (Contrast with ChangeInterface, which
+// performs no nil-check.)
+//
+// Type() reflects the actual type of the result, possibly a
+// 2-types.Tuple; AssertedType is the asserted type.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or the
+// ast.CaseClause.Case if the instruction arose from a case of a
+// type-switch statement.
+//
+// Example printed form:
+// t1 = typeassert t0.(int)
+// t3 = typeassert,ok t2.(T)
+//
+type TypeAssert struct {
+ register
+ X Value
+ AssertedType types.Type
+ CommaOk bool
+}
+
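+// An illustrative sketch (not upstream API): recovering the type of
+// the converted value from a TypeAssert, which is wrapped in a 2-tuple
+// when CommaOk is set, as documented above. The name assertedValueType
+// is hypothetical.
+func assertedValueType(ta *TypeAssert) types.Type {
+ if ta.CommaOk {
+ // Type() is a 2-tuple (value, ok); component 0 is the value.
+ return ta.Type().(*types.Tuple).At(0).Type()
+ }
+ return ta.Type() // just the converted value
+}
+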
+// The Extract instruction yields component Index of Tuple.
+//
+// This is used to access the results of instructions with multiple
+// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and
+// IndexExpr(Map).
+//
+// Example printed form:
+// t1 = extract t0 #1
+//
+type Extract struct {
+ register
+ Tuple Value
+ Index int
+}
+
+// Instructions executed for effect. They do not yield a value. --------------------
+
+// The Jump instruction transfers control to the sole successor of its
+// owning block.
+//
+// A Jump must be the last instruction of its containing BasicBlock.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// jump done
+//
+type Jump struct {
+ anInstruction
+}
+
+// The If instruction transfers control to one of the two successors
+// of its owning block, depending on the boolean Cond: the first if
+// true, the second if false.
+//
+// An If instruction must be the last instruction of its containing
+// BasicBlock.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// if t0 goto done else body
+//
+type If struct {
+ anInstruction
+ Cond Value
+}
+
+// The Return instruction returns values and control back to the calling
+// function.
+//
+// len(Results) is always equal to the number of results in the
+// function's signature.
+//
+// If len(Results) > 1, Return returns a tuple value with the specified
+// components which the caller must access using Extract instructions.
+//
+// There is no instruction to return a ready-made tuple like those
+// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or
+// a tail-call to a function with multiple result parameters.
+//
+// Return must be the last instruction of its containing BasicBlock.
+// Such a block has no successors.
+//
+// Pos() returns the ast.ReturnStmt.Return, if explicit in the source.
+//
+// Example printed form:
+// return
+// return nil:I, 2:int
+//
+type Return struct {
+ anInstruction
+ Results []Value
+ pos token.Pos
+}
+
+// The RunDefers instruction pops and invokes the entire stack of
+// procedure calls pushed by Defer instructions in this function.
+//
+// It is legal to encounter multiple 'rundefers' instructions in a
+// single control-flow path through a function; this is useful in
+// the combined init() function, for example.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// rundefers
+//
+type RunDefers struct {
+ anInstruction
+}
+
+// The Panic instruction initiates a panic with value X.
+//
+// A Panic instruction must be the last instruction of its containing
+// BasicBlock, which must have no successors.
+//
+// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction;
+// they are treated as calls to a built-in function.
+//
+// Pos() returns the ast.CallExpr.Lparen if this panic was explicit
+// in the source.
+//
+// Example printed form:
+// panic t0
+//
+type Panic struct {
+ anInstruction
+ X Value // an interface{}
+ pos token.Pos
+}
+
+// The Go instruction creates a new goroutine and calls the specified
+// function within it.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.GoStmt.Go.
+//
+// Example printed form:
+// go println(t0, t1)
+// go t3()
+// go invoke t5.Println(...t6)
+//
+type Go struct {
+ anInstruction
+ Call CallCommon
+ pos token.Pos
+}
+
+// The Defer instruction pushes the specified call onto a stack of
+// functions to be called by a RunDefers instruction or by a panic.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.DeferStmt.Defer.
+//
+// Example printed form:
+// defer println(t0, t1)
+// defer t3()
+// defer invoke t5.Println(...t6)
+//
+type Defer struct {
+ anInstruction
+ Call CallCommon
+ pos token.Pos
+}
+
+// The Send instruction sends X on channel Chan.
+//
+// Pos() returns the ast.SendStmt.Arrow, if explicit in the source.
+//
+// Example printed form:
+// send t0 <- t1
+//
+type Send struct {
+ anInstruction
+ Chan, X Value
+ pos token.Pos
+}
+
+// The Store instruction stores Val at address Addr.
+// Stores can be of arbitrary types.
+//
+// Pos() returns the position of the source-level construct most closely
+// associated with the memory store operation.
+// Since implicit memory stores are numerous and varied and depend upon
+// implementation choices, the details are not specified.
+//
+// Example printed form:
+// *x = y
+//
+type Store struct {
+ anInstruction
+ Addr Value
+ Val Value
+ pos token.Pos
+}
+
+// The BlankStore instruction is emitted for assignments to the blank
+// identifier.
+//
+// BlankStore is a pseudo-instruction: it has no dynamic effect.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// _ = t0
+//
+type BlankStore struct {
+ anInstruction
+ Val Value
+}
+
+// The MapUpdate instruction updates the association of Map[Key] to
+// Value.
+//
+// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack,
+// if explicit in the source.
+//
+// Example printed form:
+// t0[t1] = t2
+//
+type MapUpdate struct {
+ anInstruction
+ Map Value
+ Key Value
+ Value Value
+ pos token.Pos
+}
+
+// A DebugRef instruction maps a source-level expression Expr to the
+// SSA value X that represents the value (!IsAddr) or address (IsAddr)
+// of that expression.
+//
+// DebugRef is a pseudo-instruction: it has no dynamic effect.
+//
+// Pos() returns Expr.Pos(), the start position of the source-level
+// expression. This is not the same as the "designated" token as
+// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the
+// position of the ("designated") Lparen token.
+//
+// If Expr is an *ast.Ident denoting a var or func, Object() returns
+// the object; though this information can be obtained from the type
+// checker, including it here greatly facilitates debugging.
+// For non-Ident expressions, Object() returns nil.
+//
+// DebugRefs are generated only for functions built with debugging
+// enabled; see Package.SetDebugMode() and the GlobalDebug builder
+// mode flag.
+//
+// DebugRefs are not emitted for ast.Idents referring to constants or
+// predeclared identifiers, since they are trivial and numerous.
+// Nor are they emitted for ast.ParenExprs.
+//
+// (By representing these as instructions, rather than out-of-band,
+// consistency is maintained during transformation passes by the
+// ordinary SSA renaming machinery.)
+//
+// Example printed form:
+// ; *ast.CallExpr @ 102:9 is t5
+// ; var x float64 @ 109:72 is x
+// ; address of *ast.CompositeLit @ 216:10 is t0
+//
+type DebugRef struct {
+ anInstruction
+ Expr ast.Expr // the referring expression (never *ast.ParenExpr)
+ object types.Object // the identity of the source var/func
+ IsAddr bool // Expr is addressable and X is the address it denotes
+ X Value // the value or address of Expr
+}
+
+// Embeddable mix-ins and helpers for common parts of other structs. -----------
+
+// register is a mix-in embedded by all SSA values that are also
+// instructions, i.e. virtual registers, and provides a uniform
+// implementation of most of the Value interface: Value.Name() is a
+// numbered register (e.g. "t0"); the other methods are field accessors.
+//
+// Temporary names are automatically assigned to each register on
+// completion of building a function in SSA form.
+//
+// Clients must not assume that the 'id' value (and the Name() derived
+// from it) is unique within a function. As always in this API,
+// semantics are determined only by identity; names exist only to
+// facilitate debugging.
+//
+type register struct {
+ anInstruction
+ num int // "name" of virtual register, e.g. "t0". Not guaranteed unique.
+ typ types.Type // type of virtual register
+ pos token.Pos // position of source expression, or NoPos
+ referrers []Instruction
+}
+
+// anInstruction is a mix-in embedded by all Instructions.
+// It provides the implementations of the Block and setBlock methods.
+type anInstruction struct {
+ block *BasicBlock // the basic block of this instruction
+}
+
+// CallCommon is contained by Go, Defer and Call to hold the
+// common parts of a function or method call.
+//
+// Each CallCommon exists in one of two modes, function call and
+// interface method invocation, or "call" and "invoke" for short.
+//
+// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon
+// represents an ordinary function call of the value in Value,
+// which may be a *Builtin, a *Function or any other value of kind
+// 'func'.
+//
+// Value may be one of:
+// (a) a *Function, indicating a statically dispatched call
+// to a package-level function, an anonymous function, or
+// a method of a named type.
+// (b) a *MakeClosure, indicating an immediately applied
+// function literal with free variables.
+// (c) a *Builtin, indicating a statically dispatched call
+// to a built-in function.
+// (d) any other value, indicating a dynamically dispatched
+// function call.
+// StaticCallee returns the identity of the callee in cases
+// (a) and (b), nil otherwise.
+//
+// Args contains the arguments to the call. If Value is a method,
+// Args[0] contains the receiver parameter.
+//
+// Example printed form:
+// t2 = println(t0, t1)
+// go t3()
+// defer t5(...t6)
+//
+// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon
+// represents a dynamically dispatched call to an interface method.
+// In this mode, Value is the interface value and Method is the
+// interface's abstract method. Note: an abstract method may be
+// shared by multiple interfaces due to embedding; Value.Type()
+// provides the specific interface used for this call.
+//
+// Value is implicitly supplied to the concrete method implementation
+// as the receiver parameter; in other words, Args[0] holds not the
+// receiver but the first true argument.
+//
+// Example printed form:
+// t1 = invoke t0.String()
+// go invoke t3.Run(t2)
+// defer invoke t4.Handle(...t5)
+//
+// For all calls to variadic functions (Signature().Variadic()),
+// the last element of Args is a slice.
+//
+type CallCommon struct {
+ Value Value // receiver (invoke mode) or func value (call mode)
+ Method *types.Func // abstract method (invoke mode)
+ Args []Value // actual parameters (in static method call, includes receiver)
+ pos token.Pos // position of CallExpr.Lparen, iff explicit in source
+}
+
+// IsInvoke returns true if this call has "invoke" (not "call") mode.
+func (c *CallCommon) IsInvoke() bool {
+ return c.Method != nil
+}
+
+func (c *CallCommon) Pos() token.Pos { return c.pos }
+
+// Signature returns the signature of the called function.
+//
+// For an "invoke"-mode call, the signature of the interface method is
+// returned.
+//
+// In either "call" or "invoke" mode, if the callee is a method, its
+// receiver is represented by sig.Recv, not sig.Params().At(0).
+//
+func (c *CallCommon) Signature() *types.Signature {
+ if c.Method != nil {
+ return c.Method.Type().(*types.Signature)
+ }
+ return c.Value.Type().Underlying().(*types.Signature)
+}
+
+// StaticCallee returns the callee if this is a trivially static
+// "call"-mode call to a function.
+func (c *CallCommon) StaticCallee() *Function {
+ switch fn := c.Value.(type) {
+ case *Function:
+ return fn
+ case *MakeClosure:
+ return fn.Fn.(*Function)
+ }
+ return nil
+}
+
+// Description returns a description of the mode of this call suitable
+// for a user interface, e.g., "static method call".
+func (c *CallCommon) Description() string {
+ switch fn := c.Value.(type) {
+ case *Builtin:
+ return "built-in function call"
+ case *MakeClosure:
+ return "static function closure call"
+ case *Function:
+ if fn.Signature.Recv() != nil {
+ return "static method call"
+ }
+ return "static function call"
+ }
+ if c.IsInvoke() {
+ return "dynamic method call" // ("invoke" mode)
+ }
+ return "dynamic function call"
+}
+
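+// An illustrative sketch (not upstream API), complementing Description
+// above: naming a callee via IsInvoke and StaticCallee. The name
+// calleeName is hypothetical.
+func calleeName(common *CallCommon) string {
+ if common.IsInvoke() {
+ // "invoke" mode: dynamic dispatch through an interface method.
+ return common.Method.Name()
+ }
+ if callee := common.StaticCallee(); callee != nil {
+ return callee.Name() // statically known function or closure
+ }
+ if b, ok := common.Value.(*Builtin); ok {
+ return b.Name() // built-in such as len or append
+ }
+ return "(dynamic)" // dynamically dispatched function value
+}
+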
+// The CallInstruction interface, implemented by *Go, *Defer and *Call,
+// exposes the common parts of function-calling instructions,
+// yet provides a way back to the Value defined by *Call alone.
+//
+type CallInstruction interface {
+ Instruction
+ Common() *CallCommon // returns the common parts of the call
+ Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer)
+}
+
+func (s *Call) Common() *CallCommon { return &s.Call }
+func (s *Defer) Common() *CallCommon { return &s.Call }
+func (s *Go) Common() *CallCommon { return &s.Call }
+
+func (s *Call) Value() *Call { return s }
+func (s *Defer) Value() *Call { return nil }
+func (s *Go) Value() *Call { return nil }
+
+func (v *Builtin) Type() types.Type { return v.sig }
+func (v *Builtin) Name() string { return v.name }
+func (*Builtin) Referrers() *[]Instruction { return nil }
+func (v *Builtin) Pos() token.Pos { return token.NoPos }
+func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) }
+func (v *Builtin) Parent() *Function { return nil }
+
+func (v *FreeVar) Type() types.Type { return v.typ }
+func (v *FreeVar) Name() string { return v.name }
+func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers }
+func (v *FreeVar) Pos() token.Pos { return v.pos }
+func (v *FreeVar) Parent() *Function { return v.parent }
+
+func (v *Global) Type() types.Type { return v.typ }
+func (v *Global) Name() string { return v.name }
+func (v *Global) Parent() *Function { return nil }
+func (v *Global) Pos() token.Pos { return v.pos }
+func (v *Global) Referrers() *[]Instruction { return nil }
+func (v *Global) Token() token.Token { return token.VAR }
+func (v *Global) Object() types.Object { return v.object }
+func (v *Global) String() string { return v.RelString(nil) }
+func (v *Global) Package() *Package { return v.Pkg }
+func (v *Global) RelString(from *types.Package) string { return relString(v, from) }
+
+func (v *Function) Name() string { return v.name }
+func (v *Function) Type() types.Type { return v.Signature }
+func (v *Function) Pos() token.Pos { return v.pos }
+func (v *Function) Token() token.Token { return token.FUNC }
+func (v *Function) Object() types.Object { return v.object }
+func (v *Function) String() string { return v.RelString(nil) }
+func (v *Function) Package() *Package { return v.Pkg }
+func (v *Function) Parent() *Function { return v.parent }
+func (v *Function) Referrers() *[]Instruction {
+ if v.parent != nil {
+ return &v.referrers
+ }
+ return nil
+}
+
+func (v *Parameter) Type() types.Type { return v.typ }
+func (v *Parameter) Name() string { return v.name }
+func (v *Parameter) Object() types.Object { return v.object }
+func (v *Parameter) Referrers() *[]Instruction { return &v.referrers }
+func (v *Parameter) Pos() token.Pos { return v.pos }
+func (v *Parameter) Parent() *Function { return v.parent }
+
+func (v *Alloc) Type() types.Type { return v.typ }
+func (v *Alloc) Referrers() *[]Instruction { return &v.referrers }
+func (v *Alloc) Pos() token.Pos { return v.pos }
+
+func (v *register) Type() types.Type { return v.typ }
+func (v *register) setType(typ types.Type) { v.typ = typ }
+func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) }
+func (v *register) setNum(num int) { v.num = num }
+func (v *register) Referrers() *[]Instruction { return &v.referrers }
+func (v *register) Pos() token.Pos { return v.pos }
+func (v *register) setPos(pos token.Pos) { v.pos = pos }
+
+func (v *anInstruction) Parent() *Function { return v.block.parent }
+func (v *anInstruction) Block() *BasicBlock { return v.block }
+func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block }
+func (v *anInstruction) Referrers() *[]Instruction { return nil }
+
+func (t *Type) Name() string { return t.object.Name() }
+func (t *Type) Pos() token.Pos { return t.object.Pos() }
+func (t *Type) Type() types.Type { return t.object.Type() }
+func (t *Type) Token() token.Token { return token.TYPE }
+func (t *Type) Object() types.Object { return t.object }
+func (t *Type) String() string { return t.RelString(nil) }
+func (t *Type) Package() *Package { return t.pkg }
+func (t *Type) RelString(from *types.Package) string { return relString(t, from) }
+
+func (c *NamedConst) Name() string { return c.object.Name() }
+func (c *NamedConst) Pos() token.Pos { return c.object.Pos() }
+func (c *NamedConst) String() string { return c.RelString(nil) }
+func (c *NamedConst) Type() types.Type { return c.object.Type() }
+func (c *NamedConst) Token() token.Token { return token.CONST }
+func (c *NamedConst) Object() types.Object { return c.object }
+func (c *NamedConst) Package() *Package { return c.pkg }
+func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) }
+
+// Func returns the package-level function of the specified name,
+// or nil if not found.
+//
+func (p *Package) Func(name string) (f *Function) {
+ f, _ = p.Members[name].(*Function)
+ return
+}
+
+// Var returns the package-level variable of the specified name,
+// or nil if not found.
+//
+func (p *Package) Var(name string) (g *Global) {
+ g, _ = p.Members[name].(*Global)
+ return
+}
+
+// Const returns the package-level constant of the specified name,
+// or nil if not found.
+//
+func (p *Package) Const(name string) (c *NamedConst) {
+ c, _ = p.Members[name].(*NamedConst)
+ return
+}
+
+// Type returns the package-level type of the specified name,
+// or nil if not found.
+//
+func (p *Package) Type(name string) (t *Type) {
+ t, _ = p.Members[name].(*Type)
+ return
+}
+
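+// An illustrative sketch (not upstream API): the accessors above
+// return nil when the named member is absent or of another kind. The
+// member names "main" and "version" are hypothetical.
+func exampleMembers(p *Package) {
+ if f := p.Func("main"); f != nil {
+ fmt.Println("func main declared at", f.Pos())
+ }
+ if g := p.Var("version"); g != nil {
+ fmt.Println("global", g.Name(), "has type", g.Type())
+ }
+}
+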
+func (v *Call) Pos() token.Pos { return v.Call.pos }
+func (s *Defer) Pos() token.Pos { return s.pos }
+func (s *Go) Pos() token.Pos { return s.pos }
+func (s *MapUpdate) Pos() token.Pos { return s.pos }
+func (s *Panic) Pos() token.Pos { return s.pos }
+func (s *Return) Pos() token.Pos { return s.pos }
+func (s *Send) Pos() token.Pos { return s.pos }
+func (s *Store) Pos() token.Pos { return s.pos }
+func (s *BlankStore) Pos() token.Pos { return token.NoPos }
+func (s *If) Pos() token.Pos { return token.NoPos }
+func (s *Jump) Pos() token.Pos { return token.NoPos }
+func (s *RunDefers) Pos() token.Pos { return token.NoPos }
+func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() }
+
+// Operands.
+
+func (v *Alloc) Operands(rands []*Value) []*Value {
+ return rands
+}
+
+func (v *BinOp) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Y)
+}
+
+func (c *CallCommon) Operands(rands []*Value) []*Value {
+ rands = append(rands, &c.Value)
+ for i := range c.Args {
+ rands = append(rands, &c.Args[i])
+ }
+ return rands
+}
+
+func (s *Go) Operands(rands []*Value) []*Value {
+ return s.Call.Operands(rands)
+}
+
+func (s *Call) Operands(rands []*Value) []*Value {
+ return s.Call.Operands(rands)
+}
+
+func (s *Defer) Operands(rands []*Value) []*Value {
+ return s.Call.Operands(rands)
+}
+
+func (v *ChangeInterface) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *ChangeType) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *Convert) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (s *DebugRef) Operands(rands []*Value) []*Value {
+ return append(rands, &s.X)
+}
+
+func (v *Extract) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Tuple)
+}
+
+func (v *Field) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *FieldAddr) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (s *If) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Cond)
+}
+
+func (v *Index) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Index)
+}
+
+func (v *IndexAddr) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Index)
+}
+
+func (*Jump) Operands(rands []*Value) []*Value {
+ return rands
+}
+
+func (v *Lookup) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Index)
+}
+
+func (v *MakeChan) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Size)
+}
+
+func (v *MakeClosure) Operands(rands []*Value) []*Value {
+ rands = append(rands, &v.Fn)
+ for i := range v.Bindings {
+ rands = append(rands, &v.Bindings[i])
+ }
+ return rands
+}
+
+func (v *MakeInterface) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *MakeMap) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Reserve)
+}
+
+func (v *MakeSlice) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Len, &v.Cap)
+}
+
+func (v *MapUpdate) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Map, &v.Key, &v.Value)
+}
+
+func (v *Next) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Iter)
+}
+
+func (s *Panic) Operands(rands []*Value) []*Value {
+ return append(rands, &s.X)
+}
+
+func (v *Sigma) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *Phi) Operands(rands []*Value) []*Value {
+ for i := range v.Edges {
+ rands = append(rands, &v.Edges[i])
+ }
+ return rands
+}
+
+func (v *Range) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (s *Return) Operands(rands []*Value) []*Value {
+ for i := range s.Results {
+ rands = append(rands, &s.Results[i])
+ }
+ return rands
+}
+
+func (*RunDefers) Operands(rands []*Value) []*Value {
+ return rands
+}
+
+func (v *Select) Operands(rands []*Value) []*Value {
+ for i := range v.States {
+ rands = append(rands, &v.States[i].Chan, &v.States[i].Send)
+ }
+ return rands
+}
+
+func (s *Send) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Chan, &s.X)
+}
+
+func (v *Slice) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Low, &v.High, &v.Max)
+}
+
+func (s *Store) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Addr, &s.Val)
+}
+
+func (s *BlankStore) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Val)
+}
+
+func (v *TypeAssert) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *UnOp) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+// Non-Instruction Values:
+func (v *Builtin) Operands(rands []*Value) []*Value { return rands }
+func (v *FreeVar) Operands(rands []*Value) []*Value { return rands }
+func (v *Const) Operands(rands []*Value) []*Value { return rands }
+func (v *Function) Operands(rands []*Value) []*Value { return rands }
+func (v *Global) Operands(rands []*Value) []*Value { return rands }
+func (v *Parameter) Operands(rands []*Value) []*Value { return rands }
diff --git a/vendor/honnef.co/go/tools/ssa/ssautil/load.go b/vendor/honnef.co/go/tools/ssa/ssautil/load.go
new file mode 100644
index 000000000..3b8694a13
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/ssautil/load.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssautil
+
+// This file defines utility functions for constructing programs in SSA form.
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/packages"
+ "honnef.co/go/tools/ssa"
+)
+
+// Packages creates an SSA program for a set of packages loaded from
+// source syntax using the golang.org/x/tools/go/packages.Load function.
+// It creates and returns an SSA package for each well-typed package in
+// the initial list. The resulting list of packages has the same length
+// as initial, and contains a nil if SSA could not be constructed for
+// the corresponding initial package.
+//
+// Code for bodies of functions is not built until Build is called
+// on the resulting Program.
+//
+// The mode parameter controls diagnostics and checking during SSA construction.
+//
+func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) {
+ var fset *token.FileSet
+ if len(initial) > 0 {
+ fset = initial[0].Fset
+ }
+
+ prog := ssa.NewProgram(fset, mode)
+ seen := make(map[*packages.Package]*ssa.Package)
+ var create func(p *packages.Package) *ssa.Package
+ create = func(p *packages.Package) *ssa.Package {
+ ssapkg, ok := seen[p]
+ if !ok {
+ if p.Types == nil || p.IllTyped {
+ // not well typed
+ seen[p] = nil
+ return nil
+ }
+
+ ssapkg = prog.CreatePackage(p.Types, p.Syntax, p.TypesInfo, true)
+ seen[p] = ssapkg
+
+ for _, imp := range p.Imports {
+ create(imp)
+ }
+ }
+ return ssapkg
+ }
+
+ var ssapkgs []*ssa.Package
+ for _, p := range initial {
+ ssapkgs = append(ssapkgs, create(p))
+ }
+ return prog, ssapkgs
+}
+
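+// A hedged usage sketch for Packages (the load pattern and the "./..."
+// argument are assumptions for illustration, not part of this file):
+//
+// cfg := &packages.Config{Mode: packages.LoadAllSyntax}
+// initial, err := packages.Load(cfg, "./...")
+// if err != nil {
+// log.Fatal(err)
+// }
+// prog, pkgs := Packages(initial, ssa.SanityCheckFunctions)
+// prog.Build() // build SSA code for all function bodies
+// _ = pkgs
+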
+// CreateProgram returns a new program in SSA form, given a program
+// loaded from source. An SSA package is created for each transitively
+// error-free package of lprog.
+//
+// Code for bodies of functions is not built until Build is called
+// on the result.
+//
+// mode controls diagnostics and checking during SSA construction.
+//
+func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program {
+ prog := ssa.NewProgram(lprog.Fset, mode)
+
+ for _, info := range lprog.AllPackages {
+ if info.TransitivelyErrorFree {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+ }
+
+ return prog
+}
+
+// BuildPackage builds an SSA program with IR for a single package.
+//
+// It populates pkg by type-checking the specified file ASTs. All
+// dependencies are loaded using the importer specified by tc, which
+// typically loads compiler export data; SSA code cannot be built for
+// those packages. BuildPackage then constructs an ssa.Program with all
+// dependency packages created, and builds and returns the SSA package
+// corresponding to pkg.
+//
+// The caller must have set pkg.Path() to the import path.
+//
+// The operation fails if there were any type-checking or import errors.
+//
+// See ../ssa/example_test.go for an example.
+//
+func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) {
+ if fset == nil {
+ panic("no token.FileSet")
+ }
+ if pkg.Path() == "" {
+ panic("package has no import path")
+ }
+
+ info := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil {
+ return nil, nil, err
+ }
+
+ prog := ssa.NewProgram(fset, mode)
+
+ // Create SSA packages for all imports.
+ // Order is not significant.
+ created := make(map[*types.Package]bool)
+ var createAll func(pkgs []*types.Package)
+ createAll = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !created[p] {
+ created[p] = true
+ prog.CreatePackage(p, nil, nil, true)
+ createAll(p.Imports())
+ }
+ }
+ }
+ createAll(pkg.Imports())
+
+ // Create and build the primary package.
+ ssapkg := prog.CreatePackage(pkg, files, info, false)
+ ssapkg.Build()
+ return ssapkg, info, nil
+}
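+
+// A hedged usage sketch for BuildPackage, mirroring the example the
+// doc comment references (the parser and importer packages are
+// assumed; "hello.go" is a hypothetical file):
+//
+// fset := token.NewFileSet()
+// f, err := parser.ParseFile(fset, "hello.go", nil, 0)
+// // handle err ...
+// pkg := types.NewPackage("hello", "")
+// conf := &types.Config{Importer: importer.Default()}
+// hello, _, err := BuildPackage(conf, fset, pkg, []*ast.File{f}, ssa.SanityCheckFunctions)
+// // handle err ...
+// hello.Build() // build SSA bodies for the package's functions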
diff --git a/vendor/honnef.co/go/tools/ssa/ssautil/switch.go b/vendor/honnef.co/go/tools/ssa/ssautil/switch.go
new file mode 100644
index 000000000..9c2f5d06e
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/ssautil/switch.go
@@ -0,0 +1,234 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssautil
+
+// This file implements discovery of switch and type-switch constructs
+// from low-level control flow.
+//
+// Many techniques exist for compiling a high-level switch with
+// constant cases to efficient machine code. The optimal choice will
+// depend on the data type, the specific case values, the code in the
+// body of each case, and the hardware.
+// Some examples:
+// - a lookup table (for a switch that maps constants to constants)
+// - a computed goto
+// - a binary tree
+// - a perfect hash
+// - a two-level switch (to partition constant strings by their first byte).
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "go/types"
+
+ "honnef.co/go/tools/ssa"
+)
+
+// A ConstCase represents a single constant comparison.
+// It is part of a Switch.
+type ConstCase struct {
+ Block *ssa.BasicBlock // block performing the comparison
+ Body *ssa.BasicBlock // body of the case
+ Value *ssa.Const // case comparand
+}
+
+// A TypeCase represents a single type assertion.
+// It is part of a Switch.
+type TypeCase struct {
+ Block *ssa.BasicBlock // block performing the type assert
+ Body *ssa.BasicBlock // body of the case
+ Type types.Type // case type
+ Binding ssa.Value // value bound by this case
+}
+
+// A Switch is a logical high-level control flow operation
+// (a multiway branch) discovered by analysis of a CFG containing
+// only if/else chains. It is not part of the ssa.Instruction set.
+//
+// One of ConstCases and TypeCases has length >= 2;
+// the other is nil.
+//
+// In a value switch, the list of cases may contain duplicate constants.
+// A type switch may contain duplicate types, or types assignable
+// to an interface type also in the list.
+// TODO(adonovan): eliminate such duplicates.
+//
+type Switch struct {
+ Start *ssa.BasicBlock // block containing start of if/else chain
+ X ssa.Value // the switch operand
+ ConstCases []ConstCase // ordered list of constant comparisons
+ TypeCases []TypeCase // ordered list of type assertions
+ Default *ssa.BasicBlock // successor if all comparisons fail
+}
+
+func (sw *Switch) String() string {
+ // We represent each block by the String() of its
+ // first Instruction, e.g. "print(42:int)".
+ var buf bytes.Buffer
+ if sw.ConstCases != nil {
+ fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name())
+ for _, c := range sw.ConstCases {
+ fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0])
+ }
+ } else {
+ fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name())
+ for _, c := range sw.TypeCases {
+ fmt.Fprintf(&buf, "case %s %s: %s\n",
+ c.Binding.Name(), c.Type, c.Body.Instrs[0])
+ }
+ }
+ if sw.Default != nil {
+ fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0])
+ }
+ fmt.Fprintf(&buf, "}")
+ return buf.String()
+}
+
+// Switches examines the control-flow graph of fn and returns the
+// set of inferred value and type switches. A value switch tests an
+// ssa.Value for equality against two or more compile-time constant
+// values. Switches involving link-time constants (addresses) are
+// ignored. A type switch type-asserts an ssa.Value against two or
+// more types.
+//
+// The switches are returned in dominance order.
+//
+// The resulting switches do not necessarily correspond to uses of the
+// 'switch' keyword in the source: for example, a single source-level
+// switch statement with non-constant cases may result in zero, one or
+// many Switches, one per run of two or more consecutive constant cases.
+// Switches may even be inferred from if/else- or goto-based control flow.
+// (In general, the control flow constructs of the source program
+// cannot be faithfully reproduced from the SSA representation.)
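+//
+// For instance, a hypothetical source-level switch
+//
+//	switch x {
+//	case 1, 2:
+//		a()
+//	case f():
+//		b()
+//	case 3, 4:
+//		c()
+//	}
+//
+// contains two runs of constant cases separated by a non-constant
+// case, and may therefore be reported as two Switches.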
+//
+func Switches(fn *ssa.Function) []Switch {
+ // Traverse the CFG in dominance order, so we don't
+ // enter an if/else-chain in the middle.
+ var switches []Switch
+ seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet
+ for _, b := range fn.DomPreorder() {
+ if x, k := isComparisonBlock(b); x != nil {
+ // Block b starts a switch.
+ sw := Switch{Start: b, X: x}
+ valueSwitch(&sw, k, seen)
+ if len(sw.ConstCases) > 1 {
+ switches = append(switches, sw)
+ }
+ }
+
+ if y, x, T := isTypeAssertBlock(b); y != nil {
+ // Block b starts a type switch.
+ sw := Switch{Start: b, X: x}
+ typeSwitch(&sw, y, T, seen)
+ if len(sw.TypeCases) > 1 {
+ switches = append(switches, sw)
+ }
+ }
+ }
+ return switches
+}
+
+func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) {
+ b := sw.Start
+ x := sw.X
+ for x == sw.X {
+ if seen[b] {
+ break
+ }
+ seen[b] = true
+
+ sw.ConstCases = append(sw.ConstCases, ConstCase{
+ Block: b,
+ Body: b.Succs[0],
+ Value: k,
+ })
+ b = b.Succs[1]
+ if len(b.Instrs) > 2 {
+ // Block b contains not just 'if x == k',
+ // so it may have side effects that
+ // make it unsafe to elide.
+ break
+ }
+ if len(b.Preds) != 1 {
+ // Block b has multiple predecessors,
+ // so it cannot be treated as a case.
+ break
+ }
+ x, k = isComparisonBlock(b)
+ }
+ sw.Default = b
+}
+
+func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) {
+ b := sw.Start
+ x := sw.X
+ for x == sw.X {
+ if seen[b] {
+ break
+ }
+ seen[b] = true
+
+ sw.TypeCases = append(sw.TypeCases, TypeCase{
+ Block: b,
+ Body: b.Succs[0],
+ Type: T,
+ Binding: y,
+ })
+ b = b.Succs[1]
+ if len(b.Instrs) > 4 {
+ // Block b contains not just
+ // {TypeAssert; Extract #0; Extract #1; If}
+ // so it may have side effects that
+ // make it unsafe to elide.
+ break
+ }
+ if len(b.Preds) != 1 {
+ // Block b has multiple predecessors,
+ // so it cannot be treated as a case.
+ break
+ }
+ y, x, T = isTypeAssertBlock(b)
+ }
+ sw.Default = b
+}
+
+// isComparisonBlock returns the operands (v, k) if a block ends with
+// a comparison v==k, where k is a compile-time constant.
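+//
+// In printed SSA form such a block ends with, e.g. (a sketch):
+//
+//	t0 = x == 42:int
+//	if t0 goto 1 else 2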
+//
+func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) {
+ if n := len(b.Instrs); n >= 2 {
+ if i, ok := b.Instrs[n-1].(*ssa.If); ok {
+ if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL {
+ if k, ok := binop.Y.(*ssa.Const); ok {
+ return binop.X, k
+ }
+ if k, ok := binop.X.(*ssa.Const); ok {
+ return binop.Y, k
+ }
+ }
+ }
+ }
+ return
+}
+
+// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
+// a type assertion "if y, ok := x.(T); ok {".
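+//
+// In printed SSA form such a block ends with, e.g. (a sketch):
+//
+//	t0 = typeassert,ok x.(T)
+//	t1 = extract t0 #0
+//	t2 = extract t0 #1
+//	if t2 goto 1 else 2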
+//
+func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) {
+ if n := len(b.Instrs); n >= 4 {
+ if i, ok := b.Instrs[n-1].(*ssa.If); ok {
+ if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 {
+ if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b {
+ // hack: relies upon instruction ordering.
+ if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok {
+ return ext0, ta.X, ta.AssertedType
+ }
+ }
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/honnef.co/go/tools/ssa/ssautil/visit.go b/vendor/honnef.co/go/tools/ssa/ssautil/visit.go
new file mode 100644
index 000000000..5c14845f5
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/ssautil/visit.go
@@ -0,0 +1,79 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssautil // import "honnef.co/go/tools/ssa/ssautil"
+
+import "honnef.co/go/tools/ssa"
+
+// This file defines utilities for visiting the SSA representation of
+// a Program.
+//
+// TODO(adonovan): test coverage.
+
+// AllFunctions finds and returns the set of functions potentially
+// needed by program prog, as determined by a simple linker-style
+// reachability algorithm starting from the members and method-sets of
+// each package. The result may include anonymous functions and
+// synthetic wrappers.
+//
+// Precondition: all packages are built.
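+//
+// A typical use (a sketch; prog is assumed to be a built *ssa.Program):
+//
+//	for fn := range ssautil.AllFunctions(prog) {
+//		fmt.Println(fn)
+//	}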
+//
+func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool {
+ visit := visitor{
+ prog: prog,
+ seen: make(map[*ssa.Function]bool),
+ }
+ visit.program()
+ return visit.seen
+}
+
+type visitor struct {
+ prog *ssa.Program
+ seen map[*ssa.Function]bool
+}
+
+func (visit *visitor) program() {
+ for _, pkg := range visit.prog.AllPackages() {
+ for _, mem := range pkg.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ visit.function(fn)
+ }
+ }
+ }
+ for _, T := range visit.prog.RuntimeTypes() {
+ mset := visit.prog.MethodSets.MethodSet(T)
+ for i, n := 0, mset.Len(); i < n; i++ {
+ visit.function(visit.prog.MethodValue(mset.At(i)))
+ }
+ }
+}
+
+func (visit *visitor) function(fn *ssa.Function) {
+ if !visit.seen[fn] {
+ visit.seen[fn] = true
+ var buf [10]*ssa.Value // avoid alloc in common case
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ for _, op := range instr.Operands(buf[:0]) {
+ if fn, ok := (*op).(*ssa.Function); ok {
+ visit.function(fn)
+ }
+ }
+ }
+ }
+ }
+}
+
+// MainPackages returns the subset of the specified packages
+// named "main" that define a main function.
+// The result may include synthetic "testmain" packages.
+func MainPackages(pkgs []*ssa.Package) []*ssa.Package {
+ var mains []*ssa.Package
+ for _, pkg := range pkgs {
+ if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil {
+ mains = append(mains, pkg)
+ }
+ }
+ return mains
+}
diff --git a/vendor/honnef.co/go/tools/ssa/testmain.go b/vendor/honnef.co/go/tools/ssa/testmain.go
new file mode 100644
index 000000000..ea232ada9
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/testmain.go
@@ -0,0 +1,267 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// CreateTestMainPackage synthesizes a main package that runs all the
+// tests of the supplied packages.
+// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing.
+//
+// TODO(adonovan): this file no longer needs to live in the ssa package.
+// Move it to ssautil.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/types"
+ "log"
+ "os"
+ "strings"
+ "text/template"
+)
+
+// FindTests returns the Test, Benchmark, and Example functions
+// (as defined by "go test") defined in the specified package,
+// and its TestMain function, if any.
+func FindTests(pkg *Package) (tests, benchmarks, examples []*Function, main *Function) {
+ prog := pkg.Prog
+
+ // The first two of these may be nil: if the program doesn't import "testing",
+ // it can't contain any tests, but it may yet contain Examples.
+ var testSig *types.Signature // func(*testing.T)
+ var benchmarkSig *types.Signature // func(*testing.B)
+ var exampleSig = types.NewSignature(nil, nil, nil, false) // func()
+
+ // Obtain the types from the parameters of testing.MainStart.
+ if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
+ mainStart := testingPkg.Func("MainStart")
+ params := mainStart.Signature.Params()
+ testSig = funcField(params.At(1).Type())
+ benchmarkSig = funcField(params.At(2).Type())
+
+ // Does the package define this function?
+ // func TestMain(*testing.M)
+ if f := pkg.Func("TestMain"); f != nil {
+ sig := f.Type().(*types.Signature)
+ starM := mainStart.Signature.Results().At(0).Type() // *testing.M
+ if sig.Results().Len() == 0 &&
+ sig.Params().Len() == 1 &&
+ types.Identical(sig.Params().At(0).Type(), starM) {
+ main = f
+ }
+ }
+ }
+
+ // TODO(adonovan): use a stable order, e.g. lexical.
+ for _, mem := range pkg.Members {
+ if f, ok := mem.(*Function); ok &&
+ ast.IsExported(f.Name()) &&
+ strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") {
+
+ switch {
+ case testSig != nil && isTestSig(f, "Test", testSig):
+ tests = append(tests, f)
+ case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig):
+ benchmarks = append(benchmarks, f)
+ case isTestSig(f, "Example", exampleSig):
+ examples = append(examples, f)
+ default:
+ continue
+ }
+ }
+ }
+ return
+}
+
+// Like isTest, but checks the signature too.
+func isTestSig(f *Function, prefix string, sig *types.Signature) bool {
+ return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig)
+}
+
+// Given the type of one of the three slice parameters of testing.Main,
+// returns the function type.
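+// For example, the []testing.InternalTest parameter yields
+// func(*testing.T), the type of the struct field F.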
+func funcField(slice types.Type) *types.Signature {
+ return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature)
+}
+
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+// Plundered from $GOROOT/src/cmd/go/test.go
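+// For example, with prefix "Test": "Test" and "TestFoo" qualify,
+// while "Testify" does not (the character after the prefix is a
+// lower-case letter).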
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ return ast.IsExported(name[len(prefix):])
+}
+
+// CreateTestMainPackage creates and returns a synthetic "testmain"
+// package for the specified package if it defines tests, benchmarks or
+// executable examples, or nil otherwise. The new package is named
+// "main" and provides a function named "main" that runs the tests,
+// similar to the one that would be created by the 'go test' tool.
+//
+// Subsequent calls to prog.AllPackages include the new package.
+// The package pkg must belong to the program prog.
+func (prog *Program) CreateTestMainPackage(pkg *Package) *Package {
+ if pkg.Prog != prog {
+ log.Fatal("Package does not belong to Program")
+ }
+
+ // Template data
+ var data struct {
+ Pkg *Package
+ Tests, Benchmarks, Examples []*Function
+ Main *Function
+ Go18 bool
+ }
+ data.Pkg = pkg
+
+ // Enumerate tests.
+ data.Tests, data.Benchmarks, data.Examples, data.Main = FindTests(pkg)
+ if data.Main == nil &&
+ data.Tests == nil && data.Benchmarks == nil && data.Examples == nil {
+ return nil
+ }
+
+ // Synthesize source for testmain package.
+ path := pkg.Pkg.Path() + "$testmain"
+ tmpl := testmainTmpl
+ if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
+ // In Go 1.8, testing.MainStart's first argument is an interface, not a func.
+ data.Go18 = types.IsInterface(testingPkg.Func("MainStart").Signature.Params().At(0).Type())
+ } else {
+ // The program does not import "testing", but FindTests
+ // returned non-nil, which must mean there were Examples
+ // but no Test, Benchmark, or TestMain functions.
+
+ // We'll simply call them from testmain.main; this will
+ // ensure they don't panic, but will not check any
+ // "Output:" comments.
+ // (We should not execute an Example that has no
+ // "Output:" comment, but it's impossible to tell here.)
+ tmpl = examplesOnlyTmpl
+ }
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, data); err != nil {
+ log.Fatalf("internal error expanding template for %s: %v", path, err)
+ }
+ if false { // debugging
+ fmt.Fprintln(os.Stderr, buf.String())
+ }
+
+ // Parse and type-check the testmain package.
+ f, err := parser.ParseFile(prog.Fset, path+".go", &buf, parser.Mode(0))
+ if err != nil {
+ log.Fatalf("internal error parsing %s: %v", path, err)
+ }
+ conf := types.Config{
+ DisableUnusedImportCheck: true,
+ Importer: importer{pkg},
+ }
+ files := []*ast.File{f}
+ info := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ testmainPkg, err := conf.Check(path, prog.Fset, files, info)
+ if err != nil {
+ log.Fatalf("internal error type-checking %s: %v", path, err)
+ }
+
+ // Create and build SSA code.
+ testmain := prog.CreatePackage(testmainPkg, files, info, false)
+ testmain.SetDebugMode(false)
+ testmain.Build()
+ testmain.Func("main").Synthetic = "test main function"
+ testmain.Func("init").Synthetic = "package initializer"
+ return testmain
+}
+
+// An implementation of types.Importer for an already loaded SSA program.
+type importer struct {
+ pkg *Package // package under test; may be non-importable
+}
+
+func (imp importer) Import(path string) (*types.Package, error) {
+ if p := imp.pkg.Prog.ImportedPackage(path); p != nil {
+ return p.Pkg, nil
+ }
+ if path == imp.pkg.Pkg.Path() {
+ return imp.pkg.Pkg, nil
+ }
+ return nil, fmt.Errorf("not found") // can't happen
+}
+
+var testmainTmpl = template.Must(template.New("testmain").Parse(`
+package main
+
+import "io"
+import "os"
+import "testing"
+import p {{printf "%q" .Pkg.Pkg.Path}}
+
+{{if .Go18}}
+type deps struct{}
+
+func (deps) ImportPath() string { return "" }
+func (deps) MatchString(pat, str string) (bool, error) { return true, nil }
+func (deps) StartCPUProfile(io.Writer) error { return nil }
+func (deps) StartTestLog(io.Writer) {}
+func (deps) StopCPUProfile() {}
+func (deps) StopTestLog() error { return nil }
+func (deps) WriteHeapProfile(io.Writer) error { return nil }
+func (deps) WriteProfileTo(string, io.Writer, int) error { return nil }
+
+var match deps
+{{else}}
+func match(_, _ string) (bool, error) { return true, nil }
+{{end}}
+
+func main() {
+ tests := []testing.InternalTest{
+{{range .Tests}}
+ { {{printf "%q" .Name}}, p.{{.Name}} },
+{{end}}
+ }
+ benchmarks := []testing.InternalBenchmark{
+{{range .Benchmarks}}
+ { {{printf "%q" .Name}}, p.{{.Name}} },
+{{end}}
+ }
+ examples := []testing.InternalExample{
+{{range .Examples}}
+ {Name: {{printf "%q" .Name}}, F: p.{{.Name}}},
+{{end}}
+ }
+ m := testing.MainStart(match, tests, benchmarks, examples)
+{{with .Main}}
+ p.{{.Name}}(m)
+{{else}}
+ os.Exit(m.Run())
+{{end}}
+}
+
+`))
+
+var examplesOnlyTmpl = template.Must(template.New("examples").Parse(`
+package main
+
+import p {{printf "%q" .Pkg.Pkg.Path}}
+
+func main() {
+{{range .Examples}}
+ p.{{.Name}}()
+{{end}}
+}
+`))
diff --git a/vendor/honnef.co/go/tools/ssa/util.go b/vendor/honnef.co/go/tools/ssa/util.go
new file mode 100644
index 000000000..ddb118460
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/util.go
@@ -0,0 +1,119 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines a number of miscellaneous utility functions.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+//// AST utilities
+
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
+
+// isBlankIdent returns true iff e is an Ident with name "_".
+// Blank identifiers have no associated types.Object, and thus no type.
+//
+func isBlankIdent(e ast.Expr) bool {
+ id, ok := e.(*ast.Ident)
+ return ok && id.Name == "_"
+}
+
+//// Type utilities. Some of these belong in go/types.
+
+// isPointer returns true for types whose underlying type is a pointer.
+func isPointer(typ types.Type) bool {
+ _, ok := typ.Underlying().(*types.Pointer)
+ return ok
+}
+
+func isInterface(T types.Type) bool { return types.IsInterface(T) }
+
+// deref returns a pointer's element type; otherwise it returns typ.
+func deref(typ types.Type) types.Type {
+ if p, ok := typ.Underlying().(*types.Pointer); ok {
+ return p.Elem()
+ }
+ return typ
+}
+
+// recvType returns the receiver type of method obj.
+func recvType(obj *types.Func) types.Type {
+ return obj.Type().(*types.Signature).Recv().Type()
+}
+
+// DefaultType returns the default "typed" type for an "untyped" type;
+// it returns the incoming type for all other types. The default type
+// for untyped nil is untyped nil.
+//
+// Exported to ssa/interp.
+//
+// TODO(adonovan): use go/types.DefaultType after 1.8.
+//
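+// For example, UntypedInt maps to int and UntypedString to string;
+// untyped nil is returned unchanged.
+//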
+func DefaultType(typ types.Type) types.Type {
+ if t, ok := typ.(*types.Basic); ok {
+ k := t.Kind()
+ switch k {
+ case types.UntypedBool:
+ k = types.Bool
+ case types.UntypedInt:
+ k = types.Int
+ case types.UntypedRune:
+ k = types.Rune
+ case types.UntypedFloat:
+ k = types.Float64
+ case types.UntypedComplex:
+ k = types.Complex128
+ case types.UntypedString:
+ k = types.String
+ }
+ typ = types.Typ[k]
+ }
+ return typ
+}
+
+// logStack prints the formatted "start" message to stderr and
+// returns a closure that prints the corresponding "end" message.
+// Call using 'defer logStack(...)()' to show builder stack on panic.
+// Don't forget trailing parens!
+//
+func logStack(format string, args ...interface{}) func() {
+ msg := fmt.Sprintf(format, args...)
+ io.WriteString(os.Stderr, msg)
+ io.WriteString(os.Stderr, "\n")
+ return func() {
+ io.WriteString(os.Stderr, msg)
+ io.WriteString(os.Stderr, " end\n")
+ }
+}
+
+// newVar creates a 'var' for use in a types.Tuple.
+func newVar(name string, typ types.Type) *types.Var {
+ return types.NewParam(token.NoPos, nil, name, typ)
+}
+
+// anonVar creates an anonymous 'var' for use in a types.Tuple.
+func anonVar(typ types.Type) *types.Var {
+ return newVar("", typ)
+}
+
+var lenResults = types.NewTuple(anonVar(tInt))
+
+// makeLen returns the len builtin specialized to type func(T)int.
+func makeLen(T types.Type) *Builtin {
+ lenParams := types.NewTuple(anonVar(T))
+ return &Builtin{
+ name: "len",
+ sig: types.NewSignature(nil, lenParams, lenResults, false),
+ }
+}
diff --git a/vendor/honnef.co/go/tools/ssa/wrappers.go b/vendor/honnef.co/go/tools/ssa/wrappers.go
new file mode 100644
index 000000000..701dd90d7
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/wrappers.go
@@ -0,0 +1,294 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines synthesis of Functions that delegate to declared
+// methods; they come in three kinds:
+//
+// (1) wrappers: methods that wrap declared methods, performing
+// implicit pointer indirections and embedded field selections.
+//
+// (2) thunks: funcs that wrap declared methods. Like wrappers,
+// thunks perform indirections and field selections. The thunk's
+// first parameter is used as the receiver for the method call.
+//
+// (3) bounds: funcs that wrap declared methods. The bound's sole
+// free variable, supplied by a closure, is used as the receiver
+// for the method call. No indirections or field selections are
+// performed since they can be done before the call.
+
+import (
+ "fmt"
+
+ "go/types"
+)
+
+// -- wrappers -----------------------------------------------------------
+
+// makeWrapper returns a synthetic method that delegates to the
+// declared method denoted by sel.Obj(), first performing any
+// necessary pointer indirections or field selections implied by sel.
+//
+// The resulting method's receiver type is sel.Recv().
+//
+// This function is versatile but quite subtle! Consider the
+// following axes of variation when making changes:
+// - optional receiver indirection
+// - optional implicit field selections
+// - meth.Obj() may denote a concrete or an interface method
+// - the result may be a thunk or a wrapper.
+//
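+// For example (a sketch), given
+//
+//	type B struct{}
+//	func (B) f()
+//	type A struct{ B }
+//
+// the method set of *A includes a synthetic wrapper (*A).f that
+// dereferences the receiver and selects the embedded field B
+// before calling B.f.
+//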
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+//
+func makeWrapper(prog *Program, sel *types.Selection) *Function {
+ obj := sel.Obj().(*types.Func) // the declared function
+ sig := sel.Type().(*types.Signature) // type of this wrapper
+
+ var recv *types.Var // wrapper's receiver or thunk's params[0]
+ name := obj.Name()
+ var description string
+ var start int // first regular param
+ if sel.Kind() == types.MethodExpr {
+ name += "$thunk"
+ description = "thunk"
+ recv = sig.Params().At(0)
+ start = 1
+ } else {
+ description = "wrapper"
+ recv = sig.Recv()
+ }
+
+ description = fmt.Sprintf("%s for %s", description, sel.Obj())
+ if prog.mode&LogSource != 0 {
+ defer logStack("make %s to (%s)", description, recv.Type())()
+ }
+ fn := &Function{
+ name: name,
+ method: sel,
+ object: obj,
+ Signature: sig,
+ Synthetic: description,
+ Prog: prog,
+ pos: obj.Pos(),
+ }
+ fn.startBody()
+ fn.addSpilledParam(recv)
+ createParams(fn, start)
+
+ indices := sel.Index()
+
+ var v Value = fn.Locals[0] // spilled receiver
+ if isPointer(sel.Recv()) {
+ v = emitLoad(fn, v)
+
+ // For simple indirection wrappers, perform an informative nil-check:
+ // "value method (T).f called using nil *T pointer"
+ if len(indices) == 1 && !isPointer(recvType(obj)) {
+ var c Call
+ c.Call.Value = &Builtin{
+ name: "ssa:wrapnilchk",
+ sig: types.NewSignature(nil,
+ types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
+ types.NewTuple(anonVar(sel.Recv())), false),
+ }
+ c.Call.Args = []Value{
+ v,
+ stringConst(deref(sel.Recv()).String()),
+ stringConst(sel.Obj().Name()),
+ }
+ c.setType(v.Type())
+ v = fn.emit(&c)
+ }
+ }
+
+ // Invariant: v is a pointer, either
+ // value of *A receiver param, or
+ // address of A spilled receiver.
+
+ // We use pointer arithmetic (FieldAddr possibly followed by
+ // Load) in preference to value extraction (Field possibly
+ // preceded by Load).
+
+ v = emitImplicitSelections(fn, v, indices[:len(indices)-1])
+
+ // Invariant: v is a pointer, either
+ // value of implicit *C field, or
+ // address of implicit C field.
+
+ var c Call
+ if r := recvType(obj); !isInterface(r) { // concrete method
+ if !isPointer(r) {
+ v = emitLoad(fn, v)
+ }
+ c.Call.Value = prog.declaredFunc(obj)
+ c.Call.Args = append(c.Call.Args, v)
+ } else {
+ c.Call.Method = obj
+ c.Call.Value = emitLoad(fn, v)
+ }
+ for _, arg := range fn.Params[1:] {
+ c.Call.Args = append(c.Call.Args, arg)
+ }
+ emitTailCall(fn, &c)
+ fn.finishBody()
+ return fn
+}
+
+// createParams creates parameters for wrapper method fn based on its
+// Signature.Params, which do not include the receiver.
+// start is the index of the first regular parameter to use.
+//
+func createParams(fn *Function, start int) {
+ var last *Parameter
+ tparams := fn.Signature.Params()
+ for i, n := start, tparams.Len(); i < n; i++ {
+ last = fn.addParamObj(tparams.At(i))
+ }
+ if fn.Signature.Variadic() {
+ last.typ = types.NewSlice(last.typ)
+ }
+}
+
+// -- bounds -----------------------------------------------------------
+
+// makeBound returns a bound method wrapper (or "bound"), a synthetic
+// function that delegates to a concrete or interface method denoted
+// by obj. The resulting function has no receiver, but has one free
+// variable which will be used as the method's receiver in the
+// tail-call.
+//
+// Use MakeClosure with such a wrapper to construct a bound method
+// closure. e.g.:
+//
+// type T int or: type T interface { meth() }
+// func (t T) meth()
+// var t T
+// f := t.meth
+// f() // calls t.meth()
+//
+// f is a closure of a synthetic wrapper defined as if by:
+//
+// f := func() { return t.meth() }
+//
+// Unlike makeWrapper, makeBound need perform no indirections or field
+// selections because they can be done before the closure is
+// constructed.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func makeBound(prog *Program, obj *types.Func) *Function {
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+ fn, ok := prog.bounds[obj]
+ if !ok {
+ description := fmt.Sprintf("bound method wrapper for %s", obj)
+ if prog.mode&LogSource != 0 {
+ defer logStack("%s", description)()
+ }
+ fn = &Function{
+ name: obj.Name() + "$bound",
+ object: obj,
+ Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver
+ Synthetic: description,
+ Prog: prog,
+ pos: obj.Pos(),
+ }
+
+ fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
+ fn.FreeVars = []*FreeVar{fv}
+ fn.startBody()
+ createParams(fn, 0)
+ var c Call
+
+ if !isInterface(recvType(obj)) { // concrete
+ c.Call.Value = prog.declaredFunc(obj)
+ c.Call.Args = []Value{fv}
+ } else {
+ c.Call.Value = fv
+ c.Call.Method = obj
+ }
+ for _, arg := range fn.Params {
+ c.Call.Args = append(c.Call.Args, arg)
+ }
+ emitTailCall(fn, &c)
+ fn.finishBody()
+
+ prog.bounds[obj] = fn
+ }
+ return fn
+}
+
+// -- thunks -----------------------------------------------------------
+
+// makeThunk returns a thunk, a synthetic function that delegates to a
+// concrete or interface method denoted by sel.Obj(). The resulting
+// function has no receiver, but has an additional (first) regular
+// parameter.
+//
+// Precondition: sel.Kind() == types.MethodExpr.
+//
+// type T int or: type T interface { meth() }
+// func (t T) meth()
+// f := T.meth
+// var t T
+// f(t) // calls t.meth()
+//
+// f is a synthetic wrapper defined as if by:
+//
+// f := func(t T) { return t.meth() }
+//
+// TODO(adonovan): opt: currently the stub is created even when used
+// directly in a function call: C.f(i, 0). This is less efficient
+// than inlining the stub.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func makeThunk(prog *Program, sel *types.Selection) *Function {
+ if sel.Kind() != types.MethodExpr {
+ panic(sel)
+ }
+
+ key := selectionKey{
+ kind: sel.Kind(),
+ recv: sel.Recv(),
+ obj: sel.Obj(),
+ index: fmt.Sprint(sel.Index()),
+ indirect: sel.Indirect(),
+ }
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ // Canonicalize key.recv to avoid constructing duplicate thunks.
+ canonRecv, ok := prog.canon.At(key.recv).(types.Type)
+ if !ok {
+ canonRecv = key.recv
+ prog.canon.Set(key.recv, canonRecv)
+ }
+ key.recv = canonRecv
+
+ fn, ok := prog.thunks[key]
+ if !ok {
+ fn = makeWrapper(prog, sel)
+ if fn.Signature.Recv() != nil {
+ panic(fn) // unexpected receiver
+ }
+ prog.thunks[key] = fn
+ }
+ return fn
+}
+
+func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
+ return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic())
+}
+
+// selectionKey is like types.Selection but a usable map key.
+type selectionKey struct {
+ kind types.SelectionKind
+ recv types.Type // canonicalized via Program.canon
+ obj types.Object
+ index string
+ indirect bool
+}
diff --git a/vendor/honnef.co/go/tools/ssa/write.go b/vendor/honnef.co/go/tools/ssa/write.go
new file mode 100644
index 000000000..89761a18a
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/write.go
@@ -0,0 +1,5 @@
+package ssa
+
+func NewJump(parent *BasicBlock) *Jump {
+ return &Jump{anInstruction{parent}}
+}
diff --git a/vendor/honnef.co/go/tools/ssautil/ssautil.go b/vendor/honnef.co/go/tools/ssautil/ssautil.go
new file mode 100644
index 000000000..a18f849ec
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssautil/ssautil.go
@@ -0,0 +1,41 @@
+package ssautil
+
+import (
+ "honnef.co/go/tools/ssa"
+)
+
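+// Reachable reports whether basic block to can be reached from block
+// from by following successor edges; a block is considered to reach
+// itself.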
+func Reachable(from, to *ssa.BasicBlock) bool {
+ if from == to {
+ return true
+ }
+ if from.Dominates(to) {
+ return true
+ }
+
+ found := false
+ Walk(from, func(b *ssa.BasicBlock) bool {
+ if b == to {
+ found = true
+ return false
+ }
+ return true
+ })
+ return found
+}
+
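+// Walk visits b and every block reachable from it, in depth-first
+// order, calling fn once per block. If fn returns false, Walk does
+// not descend into that block's successors.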
+func Walk(b *ssa.BasicBlock, fn func(*ssa.BasicBlock) bool) {
+ seen := map[*ssa.BasicBlock]bool{}
+ wl := []*ssa.BasicBlock{b}
+ for len(wl) > 0 {
+ b := wl[len(wl)-1]
+ wl = wl[:len(wl)-1]
+ if seen[b] {
+ continue
+ }
+ seen[b] = true
+ if !fn(b) {
+ continue
+ }
+ wl = append(wl, b.Succs...)
+ }
+}
diff --git a/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md b/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md
new file mode 100644
index 000000000..b12c7afc7
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to staticcheck
+
+## Before filing an issue:
+
+### Are you having trouble building staticcheck?
+
+Check that you have the latest version of its dependencies. Run
+```
+go get -u honnef.co/go/tools/staticcheck
+```
+If you still have problems, consider searching for existing issues before filing a new issue.
+
+## Before sending a pull request:
+
+Have you understood the purpose of staticcheck? Make sure to carefully read `README`.
diff --git a/vendor/honnef.co/go/tools/staticcheck/buildtag.go b/vendor/honnef.co/go/tools/staticcheck/buildtag.go
new file mode 100644
index 000000000..888d3e9dc
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/buildtag.go
@@ -0,0 +1,21 @@
+package staticcheck
+
+import (
+ "go/ast"
+ "strings"
+
+ . "honnef.co/go/tools/lint/lintdsl"
+)
+
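+// buildTags returns the "+build" constraints found in the preamble
+// of f, one slice of space-separated fields per constraint line.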
+func buildTags(f *ast.File) [][]string {
+ var out [][]string
+ for _, line := range strings.Split(Preamble(f), "\n") {
+ if !strings.HasPrefix(line, "+build ") {
+ continue
+ }
+ line = strings.TrimSpace(strings.TrimPrefix(line, "+build "))
+ fields := strings.Fields(line)
+ out = append(out, fields)
+ }
+ return out
+}
diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go
new file mode 100644
index 000000000..2cd04ae80
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/lint.go
@@ -0,0 +1,2818 @@
+// Package staticcheck contains a linter for Go source code.
+package staticcheck // import "honnef.co/go/tools/staticcheck"
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ htmltemplate "html/template"
+ "net/http"
+ "regexp"
+ "regexp/syntax"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ texttemplate "text/template"
+
+ . "honnef.co/go/tools/arg"
+ "honnef.co/go/tools/deprecated"
+ "honnef.co/go/tools/functions"
+ "honnef.co/go/tools/internal/sharedcheck"
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+ "honnef.co/go/tools/ssa"
+ "honnef.co/go/tools/ssautil"
+ "honnef.co/go/tools/staticcheck/vrp"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/packages"
+)
+
+func validRegexp(call *Call) {
+ arg := call.Args[0]
+ err := ValidateRegexp(arg.Value)
+ if err != nil {
+ arg.Invalid(err.Error())
+ }
+}
+
+type runeSlice []rune
+
+func (rs runeSlice) Len() int { return len(rs) }
+func (rs runeSlice) Less(i int, j int) bool { return rs[i] < rs[j] }
+func (rs runeSlice) Swap(i int, j int) { rs[i], rs[j] = rs[j], rs[i] }
+
+func utf8Cutset(call *Call) {
+ arg := call.Args[1]
+ if InvalidUTF8(arg.Value) {
+ arg.Invalid(MsgInvalidUTF8)
+ }
+}
+
+func uniqueCutset(call *Call) {
+ arg := call.Args[1]
+ if !UniqueStringCutset(arg.Value) {
+ arg.Invalid(MsgNonUniqueCutset)
+ }
+}
+
+func unmarshalPointer(name string, arg int) CallCheck {
+ return func(call *Call) {
+ if !Pointer(call.Args[arg].Value) {
+ call.Args[arg].Invalid(fmt.Sprintf("%s expects to unmarshal into a pointer, but the provided value is not a pointer", name))
+ }
+ }
+}
+
+func pointlessIntMath(call *Call) {
+ if ConvertedFromInt(call.Args[0].Value) {
+ call.Invalid(fmt.Sprintf("calling %s on a converted integer is pointless", CallName(call.Instr.Common())))
+ }
+}
+
+func checkValidHostPort(arg int) CallCheck {
+ return func(call *Call) {
+ if !ValidHostPort(call.Args[arg].Value) {
+ call.Args[arg].Invalid(MsgInvalidHostPort)
+ }
+ }
+}
+
+var (
+ checkRegexpRules = map[string]CallCheck{
+ "regexp.MustCompile": validRegexp,
+ "regexp.Compile": validRegexp,
+ "regexp.Match": validRegexp,
+ "regexp.MatchReader": validRegexp,
+ "regexp.MatchString": validRegexp,
+ }
+
+ checkTimeParseRules = map[string]CallCheck{
+ "time.Parse": func(call *Call) {
+ arg := call.Args[Arg("time.Parse.layout")]
+ err := ValidateTimeLayout(arg.Value)
+ if err != nil {
+ arg.Invalid(err.Error())
+ }
+ },
+ }
+
+ checkEncodingBinaryRules = map[string]CallCheck{
+ "encoding/binary.Write": func(call *Call) {
+ arg := call.Args[Arg("encoding/binary.Write.data")]
+ if !CanBinaryMarshal(call.Job, arg.Value) {
+ arg.Invalid(fmt.Sprintf("value of type %s cannot be used with binary.Write", arg.Value.Value.Type()))
+ }
+ },
+ }
+
+ checkURLsRules = map[string]CallCheck{
+ "net/url.Parse": func(call *Call) {
+ arg := call.Args[Arg("net/url.Parse.rawurl")]
+ err := ValidateURL(arg.Value)
+ if err != nil {
+ arg.Invalid(err.Error())
+ }
+ },
+ }
+
+ checkSyncPoolValueRules = map[string]CallCheck{
+ "(*sync.Pool).Put": func(call *Call) {
+ arg := call.Args[Arg("(*sync.Pool).Put.x")]
+ typ := arg.Value.Value.Type()
+ if !IsPointerLike(typ) {
+ arg.Invalid("argument should be pointer-like to avoid allocations")
+ }
+ },
+ }
+
+ checkRegexpFindAllRules = map[string]CallCheck{
+ "(*regexp.Regexp).FindAll": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllIndex": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllString": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllStringIndex": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllStringSubmatch": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllStringSubmatchIndex": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllSubmatch": RepeatZeroTimes("a FindAll method", 1),
+ "(*regexp.Regexp).FindAllSubmatchIndex": RepeatZeroTimes("a FindAll method", 1),
+ }
+
+ checkUTF8CutsetRules = map[string]CallCheck{
+ "strings.IndexAny": utf8Cutset,
+ "strings.LastIndexAny": utf8Cutset,
+ "strings.ContainsAny": utf8Cutset,
+ "strings.Trim": utf8Cutset,
+ "strings.TrimLeft": utf8Cutset,
+ "strings.TrimRight": utf8Cutset,
+ }
+
+ checkUniqueCutsetRules = map[string]CallCheck{
+ "strings.Trim": uniqueCutset,
+ "strings.TrimLeft": uniqueCutset,
+ "strings.TrimRight": uniqueCutset,
+ }
+
+ checkUnmarshalPointerRules = map[string]CallCheck{
+ "encoding/xml.Unmarshal": unmarshalPointer("xml.Unmarshal", 1),
+ "(*encoding/xml.Decoder).Decode": unmarshalPointer("Decode", 0),
+ "(*encoding/xml.Decoder).DecodeElement": unmarshalPointer("DecodeElement", 0),
+ "encoding/json.Unmarshal": unmarshalPointer("json.Unmarshal", 1),
+ "(*encoding/json.Decoder).Decode": unmarshalPointer("Decode", 0),
+ }
+
+ checkUnbufferedSignalChanRules = map[string]CallCheck{
+ "os/signal.Notify": func(call *Call) {
+ arg := call.Args[Arg("os/signal.Notify.c")]
+ if UnbufferedChannel(arg.Value) {
+ arg.Invalid("the channel used with signal.Notify should be buffered")
+ }
+ },
+ }
+
+ checkMathIntRules = map[string]CallCheck{
+ "math.Ceil": pointlessIntMath,
+ "math.Floor": pointlessIntMath,
+ "math.IsNaN": pointlessIntMath,
+ "math.Trunc": pointlessIntMath,
+ "math.IsInf": pointlessIntMath,
+ }
+
+ checkStringsReplaceZeroRules = map[string]CallCheck{
+ "strings.Replace": RepeatZeroTimes("strings.Replace", 3),
+ "bytes.Replace": RepeatZeroTimes("bytes.Replace", 3),
+ }
+
+ checkListenAddressRules = map[string]CallCheck{
+ "net/http.ListenAndServe": checkValidHostPort(0),
+ "net/http.ListenAndServeTLS": checkValidHostPort(0),
+ }
+
+ checkBytesEqualIPRules = map[string]CallCheck{
+ "bytes.Equal": func(call *Call) {
+ if ConvertedFrom(call.Args[Arg("bytes.Equal.a")].Value, "net.IP") &&
+ ConvertedFrom(call.Args[Arg("bytes.Equal.b")].Value, "net.IP") {
+ call.Invalid("use net.IP.Equal to compare net.IPs, not bytes.Equal")
+ }
+ },
+ }
+
+ checkRegexpMatchLoopRules = map[string]CallCheck{
+ "regexp.Match": loopedRegexp("regexp.Match"),
+ "regexp.MatchReader": loopedRegexp("regexp.MatchReader"),
+ "regexp.MatchString": loopedRegexp("regexp.MatchString"),
+ }
+)
+
+type Checker struct {
+ CheckGenerated bool
+ funcDescs *functions.Descriptions
+ deprecatedObjs map[types.Object]string
+}
+
+func NewChecker() *Checker {
+ return &Checker{}
+}
+
+func (*Checker) Name() string { return "staticcheck" }
+func (*Checker) Prefix() string { return "SA" }
+
+func (c *Checker) Checks() []lint.Check {
+ return []lint.Check{
+ {ID: "SA1000", FilterGenerated: false, Fn: c.callChecker(checkRegexpRules)},
+ {ID: "SA1001", FilterGenerated: false, Fn: c.CheckTemplate},
+ {ID: "SA1002", FilterGenerated: false, Fn: c.callChecker(checkTimeParseRules)},
+ {ID: "SA1003", FilterGenerated: false, Fn: c.callChecker(checkEncodingBinaryRules)},
+ {ID: "SA1004", FilterGenerated: false, Fn: c.CheckTimeSleepConstant},
+ {ID: "SA1005", FilterGenerated: false, Fn: c.CheckExec},
+ {ID: "SA1006", FilterGenerated: false, Fn: c.CheckUnsafePrintf},
+ {ID: "SA1007", FilterGenerated: false, Fn: c.callChecker(checkURLsRules)},
+ {ID: "SA1008", FilterGenerated: false, Fn: c.CheckCanonicalHeaderKey},
+ {ID: "SA1010", FilterGenerated: false, Fn: c.callChecker(checkRegexpFindAllRules)},
+ {ID: "SA1011", FilterGenerated: false, Fn: c.callChecker(checkUTF8CutsetRules)},
+ {ID: "SA1012", FilterGenerated: false, Fn: c.CheckNilContext},
+ {ID: "SA1013", FilterGenerated: false, Fn: c.CheckSeeker},
+ {ID: "SA1014", FilterGenerated: false, Fn: c.callChecker(checkUnmarshalPointerRules)},
+ {ID: "SA1015", FilterGenerated: false, Fn: c.CheckLeakyTimeTick},
+ {ID: "SA1016", FilterGenerated: false, Fn: c.CheckUntrappableSignal},
+ {ID: "SA1017", FilterGenerated: false, Fn: c.callChecker(checkUnbufferedSignalChanRules)},
+ {ID: "SA1018", FilterGenerated: false, Fn: c.callChecker(checkStringsReplaceZeroRules)},
+ {ID: "SA1019", FilterGenerated: false, Fn: c.CheckDeprecated},
+ {ID: "SA1020", FilterGenerated: false, Fn: c.callChecker(checkListenAddressRules)},
+ {ID: "SA1021", FilterGenerated: false, Fn: c.callChecker(checkBytesEqualIPRules)},
+ {ID: "SA1023", FilterGenerated: false, Fn: c.CheckWriterBufferModified},
+ {ID: "SA1024", FilterGenerated: false, Fn: c.callChecker(checkUniqueCutsetRules)},
+ {ID: "SA1025", FilterGenerated: false, Fn: c.CheckTimerResetReturnValue},
+
+ {ID: "SA2000", FilterGenerated: false, Fn: c.CheckWaitgroupAdd},
+ {ID: "SA2001", FilterGenerated: false, Fn: c.CheckEmptyCriticalSection},
+ {ID: "SA2002", FilterGenerated: false, Fn: c.CheckConcurrentTesting},
+ {ID: "SA2003", FilterGenerated: false, Fn: c.CheckDeferLock},
+
+ {ID: "SA3000", FilterGenerated: false, Fn: c.CheckTestMainExit},
+ {ID: "SA3001", FilterGenerated: false, Fn: c.CheckBenchmarkN},
+
+ {ID: "SA4000", FilterGenerated: false, Fn: c.CheckLhsRhsIdentical},
+ {ID: "SA4001", FilterGenerated: false, Fn: c.CheckIneffectiveCopy},
+ {ID: "SA4002", FilterGenerated: false, Fn: c.CheckDiffSizeComparison},
+ {ID: "SA4003", FilterGenerated: false, Fn: c.CheckUnsignedComparison},
+ {ID: "SA4004", FilterGenerated: false, Fn: c.CheckIneffectiveLoop},
+ {ID: "SA4006", FilterGenerated: false, Fn: c.CheckUnreadVariableValues},
+ {ID: "SA4008", FilterGenerated: false, Fn: c.CheckLoopCondition},
+ {ID: "SA4009", FilterGenerated: false, Fn: c.CheckArgOverwritten},
+ {ID: "SA4010", FilterGenerated: false, Fn: c.CheckIneffectiveAppend},
+ {ID: "SA4011", FilterGenerated: false, Fn: c.CheckScopedBreak},
+ {ID: "SA4012", FilterGenerated: false, Fn: c.CheckNaNComparison},
+ {ID: "SA4013", FilterGenerated: false, Fn: c.CheckDoubleNegation},
+ {ID: "SA4014", FilterGenerated: false, Fn: c.CheckRepeatedIfElse},
+ {ID: "SA4015", FilterGenerated: false, Fn: c.callChecker(checkMathIntRules)},
+ {ID: "SA4016", FilterGenerated: false, Fn: c.CheckSillyBitwiseOps},
+ {ID: "SA4017", FilterGenerated: false, Fn: c.CheckPureFunctions},
+ {ID: "SA4018", FilterGenerated: true, Fn: c.CheckSelfAssignment},
+ {ID: "SA4019", FilterGenerated: true, Fn: c.CheckDuplicateBuildConstraints},
+
+ {ID: "SA5000", FilterGenerated: false, Fn: c.CheckNilMaps},
+ {ID: "SA5001", FilterGenerated: false, Fn: c.CheckEarlyDefer},
+ {ID: "SA5002", FilterGenerated: false, Fn: c.CheckInfiniteEmptyLoop},
+ {ID: "SA5003", FilterGenerated: false, Fn: c.CheckDeferInInfiniteLoop},
+ {ID: "SA5004", FilterGenerated: false, Fn: c.CheckLoopEmptyDefault},
+ {ID: "SA5005", FilterGenerated: false, Fn: c.CheckCyclicFinalizer},
+ {ID: "SA5007", FilterGenerated: false, Fn: c.CheckInfiniteRecursion},
+
+ {ID: "SA6000", FilterGenerated: false, Fn: c.callChecker(checkRegexpMatchLoopRules)},
+ {ID: "SA6001", FilterGenerated: false, Fn: c.CheckMapBytesKey},
+ {ID: "SA6002", FilterGenerated: false, Fn: c.callChecker(checkSyncPoolValueRules)},
+ {ID: "SA6003", FilterGenerated: false, Fn: c.CheckRangeStringRunes},
+ // {ID: "SA6004", FilterGenerated: false, Fn: c.CheckSillyRegexp},
+
+ {ID: "SA9001", FilterGenerated: false, Fn: c.CheckDubiousDeferInChannelRangeLoop},
+ {ID: "SA9002", FilterGenerated: false, Fn: c.CheckNonOctalFileMode},
+ {ID: "SA9003", FilterGenerated: false, Fn: c.CheckEmptyBranch},
+ {ID: "SA9004", FilterGenerated: false, Fn: c.CheckMissingEnumTypesInDeclaration},
+ }
+
+ // "SA5006": c.CheckSliceOutOfBounds,
+ // "SA4007": c.CheckPredeterminedBooleanExprs,
+}
+
+func (c *Checker) findDeprecated(prog *lint.Program) {
+ var docs []*ast.CommentGroup
+ var names []*ast.Ident
+
+ doDocs := func(pkg *packages.Package, names []*ast.Ident, docs []*ast.CommentGroup) {
+ var alt string
+ for _, doc := range docs {
+ if doc == nil {
+ continue
+ }
+ parts := strings.Split(doc.Text(), "\n\n")
+ last := parts[len(parts)-1]
+ if !strings.HasPrefix(last, "Deprecated: ") {
+ continue
+ }
+ alt = last[len("Deprecated: "):]
+ alt = strings.Replace(alt, "\n", " ", -1)
+ break
+ }
+ if alt == "" {
+ return
+ }
+
+ for _, name := range names {
+ obj := pkg.TypesInfo.ObjectOf(name)
+ c.deprecatedObjs[obj] = alt
+ }
+ }
+
+ for _, pkg := range prog.AllPackages {
+ for _, f := range pkg.Syntax {
+ fn := func(node ast.Node) bool {
+ if node == nil {
+ return true
+ }
+ var ret bool
+ switch node := node.(type) {
+ case *ast.GenDecl:
+ switch node.Tok {
+ case token.TYPE, token.CONST, token.VAR:
+ docs = append(docs, node.Doc)
+ return true
+ default:
+ return false
+ }
+ case *ast.FuncDecl:
+ docs = append(docs, node.Doc)
+ names = []*ast.Ident{node.Name}
+ ret = false
+ case *ast.TypeSpec:
+ docs = append(docs, node.Doc)
+ names = []*ast.Ident{node.Name}
+ ret = true
+ case *ast.ValueSpec:
+ docs = append(docs, node.Doc)
+ names = node.Names
+ ret = false
+ case *ast.File:
+ return true
+ case *ast.StructType:
+ for _, field := range node.Fields.List {
+ doDocs(pkg, field.Names, []*ast.CommentGroup{field.Doc})
+ }
+ return false
+ case *ast.InterfaceType:
+ for _, field := range node.Methods.List {
+ doDocs(pkg, field.Names, []*ast.CommentGroup{field.Doc})
+ }
+ return false
+ default:
+ return false
+ }
+ if len(names) == 0 || len(docs) == 0 {
+ return ret
+ }
+ doDocs(pkg, names, docs)
+
+ docs = docs[:0]
+ names = nil
+ return ret
+ }
+ ast.Inspect(f, fn)
+ }
+ }
+}
+
+func (c *Checker) Init(prog *lint.Program) {
+ wg := &sync.WaitGroup{}
+ wg.Add(2)
+ go func() {
+ c.funcDescs = functions.NewDescriptions(prog.SSA)
+ for _, fn := range prog.AllFunctions {
+ if fn.Blocks != nil {
+ applyStdlibKnowledge(fn)
+ ssa.OptimizeBlocks(fn)
+ }
+ }
+ wg.Done()
+ }()
+
+ go func() {
+ c.deprecatedObjs = map[types.Object]string{}
+ c.findDeprecated(prog)
+ wg.Done()
+ }()
+
+ wg.Wait()
+}
+
+func (c *Checker) isInLoop(b *ssa.BasicBlock) bool {
+ sets := c.funcDescs.Get(b.Parent()).Loops
+ for _, set := range sets {
+ if set[b] {
+ return true
+ }
+ }
+ return false
+}
+
+func applyStdlibKnowledge(fn *ssa.Function) {
+ if len(fn.Blocks) == 0 {
+ return
+ }
+
+ // comma-ok receiving from a time.Tick channel will never return
+ // ok == false, so any branching on the value of ok can be
+ // replaced with an unconditional jump. This will primarily match
+ // `for range time.Tick(x)` loops, but it can also match
+ // user-written code.
+ for _, block := range fn.Blocks {
+ if len(block.Instrs) < 3 {
+ continue
+ }
+ if len(block.Succs) != 2 {
+ continue
+ }
+ var instrs []*ssa.Instruction
+ for i, ins := range block.Instrs {
+ if _, ok := ins.(*ssa.DebugRef); ok {
+ continue
+ }
+ instrs = append(instrs, &block.Instrs[i])
+ }
+
+ for i, ins := range instrs {
+ unop, ok := (*ins).(*ssa.UnOp)
+ if !ok || unop.Op != token.ARROW {
+ continue
+ }
+ call, ok := unop.X.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ if !IsCallTo(call.Common(), "time.Tick") {
+ continue
+ }
+ ex, ok := (*instrs[i+1]).(*ssa.Extract)
+ if !ok || ex.Tuple != unop || ex.Index != 1 {
+ continue
+ }
+
+ ifstmt, ok := (*instrs[i+2]).(*ssa.If)
+ if !ok || ifstmt.Cond != ex {
+ continue
+ }
+
+ *instrs[i+2] = ssa.NewJump(block)
+ succ := block.Succs[1]
+ block.Succs = block.Succs[0:1]
+ succ.RemovePred(block)
+ }
+ }
+}
+
+func hasType(j *lint.Job, expr ast.Expr, name string) bool {
+ T := TypeOf(j, expr)
+ return IsType(T, name)
+}
+
+func (c *Checker) CheckUntrappableSignal(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAnyAST(j, call,
+ "os/signal.Ignore", "os/signal.Notify", "os/signal.Reset") {
+ return true
+ }
+ for _, arg := range call.Args {
+ if conv, ok := arg.(*ast.CallExpr); ok && isName(j, conv.Fun, "os.Signal") {
+ arg = conv.Args[0]
+ }
+
+ if isName(j, arg, "os.Kill") || isName(j, arg, "syscall.SIGKILL") {
+ j.Errorf(arg, "%s cannot be trapped (did you mean syscall.SIGTERM?)", Render(j, arg))
+ }
+ if isName(j, arg, "syscall.SIGSTOP") {
+ j.Errorf(arg, "%s signal cannot be trapped", Render(j, arg))
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckTemplate(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ var kind string
+ if IsCallToAST(j, call, "(*text/template.Template).Parse") {
+ kind = "text"
+ } else if IsCallToAST(j, call, "(*html/template.Template).Parse") {
+ kind = "html"
+ } else {
+ return true
+ }
+ sel := call.Fun.(*ast.SelectorExpr)
+ if !IsCallToAST(j, sel.X, "text/template.New") &&
+ !IsCallToAST(j, sel.X, "html/template.New") {
+			// TODO(dh): this is a cheap workaround for templates with
+			// different delims. A better solution with fewer false
+			// negatives would use data flow analysis to see where the
+			// template comes from and where it has been modified.
+ return true
+ }
+ s, ok := ExprToString(j, call.Args[Arg("(*text/template.Template).Parse.text")])
+ if !ok {
+ return true
+ }
+ var err error
+ switch kind {
+ case "text":
+ _, err = texttemplate.New("").Parse(s)
+ case "html":
+ _, err = htmltemplate.New("").Parse(s)
+ }
+ if err != nil {
+ // TODO(dominikh): whitelist other parse errors, if any
+ if strings.Contains(err.Error(), "unexpected") {
+ j.Errorf(call.Args[Arg("(*text/template.Template).Parse.text")], "%s", err)
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
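+// CheckTimeSleepConstant flags calls such as time.Sleep(1): the bare
+// constant is interpreted as one nanosecond, where the author most
+// likely meant a multiple of time.Second or similar.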
+func (c *Checker) CheckTimeSleepConstant(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, call, "time.Sleep") {
+ return true
+ }
+ lit, ok := call.Args[Arg("time.Sleep.d")].(*ast.BasicLit)
+ if !ok {
+ return true
+ }
+ n, err := strconv.Atoi(lit.Value)
+ if err != nil {
+ return true
+ }
+ if n == 0 || n > 120 {
+ // time.Sleep(0) is a seldom used pattern in concurrency
+ // tests. >120 might be intentional. 120 was chosen
+ // because the user could've meant 2 minutes.
+ return true
+ }
+ recommendation := "time.Sleep(time.Nanosecond)"
+ if n != 1 {
+ recommendation = fmt.Sprintf("time.Sleep(%d * time.Nanosecond)", n)
+ }
+ j.Errorf(call.Args[Arg("time.Sleep.d")],
+ "sleeping for %d nanoseconds is probably a bug. Be explicit if it isn't: %s", n, recommendation)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
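+// CheckWaitgroupAdd flags goroutines whose first statement calls
+// (*sync.WaitGroup).Add, e.g. (a sketch):
+//
+//	go func() {
+//		wg.Add(1)
+//		...
+//	}()
+//
+// The Add may race with the corresponding Wait and should run before
+// the goroutine is started.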
+func (c *Checker) CheckWaitgroupAdd(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ g, ok := node.(*ast.GoStmt)
+ if !ok {
+ return true
+ }
+ fun, ok := g.Call.Fun.(*ast.FuncLit)
+ if !ok {
+ return true
+ }
+ if len(fun.Body.List) == 0 {
+ return true
+ }
+ stmt, ok := fun.Body.List[0].(*ast.ExprStmt)
+ if !ok {
+ return true
+ }
+ call, ok := stmt.X.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ fn, ok := ObjectOf(j, sel.Sel).(*types.Func)
+ if !ok {
+ return true
+ }
+ if fn.FullName() == "(*sync.WaitGroup).Add" {
+ j.Errorf(sel, "should call %s before starting the goroutine to avoid a race",
+ Render(j, stmt))
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckInfiniteEmptyLoop(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.ForStmt)
+ if !ok || len(loop.Body.List) != 0 || loop.Post != nil {
+ return true
+ }
+
+ if loop.Init != nil {
+ // TODO(dh): this isn't strictly necessary, it just makes
+ // the check easier.
+ return true
+ }
+ // An empty loop is bad news in two cases: 1) The loop has no
+ // condition. In that case, it's just a loop that spins
+ // forever and as fast as it can, keeping a core busy. 2) The
+ // loop condition only consists of variable or field reads and
+ // operators on those. The only way those could change their
+ // value is with unsynchronised access, which constitutes a
+ // data race.
+ //
+ // If the condition contains any function calls, its behaviour
+ // is dynamic and the loop might terminate. Similarly for
+ // channel receives.
+
+ if loop.Cond != nil && hasSideEffects(loop.Cond) {
+ return true
+ }
+
+ j.Errorf(loop, "this loop will spin, using 100%% CPU")
+ if loop.Cond != nil {
+ j.Errorf(loop, "loop condition never changes or has a race condition")
+ }
+
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckDeferInInfiniteLoop(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ mightExit := false
+ var defers []ast.Stmt
+ loop, ok := node.(*ast.ForStmt)
+ if !ok || loop.Cond != nil {
+ return true
+ }
+ fn2 := func(node ast.Node) bool {
+ switch stmt := node.(type) {
+ case *ast.ReturnStmt:
+ mightExit = true
+ case *ast.BranchStmt:
+ // TODO(dominikh): if this sees a break in a switch or
+ // select, it doesn't check if it breaks the loop or
+ // just the select/switch. This causes some false
+ // negatives.
+ if stmt.Tok == token.BREAK {
+ mightExit = true
+ }
+ case *ast.DeferStmt:
+ defers = append(defers, stmt)
+ case *ast.FuncLit:
+ // Don't look into function bodies
+ return false
+ }
+ return true
+ }
+ ast.Inspect(loop.Body, fn2)
+ if mightExit {
+ return true
+ }
+ for _, stmt := range defers {
+ j.Errorf(stmt, "defers in this infinite loop will never run")
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckDubiousDeferInChannelRangeLoop(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return true
+ }
+ typ := TypeOf(j, loop.X)
+ _, ok = typ.Underlying().(*types.Chan)
+ if !ok {
+ return true
+ }
+ fn2 := func(node ast.Node) bool {
+ switch stmt := node.(type) {
+ case *ast.DeferStmt:
+ j.Errorf(stmt, "defers in this range loop won't run unless the channel gets closed")
+ case *ast.FuncLit:
+ // Don't look into function bodies
+ return false
+ }
+ return true
+ }
+ ast.Inspect(loop.Body, fn2)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckTestMainExit(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ if !isTestMain(j, node) {
+ return true
+ }
+
+ arg := ObjectOf(j, node.(*ast.FuncDecl).Type.Params.List[0].Names[0])
+ callsRun := false
+ fn2 := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ ident, ok := sel.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if arg != ObjectOf(j, ident) {
+ return true
+ }
+ if sel.Sel.Name == "Run" {
+ callsRun = true
+ return false
+ }
+ return true
+ }
+ ast.Inspect(node.(*ast.FuncDecl).Body, fn2)
+
+ callsExit := false
+ fn3 := func(node ast.Node) bool {
+ if IsCallToAST(j, node, "os.Exit") {
+ callsExit = true
+ return false
+ }
+ return true
+ }
+ ast.Inspect(node.(*ast.FuncDecl).Body, fn3)
+ if !callsExit && callsRun {
+ j.Errorf(node, "TestMain should call os.Exit to set exit code")
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func isTestMain(j *lint.Job, node ast.Node) bool {
+ decl, ok := node.(*ast.FuncDecl)
+ if !ok {
+ return false
+ }
+ if decl.Name.Name != "TestMain" {
+ return false
+ }
+ if len(decl.Type.Params.List) != 1 {
+ return false
+ }
+ arg := decl.Type.Params.List[0]
+ if len(arg.Names) != 1 {
+ return false
+ }
+ return IsOfType(j, arg.Type, "*testing.M")
+}
+
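+// CheckExec flags calls such as exec.Command("ls -l"), whose first
+// argument looks like an entire shell command line; the intended
+// call is presumably exec.Command("ls", "-l").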
+func (c *Checker) CheckExec(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAST(j, call, "os/exec.Command") {
+ return true
+ }
+ val, ok := ExprToString(j, call.Args[Arg("os/exec.Command.name")])
+ if !ok {
+ return true
+ }
+ if !strings.Contains(val, " ") || strings.Contains(val, `\`) || strings.Contains(val, "/") {
+ return true
+ }
+ j.Errorf(call.Args[Arg("os/exec.Command.name")],
+			"first argument to exec.Command looks like a shell command, but a program name or path is expected")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckLoopEmptyDefault(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.ForStmt)
+ if !ok || len(loop.Body.List) != 1 || loop.Cond != nil || loop.Init != nil {
+ return true
+ }
+ sel, ok := loop.Body.List[0].(*ast.SelectStmt)
+ if !ok {
+ return true
+ }
+ for _, c := range sel.Body.List {
+ if comm, ok := c.(*ast.CommClause); ok && comm.Comm == nil && len(comm.Body) == 0 {
+ j.Errorf(comm, "should not have an empty default case in a for+select loop. The loop will spin.")
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckLhsRhsIdentical(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ op, ok := node.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ switch op.Op {
+ case token.EQL, token.NEQ:
+ if basic, ok := TypeOf(j, op.X).(*types.Basic); ok {
+ if kind := basic.Kind(); kind == types.Float32 || kind == types.Float64 {
+ // f == f and f != f might be used to check for NaN
+ return true
+ }
+ }
+ case token.SUB, token.QUO, token.AND, token.REM, token.OR, token.XOR, token.AND_NOT,
+ token.LAND, token.LOR, token.LSS, token.GTR, token.LEQ, token.GEQ:
+ default:
+ // For some ops, such as + and *, it can make sense to
+ // have identical operands
+ return true
+ }
+
+ if Render(j, op.X) != Render(j, op.Y) {
+ return true
+ }
+ j.Errorf(op, "identical expressions on the left and right side of the '%s' operator", op.Op)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckScopedBreak(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ var body *ast.BlockStmt
+ switch node := node.(type) {
+ case *ast.ForStmt:
+ body = node.Body
+ case *ast.RangeStmt:
+ body = node.Body
+ default:
+ return true
+ }
+ for _, stmt := range body.List {
+ var blocks [][]ast.Stmt
+ switch stmt := stmt.(type) {
+ case *ast.SwitchStmt:
+ for _, c := range stmt.Body.List {
+ blocks = append(blocks, c.(*ast.CaseClause).Body)
+ }
+ case *ast.SelectStmt:
+ for _, c := range stmt.Body.List {
+ blocks = append(blocks, c.(*ast.CommClause).Body)
+ }
+ default:
+ continue
+ }
+
+ for _, body := range blocks {
+ if len(body) == 0 {
+ continue
+ }
+ lasts := []ast.Stmt{body[len(body)-1]}
+ // TODO(dh): unfold all levels of nested block
+ // statements, not just a single level if statement
+ if ifs, ok := lasts[0].(*ast.IfStmt); ok {
+ if len(ifs.Body.List) == 0 {
+ continue
+ }
+ lasts[0] = ifs.Body.List[len(ifs.Body.List)-1]
+
+ if block, ok := ifs.Else.(*ast.BlockStmt); ok {
+ if len(block.List) != 0 {
+ lasts = append(lasts, block.List[len(block.List)-1])
+ }
+ }
+ }
+ for _, last := range lasts {
+ branch, ok := last.(*ast.BranchStmt)
+ if !ok || branch.Tok != token.BREAK || branch.Label != nil {
+ continue
+ }
+ j.Errorf(branch, "ineffective break statement. Did you mean to break out of the outer loop?")
+ }
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
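+// CheckUnsafePrintf flags calls such as fmt.Printf(s), where s is a
+// dynamic value and no further arguments are given; any % verbs in s
+// would be misinterpreted, so fmt.Print(s) is the safer choice.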
+func (c *Checker) CheckUnsafePrintf(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !IsCallToAnyAST(j, call, "fmt.Printf", "fmt.Sprintf", "log.Printf") {
+ return true
+ }
+ if len(call.Args) != 1 {
+ return true
+ }
+ switch call.Args[Arg("fmt.Printf.format")].(type) {
+ case *ast.CallExpr, *ast.Ident:
+ default:
+ return true
+ }
+ j.Errorf(call.Args[Arg("fmt.Printf.format")],
+			"printf-style function with a dynamic first argument and no further arguments should use a print-style function instead")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
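+// CheckEarlyDefer flags patterns like (a sketch):
+//
+//	f, err := os.Open(name)
+//	defer f.Close()
+//	if err != nil {
+//		return err
+//	}
+//
+// where Close is deferred before the error is checked; if Open
+// failed, f may be nil.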
+func (c *Checker) CheckEarlyDefer(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ block, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return true
+ }
+ if len(block.List) < 2 {
+ return true
+ }
+ for i, stmt := range block.List {
+ if i == len(block.List)-1 {
+ break
+ }
+ assign, ok := stmt.(*ast.AssignStmt)
+ if !ok {
+ continue
+ }
+ if len(assign.Rhs) != 1 {
+ continue
+ }
+ if len(assign.Lhs) < 2 {
+ continue
+ }
+ if lhs, ok := assign.Lhs[len(assign.Lhs)-1].(*ast.Ident); ok && lhs.Name == "_" {
+ continue
+ }
+ call, ok := assign.Rhs[0].(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ sig, ok := TypeOf(j, call.Fun).(*types.Signature)
+ if !ok {
+ continue
+ }
+ if sig.Results().Len() < 2 {
+ continue
+ }
+ last := sig.Results().At(sig.Results().Len() - 1)
+ // FIXME(dh): check that it's the error type from the
+ // universe scope, not another type of the same name
+ if last.Type().String() != "error" {
+ continue
+ }
+ lhs, ok := assign.Lhs[0].(*ast.Ident)
+ if !ok {
+ continue
+ }
+ def, ok := block.List[i+1].(*ast.DeferStmt)
+ if !ok {
+ continue
+ }
+ sel, ok := def.Call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+ ident, ok := selectorX(sel).(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if ident.Obj != lhs.Obj {
+ continue
+ }
+ if sel.Sel.Name != "Close" {
+ continue
+ }
+ j.Errorf(def, "should check returned error before deferring %s", Render(j, def.Call))
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
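+
+// Illustrative sketch (added for exposition): the pattern CheckEarlyDefer
+// flags, where Close is deferred before the error has been checked:
+//
+//    f, err := os.Open(path)
+//    defer f.Close() // flagged: f may be nil if err != nil
+//    if err != nil {
+//        return err
+//    }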
+
+func selectorX(sel *ast.SelectorExpr) ast.Node {
+ switch x := sel.X.(type) {
+ case *ast.SelectorExpr:
+ return selectorX(x)
+ default:
+ return x
+ }
+}
+
+func (c *Checker) CheckEmptyCriticalSection(j *lint.Job) {
+ // Initially it might seem like this check would be easier to
+ // implement in SSA. After all, we're only checking for two
+ // consecutive method calls. In reality, however, there may be any
+ // number of other instructions between the lock and unlock, while
+ // still constituting an empty critical section. For example,
+ // given `m.x().Lock(); m.x().Unlock()`, there will be a call to
+ // x(). In the AST-based approach, this has a tiny potential for a
+ // false positive (the second call to x might be doing work that
+ // is protected by the mutex). An SSA-based approach, however,
+ // would miss a lot of real bugs.
+
+ mutexParams := func(s ast.Stmt) (x ast.Expr, funcName string, ok bool) {
+ expr, ok := s.(*ast.ExprStmt)
+ if !ok {
+ return nil, "", false
+ }
+ call, ok := expr.X.(*ast.CallExpr)
+ if !ok {
+ return nil, "", false
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return nil, "", false
+ }
+
+ fn, ok := ObjectOf(j, sel.Sel).(*types.Func)
+ if !ok {
+ return nil, "", false
+ }
+ sig := fn.Type().(*types.Signature)
+ if sig.Params().Len() != 0 || sig.Results().Len() != 0 {
+ return nil, "", false
+ }
+
+ return sel.X, fn.Name(), true
+ }
+
+ fn := func(node ast.Node) bool {
+ block, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return true
+ }
+ if len(block.List) < 2 {
+ return true
+ }
+ for i := range block.List[:len(block.List)-1] {
+ sel1, method1, ok1 := mutexParams(block.List[i])
+ sel2, method2, ok2 := mutexParams(block.List[i+1])
+
+ if !ok1 || !ok2 || Render(j, sel1) != Render(j, sel2) {
+ continue
+ }
+ if (method1 == "Lock" && method2 == "Unlock") ||
+ (method1 == "RLock" && method2 == "RUnlock") {
+ j.Errorf(block.List[i+1], "empty critical section")
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+// cgo produces code like fn(&*_Cvar_kSomeCallbacks) which we don't
+// want to flag.
+var cgoIdent = regexp.MustCompile(`^_C(func|var)_.+$`)
+
+func (c *Checker) CheckIneffectiveCopy(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ if unary, ok := node.(*ast.UnaryExpr); ok {
+ if star, ok := unary.X.(*ast.StarExpr); ok && unary.Op == token.AND {
+ ident, ok := star.X.(*ast.Ident)
+ if !ok || !cgoIdent.MatchString(ident.Name) {
+ j.Errorf(unary, "&*x will be simplified to x. It will not copy x.")
+ }
+ }
+ }
+
+ if star, ok := node.(*ast.StarExpr); ok {
+ if unary, ok := star.X.(*ast.UnaryExpr); ok && unary.Op == token.AND {
+ j.Errorf(star, "*&x will be simplified to x. It will not copy x.")
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckDiffSizeComparison(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, b := range ssafn.Blocks {
+ for _, ins := range b.Instrs {
+ binop, ok := ins.(*ssa.BinOp)
+ if !ok {
+ continue
+ }
+ if binop.Op != token.EQL && binop.Op != token.NEQ {
+ continue
+ }
+ _, ok1 := binop.X.(*ssa.Slice)
+ _, ok2 := binop.Y.(*ssa.Slice)
+ if !ok1 && !ok2 {
+ continue
+ }
+ r := c.funcDescs.Get(ssafn).Ranges
+ r1, ok1 := r.Get(binop.X).(vrp.StringInterval)
+ r2, ok2 := r.Get(binop.Y).(vrp.StringInterval)
+ if !ok1 || !ok2 {
+ continue
+ }
+ if r1.Length.Intersection(r2.Length).Empty() {
+ j.Errorf(binop, "comparing strings of different sizes for equality will always return false")
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckCanonicalHeaderKey(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ assign, ok := node.(*ast.AssignStmt)
+ if ok {
+ // TODO(dh): This risks missing some Header reads, for
+ // example in `h1["foo"] = h2["foo"]` – these edge
+ // cases are probably rare enough to ignore for now.
+ for _, expr := range assign.Lhs {
+ op, ok := expr.(*ast.IndexExpr)
+ if !ok {
+ continue
+ }
+ if hasType(j, op.X, "net/http.Header") {
+ return false
+ }
+ }
+ return true
+ }
+ op, ok := node.(*ast.IndexExpr)
+ if !ok {
+ return true
+ }
+ if !hasType(j, op.X, "net/http.Header") {
+ return true
+ }
+ s, ok := ExprToString(j, op.Index)
+ if !ok {
+ return true
+ }
+ if s == http.CanonicalHeaderKey(s) {
+ return true
+ }
+ j.Errorf(op, "keys in http.Header are canonicalized, %q is not canonical; fix the constant or use http.CanonicalHeaderKey", s)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
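+
+// Illustrative sketch (added for exposition): http.Header stores keys in
+// canonical form, so reads with non-canonical constant keys never match:
+//
+//    v := h["content-type"]                         // flagged: never matches
+//    v = h[http.CanonicalHeaderKey("content-type")] // "Content-Type", correct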
+
+func (c *Checker) CheckBenchmarkN(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ assign, ok := node.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
+ return true
+ }
+ sel, ok := assign.Lhs[0].(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ if sel.Sel.Name != "N" {
+ return true
+ }
+ if !hasType(j, sel.X, "*testing.B") {
+ return true
+ }
+ j.Errorf(assign, "should not assign to %s", Render(j, sel))
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckUnreadVariableValues(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ if IsExample(ssafn) {
+ continue
+ }
+ node := ssafn.Syntax()
+ if node == nil {
+ continue
+ }
+
+ ast.Inspect(node, func(node ast.Node) bool {
+ assign, ok := node.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if len(assign.Lhs) > 1 && len(assign.Rhs) == 1 {
+ // Either a function call with multiple return values,
+ // or a comma-ok assignment
+
+ val, _ := ssafn.ValueForExpr(assign.Rhs[0])
+ if val == nil {
+ return true
+ }
+ refs := val.Referrers()
+ if refs == nil {
+ return true
+ }
+ for _, ref := range *refs {
+ ex, ok := ref.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+ exrefs := ex.Referrers()
+ if exrefs == nil {
+ continue
+ }
+ if len(FilterDebug(*exrefs)) == 0 {
+ lhs := assign.Lhs[ex.Index]
+ if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" {
+ continue
+ }
+ j.Errorf(lhs, "this value of %s is never used", lhs)
+ }
+ }
+ return true
+ }
+ for i, lhs := range assign.Lhs {
+ rhs := assign.Rhs[i]
+ if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" {
+ continue
+ }
+ val, _ := ssafn.ValueForExpr(rhs)
+ if val == nil {
+ continue
+ }
+
+ refs := val.Referrers()
+ if refs == nil {
+ // TODO investigate why refs can be nil
+ return true
+ }
+ if len(FilterDebug(*refs)) == 0 {
+ j.Errorf(lhs, "this value of %s is never used", lhs)
+ }
+ }
+ return true
+ })
+ }
+}
+
+func (c *Checker) CheckPredeterminedBooleanExprs(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ ssabinop, ok := ins.(*ssa.BinOp)
+ if !ok {
+ continue
+ }
+ switch ssabinop.Op {
+ case token.GTR, token.LSS, token.EQL, token.NEQ, token.LEQ, token.GEQ:
+ default:
+ continue
+ }
+
+ xs, ok1 := consts(ssabinop.X, nil, nil)
+ ys, ok2 := consts(ssabinop.Y, nil, nil)
+ if !ok1 || !ok2 || len(xs) == 0 || len(ys) == 0 {
+ continue
+ }
+
+ trues := 0
+ for _, x := range xs {
+ for _, y := range ys {
+ if x.Value == nil {
+ if y.Value == nil {
+ trues++
+ }
+ continue
+ }
+ if constant.Compare(x.Value, ssabinop.Op, y.Value) {
+ trues++
+ }
+ }
+ }
+ b := trues != 0
+ if trues == 0 || trues == len(xs)*len(ys) {
+ j.Errorf(ssabinop, "binary expression is always %t for all possible values (%s %s %s)",
+ b, xs, ssabinop.Op, ys)
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckNilMaps(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ mu, ok := ins.(*ssa.MapUpdate)
+ if !ok {
+ continue
+ }
+ c, ok := mu.Map.(*ssa.Const)
+ if !ok {
+ continue
+ }
+ if c.Value != nil {
+ continue
+ }
+ j.Errorf(mu, "assignment to nil map")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckUnsignedComparison(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ expr, ok := node.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ tx := TypeOf(j, expr.X)
+ basic, ok := tx.Underlying().(*types.Basic)
+ if !ok {
+ return true
+ }
+ if (basic.Info() & types.IsUnsigned) == 0 {
+ return true
+ }
+ lit, ok := expr.Y.(*ast.BasicLit)
+ if !ok || lit.Value != "0" {
+ return true
+ }
+ switch expr.Op {
+ case token.GEQ:
+ j.Errorf(expr, "unsigned values are always >= 0")
+ case token.LSS:
+ j.Errorf(expr, "unsigned values are never < 0")
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
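+
+// Illustrative sketch (added for exposition): comparisons of unsigned
+// values against 0 that CheckUnsignedComparison flags:
+//
+//    var u uint
+//    if u >= 0 { ... } // flagged: always true
+//    if u < 0 { ... }  // flagged: always false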
+
+func consts(val ssa.Value, out []*ssa.Const, visitedPhis map[string]bool) ([]*ssa.Const, bool) {
+ if visitedPhis == nil {
+ visitedPhis = map[string]bool{}
+ }
+ var ok bool
+ switch val := val.(type) {
+ case *ssa.Phi:
+ if visitedPhis[val.Name()] {
+ break
+ }
+ visitedPhis[val.Name()] = true
+ vals := val.Operands(nil)
+ for _, phival := range vals {
+ out, ok = consts(*phival, out, visitedPhis)
+ if !ok {
+ return nil, false
+ }
+ }
+ case *ssa.Const:
+ out = append(out, val)
+ case *ssa.Convert:
+ out, ok = consts(val.X, out, visitedPhis)
+ if !ok {
+ return nil, false
+ }
+ default:
+ return nil, false
+ }
+ if len(out) < 2 {
+ return out, true
+ }
+ uniq := []*ssa.Const{out[0]}
+ for _, val := range out[1:] {
+ if val.Value == uniq[len(uniq)-1].Value {
+ continue
+ }
+ uniq = append(uniq, val)
+ }
+ return uniq, true
+}
+
+func (c *Checker) CheckLoopCondition(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ fn := func(node ast.Node) bool {
+ loop, ok := node.(*ast.ForStmt)
+ if !ok {
+ return true
+ }
+ if loop.Init == nil || loop.Cond == nil || loop.Post == nil {
+ return true
+ }
+ init, ok := loop.Init.(*ast.AssignStmt)
+ if !ok || len(init.Lhs) != 1 || len(init.Rhs) != 1 {
+ return true
+ }
+ cond, ok := loop.Cond.(*ast.BinaryExpr)
+ if !ok {
+ return true
+ }
+ x, ok := cond.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ lhs, ok := init.Lhs[0].(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if x.Obj != lhs.Obj {
+ return true
+ }
+ if _, ok := loop.Post.(*ast.IncDecStmt); !ok {
+ return true
+ }
+
+ v, isAddr := ssafn.ValueForExpr(cond.X)
+ if v == nil || isAddr {
+ return true
+ }
+ switch v := v.(type) {
+ case *ssa.Phi:
+ ops := v.Operands(nil)
+ if len(ops) != 2 {
+ return true
+ }
+ _, ok := (*ops[0]).(*ssa.Const)
+ if !ok {
+ return true
+ }
+ sigma, ok := (*ops[1]).(*ssa.Sigma)
+ if !ok {
+ return true
+ }
+ if sigma.X != v {
+ return true
+ }
+ case *ssa.UnOp:
+ return true
+ }
+ j.Errorf(cond, "variable in loop condition never changes")
+
+ return true
+ }
+ Inspect(ssafn.Syntax(), fn)
+ }
+}
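+
+// Illustrative sketch (added for exposition): a loop whose condition
+// variable is never updated, as detected by CheckLoopCondition:
+//
+//    for i := 0; i < 10; j++ { // flagged: i never changes
+//        ...
+//    }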
+
+func (c *Checker) CheckArgOverwritten(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ fn := func(node ast.Node) bool {
+ var typ *ast.FuncType
+ var body *ast.BlockStmt
+ switch fn := node.(type) {
+ case *ast.FuncDecl:
+ typ = fn.Type
+ body = fn.Body
+ case *ast.FuncLit:
+ typ = fn.Type
+ body = fn.Body
+ }
+ if body == nil {
+ return true
+ }
+ if len(typ.Params.List) == 0 {
+ return true
+ }
+ for _, field := range typ.Params.List {
+ for _, arg := range field.Names {
+ obj := ObjectOf(j, arg)
+ var ssaobj *ssa.Parameter
+ for _, param := range ssafn.Params {
+ if param.Object() == obj {
+ ssaobj = param
+ break
+ }
+ }
+ if ssaobj == nil {
+ continue
+ }
+ refs := ssaobj.Referrers()
+ if refs == nil {
+ continue
+ }
+ if len(FilterDebug(*refs)) != 0 {
+ continue
+ }
+
+ assigned := false
+ ast.Inspect(body, func(node ast.Node) bool {
+ assign, ok := node.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ for _, lhs := range assign.Lhs {
+ ident, ok := lhs.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if ObjectOf(j, ident) == obj {
+ assigned = true
+ return false
+ }
+ }
+ return true
+ })
+ if assigned {
+ j.Errorf(arg, "argument %s is overwritten before first use", arg)
+ }
+ }
+ }
+ return true
+ }
+ Inspect(ssafn.Syntax(), fn)
+ }
+}
+
+func (c *Checker) CheckIneffectiveLoop(j *lint.Job) {
+ // This check detects some, but not all unconditional loop exits.
+ // We give up in the following cases:
+ //
+ // - a goto anywhere in the loop. The goto might skip over our
+ // return, and we don't check that it doesn't.
+ //
+ // - any nested, unlabelled continue, even if it is in another
+ // loop or closure.
+ fn := func(node ast.Node) bool {
+ var body *ast.BlockStmt
+ switch fn := node.(type) {
+ case *ast.FuncDecl:
+ body = fn.Body
+ case *ast.FuncLit:
+ body = fn.Body
+ default:
+ return true
+ }
+ if body == nil {
+ return true
+ }
+ labels := map[*ast.Object]ast.Stmt{}
+ ast.Inspect(body, func(node ast.Node) bool {
+ label, ok := node.(*ast.LabeledStmt)
+ if !ok {
+ return true
+ }
+ labels[label.Label.Obj] = label.Stmt
+ return true
+ })
+
+ ast.Inspect(body, func(node ast.Node) bool {
+ var loop ast.Node
+ var body *ast.BlockStmt
+ switch node := node.(type) {
+ case *ast.ForStmt:
+ body = node.Body
+ loop = node
+ case *ast.RangeStmt:
+ typ := TypeOf(j, node.X)
+ if _, ok := typ.Underlying().(*types.Map); ok {
+ // looping once over a map is a valid pattern for
+ // getting an arbitrary element.
+ return true
+ }
+ body = node.Body
+ loop = node
+ default:
+ return true
+ }
+ if len(body.List) < 2 {
+ // avoid flagging the somewhat common pattern of using
+ // a range loop to get the first element in a slice,
+ // or the first rune in a string.
+ return true
+ }
+ var unconditionalExit ast.Node
+ hasBranching := false
+ for _, stmt := range body.List {
+ switch stmt := stmt.(type) {
+ case *ast.BranchStmt:
+ switch stmt.Tok {
+ case token.BREAK:
+ if stmt.Label == nil || labels[stmt.Label.Obj] == loop {
+ unconditionalExit = stmt
+ }
+ case token.CONTINUE:
+ if stmt.Label == nil || labels[stmt.Label.Obj] == loop {
+ unconditionalExit = nil
+ return false
+ }
+ }
+ case *ast.ReturnStmt:
+ unconditionalExit = stmt
+ case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.SelectStmt:
+ hasBranching = true
+ }
+ }
+ if unconditionalExit == nil || !hasBranching {
+ return false
+ }
+ ast.Inspect(body, func(node ast.Node) bool {
+ if branch, ok := node.(*ast.BranchStmt); ok {
+
+ switch branch.Tok {
+ case token.GOTO:
+ unconditionalExit = nil
+ return false
+ case token.CONTINUE:
+ if branch.Label != nil && labels[branch.Label.Obj] != loop {
+ return true
+ }
+ unconditionalExit = nil
+ return false
+ }
+ }
+ return true
+ })
+ if unconditionalExit != nil {
+ j.Errorf(unconditionalExit, "the surrounding loop is unconditionally terminated")
+ }
+ return true
+ })
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckNilContext(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if len(call.Args) == 0 {
+ return true
+ }
+ if typ, ok := TypeOf(j, call.Args[0]).(*types.Basic); !ok || typ.Kind() != types.UntypedNil {
+ return true
+ }
+ sig, ok := TypeOf(j, call.Fun).(*types.Signature)
+ if !ok {
+ return true
+ }
+ if sig.Params().Len() == 0 {
+ return true
+ }
+ if !IsType(sig.Params().At(0).Type(), "context.Context") {
+ return true
+ }
+ j.Errorf(call.Args[0],
+ "do not pass a nil Context, even if a function permits it; pass context.TODO if you are unsure about which Context to use")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckSeeker(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ if sel.Sel.Name != "Seek" {
+ return true
+ }
+ if len(call.Args) != 2 {
+ return true
+ }
+ arg0, ok := call.Args[Arg("(io.Seeker).Seek.offset")].(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ switch arg0.Sel.Name {
+ case "SeekStart", "SeekCurrent", "SeekEnd":
+ default:
+ return true
+ }
+ pkg, ok := arg0.X.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if pkg.Name != "io" {
+ return true
+ }
+ j.Errorf(call, "the first argument of io.Seeker is the offset, but an io.Seek* constant is being used instead")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
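+
+// Illustrative sketch (added for exposition): the argument mix-up that
+// CheckSeeker detects; Seek takes the offset first and the whence
+// constant second:
+//
+//    f.Seek(io.SeekStart, 0) // flagged: arguments swapped
+//    f.Seek(0, io.SeekStart) // correct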
+
+func (c *Checker) CheckIneffectiveAppend(j *lint.Job) {
+ isAppend := func(ins ssa.Value) bool {
+ call, ok := ins.(*ssa.Call)
+ if !ok {
+ return false
+ }
+ if call.Call.IsInvoke() {
+ return false
+ }
+ if builtin, ok := call.Call.Value.(*ssa.Builtin); !ok || builtin.Name() != "append" {
+ return false
+ }
+ return true
+ }
+
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ val, ok := ins.(ssa.Value)
+ if !ok || !isAppend(val) {
+ continue
+ }
+
+ isUsed := false
+ visited := map[ssa.Instruction]bool{}
+ var walkRefs func(refs []ssa.Instruction)
+ walkRefs = func(refs []ssa.Instruction) {
+ loop:
+ for _, ref := range refs {
+ if visited[ref] {
+ continue
+ }
+ visited[ref] = true
+ if _, ok := ref.(*ssa.DebugRef); ok {
+ continue
+ }
+ switch ref := ref.(type) {
+ case *ssa.Phi:
+ walkRefs(*ref.Referrers())
+ case *ssa.Sigma:
+ walkRefs(*ref.Referrers())
+ case ssa.Value:
+ if !isAppend(ref) {
+ isUsed = true
+ } else {
+ walkRefs(*ref.Referrers())
+ }
+ case ssa.Instruction:
+ isUsed = true
+ break loop
+ }
+ }
+ }
+ refs := val.Referrers()
+ if refs == nil {
+ continue
+ }
+ walkRefs(*refs)
+ if !isUsed {
+ j.Errorf(ins, "this result of append is never used, except maybe in other appends")
+ }
+ }
+ }
+ }
+}
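+
+// Illustrative sketch (added for exposition): an append whose result is
+// only ever fed into further appends and never read otherwise:
+//
+//    var log []string
+//    for _, e := range events {
+//        log = append(log, e.Name) // flagged if log is never read afterwards
+//    }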
+
+func (c *Checker) CheckConcurrentTesting(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ gostmt, ok := ins.(*ssa.Go)
+ if !ok {
+ continue
+ }
+ var fn *ssa.Function
+ switch val := gostmt.Call.Value.(type) {
+ case *ssa.Function:
+ fn = val
+ case *ssa.MakeClosure:
+ fn = val.Fn.(*ssa.Function)
+ default:
+ continue
+ }
+ if fn.Blocks == nil {
+ continue
+ }
+ for _, block := range fn.Blocks {
+ for _, ins := range block.Instrs {
+ call, ok := ins.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ if call.Call.IsInvoke() {
+ continue
+ }
+ callee := call.Call.StaticCallee()
+ if callee == nil {
+ continue
+ }
+ recv := callee.Signature.Recv()
+ if recv == nil {
+ continue
+ }
+ if !IsType(recv.Type(), "*testing.common") {
+ continue
+ }
+ fn, ok := call.Call.StaticCallee().Object().(*types.Func)
+ if !ok {
+ continue
+ }
+ name := fn.Name()
+ switch name {
+ case "FailNow", "Fatal", "Fatalf", "SkipNow", "Skip", "Skipf":
+ default:
+ continue
+ }
+ j.Errorf(gostmt, "the goroutine calls T.%s, which must be called in the same goroutine as the test", name)
+ }
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckCyclicFinalizer(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ node := c.funcDescs.CallGraph.CreateNode(ssafn)
+ for _, edge := range node.Out {
+ if edge.Callee.Func.RelString(nil) != "runtime.SetFinalizer" {
+ continue
+ }
+ arg0 := edge.Site.Common().Args[Arg("runtime.SetFinalizer.obj")]
+ if iface, ok := arg0.(*ssa.MakeInterface); ok {
+ arg0 = iface.X
+ }
+ unop, ok := arg0.(*ssa.UnOp)
+ if !ok {
+ continue
+ }
+ v, ok := unop.X.(*ssa.Alloc)
+ if !ok {
+ continue
+ }
+ arg1 := edge.Site.Common().Args[Arg("runtime.SetFinalizer.finalizer")]
+ if iface, ok := arg1.(*ssa.MakeInterface); ok {
+ arg1 = iface.X
+ }
+ mc, ok := arg1.(*ssa.MakeClosure)
+ if !ok {
+ continue
+ }
+ for _, b := range mc.Bindings {
+ if b == v {
+ pos := j.Program.DisplayPosition(mc.Fn.Pos())
+ j.Errorf(edge.Site, "the finalizer closes over the object, preventing the finalizer from ever running (at %s)", pos)
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckSliceOutOfBounds(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ ia, ok := ins.(*ssa.IndexAddr)
+ if !ok {
+ continue
+ }
+ if _, ok := ia.X.Type().Underlying().(*types.Slice); !ok {
+ continue
+ }
+ sr, ok1 := c.funcDescs.Get(ssafn).Ranges[ia.X].(vrp.SliceInterval)
+ idxr, ok2 := c.funcDescs.Get(ssafn).Ranges[ia.Index].(vrp.IntInterval)
+ if !ok1 || !ok2 || !sr.IsKnown() || !idxr.IsKnown() || sr.Length.Empty() || idxr.Empty() {
+ continue
+ }
+ if idxr.Lower.Cmp(sr.Length.Upper) >= 0 {
+ j.Errorf(ia, "index out of bounds")
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckDeferLock(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ instrs := FilterDebug(block.Instrs)
+ if len(instrs) < 2 {
+ continue
+ }
+ for i, ins := range instrs[:len(instrs)-1] {
+ call, ok := ins.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ if !IsCallTo(call.Common(), "(*sync.Mutex).Lock") && !IsCallTo(call.Common(), "(*sync.RWMutex).RLock") {
+ continue
+ }
+ nins, ok := instrs[i+1].(*ssa.Defer)
+ if !ok {
+ continue
+ }
+ if !IsCallTo(&nins.Call, "(*sync.Mutex).Lock") && !IsCallTo(&nins.Call, "(*sync.RWMutex).RLock") {
+ continue
+ }
+ if call.Common().Args[0] != nins.Call.Args[0] {
+ continue
+ }
+ name := shortCallName(call.Common())
+ alt := ""
+ switch name {
+ case "Lock":
+ alt = "Unlock"
+ case "RLock":
+ alt = "RUnlock"
+ }
+ j.Errorf(nins, "deferring %s right after having locked already; did you mean to defer %s?", name, alt)
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckNaNComparison(j *lint.Job) {
+ isNaN := func(v ssa.Value) bool {
+ call, ok := v.(*ssa.Call)
+ if !ok {
+ return false
+ }
+ return IsCallTo(call.Common(), "math.NaN")
+ }
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ ins, ok := ins.(*ssa.BinOp)
+ if !ok {
+ continue
+ }
+ if isNaN(ins.X) || isNaN(ins.Y) {
+ j.Errorf(ins, "no value is equal to NaN, not even NaN itself")
+ }
+ }
+ }
+ }
+}
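+
+// Illustrative sketch (added for exposition): comparing against
+// math.NaN() is always false; use math.IsNaN instead:
+//
+//    if x == math.NaN() { ... } // flagged: never true
+//    if math.IsNaN(x) { ... }   // correct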
+
+func (c *Checker) CheckInfiniteRecursion(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ node := c.funcDescs.CallGraph.CreateNode(ssafn)
+ for _, edge := range node.Out {
+ if edge.Callee != node {
+ continue
+ }
+ if _, ok := edge.Site.(*ssa.Go); ok {
+ // Recursively spawning goroutines doesn't consume
+ // stack space infinitely, so don't flag it.
+ continue
+ }
+
+ block := edge.Site.Block()
+ canReturn := false
+ for _, b := range ssafn.Blocks {
+ if block.Dominates(b) {
+ continue
+ }
+ if len(b.Instrs) == 0 {
+ continue
+ }
+ if _, ok := b.Instrs[len(b.Instrs)-1].(*ssa.Return); ok {
+ canReturn = true
+ break
+ }
+ }
+ if canReturn {
+ continue
+ }
+ j.Errorf(edge.Site, "infinite recursive call")
+ }
+ }
+}
+
+func objectName(obj types.Object) string {
+ if obj == nil {
+ return "<nil>"
+ }
+ var name string
+ if obj.Pkg() != nil && obj.Pkg().Scope().Lookup(obj.Name()) == obj {
+ s := obj.Pkg().Path()
+ if s != "" {
+ name += s + "."
+ }
+ }
+ name += obj.Name()
+ return name
+}
+
+func isName(j *lint.Job, expr ast.Expr, name string) bool {
+ var obj types.Object
+ switch expr := expr.(type) {
+ case *ast.Ident:
+ obj = ObjectOf(j, expr)
+ case *ast.SelectorExpr:
+ obj = ObjectOf(j, expr.Sel)
+ }
+ return objectName(obj) == name
+}
+
+func (c *Checker) CheckLeakyTimeTick(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ if IsInMain(j, ssafn) || IsInTest(j, ssafn) {
+ continue
+ }
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ call, ok := ins.(*ssa.Call)
+ if !ok || !IsCallTo(call.Common(), "time.Tick") {
+ continue
+ }
+ if c.funcDescs.Get(call.Parent()).Infinite {
+ continue
+ }
+ j.Errorf(call, "using time.Tick leaks the underlying ticker, consider using it only in endless functions, tests and the main package, and use time.NewTicker here")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckDoubleNegation(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ unary1, ok := node.(*ast.UnaryExpr)
+ if !ok {
+ return true
+ }
+ unary2, ok := unary1.X.(*ast.UnaryExpr)
+ if !ok {
+ return true
+ }
+ if unary1.Op != token.NOT || unary2.Op != token.NOT {
+ return true
+ }
+ j.Errorf(unary1, "negating a boolean twice has no effect; is this a typo?")
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
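+
+// Illustrative sketch (added for exposition): the double negation
+// CheckDoubleNegation flags, which is usually a typo for a single '!':
+//
+//    if !!ok { ... } // flagged: equivalent to 'if ok'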
+
+func hasSideEffects(node ast.Node) bool {
+ dynamic := false
+ ast.Inspect(node, func(node ast.Node) bool {
+ switch node := node.(type) {
+ case *ast.CallExpr:
+ dynamic = true
+ return false
+ case *ast.UnaryExpr:
+ if node.Op == token.ARROW {
+ dynamic = true
+ return false
+ }
+ }
+ return true
+ })
+ return dynamic
+}
+
+func (c *Checker) CheckRepeatedIfElse(j *lint.Job) {
+ seen := map[ast.Node]bool{}
+
+ var collectConds func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr)
+ collectConds = func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr) {
+ seen[ifstmt] = true
+ if ifstmt.Init != nil {
+ inits = append(inits, ifstmt.Init)
+ }
+ conds = append(conds, ifstmt.Cond)
+ if elsestmt, ok := ifstmt.Else.(*ast.IfStmt); ok {
+ return collectConds(elsestmt, inits, conds)
+ }
+ return inits, conds
+ }
+ fn := func(node ast.Node) bool {
+ ifstmt, ok := node.(*ast.IfStmt)
+ if !ok {
+ return true
+ }
+ if seen[ifstmt] {
+ return true
+ }
+ inits, conds := collectConds(ifstmt, nil, nil)
+ if len(inits) > 0 {
+ return true
+ }
+ for _, cond := range conds {
+ if hasSideEffects(cond) {
+ return true
+ }
+ }
+ counts := map[string]int{}
+ for _, cond := range conds {
+ s := Render(j, cond)
+ counts[s]++
+ if counts[s] == 2 {
+ j.Errorf(cond, "this condition occurs multiple times in this if/else if chain")
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckSillyBitwiseOps(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ ins, ok := ins.(*ssa.BinOp)
+ if !ok {
+ continue
+ }
+
+ if c, ok := ins.Y.(*ssa.Const); !ok || c.Value == nil || c.Value.Kind() != constant.Int || c.Uint64() != 0 {
+ continue
+ }
+ switch ins.Op {
+ case token.AND, token.OR, token.XOR:
+ default:
+ // we do not flag shifts because too often, x<<0 is part
+ // of a pattern, x<<0, x<<8, x<<16, ...
+ continue
+ }
+ path, _ := astutil.PathEnclosingInterval(j.File(ins), ins.Pos(), ins.Pos())
+ if len(path) == 0 {
+ continue
+ }
+ if node, ok := path[0].(*ast.BinaryExpr); !ok || !IsZero(node.Y) {
+ continue
+ }
+
+ switch ins.Op {
+ case token.AND:
+ j.Errorf(ins, "x & 0 always equals 0")
+ case token.OR, token.XOR:
+ j.Errorf(ins, "x %s 0 always equals x", ins.Op)
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckNonOctalFileMode(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ sig, ok := TypeOf(j, call.Fun).(*types.Signature)
+ if !ok {
+ return true
+ }
+ n := sig.Params().Len()
+ var args []int
+ for i := 0; i < n; i++ {
+ typ := sig.Params().At(i).Type()
+ if IsType(typ, "os.FileMode") {
+ args = append(args, i)
+ }
+ }
+ for _, i := range args {
+ lit, ok := call.Args[i].(*ast.BasicLit)
+ if !ok {
+ continue
+ }
+ if len(lit.Value) == 3 &&
+ lit.Value[0] != '0' &&
+ lit.Value[0] >= '0' && lit.Value[0] <= '7' &&
+ lit.Value[1] >= '0' && lit.Value[1] <= '7' &&
+ lit.Value[2] >= '0' && lit.Value[2] <= '7' {
+
+ v, err := strconv.ParseInt(lit.Value, 10, 64)
+ if err != nil {
+ continue
+ }
+ j.Errorf(call.Args[i], "file mode '%s' evaluates to %#o; did you mean '0%s'?", lit.Value, v, lit.Value)
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
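+
+// Illustrative sketch (added for exposition): a decimal literal passed
+// where an octal os.FileMode was almost certainly intended:
+//
+//    os.Chmod(path, 644)  // flagged: decimal 644 == 0o1204
+//    os.Chmod(path, 0644) // correct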
+
+func (c *Checker) CheckPureFunctions(j *lint.Job) {
+fnLoop:
+ for _, ssafn := range j.Program.InitialFunctions {
+ if IsInTest(j, ssafn) {
+ params := ssafn.Signature.Params()
+ for i := 0; i < params.Len(); i++ {
+ param := params.At(i)
+ if IsType(param.Type(), "*testing.B") {
+ // Ignore discarded pure functions in code related
+ // to benchmarks. Instead of matching BenchmarkFoo
+ // functions, we match any function accepting a
+ // *testing.B. Benchmarks sometimes call generic
+ // functions for doing the actual work, and
+ // checking for the parameter is a lot easier and
+ // faster than analyzing call trees.
+ continue fnLoop
+ }
+ }
+ }
+
+ for _, b := range ssafn.Blocks {
+ for _, ins := range b.Instrs {
+ ins, ok := ins.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ refs := ins.Referrers()
+ if refs == nil || len(FilterDebug(*refs)) > 0 {
+ continue
+ }
+ callee := ins.Common().StaticCallee()
+ if callee == nil {
+ continue
+ }
+ if c.funcDescs.Get(callee).Pure && !c.funcDescs.Get(callee).Stub {
+ j.Errorf(ins, "%s is a pure function but its return value is ignored", callee.Name())
+ continue
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) isDeprecated(j *lint.Job, ident *ast.Ident) (bool, string) {
+ obj := ObjectOf(j, ident)
+ if obj.Pkg() == nil {
+ return false, ""
+ }
+ alt := c.deprecatedObjs[obj]
+ return alt != "", alt
+}
+
+func (c *Checker) CheckDeprecated(j *lint.Job) {
+ // Selectors can appear outside of function literals, e.g. when
+ // declaring package level variables.
+
+ var ssafn *ssa.Function
+ stack := 0
+ fn := func(node ast.Node) bool {
+ if node == nil {
+ stack--
+ } else {
+ stack++
+ }
+ if stack == 1 {
+ ssafn = nil
+ }
+ if fn, ok := node.(*ast.FuncDecl); ok {
+ ssafn = j.Program.SSA.FuncValue(ObjectOf(j, fn.Name).(*types.Func))
+ }
+ sel, ok := node.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+
+ obj := ObjectOf(j, sel.Sel)
+ if obj.Pkg() == nil {
+ return true
+ }
+ nodePkg := j.NodePackage(node).Types
+ if nodePkg == obj.Pkg() || obj.Pkg().Path()+"_test" == nodePkg.Path() {
+ // Don't flag stuff in our own package
+ return true
+ }
+ if ok, alt := c.isDeprecated(j, sel.Sel); ok {
+ // Look for the first available alternative, not the first
+ // version something was deprecated in. If a function was
+ // deprecated in Go 1.6 but an alternative has been available
+ // since 1.0, and we're targeting 1.2, it still makes sense
+ // to use the alternative from 1.0, to be future-proof.
+ minVersion := deprecated.Stdlib[SelectorName(j, sel)].AlternativeAvailableSince
+ if !IsGoVersion(j, minVersion) {
+ return true
+ }
+
+ if ssafn != nil {
+ if _, ok := c.deprecatedObjs[ssafn.Object()]; ok {
+ // functions that are deprecated may use deprecated
+ // symbols
+ return true
+ }
+ }
+ j.Errorf(sel, "%s is deprecated: %s", Render(j, sel), alt)
+ return true
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) callChecker(rules map[string]CallCheck) func(j *lint.Job) {
+ return func(j *lint.Job) {
+ c.checkCalls(j, rules)
+ }
+}
+
+func (c *Checker) checkCalls(j *lint.Job, rules map[string]CallCheck) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ node := c.funcDescs.CallGraph.CreateNode(ssafn)
+ for _, edge := range node.Out {
+ callee := edge.Callee.Func
+ obj, ok := callee.Object().(*types.Func)
+ if !ok {
+ continue
+ }
+
+ r, ok := rules[obj.FullName()]
+ if !ok {
+ continue
+ }
+ var args []*Argument
+ ssaargs := edge.Site.Common().Args
+ if callee.Signature.Recv() != nil {
+ ssaargs = ssaargs[1:]
+ }
+ for _, arg := range ssaargs {
+ if iarg, ok := arg.(*ssa.MakeInterface); ok {
+ arg = iarg.X
+ }
+ vr := c.funcDescs.Get(edge.Site.Parent()).Ranges[arg]
+ args = append(args, &Argument{Value: Value{arg, vr}})
+ }
+ call := &Call{
+ Job: j,
+ Instr: edge.Site,
+ Args: args,
+ Checker: c,
+ Parent: edge.Site.Parent(),
+ }
+ r(call)
+ for idx, arg := range call.Args {
+ _ = idx
+ for _, e := range arg.invalids {
+ // path, _ := astutil.PathEnclosingInterval(f.File, edge.Site.Pos(), edge.Site.Pos())
+ // if len(path) < 2 {
+ // continue
+ // }
+ // astcall, ok := path[0].(*ast.CallExpr)
+ // if !ok {
+ // continue
+ // }
+ // j.Errorf(astcall.Args[idx], "%s", e)
+
+ j.Errorf(edge.Site, "%s", e)
+ }
+ }
+ for _, e := range call.invalids {
+ j.Errorf(call.Instr.Common(), "%s", e)
+ }
+ }
+ }
+}
+
+func shortCallName(call *ssa.CallCommon) string {
+ if call.IsInvoke() {
+ return ""
+ }
+ switch v := call.Value.(type) {
+ case *ssa.Function:
+ fn, ok := v.Object().(*types.Func)
+ if !ok {
+ return ""
+ }
+ return fn.Name()
+ case *ssa.Builtin:
+ return v.Name()
+ }
+ return ""
+}
+
+func (c *Checker) CheckWriterBufferModified(j *lint.Job) {
+ // TODO(dh): this might be a good candidate for taint analysis.
+ // Taint the argument as MUST_NOT_MODIFY, then propagate that
+ // through functions like bytes.Split
+
+ for _, ssafn := range j.Program.InitialFunctions {
+ sig := ssafn.Signature
+ if ssafn.Name() != "Write" || sig.Recv() == nil || sig.Params().Len() != 1 || sig.Results().Len() != 2 {
+ continue
+ }
+ tArg, ok := sig.Params().At(0).Type().(*types.Slice)
+ if !ok {
+ continue
+ }
+ if basic, ok := tArg.Elem().(*types.Basic); !ok || basic.Kind() != types.Byte {
+ continue
+ }
+ if basic, ok := sig.Results().At(0).Type().(*types.Basic); !ok || basic.Kind() != types.Int {
+ continue
+ }
+ if named, ok := sig.Results().At(1).Type().(*types.Named); !ok || !IsType(named, "error") {
+ continue
+ }
+
+ for _, block := range ssafn.Blocks {
+ for _, ins := range block.Instrs {
+ switch ins := ins.(type) {
+ case *ssa.Store:
+ addr, ok := ins.Addr.(*ssa.IndexAddr)
+ if !ok {
+ continue
+ }
+ if addr.X != ssafn.Params[1] {
+ continue
+ }
+ j.Errorf(ins, "io.Writer.Write must not modify the provided buffer, not even temporarily")
+ case *ssa.Call:
+ if !IsCallTo(ins.Common(), "append") {
+ continue
+ }
+ if ins.Common().Args[0] != ssafn.Params[1] {
+ continue
+ }
+ j.Errorf(ins, "io.Writer.Write must not modify the provided buffer, not even temporarily")
+ }
+ }
+ }
+ }
+}
+
+func loopedRegexp(name string) CallCheck {
+ return func(call *Call) {
+ if len(extractConsts(call.Args[0].Value.Value)) == 0 {
+ return
+ }
+ if !call.Checker.isInLoop(call.Instr.Block()) {
+ return
+ }
+ call.Invalid(fmt.Sprintf("calling %s in a loop has poor performance, consider using regexp.Compile", name))
+ }
+}
+
+func (c *Checker) CheckEmptyBranch(j *lint.Job) {
+ for _, ssafn := range j.Program.InitialFunctions {
+ if ssafn.Syntax() == nil {
+ continue
+ }
+ if IsGenerated(j.File(ssafn.Syntax())) {
+ continue
+ }
+ if IsExample(ssafn) {
+ continue
+ }
+ fn := func(node ast.Node) bool {
+ ifstmt, ok := node.(*ast.IfStmt)
+ if !ok {
+ return true
+ }
+ if ifstmt.Else != nil {
+ b, ok := ifstmt.Else.(*ast.BlockStmt)
+ if !ok || len(b.List) != 0 {
+ return true
+ }
+ j.Errorf(ifstmt.Else, "empty branch")
+ }
+ if len(ifstmt.Body.List) != 0 {
+ return true
+ }
+ j.Errorf(ifstmt, "empty branch")
+ return true
+ }
+ Inspect(ssafn.Syntax(), fn)
+ }
+}
+
+func (c *Checker) CheckMapBytesKey(j *lint.Job) {
+ for _, fn := range j.Program.InitialFunctions {
+ for _, b := range fn.Blocks {
+ insLoop:
+ for _, ins := range b.Instrs {
+ // find []byte -> string conversions
+ conv, ok := ins.(*ssa.Convert)
+ if !ok || conv.Type() != types.Universe.Lookup("string").Type() {
+ continue
+ }
+ if s, ok := conv.X.Type().(*types.Slice); !ok || s.Elem() != types.Universe.Lookup("byte").Type() {
+ continue
+ }
+ refs := conv.Referrers()
+ // need at least two (DebugRef) references: the
+ // conversion and the *ast.Ident
+ if refs == nil || len(*refs) < 2 {
+ continue
+ }
+ ident := false
+ // skip first reference, that's the conversion itself
+ for _, ref := range (*refs)[1:] {
+ switch ref := ref.(type) {
+ case *ssa.DebugRef:
+ if _, ok := ref.Expr.(*ast.Ident); !ok {
+ // the string seems to be used somewhere
+ // unexpected; the default branch should
+ // catch this already, but be safe
+ continue insLoop
+ } else {
+ ident = true
+ }
+ case *ssa.Lookup:
+ default:
+ // the string is used somewhere other than a
+ // map lookup
+ continue insLoop
+ }
+ }
+
+ // the result of the conversion wasn't assigned to an
+ // identifier
+ if !ident {
+ continue
+ }
+ j.Errorf(conv, "m[string(key)] would be more efficient than k := string(key); m[k]")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckRangeStringRunes(j *lint.Job) {
+ sharedcheck.CheckRangeStringRunes(j)
+}
+
+func (c *Checker) CheckSelfAssignment(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ assign, ok := node.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if assign.Tok != token.ASSIGN || len(assign.Lhs) != len(assign.Rhs) {
+ return true
+ }
+ for i, stmt := range assign.Lhs {
+ rlh := Render(j, stmt)
+ rrh := Render(j, assign.Rhs[i])
+ if rlh == rrh {
+ j.Errorf(assign, "self-assignment of %s to %s", rrh, rlh)
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func buildTagsIdentical(s1, s2 []string) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ s1s := make([]string, len(s1))
+ copy(s1s, s1)
+ sort.Strings(s1s)
+ s2s := make([]string, len(s2))
+ copy(s2s, s2)
+ sort.Strings(s2s)
+ for i, s := range s1s {
+ if s != s2s[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *Checker) CheckDuplicateBuildConstraints(job *lint.Job) {
+ for _, f := range job.Program.Files {
+ constraints := buildTags(f)
+ for i, constraint1 := range constraints {
+ for j, constraint2 := range constraints {
+ if i >= j {
+ continue
+ }
+ if buildTagsIdentical(constraint1, constraint2) {
+ job.Errorf(f, "identical build constraints %q and %q",
+ strings.Join(constraint1, " "),
+ strings.Join(constraint2, " "))
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckSillyRegexp(j *lint.Job) {
+ // We could use the rule checking engine for this, but the
+ // arguments aren't really invalid.
+ for _, fn := range j.Program.InitialFunctions {
+ for _, b := range fn.Blocks {
+ for _, ins := range b.Instrs {
+ call, ok := ins.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ switch CallName(call.Common()) {
+ case "regexp.MustCompile", "regexp.Compile", "regexp.Match", "regexp.MatchReader", "regexp.MatchString":
+ default:
+ continue
+ }
+ c, ok := call.Common().Args[0].(*ssa.Const)
+ if !ok {
+ continue
+ }
+ s := constant.StringVal(c.Value)
+ re, err := syntax.Parse(s, 0)
+ if err != nil {
+ continue
+ }
+ if re.Op != syntax.OpLiteral && re.Op != syntax.OpEmptyMatch {
+ continue
+ }
+ j.Errorf(call, "regular expression does not contain any meta characters")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckMissingEnumTypesInDeclaration(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ decl, ok := node.(*ast.GenDecl)
+ if !ok {
+ return true
+ }
+ if !decl.Lparen.IsValid() {
+ return true
+ }
+ if decl.Tok != token.CONST {
+ return true
+ }
+
+ groups := GroupSpecs(j, decl.Specs)
+ groupLoop:
+ for _, group := range groups {
+ if len(group) < 2 {
+ continue
+ }
+ if group[0].(*ast.ValueSpec).Type == nil {
+ // first constant doesn't have a type
+ continue groupLoop
+ }
+ for i, spec := range group {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Names) != 1 || len(spec.Values) != 1 {
+ continue groupLoop
+ }
+ switch v := spec.Values[0].(type) {
+ case *ast.BasicLit:
+ case *ast.UnaryExpr:
+ if _, ok := v.X.(*ast.BasicLit); !ok {
+ continue groupLoop
+ }
+ default:
+ // if it's not a literal it might be typed, such as
+ // time.Microsecond = 1000 * Nanosecond
+ continue groupLoop
+ }
+ if i == 0 {
+ continue
+ }
+ if spec.Type != nil {
+ continue groupLoop
+ }
+ }
+ j.Errorf(group[0], "only the first constant in this group has an explicit type")
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckTimerResetReturnValue(j *lint.Job) {
+ for _, fn := range j.Program.InitialFunctions {
+ for _, block := range fn.Blocks {
+ for _, ins := range block.Instrs {
+ call, ok := ins.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ if !IsCallTo(call.Common(), "(*time.Timer).Reset") {
+ continue
+ }
+ refs := call.Referrers()
+ if refs == nil {
+ continue
+ }
+ for _, ref := range FilterDebug(*refs) {
+ ifstmt, ok := ref.(*ssa.If)
+ if !ok {
+ continue
+ }
+
+ found := false
+ for _, succ := range ifstmt.Block().Succs {
+ if len(succ.Preds) != 1 {
+ // Merge point, not a branch in the
+ // syntactical sense.
+
+ // FIXME(dh): this is broken for if
+ // statements a la "if x || y"
+ continue
+ }
+ ssautil.Walk(succ, func(b *ssa.BasicBlock) bool {
+ if !succ.Dominates(b) {
+ // We've reached the end of the branch
+ return false
+ }
+ for _, ins := range b.Instrs {
+ // TODO(dh): we should check that
+ // we're receiving from the channel of
+ // a time.Timer to further reduce
+ // false positives. Not a key
+ // priority, considering the rarity of
+ // Reset and the tiny likelihood of a
+ // false positive
+ if ins, ok := ins.(*ssa.UnOp); ok && ins.Op == token.ARROW && IsType(ins.X.Type(), "<-chan time.Time") {
+ found = true
+ return false
+ }
+ }
+ return true
+ })
+ }
+
+ if found {
+ j.Errorf(call, "it is not possible to use Reset's return value correctly, as there is a race condition between draining the channel and the new timer expiring")
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/honnef.co/go/tools/staticcheck/rules.go b/vendor/honnef.co/go/tools/staticcheck/rules.go
new file mode 100644
index 000000000..d6af573c2
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/rules.go
@@ -0,0 +1,322 @@
+package staticcheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/types"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+ "honnef.co/go/tools/ssa"
+ "honnef.co/go/tools/staticcheck/vrp"
+)
+
+const (
+ MsgInvalidHostPort = "invalid port or service name in host:port pair"
+ MsgInvalidUTF8 = "argument is not a valid UTF-8 encoded string"
+ MsgNonUniqueCutset = "cutset contains duplicate characters"
+)
+
+type Call struct {
+ Job *lint.Job
+ Instr ssa.CallInstruction
+ Args []*Argument
+
+ Checker *Checker
+ Parent *ssa.Function
+
+ invalids []string
+}
+
+func (c *Call) Invalid(msg string) {
+ c.invalids = append(c.invalids, msg)
+}
+
+type Argument struct {
+ Value Value
+ invalids []string
+}
+
+func (arg *Argument) Invalid(msg string) {
+ arg.invalids = append(arg.invalids, msg)
+}
+
+type Value struct {
+ Value ssa.Value
+ Range vrp.Range
+}
+
+type CallCheck func(call *Call)
+
+func extractConsts(v ssa.Value) []*ssa.Const {
+ switch v := v.(type) {
+ case *ssa.Const:
+ return []*ssa.Const{v}
+ case *ssa.MakeInterface:
+ return extractConsts(v.X)
+ default:
+ return nil
+ }
+}
+
+func ValidateRegexp(v Value) error {
+ for _, c := range extractConsts(v.Value) {
+ if c.Value == nil {
+ continue
+ }
+ if c.Value.Kind() != constant.String {
+ continue
+ }
+ s := constant.StringVal(c.Value)
+ if _, err := regexp.Compile(s); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ValidateTimeLayout(v Value) error {
+ for _, c := range extractConsts(v.Value) {
+ if c.Value == nil {
+ continue
+ }
+ if c.Value.Kind() != constant.String {
+ continue
+ }
+ s := constant.StringVal(c.Value)
+ s = strings.Replace(s, "_", " ", -1)
+ s = strings.Replace(s, "Z", "-", -1)
+ _, err := time.Parse(s, s)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ValidateURL(v Value) error {
+ for _, c := range extractConsts(v.Value) {
+ if c.Value == nil {
+ continue
+ }
+ if c.Value.Kind() != constant.String {
+ continue
+ }
+ s := constant.StringVal(c.Value)
+ _, err := url.Parse(s)
+ if err != nil {
+ return fmt.Errorf("%q is not a valid URL: %s", s, err)
+ }
+ }
+ return nil
+}
+
+func IntValue(v Value, z vrp.Z) bool {
+ r, ok := v.Range.(vrp.IntInterval)
+ if !ok || !r.IsKnown() {
+ return false
+ }
+ if r.Lower != r.Upper {
+ return false
+ }
+ if r.Lower.Cmp(z) == 0 {
+ return true
+ }
+ return false
+}
+
+func InvalidUTF8(v Value) bool {
+ for _, c := range extractConsts(v.Value) {
+ if c.Value == nil {
+ continue
+ }
+ if c.Value.Kind() != constant.String {
+ continue
+ }
+ s := constant.StringVal(c.Value)
+ if !utf8.ValidString(s) {
+ return true
+ }
+ }
+ return false
+}
+
+func UnbufferedChannel(v Value) bool {
+ r, ok := v.Range.(vrp.ChannelInterval)
+ if !ok || !r.IsKnown() {
+ return false
+ }
+ if r.Size.Lower.Cmp(vrp.NewZ(0)) == 0 &&
+ r.Size.Upper.Cmp(vrp.NewZ(0)) == 0 {
+ return true
+ }
+ return false
+}
+
+func Pointer(v Value) bool {
+ switch v.Value.Type().Underlying().(type) {
+ case *types.Pointer, *types.Interface:
+ return true
+ }
+ return false
+}
+
+func ConvertedFromInt(v Value) bool {
+ conv, ok := v.Value.(*ssa.Convert)
+ if !ok {
+ return false
+ }
+ b, ok := conv.X.Type().Underlying().(*types.Basic)
+ if !ok {
+ return false
+ }
+ if (b.Info() & types.IsInteger) == 0 {
+ return false
+ }
+ return true
+}
+
+func validEncodingBinaryType(j *lint.Job, typ types.Type) bool {
+ typ = typ.Underlying()
+ switch typ := typ.(type) {
+ case *types.Basic:
+ switch typ.Kind() {
+ case types.Uint8, types.Uint16, types.Uint32, types.Uint64,
+ types.Int8, types.Int16, types.Int32, types.Int64,
+ types.Float32, types.Float64, types.Complex64, types.Complex128, types.Invalid:
+ return true
+ case types.Bool:
+ return IsGoVersion(j, 8)
+ }
+ return false
+ case *types.Struct:
+ n := typ.NumFields()
+ for i := 0; i < n; i++ {
+ if !validEncodingBinaryType(j, typ.Field(i).Type()) {
+ return false
+ }
+ }
+ return true
+ case *types.Array:
+ return validEncodingBinaryType(j, typ.Elem())
+ case *types.Interface:
+ // we can't determine if it's a valid type or not
+ return true
+ }
+ return false
+}
+
+func CanBinaryMarshal(j *lint.Job, v Value) bool {
+ typ := v.Value.Type().Underlying()
+ if ttyp, ok := typ.(*types.Pointer); ok {
+ typ = ttyp.Elem().Underlying()
+ }
+ if ttyp, ok := typ.(interface {
+ Elem() types.Type
+ }); ok {
+ if _, ok := ttyp.(*types.Pointer); !ok {
+ typ = ttyp.Elem()
+ }
+ }
+
+ return validEncodingBinaryType(j, typ)
+}
+
+func RepeatZeroTimes(name string, arg int) CallCheck {
+ return func(call *Call) {
+ arg := call.Args[arg]
+ if IntValue(arg.Value, vrp.NewZ(0)) {
+ arg.Invalid(fmt.Sprintf("calling %s with n == 0 will return no results, did you mean -1?", name))
+ }
+ }
+}
+
+func validateServiceName(s string) bool {
+ if len(s) < 1 || len(s) > 15 {
+ return false
+ }
+ if s[0] == '-' || s[len(s)-1] == '-' {
+ return false
+ }
+ if strings.Contains(s, "--") {
+ return false
+ }
+ hasLetter := false
+ for _, r := range s {
+ if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') {
+ hasLetter = true
+ continue
+ }
+ if r >= '0' && r <= '9' {
+ continue
+ }
+ return false
+ }
+ return hasLetter
+}
+
+func validatePort(s string) bool {
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return validateServiceName(s)
+ }
+ return n >= 0 && n <= 65535
+}
+
+func ValidHostPort(v Value) bool {
+ for _, k := range extractConsts(v.Value) {
+ if k.Value == nil {
+ continue
+ }
+ if k.Value.Kind() != constant.String {
+ continue
+ }
+ s := constant.StringVal(k.Value)
+ _, port, err := net.SplitHostPort(s)
+ if err != nil {
+ return false
+ }
+ // TODO(dh): check hostname
+ if !validatePort(port) {
+ return false
+ }
+ }
+ return true
+}
+
+// ConvertedFrom reports whether value v was converted from type typ.
+func ConvertedFrom(v Value, typ string) bool {
+ change, ok := v.Value.(*ssa.ChangeType)
+ return ok && IsType(change.X.Type(), typ)
+}
+
+func UniqueStringCutset(v Value) bool {
+ for _, c := range extractConsts(v.Value) {
+ if c.Value == nil {
+ continue
+ }
+ if c.Value.Kind() != constant.String {
+ continue
+ }
+ s := constant.StringVal(c.Value)
+ rs := runeSlice(s)
+ if len(rs) < 2 {
+ continue
+ }
+ sort.Sort(rs)
+ for i, r := range rs[1:] {
+ if rs[i] == r {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go b/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go
new file mode 100644
index 000000000..0ef73787b
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go
@@ -0,0 +1,73 @@
+package vrp
+
+import (
+ "fmt"
+
+ "honnef.co/go/tools/ssa"
+)
+
+type ChannelInterval struct {
+ Size IntInterval
+}
+
+func (c ChannelInterval) Union(other Range) Range {
+ i, ok := other.(ChannelInterval)
+ if !ok {
+ i = ChannelInterval{EmptyIntInterval}
+ }
+ if c.Size.Empty() || !c.Size.IsKnown() {
+ return i
+ }
+ if i.Size.Empty() || !i.Size.IsKnown() {
+ return c
+ }
+ return ChannelInterval{
+ Size: c.Size.Union(i.Size).(IntInterval),
+ }
+}
+
+func (c ChannelInterval) String() string {
+ return c.Size.String()
+}
+
+func (c ChannelInterval) IsKnown() bool {
+ return c.Size.IsKnown()
+}
+
+type MakeChannelConstraint struct {
+ aConstraint
+ Buffer ssa.Value
+}
+type ChannelChangeTypeConstraint struct {
+ aConstraint
+ X ssa.Value
+}
+
+func NewMakeChannelConstraint(buffer, y ssa.Value) Constraint {
+ return &MakeChannelConstraint{NewConstraint(y), buffer}
+}
+func NewChannelChangeTypeConstraint(x, y ssa.Value) Constraint {
+ return &ChannelChangeTypeConstraint{NewConstraint(y), x}
+}
+
+func (c *MakeChannelConstraint) Operands() []ssa.Value { return []ssa.Value{c.Buffer} }
+func (c *ChannelChangeTypeConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
+
+func (c *MakeChannelConstraint) String() string {
+ return fmt.Sprintf("%s = make(chan, %s)", c.Y().Name(), c.Buffer.Name())
+}
+func (c *ChannelChangeTypeConstraint) String() string {
+ return fmt.Sprintf("%s = changetype(%s)", c.Y().Name(), c.X.Name())
+}
+
+func (c *MakeChannelConstraint) Eval(g *Graph) Range {
+ i, ok := g.Range(c.Buffer).(IntInterval)
+ if !ok {
+ return ChannelInterval{NewIntInterval(NewZ(0), PInfinity)}
+ }
+ if i.Lower.Sign() == -1 {
+ i.Lower = NewZ(0)
+ }
+ return ChannelInterval{i}
+}
+func (c *ChannelChangeTypeConstraint) Eval(g *Graph) Range { return g.Range(c.X) }
diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/int.go b/vendor/honnef.co/go/tools/staticcheck/vrp/int.go
new file mode 100644
index 000000000..926bb7af3
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/vrp/int.go
@@ -0,0 +1,476 @@
+package vrp
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "math/big"
+
+ "honnef.co/go/tools/ssa"
+)
+
+type Zs []Z
+
+func (zs Zs) Len() int {
+ return len(zs)
+}
+
+func (zs Zs) Less(i int, j int) bool {
+ return zs[i].Cmp(zs[j]) == -1
+}
+
+func (zs Zs) Swap(i int, j int) {
+ zs[i], zs[j] = zs[j], zs[i]
+}
+
+type Z struct {
+ infinity int8
+ integer *big.Int
+}
+
+func NewZ(n int64) Z {
+ return NewBigZ(big.NewInt(n))
+}
+
+func NewBigZ(n *big.Int) Z {
+ return Z{integer: n}
+}
+
+func (z1 Z) Infinite() bool {
+ return z1.infinity != 0
+}
+
+func (z1 Z) Add(z2 Z) Z {
+ if z2.Sign() == -1 {
+ return z1.Sub(z2.Negate())
+ }
+ if z1 == NInfinity {
+ return NInfinity
+ }
+ if z1 == PInfinity {
+ return PInfinity
+ }
+ if z2 == PInfinity {
+ return PInfinity
+ }
+
+ if !z1.Infinite() && !z2.Infinite() {
+ n := &big.Int{}
+ n.Add(z1.integer, z2.integer)
+ return NewBigZ(n)
+ }
+
+ panic(fmt.Sprintf("%s + %s is not defined", z1, z2))
+}
+
+func (z1 Z) Sub(z2 Z) Z {
+ if z2.Sign() == -1 {
+ return z1.Add(z2.Negate())
+ }
+ if !z1.Infinite() && !z2.Infinite() {
+ n := &big.Int{}
+ n.Sub(z1.integer, z2.integer)
+ return NewBigZ(n)
+ }
+
+ if z1 != PInfinity && z2 == PInfinity {
+ return NInfinity
+ }
+ if z1.Infinite() && !z2.Infinite() {
+ return Z{infinity: z1.infinity}
+ }
+ if z1 == PInfinity && z2 == PInfinity {
+ return PInfinity
+ }
+ panic(fmt.Sprintf("%s - %s is not defined", z1, z2))
+}
+
+func (z1 Z) Mul(z2 Z) Z {
+ if (z1.integer != nil && z1.integer.Sign() == 0) ||
+ (z2.integer != nil && z2.integer.Sign() == 0) {
+ return NewBigZ(&big.Int{})
+ }
+
+ if z1.infinity != 0 || z2.infinity != 0 {
+ return Z{infinity: int8(z1.Sign() * z2.Sign())}
+ }
+
+ n := &big.Int{}
+ n.Mul(z1.integer, z2.integer)
+ return NewBigZ(n)
+}
+
+func (z1 Z) Negate() Z {
+ if z1.infinity == 1 {
+ return NInfinity
+ }
+ if z1.infinity == -1 {
+ return PInfinity
+ }
+ n := &big.Int{}
+ n.Neg(z1.integer)
+ return NewBigZ(n)
+}
+
+func (z1 Z) Sign() int {
+ if z1.infinity != 0 {
+ return int(z1.infinity)
+ }
+ return z1.integer.Sign()
+}
+
+func (z1 Z) String() string {
+ if z1 == NInfinity {
+ return "-∞"
+ }
+ if z1 == PInfinity {
+ return "∞"
+ }
+ return fmt.Sprintf("%d", z1.integer)
+}
+
+func (z1 Z) Cmp(z2 Z) int {
+ if z1.infinity == z2.infinity && z1.infinity != 0 {
+ return 0
+ }
+ if z1 == PInfinity {
+ return 1
+ }
+ if z1 == NInfinity {
+ return -1
+ }
+ if z2 == NInfinity {
+ return 1
+ }
+ if z2 == PInfinity {
+ return -1
+ }
+ return z1.integer.Cmp(z2.integer)
+}
+
+func MaxZ(zs ...Z) Z {
+ if len(zs) == 0 {
+ panic("Max called with no arguments")
+ }
+ if len(zs) == 1 {
+ return zs[0]
+ }
+ ret := zs[0]
+ for _, z := range zs[1:] {
+ if z.Cmp(ret) == 1 {
+ ret = z
+ }
+ }
+ return ret
+}
+
+func MinZ(zs ...Z) Z {
+ if len(zs) == 0 {
+ panic("Min called with no arguments")
+ }
+ if len(zs) == 1 {
+ return zs[0]
+ }
+ ret := zs[0]
+ for _, z := range zs[1:] {
+ if z.Cmp(ret) == -1 {
+ ret = z
+ }
+ }
+ return ret
+}
+
+var NInfinity = Z{infinity: -1}
+var PInfinity = Z{infinity: 1}
+var EmptyIntInterval = IntInterval{true, PInfinity, NInfinity}
+
+func InfinityFor(v ssa.Value) IntInterval {
+ if b, ok := v.Type().Underlying().(*types.Basic); ok {
+ if (b.Info() & types.IsUnsigned) != 0 {
+ return NewIntInterval(NewZ(0), PInfinity)
+ }
+ }
+ return NewIntInterval(NInfinity, PInfinity)
+}
+
+type IntInterval struct {
+ known bool
+ Lower Z
+ Upper Z
+}
+
+func NewIntInterval(l, u Z) IntInterval {
+ if u.Cmp(l) == -1 {
+ return EmptyIntInterval
+ }
+ return IntInterval{known: true, Lower: l, Upper: u}
+}
+
+func (i IntInterval) IsKnown() bool {
+ return i.known
+}
+
+func (i IntInterval) Empty() bool {
+ return i.Lower == PInfinity && i.Upper == NInfinity
+}
+
+func (i IntInterval) IsMaxRange() bool {
+ return i.Lower == NInfinity && i.Upper == PInfinity
+}
+
+func (i1 IntInterval) Intersection(i2 IntInterval) IntInterval {
+ if !i1.IsKnown() {
+ return i2
+ }
+ if !i2.IsKnown() {
+ return i1
+ }
+ if i1.Empty() || i2.Empty() {
+ return EmptyIntInterval
+ }
+ i3 := NewIntInterval(MaxZ(i1.Lower, i2.Lower), MinZ(i1.Upper, i2.Upper))
+ if i3.Lower.Cmp(i3.Upper) == 1 {
+ return EmptyIntInterval
+ }
+ return i3
+}
+
+func (i1 IntInterval) Union(other Range) Range {
+ i2, ok := other.(IntInterval)
+ if !ok {
+ i2 = EmptyIntInterval
+ }
+ if i1.Empty() || !i1.IsKnown() {
+ return i2
+ }
+ if i2.Empty() || !i2.IsKnown() {
+ return i1
+ }
+ return NewIntInterval(MinZ(i1.Lower, i2.Lower), MaxZ(i1.Upper, i2.Upper))
+}
+
+func (i1 IntInterval) Add(i2 IntInterval) IntInterval {
+ if i1.Empty() || i2.Empty() {
+ return EmptyIntInterval
+ }
+ l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper
+ return NewIntInterval(l1.Add(l2), u1.Add(u2))
+}
+
+func (i1 IntInterval) Sub(i2 IntInterval) IntInterval {
+ if i1.Empty() || i2.Empty() {
+ return EmptyIntInterval
+ }
+ l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper
+ return NewIntInterval(l1.Sub(u2), u1.Sub(l2))
+}
+
+func (i1 IntInterval) Mul(i2 IntInterval) IntInterval {
+ if i1.Empty() || i2.Empty() {
+ return EmptyIntInterval
+ }
+ x1, x2 := i1.Lower, i1.Upper
+ y1, y2 := i2.Lower, i2.Upper
+ return NewIntInterval(
+ MinZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)),
+ MaxZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)),
+ )
+}
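+
+// Worked example (added for exposition): interval multiplication takes
+// the min and max over all four corner products, e.g.
+//
+//    [1, 2] * [-3, 4] = [min(-3, 4, -6, 8), max(-3, 4, -6, 8)] = [-6, 8]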
+
+func (i1 IntInterval) String() string {
+ if !i1.IsKnown() {
+ return "[⊥, ⊥]"
+ }
+ if i1.Empty() {
+ return "{}"
+ }
+ return fmt.Sprintf("[%s, %s]", i1.Lower, i1.Upper)
+}
+
+type IntArithmeticConstraint struct {
+ aConstraint
+ A ssa.Value
+ B ssa.Value
+ Op token.Token
+ Fn func(IntInterval, IntInterval) IntInterval
+}
+
+type IntAddConstraint struct{ *IntArithmeticConstraint }
+type IntSubConstraint struct{ *IntArithmeticConstraint }
+type IntMulConstraint struct{ *IntArithmeticConstraint }
+
+type IntConversionConstraint struct {
+ aConstraint
+ X ssa.Value
+}
+
+type IntIntersectionConstraint struct {
+ aConstraint
+ ranges Ranges
+ A ssa.Value
+ B ssa.Value
+ Op token.Token
+ I IntInterval
+ resolved bool
+}
+
+type IntIntervalConstraint struct {
+ aConstraint
+ I IntInterval
+}
+
+func NewIntArithmeticConstraint(a, b, y ssa.Value, op token.Token, fn func(IntInterval, IntInterval) IntInterval) *IntArithmeticConstraint {
+ return &IntArithmeticConstraint{NewConstraint(y), a, b, op, fn}
+}
+func NewIntAddConstraint(a, b, y ssa.Value) Constraint {
+ return &IntAddConstraint{NewIntArithmeticConstraint(a, b, y, token.ADD, IntInterval.Add)}
+}
+func NewIntSubConstraint(a, b, y ssa.Value) Constraint {
+ return &IntSubConstraint{NewIntArithmeticConstraint(a, b, y, token.SUB, IntInterval.Sub)}
+}
+func NewIntMulConstraint(a, b, y ssa.Value) Constraint {
+ return &IntMulConstraint{NewIntArithmeticConstraint(a, b, y, token.MUL, IntInterval.Mul)}
+}
+func NewIntConversionConstraint(x, y ssa.Value) Constraint {
+ return &IntConversionConstraint{NewConstraint(y), x}
+}
+func NewIntIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint {
+ return &IntIntersectionConstraint{
+ aConstraint: NewConstraint(y),
+ ranges: ranges,
+ A: a,
+ B: b,
+ Op: op,
+ }
+}
+func NewIntIntervalConstraint(i IntInterval, y ssa.Value) Constraint {
+ return &IntIntervalConstraint{NewConstraint(y), i}
+}
+
+func (c *IntArithmeticConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} }
+func (c *IntConversionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
+func (c *IntIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} }
+func (c *IntIntervalConstraint) Operands() []ssa.Value { return nil }
+
+func (c *IntArithmeticConstraint) String() string {
+ return fmt.Sprintf("%s = %s %s %s", c.Y().Name(), c.A.Name(), c.Op, c.B.Name())
+}
+func (c *IntConversionConstraint) String() string {
+ return fmt.Sprintf("%s = %s(%s)", c.Y().Name(), c.Y().Type(), c.X.Name())
+}
+func (c *IntIntersectionConstraint) String() string {
+ return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch)
+}
+func (c *IntIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) }
+
+func (c *IntArithmeticConstraint) Eval(g *Graph) Range {
+ i1, i2 := g.Range(c.A).(IntInterval), g.Range(c.B).(IntInterval)
+ if !i1.IsKnown() || !i2.IsKnown() {
+ return IntInterval{}
+ }
+ return c.Fn(i1, i2)
+}
+func (c *IntConversionConstraint) Eval(g *Graph) Range {
+ s := &types.StdSizes{
+ // XXX is it okay to assume the largest word size, or do we
+ // need to be platform specific?
+ WordSize: 8,
+ MaxAlign: 1,
+ }
+ fromI := g.Range(c.X).(IntInterval)
+ toI := g.Range(c.Y()).(IntInterval)
+ fromT := c.X.Type().Underlying().(*types.Basic)
+ toT := c.Y().Type().Underlying().(*types.Basic)
+ fromB := s.Sizeof(c.X.Type())
+ toB := s.Sizeof(c.Y().Type())
+
+ if !fromI.IsKnown() {
+ return toI
+ }
+ if !toI.IsKnown() {
+ return fromI
+ }
+
+ // uint<N> -> sint/uint<M>, M > N: [max(0, l1), min(2**N-1, u2)]
+ if (fromT.Info()&types.IsUnsigned != 0) &&
+ toB > fromB {
+
+ n := big.NewInt(1)
+ n.Lsh(n, uint(fromB*8))
+ n.Sub(n, big.NewInt(1))
+ return NewIntInterval(
+ MaxZ(NewZ(0), fromI.Lower),
+ MinZ(NewBigZ(n), toI.Upper),
+ )
+ }
+
+ // sint<N> -> sint<M>, M > N: [max(-∞, l1), min(2**N-1, u2)]
+ if (fromT.Info()&types.IsUnsigned == 0) &&
+ (toT.Info()&types.IsUnsigned == 0) &&
+ toB > fromB {
+
+ n := big.NewInt(1)
+ n.Lsh(n, uint(fromB*8))
+ n.Sub(n, big.NewInt(1))
+ return NewIntInterval(
+ MaxZ(NInfinity, fromI.Lower),
+ MinZ(NewBigZ(n), toI.Upper),
+ )
+ }
+
+ return fromI
+}
+func (c *IntIntersectionConstraint) Eval(g *Graph) Range {
+ xi := g.Range(c.A).(IntInterval)
+ if !xi.IsKnown() {
+ return c.I
+ }
+ return xi.Intersection(c.I)
+}
+func (c *IntIntervalConstraint) Eval(*Graph) Range { return c.I }
+
+func (c *IntIntersectionConstraint) Futures() []ssa.Value {
+ return []ssa.Value{c.B}
+}
+
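+// Resolve computes the interval implied by the comparison once the
+// range of operand B is known. For example, for x > b with b in
+// [0, 5], the GTR case yields x in [lower(b)+1, +∞] = [1, +∞].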
+func (c *IntIntersectionConstraint) Resolve() {
+ r, ok := c.ranges[c.B].(IntInterval)
+ if !ok {
+ c.I = InfinityFor(c.Y())
+ return
+ }
+
+ switch c.Op {
+ case token.EQL:
+ c.I = r
+ case token.GTR:
+ c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity)
+ case token.GEQ:
+ c.I = NewIntInterval(r.Lower, PInfinity)
+ case token.LSS:
+ // TODO(dh): do we need 0 instead of NInfinity for uints?
+ c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1)))
+ case token.LEQ:
+ c.I = NewIntInterval(NInfinity, r.Upper)
+ case token.NEQ:
+ c.I = InfinityFor(c.Y())
+ default:
+ panic("unsupported op " + c.Op.String())
+ }
+}
+
+func (c *IntIntersectionConstraint) IsKnown() bool {
+ return c.I.IsKnown()
+}
+
+func (c *IntIntersectionConstraint) MarkUnresolved() {
+ c.resolved = false
+}
+
+func (c *IntIntersectionConstraint) MarkResolved() {
+ c.resolved = true
+}
+
+func (c *IntIntersectionConstraint) IsResolved() bool {
+ return c.resolved
+}
diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go b/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go
new file mode 100644
index 000000000..40658dd8d
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go
@@ -0,0 +1,273 @@
+package vrp
+
+// TODO(dh): most of the constraints have implementations identical to
+// that of strings. Consider reusing them.
+
+import (
+ "fmt"
+ "go/types"
+
+ "honnef.co/go/tools/ssa"
+)
+
+type SliceInterval struct {
+ Length IntInterval
+}
+
+func (s SliceInterval) Union(other Range) Range {
+ i, ok := other.(SliceInterval)
+ if !ok {
+ i = SliceInterval{EmptyIntInterval}
+ }
+ if s.Length.Empty() || !s.Length.IsKnown() {
+ return i
+ }
+ if i.Length.Empty() || !i.Length.IsKnown() {
+ return s
+ }
+ return SliceInterval{
+ Length: s.Length.Union(i.Length).(IntInterval),
+ }
+}
+func (s SliceInterval) String() string { return s.Length.String() }
+func (s SliceInterval) IsKnown() bool { return s.Length.IsKnown() }
+
+type SliceAppendConstraint struct {
+ aConstraint
+ A ssa.Value
+ B ssa.Value
+}
+
+type SliceSliceConstraint struct {
+ aConstraint
+ X ssa.Value
+ Lower ssa.Value
+ Upper ssa.Value
+}
+
+type ArraySliceConstraint struct {
+ aConstraint
+ X ssa.Value
+ Lower ssa.Value
+ Upper ssa.Value
+}
+
+type SliceIntersectionConstraint struct {
+ aConstraint
+ X ssa.Value
+ I IntInterval
+}
+
+type SliceLengthConstraint struct {
+ aConstraint
+ X ssa.Value
+}
+
+type MakeSliceConstraint struct {
+ aConstraint
+ Size ssa.Value
+}
+
+type SliceIntervalConstraint struct {
+ aConstraint
+ I IntInterval
+}
+
+func NewSliceAppendConstraint(a, b, y ssa.Value) Constraint {
+ return &SliceAppendConstraint{NewConstraint(y), a, b}
+}
+func NewSliceSliceConstraint(x, lower, upper, y ssa.Value) Constraint {
+ return &SliceSliceConstraint{NewConstraint(y), x, lower, upper}
+}
+func NewArraySliceConstraint(x, lower, upper, y ssa.Value) Constraint {
+ return &ArraySliceConstraint{NewConstraint(y), x, lower, upper}
+}
+func NewSliceIntersectionConstraint(x ssa.Value, i IntInterval, y ssa.Value) Constraint {
+ return &SliceIntersectionConstraint{NewConstraint(y), x, i}
+}
+func NewSliceLengthConstraint(x, y ssa.Value) Constraint {
+ return &SliceLengthConstraint{NewConstraint(y), x}
+}
+func NewMakeSliceConstraint(size, y ssa.Value) Constraint {
+ return &MakeSliceConstraint{NewConstraint(y), size}
+}
+func NewSliceIntervalConstraint(i IntInterval, y ssa.Value) Constraint {
+ return &SliceIntervalConstraint{NewConstraint(y), i}
+}
+
+func (c *SliceAppendConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} }
+func (c *SliceSliceConstraint) Operands() []ssa.Value {
+ ops := []ssa.Value{c.X}
+ if c.Lower != nil {
+ ops = append(ops, c.Lower)
+ }
+ if c.Upper != nil {
+ ops = append(ops, c.Upper)
+ }
+ return ops
+}
+func (c *ArraySliceConstraint) Operands() []ssa.Value {
+ ops := []ssa.Value{c.X}
+ if c.Lower != nil {
+ ops = append(ops, c.Lower)
+ }
+ if c.Upper != nil {
+ ops = append(ops, c.Upper)
+ }
+ return ops
+}
+func (c *SliceIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
+func (c *SliceLengthConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
+func (c *MakeSliceConstraint) Operands() []ssa.Value { return []ssa.Value{c.Size} }
+func (c *SliceIntervalConstraint) Operands() []ssa.Value { return nil }
+
+func (c *SliceAppendConstraint) String() string {
+ return fmt.Sprintf("%s = append(%s, %s)", c.Y().Name(), c.A.Name(), c.B.Name())
+}
+func (c *SliceSliceConstraint) String() string {
+ var lname, uname string
+ if c.Lower != nil {
+ lname = c.Lower.Name()
+ }
+ if c.Upper != nil {
+ uname = c.Upper.Name()
+ }
+ return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname)
+}
+func (c *ArraySliceConstraint) String() string {
+ var lname, uname string
+ if c.Lower != nil {
+ lname = c.Lower.Name()
+ }
+ if c.Upper != nil {
+ uname = c.Upper.Name()
+ }
+ return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname)
+}
+func (c *SliceIntersectionConstraint) String() string {
+ return fmt.Sprintf("%s = %s.%t ⊓ %s", c.Y().Name(), c.X.Name(), c.Y().(*ssa.Sigma).Branch, c.I)
+}
+func (c *SliceLengthConstraint) String() string {
+ return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name())
+}
+func (c *MakeSliceConstraint) String() string {
+ return fmt.Sprintf("%s = make(slice, %s)", c.Y().Name(), c.Size.Name())
+}
+func (c *SliceIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) }
+
+func (c *SliceAppendConstraint) Eval(g *Graph) Range {
+ l1 := g.Range(c.A).(SliceInterval).Length
+ var l2 IntInterval
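+ // The second operand of append may be a string when appending to a
+ // []byte (append(b, s...)), so both interval kinds are accepted.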
+ switch r := g.Range(c.B).(type) {
+ case SliceInterval:
+ l2 = r.Length
+ case StringInterval:
+ l2 = r.Length
+ default:
+ return SliceInterval{}
+ }
+ if !l1.IsKnown() || !l2.IsKnown() {
+ return SliceInterval{}
+ }
+ return SliceInterval{
+ Length: l1.Add(l2),
+ }
+}
+func (c *SliceSliceConstraint) Eval(g *Graph) Range {
+ lr := NewIntInterval(NewZ(0), NewZ(0))
+ if c.Lower != nil {
+ lr = g.Range(c.Lower).(IntInterval)
+ }
+ ur := g.Range(c.X).(SliceInterval).Length
+ if c.Upper != nil {
+ ur = g.Range(c.Upper).(IntInterval)
+ }
+ if !lr.IsKnown() || !ur.IsKnown() {
+ return SliceInterval{}
+ }
+
+ ls := []Z{
+ ur.Lower.Sub(lr.Lower),
+ ur.Upper.Sub(lr.Lower),
+ ur.Lower.Sub(lr.Upper),
+ ur.Upper.Sub(lr.Upper),
+ }
+ // TODO(dh): if we don't truncate lengths to 0 we might be able to
+ // easily detect slices with high < low. we'd need to treat -∞
+ // specially, though.
+ for i, l := range ls {
+ if l.Sign() == -1 {
+ ls[i] = NewZ(0)
+ }
+ }
+
+ return SliceInterval{
+ Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)),
+ }
+}
+func (c *ArraySliceConstraint) Eval(g *Graph) Range {
+ lr := NewIntInterval(NewZ(0), NewZ(0))
+ if c.Lower != nil {
+ lr = g.Range(c.Lower).(IntInterval)
+ }
+ var l int64
+ switch typ := c.X.Type().(type) {
+ case *types.Array:
+ l = typ.Len()
+ case *types.Pointer:
+ l = typ.Elem().(*types.Array).Len()
+ }
+ ur := NewIntInterval(NewZ(l), NewZ(l))
+ if c.Upper != nil {
+ ur = g.Range(c.Upper).(IntInterval)
+ }
+ if !lr.IsKnown() || !ur.IsKnown() {
+ return SliceInterval{}
+ }
+
+ ls := []Z{
+ ur.Lower.Sub(lr.Lower),
+ ur.Upper.Sub(lr.Lower),
+ ur.Lower.Sub(lr.Upper),
+ ur.Upper.Sub(lr.Upper),
+ }
+ // TODO(dh): if we don't truncate lengths to 0 we might be able to
+ // easily detect slices with high < low. we'd need to treat -∞
+ // specially, though.
+ for i, l := range ls {
+ if l.Sign() == -1 {
+ ls[i] = NewZ(0)
+ }
+ }
+
+ return SliceInterval{
+ Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)),
+ }
+}
+func (c *SliceIntersectionConstraint) Eval(g *Graph) Range {
+ xi := g.Range(c.X).(SliceInterval)
+ if !xi.IsKnown() {
+ return c.I
+ }
+ return SliceInterval{
+ Length: xi.Length.Intersection(c.I),
+ }
+}
+func (c *SliceLengthConstraint) Eval(g *Graph) Range {
+ i := g.Range(c.X).(SliceInterval).Length
+ if !i.IsKnown() {
+ return NewIntInterval(NewZ(0), PInfinity)
+ }
+ return i
+}
+func (c *MakeSliceConstraint) Eval(g *Graph) Range {
+ i, ok := g.Range(c.Size).(IntInterval)
+ if !ok {
+ return SliceInterval{NewIntInterval(NewZ(0), PInfinity)}
+ }
+ if i.Lower.Sign() == -1 {
+ i.Lower = NewZ(0)
+ }
+ return SliceInterval{i}
+}
+func (c *SliceIntervalConstraint) Eval(*Graph) Range { return SliceInterval{c.I} }
diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/string.go b/vendor/honnef.co/go/tools/staticcheck/vrp/string.go
new file mode 100644
index 000000000..e05877f9f
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/vrp/string.go
@@ -0,0 +1,258 @@
+package vrp
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+
+ "honnef.co/go/tools/ssa"
+)
+
+type StringInterval struct {
+ Length IntInterval
+}
+
+func (s StringInterval) Union(other Range) Range {
+ i, ok := other.(StringInterval)
+ if !ok {
+ i = StringInterval{EmptyIntInterval}
+ }
+ if s.Length.Empty() || !s.Length.IsKnown() {
+ return i
+ }
+ if i.Length.Empty() || !i.Length.IsKnown() {
+ return s
+ }
+ return StringInterval{
+ Length: s.Length.Union(i.Length).(IntInterval),
+ }
+}
+
+func (s StringInterval) String() string {
+ return s.Length.String()
+}
+
+func (s StringInterval) IsKnown() bool {
+ return s.Length.IsKnown()
+}
+
+type StringSliceConstraint struct {
+ aConstraint
+ X ssa.Value
+ Lower ssa.Value
+ Upper ssa.Value
+}
+
+type StringIntersectionConstraint struct {
+ aConstraint
+ ranges Ranges
+ A ssa.Value
+ B ssa.Value
+ Op token.Token
+ I IntInterval
+ resolved bool
+}
+
+type StringConcatConstraint struct {
+ aConstraint
+ A ssa.Value
+ B ssa.Value
+}
+
+type StringLengthConstraint struct {
+ aConstraint
+ X ssa.Value
+}
+
+type StringIntervalConstraint struct {
+ aConstraint
+ I IntInterval
+}
+
+func NewStringSliceConstraint(x, lower, upper, y ssa.Value) Constraint {
+ return &StringSliceConstraint{NewConstraint(y), x, lower, upper}
+}
+func NewStringIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint {
+ return &StringIntersectionConstraint{
+ aConstraint: NewConstraint(y),
+ ranges: ranges,
+ A: a,
+ B: b,
+ Op: op,
+ }
+}
+func NewStringConcatConstraint(a, b, y ssa.Value) Constraint {
+ return &StringConcatConstraint{NewConstraint(y), a, b}
+}
+func NewStringLengthConstraint(x ssa.Value, y ssa.Value) Constraint {
+ return &StringLengthConstraint{NewConstraint(y), x}
+}
+func NewStringIntervalConstraint(i IntInterval, y ssa.Value) Constraint {
+ return &StringIntervalConstraint{NewConstraint(y), i}
+}
+
+func (c *StringSliceConstraint) Operands() []ssa.Value {
+ vs := []ssa.Value{c.X}
+ if c.Lower != nil {
+ vs = append(vs, c.Lower)
+ }
+ if c.Upper != nil {
+ vs = append(vs, c.Upper)
+ }
+ return vs
+}
+func (c *StringIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} }
+func (c StringConcatConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} }
+func (c *StringLengthConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
+func (c *StringIntervalConstraint) Operands() []ssa.Value { return nil }
+
+func (c *StringSliceConstraint) String() string {
+ var lname, uname string
+ if c.Lower != nil {
+ lname = c.Lower.Name()
+ }
+ if c.Upper != nil {
+ uname = c.Upper.Name()
+ }
+ return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname)
+}
+func (c *StringIntersectionConstraint) String() string {
+ return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch)
+}
+func (c StringConcatConstraint) String() string {
+ return fmt.Sprintf("%s = %s + %s", c.Y().Name(), c.A.Name(), c.B.Name())
+}
+func (c *StringLengthConstraint) String() string {
+ return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name())
+}
+func (c *StringIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) }
+
+func (c *StringSliceConstraint) Eval(g *Graph) Range {
+ lr := NewIntInterval(NewZ(0), NewZ(0))
+ if c.Lower != nil {
+ lr = g.Range(c.Lower).(IntInterval)
+ }
+ ur := g.Range(c.X).(StringInterval).Length
+ if c.Upper != nil {
+ ur = g.Range(c.Upper).(IntInterval)
+ }
+ if !lr.IsKnown() || !ur.IsKnown() {
+ return StringInterval{}
+ }
+
+ ls := []Z{
+ ur.Lower.Sub(lr.Lower),
+ ur.Upper.Sub(lr.Lower),
+ ur.Lower.Sub(lr.Upper),
+ ur.Upper.Sub(lr.Upper),
+ }
+ // TODO(dh): if we don't truncate lengths to 0 we might be able to
+ // easily detect slices with high < low. we'd need to treat -∞
+ // specially, though.
+ for i, l := range ls {
+ if l.Sign() == -1 {
+ ls[i] = NewZ(0)
+ }
+ }
+
+ return StringInterval{
+ Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)),
+ }
+}
+func (c *StringIntersectionConstraint) Eval(g *Graph) Range {
+ var l IntInterval
+ switch r := g.Range(c.A).(type) {
+ case StringInterval:
+ l = r.Length
+ case IntInterval:
+ l = r
+ }
+
+ if !l.IsKnown() {
+ return StringInterval{c.I}
+ }
+ return StringInterval{
+ Length: l.Intersection(c.I),
+ }
+}
+func (c StringConcatConstraint) Eval(g *Graph) Range {
+ i1, i2 := g.Range(c.A).(StringInterval), g.Range(c.B).(StringInterval)
+ if !i1.Length.IsKnown() || !i2.Length.IsKnown() {
+ return StringInterval{}
+ }
+ return StringInterval{
+ Length: i1.Length.Add(i2.Length),
+ }
+}
+func (c *StringLengthConstraint) Eval(g *Graph) Range {
+ i := g.Range(c.X).(StringInterval).Length
+ if !i.IsKnown() {
+ return NewIntInterval(NewZ(0), PInfinity)
+ }
+ return i
+}
+func (c *StringIntervalConstraint) Eval(*Graph) Range { return StringInterval{c.I} }
+
+func (c *StringIntersectionConstraint) Futures() []ssa.Value {
+ return []ssa.Value{c.B}
+}
+
+func (c *StringIntersectionConstraint) Resolve() {
+ if (c.A.Type().Underlying().(*types.Basic).Info() & types.IsString) != 0 {
+ // comparing two strings
+ r, ok := c.ranges[c.B].(StringInterval)
+ if !ok {
+ c.I = NewIntInterval(NewZ(0), PInfinity)
+ return
+ }
+ switch c.Op {
+ case token.EQL:
+ c.I = r.Length
+ case token.GTR, token.GEQ:
+ c.I = NewIntInterval(r.Length.Lower, PInfinity)
+ case token.LSS, token.LEQ:
+ c.I = NewIntInterval(NewZ(0), r.Length.Upper)
+ case token.NEQ:
+ default:
+ panic("unsupported op " + c.Op.String())
+ }
+ } else {
+ r, ok := c.ranges[c.B].(IntInterval)
+ if !ok {
+ c.I = NewIntInterval(NewZ(0), PInfinity)
+ return
+ }
+ // comparing two lengths
+ switch c.Op {
+ case token.EQL:
+ c.I = r
+ case token.GTR:
+ c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity)
+ case token.GEQ:
+ c.I = NewIntInterval(r.Lower, PInfinity)
+ case token.LSS:
+ c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1)))
+ case token.LEQ:
+ c.I = NewIntInterval(NInfinity, r.Upper)
+ case token.NEQ:
+ default:
+ panic("unsupported op " + c.Op.String())
+ }
+ }
+}
+
+func (c *StringIntersectionConstraint) IsKnown() bool {
+ return c.I.IsKnown()
+}
+
+func (c *StringIntersectionConstraint) MarkUnresolved() {
+ c.resolved = false
+}
+
+func (c *StringIntersectionConstraint) MarkResolved() {
+ c.resolved = true
+}
+
+func (c *StringIntersectionConstraint) IsResolved() bool {
+ return c.resolved
+}
diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go b/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go
new file mode 100644
index 000000000..cb17f042a
--- /dev/null
+++ b/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go
@@ -0,0 +1,1049 @@
+package vrp
+
+// TODO(dh): widening and narrowing have a lot of code in common. Make
+// it reusable.
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "math/big"
+ "sort"
+ "strings"
+
+ "honnef.co/go/tools/ssa"
+)
+
+type Future interface {
+ Constraint
+ Futures() []ssa.Value
+ Resolve()
+ IsKnown() bool
+ MarkUnresolved()
+ MarkResolved()
+ IsResolved() bool
+}
+
+type Range interface {
+ Union(other Range) Range
+ IsKnown() bool
+}
+
+type Constraint interface {
+ Y() ssa.Value
+ isConstraint()
+ String() string
+ Eval(*Graph) Range
+ Operands() []ssa.Value
+}
+
+type aConstraint struct {
+ y ssa.Value
+}
+
+func NewConstraint(y ssa.Value) aConstraint {
+ return aConstraint{y}
+}
+
+func (aConstraint) isConstraint() {}
+func (c aConstraint) Y() ssa.Value { return c.y }
+
+type PhiConstraint struct {
+ aConstraint
+ Vars []ssa.Value
+}
+
+func NewPhiConstraint(vars []ssa.Value, y ssa.Value) Constraint {
+ uniqm := map[ssa.Value]struct{}{}
+ for _, v := range vars {
+ uniqm[v] = struct{}{}
+ }
+ var uniq []ssa.Value
+ for v := range uniqm {
+ uniq = append(uniq, v)
+ }
+ return &PhiConstraint{
+ aConstraint: NewConstraint(y),
+ Vars: uniq,
+ }
+}
+
+func (c *PhiConstraint) Operands() []ssa.Value {
+ return c.Vars
+}
+
+func (c *PhiConstraint) Eval(g *Graph) Range {
+ i := Range(nil)
+ for _, v := range c.Vars {
+ i = g.Range(v).Union(i)
+ }
+ return i
+}
+
+func (c *PhiConstraint) String() string {
+ names := make([]string, len(c.Vars))
+ for i, v := range c.Vars {
+ names[i] = v.Name()
+ }
+ return fmt.Sprintf("%s = φ(%s)", c.Y().Name(), strings.Join(names, ", "))
+}
+
+func isSupportedType(typ types.Type) bool {
+ switch typ := typ.Underlying().(type) {
+ case *types.Basic:
+ switch typ.Kind() {
+ case types.String, types.UntypedString:
+ return true
+ default:
+ if (typ.Info() & types.IsInteger) == 0 {
+ return false
+ }
+ }
+ case *types.Chan:
+ return true
+ case *types.Slice:
+ return true
+ default:
+ return false
+ }
+ return true
+}
+
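+// ConstantToZ converts a go/constant value to a Z through its exact
+// decimal representation, so integers wider than 64 bits survive the
+// round trip.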
+func ConstantToZ(c constant.Value) Z {
+ s := constant.ToInt(c).ExactString()
+ n := &big.Int{}
+ n.SetString(s, 10)
+ return NewBigZ(n)
+}
+
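+// sigmaInteger derives an intersection constraint for an integer sigma
+// node: on the false branch the comparison is inverted (x < y becomes
+// x >= y), and if the sigma's variable is the right-hand operand the
+// comparison is flipped (x < y becomes y > x).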
+func sigmaInteger(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint {
+ op := cond.Op
+ if !ins.Branch {
+ op = invertToken(op)
+ }
+
+ switch op {
+ case token.EQL, token.GTR, token.GEQ, token.LSS, token.LEQ:
+ default:
+ return nil
+ }
+ var a, b ssa.Value
+ if (*ops[0]) == ins.X {
+ a = *ops[0]
+ b = *ops[1]
+ } else {
+ a = *ops[1]
+ b = *ops[0]
+ op = flipToken(op)
+ }
+ return NewIntIntersectionConstraint(a, b, op, g.ranges, ins)
+}
+
+func sigmaString(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint {
+ op := cond.Op
+ if !ins.Branch {
+ op = invertToken(op)
+ }
+
+ switch op {
+ case token.EQL, token.GTR, token.GEQ, token.LSS, token.LEQ:
+ default:
+ return nil
+ }
+
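+ // If the operands are not strings themselves, the branch compares
+ // integers derived from strings (e.g. the result of a len call whose
+ // first argument is the sigma's variable) rather than the strings.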
+ if ((*ops[0]).Type().Underlying().(*types.Basic).Info() & types.IsString) == 0 {
+ var a, b ssa.Value
+ call, ok := (*ops[0]).(*ssa.Call)
+ if ok && call.Common().Args[0] == ins.X {
+ a = *ops[0]
+ b = *ops[1]
+ } else {
+ a = *ops[1]
+ b = *ops[0]
+ op = flipToken(op)
+ }
+ return NewStringIntersectionConstraint(a, b, op, g.ranges, ins)
+ }
+ var a, b ssa.Value
+ if (*ops[0]) == ins.X {
+ a = *ops[0]
+ b = *ops[1]
+ } else {
+ a = *ops[1]
+ b = *ops[0]
+ op = flipToken(op)
+ }
+ return NewStringIntersectionConstraint(a, b, op, g.ranges, ins)
+}
+
+func sigmaSlice(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint {
+ // TODO(dh): sigmaSlice and sigmaString are a lot alike. Can they
+ // be merged?
+ //
+ // XXX support futures
+
+ op := cond.Op
+ if !ins.Branch {
+ op = invertToken(op)
+ }
+
+ k, ok := (*ops[1]).(*ssa.Const)
+ // XXX investigate in what cases this wouldn't be a Const
+ //
+ // XXX what if left and right are swapped?
+ if !ok {
+ return nil
+ }
+
+ call, ok := (*ops[0]).(*ssa.Call)
+ if !ok {
+ return nil
+ }
+ builtin, ok := call.Common().Value.(*ssa.Builtin)
+ if !ok {
+ return nil
+ }
+ if builtin.Name() != "len" {
+ return nil
+ }
+ callops := call.Operands(nil)
+
+ v := ConstantToZ(k.Value)
+ c := NewSliceIntersectionConstraint(*callops[1], IntInterval{}, ins).(*SliceIntersectionConstraint)
+ switch op {
+ case token.EQL:
+ c.I = NewIntInterval(v, v)
+ case token.GTR, token.GEQ:
+ off := int64(0)
+ if op == token.GTR {
+ off = 1
+ }
+ c.I = NewIntInterval(
+ v.Add(NewZ(off)),
+ PInfinity,
+ )
+ case token.LSS, token.LEQ:
+ off := int64(0)
+ if op == token.LSS {
+ off = -1
+ }
+ c.I = NewIntInterval(
+ NInfinity,
+ v.Add(NewZ(off)),
+ )
+ default:
+ return nil
+ }
+ return c
+}
+
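+// BuildGraph constructs the constraint graph for f in two passes:
+// first it seeds interval constraints for integer and string constants
+// (and nil slices), then it adds constraints derived from the
+// instructions themselves: conversions, known calls and builtins,
+// arithmetic, slicing, φ and σ nodes, and make expressions.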
+func BuildGraph(f *ssa.Function) *Graph {
+ g := &Graph{
+ Vertices: map[interface{}]*Vertex{},
+ ranges: Ranges{},
+ }
+
+ var cs []Constraint
+
+ ops := make([]*ssa.Value, 16)
+ seen := map[ssa.Value]bool{}
+ for _, block := range f.Blocks {
+ for _, ins := range block.Instrs {
+ ops = ins.Operands(ops[:0])
+ for _, op := range ops {
+ if c, ok := (*op).(*ssa.Const); ok {
+ if seen[c] {
+ continue
+ }
+ seen[c] = true
+ if c.Value == nil {
+ switch c.Type().Underlying().(type) {
+ case *types.Slice:
+ cs = append(cs, NewSliceIntervalConstraint(NewIntInterval(NewZ(0), NewZ(0)), c))
+ }
+ continue
+ }
+ switch c.Value.Kind() {
+ case constant.Int:
+ v := ConstantToZ(c.Value)
+ cs = append(cs, NewIntIntervalConstraint(NewIntInterval(v, v), c))
+ case constant.String:
+ s := constant.StringVal(c.Value)
+ n := NewZ(int64(len(s)))
+ cs = append(cs, NewStringIntervalConstraint(NewIntInterval(n, n), c))
+ }
+ }
+ }
+ }
+ }
+ for _, block := range f.Blocks {
+ for _, ins := range block.Instrs {
+ switch ins := ins.(type) {
+ case *ssa.Convert:
+ switch v := ins.Type().Underlying().(type) {
+ case *types.Basic:
+ if (v.Info() & types.IsInteger) == 0 {
+ continue
+ }
+ cs = append(cs, NewIntConversionConstraint(ins.X, ins))
+ }
+ case *ssa.Call:
+ if static := ins.Common().StaticCallee(); static != nil {
+ if fn, ok := static.Object().(*types.Func); ok {
+ switch fn.FullName() {
+ case "bytes.Index", "bytes.IndexAny", "bytes.IndexByte",
+ "bytes.IndexFunc", "bytes.IndexRune", "bytes.LastIndex",
+ "bytes.LastIndexAny", "bytes.LastIndexByte", "bytes.LastIndexFunc",
+ "strings.Index", "strings.IndexAny", "strings.IndexByte",
+ "strings.IndexFunc", "strings.IndexRune", "strings.LastIndex",
+ "strings.LastIndexAny", "strings.LastIndexByte", "strings.LastIndexFunc":
+ // TODO(dh): instead of limiting by +∞,
+ // limit by the upper bound of the passed
+ // string
+ cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(-1), PInfinity), ins))
+ case "bytes.Title", "bytes.ToLower", "bytes.ToTitle", "bytes.ToUpper",
+ "strings.Title", "strings.ToLower", "strings.ToTitle", "strings.ToUpper":
+ cs = append(cs, NewCopyConstraint(ins.Common().Args[0], ins))
+ case "bytes.ToLowerSpecial", "bytes.ToTitleSpecial", "bytes.ToUpperSpecial",
+ "strings.ToLowerSpecial", "strings.ToTitleSpecial", "strings.ToUpperSpecial":
+ cs = append(cs, NewCopyConstraint(ins.Common().Args[1], ins))
+ case "bytes.Compare", "strings.Compare":
+ cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(-1), NewZ(1)), ins))
+ case "bytes.Count", "strings.Count":
+ // TODO(dh): instead of limiting by +∞,
+ // limit by the upper bound of the passed
+ // string.
+ cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(0), PInfinity), ins))
+ case "bytes.Map", "bytes.TrimFunc", "bytes.TrimLeft", "bytes.TrimLeftFunc",
+ "bytes.TrimRight", "bytes.TrimRightFunc", "bytes.TrimSpace",
+ "strings.Map", "strings.TrimFunc", "strings.TrimLeft", "strings.TrimLeftFunc",
+ "strings.TrimRight", "strings.TrimRightFunc", "strings.TrimSpace":
+ // TODO(dh): lower = 0, upper = upper of passed string
+ case "bytes.TrimPrefix", "bytes.TrimSuffix",
+ "strings.TrimPrefix", "strings.TrimSuffix":
+ // TODO(dh): range between "unmodified" and len(cutset) removed
+ case "(*bytes.Buffer).Cap", "(*bytes.Buffer).Len", "(*bytes.Reader).Len", "(*bytes.Reader).Size":
+ cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(0), PInfinity), ins))
+ }
+ }
+ }
+ builtin, ok := ins.Common().Value.(*ssa.Builtin)
+ ops := ins.Operands(nil)
+ if !ok {
+ continue
+ }
+ switch builtin.Name() {
+ case "len":
+ switch op1 := (*ops[1]).Type().Underlying().(type) {
+ case *types.Basic:
+ if op1.Kind() == types.String || op1.Kind() == types.UntypedString {
+ cs = append(cs, NewStringLengthConstraint(*ops[1], ins))
+ }
+ case *types.Slice:
+ cs = append(cs, NewSliceLengthConstraint(*ops[1], ins))
+ }
+
+ case "append":
+ cs = append(cs, NewSliceAppendConstraint(ins.Common().Args[0], ins.Common().Args[1], ins))
+ }
+ case *ssa.BinOp:
+ ops := ins.Operands(nil)
+ basic, ok := (*ops[0]).Type().Underlying().(*types.Basic)
+ if !ok {
+ continue
+ }
+ switch basic.Kind() {
+ case types.Int, types.Int8, types.Int16, types.Int32, types.Int64,
+ types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.UntypedInt:
+ fns := map[token.Token]func(ssa.Value, ssa.Value, ssa.Value) Constraint{
+ token.ADD: NewIntAddConstraint,
+ token.SUB: NewIntSubConstraint,
+ token.MUL: NewIntMulConstraint,
+ // XXX support QUO, REM, SHL, SHR
+ }
+ fn, ok := fns[ins.Op]
+ if ok {
+ cs = append(cs, fn(*ops[0], *ops[1], ins))
+ }
+ case types.String, types.UntypedString:
+ if ins.Op == token.ADD {
+ cs = append(cs, NewStringConcatConstraint(*ops[0], *ops[1], ins))
+ }
+ }
+ case *ssa.Slice:
+ typ := ins.X.Type().Underlying()
+ switch typ := typ.(type) {
+ case *types.Basic:
+ cs = append(cs, NewStringSliceConstraint(ins.X, ins.Low, ins.High, ins))
+ case *types.Slice:
+ cs = append(cs, NewSliceSliceConstraint(ins.X, ins.Low, ins.High, ins))
+ case *types.Array:
+ cs = append(cs, NewArraySliceConstraint(ins.X, ins.Low, ins.High, ins))
+ case *types.Pointer:
+ if _, ok := typ.Elem().(*types.Array); !ok {
+ continue
+ }
+ cs = append(cs, NewArraySliceConstraint(ins.X, ins.Low, ins.High, ins))
+ }
+ case *ssa.Phi:
+ if !isSupportedType(ins.Type()) {
+ continue
+ }
+ ops := ins.Operands(nil)
+ dops := make([]ssa.Value, len(ops))
+ for i, op := range ops {
+ dops[i] = *op
+ }
+ cs = append(cs, NewPhiConstraint(dops, ins))
+ case *ssa.Sigma:
+ pred := ins.Block().Preds[0]
+ instrs := pred.Instrs
+ cond, ok := instrs[len(instrs)-1].(*ssa.If).Cond.(*ssa.BinOp)
+ if !ok {
+ continue
+ }
+ ops := cond.Operands(nil)
+ switch typ := ins.Type().Underlying().(type) {
+ case *types.Basic:
+ var c Constraint
+ switch typ.Kind() {
+ case types.Int, types.Int8, types.Int16, types.Int32, types.Int64,
+ types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.UntypedInt:
+ c = sigmaInteger(g, ins, cond, ops)
+ case types.String, types.UntypedString:
+ c = sigmaString(g, ins, cond, ops)
+ }
+ if c != nil {
+ cs = append(cs, c)
+ }
+ case *types.Slice:
+ c := sigmaSlice(g, ins, cond, ops)
+ if c != nil {
+ cs = append(cs, c)
+ }
+ default:
+ //log.Printf("unsupported sigma type %T", typ) // XXX
+ }
+ case *ssa.MakeChan:
+ cs = append(cs, NewMakeChannelConstraint(ins.Size, ins))
+ case *ssa.MakeSlice:
+ cs = append(cs, NewMakeSliceConstraint(ins.Len, ins))
+ case *ssa.ChangeType:
+ switch ins.X.Type().Underlying().(type) {
+ case *types.Chan:
+ cs = append(cs, NewChannelChangeTypeConstraint(ins.X, ins))
+ }
+ }
+ }
+ }
+
+ for _, c := range cs {
+ if c == nil {
+ panic("nil constraint")
+ }
+ // If V is used in constraint C, then we create an edge V->C
+ for _, op := range c.Operands() {
+ g.AddEdge(op, c, false)
+ }
+ if c, ok := c.(Future); ok {
+ for _, op := range c.Futures() {
+ g.AddEdge(op, c, true)
+ }
+ }
+ // If constraint C defines variable V, then we create an edge
+ // C->V
+ g.AddEdge(c, c.Y(), false)
+ }
+
+ g.FindSCCs()
+ g.sccEdges = make([][]Edge, len(g.SCCs))
+ g.futures = make([][]Future, len(g.SCCs))
+ for _, e := range g.Edges {
+ g.sccEdges[e.From.SCC] = append(g.sccEdges[e.From.SCC], e)
+ if !e.control {
+ continue
+ }
+ if c, ok := e.To.Value.(Future); ok {
+ g.futures[e.From.SCC] = append(g.futures[e.From.SCC], c)
+ }
+ }
+ return g
+}
+
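+// Solve computes a range for every value. Singleton SCCs are evaluated
+// directly; larger SCCs are widened to a fixed point, have their
+// futures resolved, and are then narrowed to regain precision before
+// their results are propagated to later SCCs. Finally, signed results
+// whose bounds exceed their type's bit width collapse to [-∞, +∞].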
+func (g *Graph) Solve() Ranges {
+ var consts []Z
+ off := NewZ(1)
+ for _, n := range g.Vertices {
+ if c, ok := n.Value.(*ssa.Const); ok {
+ basic, ok := c.Type().Underlying().(*types.Basic)
+ if !ok {
+ continue
+ }
+ if (basic.Info() & types.IsInteger) != 0 {
+ z := ConstantToZ(c.Value)
+ consts = append(consts, z)
+ consts = append(consts, z.Add(off))
+ consts = append(consts, z.Sub(off))
+ }
+ }
+ }
+ sort.Sort(Zs(consts))
+
+ for scc, vertices := range g.SCCs {
+ n := len(vertices)
+ if n == 1 {
+ g.resolveFutures(scc)
+ v := vertices[0]
+ if v, ok := v.Value.(ssa.Value); ok {
+ switch typ := v.Type().Underlying().(type) {
+ case *types.Basic:
+ switch typ.Kind() {
+ case types.String, types.UntypedString:
+ if !g.Range(v).(StringInterval).IsKnown() {
+ g.SetRange(v, StringInterval{NewIntInterval(NewZ(0), PInfinity)})
+ }
+ default:
+ if !g.Range(v).(IntInterval).IsKnown() {
+ g.SetRange(v, InfinityFor(v))
+ }
+ }
+ case *types.Chan:
+ if !g.Range(v).(ChannelInterval).IsKnown() {
+ g.SetRange(v, ChannelInterval{NewIntInterval(NewZ(0), PInfinity)})
+ }
+ case *types.Slice:
+ if !g.Range(v).(SliceInterval).IsKnown() {
+ g.SetRange(v, SliceInterval{NewIntInterval(NewZ(0), PInfinity)})
+ }
+ }
+ }
+ if c, ok := v.Value.(Constraint); ok {
+ g.SetRange(c.Y(), c.Eval(g))
+ }
+ } else {
+ uses := g.uses(scc)
+ entries := g.entries(scc)
+ for len(entries) > 0 {
+ v := entries[len(entries)-1]
+ entries = entries[:len(entries)-1]
+ for _, use := range uses[v] {
+ if g.widen(use, consts) {
+ entries = append(entries, use.Y())
+ }
+ }
+ }
+
+ g.resolveFutures(scc)
+
+ // XXX this seems to be necessary, but shouldn't be.
+ // removing it leads to nil pointer derefs; investigate
+ // where we're not setting values correctly.
+ for _, n := range vertices {
+ if v, ok := n.Value.(ssa.Value); ok {
+ i, ok := g.Range(v).(IntInterval)
+ if !ok {
+ continue
+ }
+ if !i.IsKnown() {
+ g.SetRange(v, InfinityFor(v))
+ }
+ }
+ }
+
+ actives := g.actives(scc)
+ for len(actives) > 0 {
+ v := actives[len(actives)-1]
+ actives = actives[:len(actives)-1]
+ for _, use := range uses[v] {
+ if g.narrow(use) {
+ actives = append(actives, use.Y())
+ }
+ }
+ }
+ }
+ // propagate scc
+ for _, edge := range g.sccEdges[scc] {
+ if edge.control {
+ continue
+ }
+ if edge.From.SCC == edge.To.SCC {
+ continue
+ }
+ if c, ok := edge.To.Value.(Constraint); ok {
+ g.SetRange(c.Y(), c.Eval(g))
+ }
+ if c, ok := edge.To.Value.(Future); ok {
+ if !c.IsKnown() {
+ c.MarkUnresolved()
+ }
+ }
+ }
+ }
+
+ for v, r := range g.ranges {
+ i, ok := r.(IntInterval)
+ if !ok {
+ continue
+ }
+ if (v.Type().Underlying().(*types.Basic).Info() & types.IsUnsigned) == 0 {
+ if i.Upper != PInfinity {
+ s := &types.StdSizes{
+ // XXX is it okay to assume the largest word size, or do we
+ // need to be platform specific?
+ WordSize: 8,
+ MaxAlign: 1,
+ }
+ bits := (s.Sizeof(v.Type()) * 8) - 1
+ n := big.NewInt(1)
+ n = n.Lsh(n, uint(bits))
+ upper, lower := &big.Int{}, &big.Int{}
+ upper.Sub(n, big.NewInt(1))
+ lower.Neg(n)
+
+ if i.Upper.Cmp(NewBigZ(upper)) == 1 {
+ i = NewIntInterval(NInfinity, PInfinity)
+ } else if i.Lower.Cmp(NewBigZ(lower)) == -1 {
+ i = NewIntInterval(NInfinity, PInfinity)
+ }
+ }
+ }
+
+ g.ranges[v] = i
+ }
+
+ return g.ranges
+}
+
+func VertexString(v *Vertex) string {
+ switch v := v.Value.(type) {
+ case Constraint:
+ return v.String()
+ case ssa.Value:
+ return v.Name()
+ case nil:
+ return "BUG: nil vertex value"
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
+}
+
+type Vertex struct {
+ Value interface{} // one of Constraint or ssa.Value
+ SCC int
+ index int
+ lowlink int
+ stack bool
+
+ Succs []Edge
+}
+
+type Ranges map[ssa.Value]Range
+
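+// Get returns the recorded range for x, falling back to an unknown
+// interval of the kind matching x's underlying type (string, channel,
+// slice or integer) when none has been computed yet.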
+func (r Ranges) Get(x ssa.Value) Range {
+ if x == nil {
+ return nil
+ }
+ i, ok := r[x]
+ if !ok {
+ switch x := x.Type().Underlying().(type) {
+ case *types.Basic:
+ switch x.Kind() {
+ case types.String, types.UntypedString:
+ return StringInterval{}
+ default:
+ return IntInterval{}
+ }
+ case *types.Chan:
+ return ChannelInterval{}
+ case *types.Slice:
+ return SliceInterval{}
+ }
+ }
+ return i
+}
+
+type Graph struct {
+ Vertices map[interface{}]*Vertex
+ Edges []Edge
+ SCCs [][]*Vertex
+ ranges Ranges
+
+ // map SCCs to futures
+ futures [][]Future
+ // map SCCs to edges
+ sccEdges [][]Edge
+}
+
+func (g Graph) Graphviz() string {
+ var lines []string
+ lines = append(lines, "digraph{")
+ ids := map[interface{}]int{}
+ i := 1
+ for _, v := range g.Vertices {
+ ids[v] = i
+ shape := "box"
+ if _, ok := v.Value.(ssa.Value); ok {
+ shape = "oval"
+ }
+ lines = append(lines, fmt.Sprintf(`n%d [shape="%s", label=%q, colorscheme=spectral11, style="filled", fillcolor="%d"]`,
+ i, shape, VertexString(v), (v.SCC%11)+1))
+ i++
+ }
+ for _, e := range g.Edges {
+ style := "solid"
+ if e.control {
+ style = "dashed"
+ }
+ lines = append(lines, fmt.Sprintf(`n%d -> n%d [style="%s"]`, ids[e.From], ids[e.To], style))
+ }
+ lines = append(lines, "}")
+ return strings.Join(lines, "\n")
+}
+
+func (g *Graph) SetRange(x ssa.Value, r Range) {
+ g.ranges[x] = r
+}
+
+func (g *Graph) Range(x ssa.Value) Range {
+ return g.ranges.Get(x)
+}
+
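+// widen expands a growing bound out to the nearest constant collected
+// in Solve, or to ±∞ when no constant bounds it, so that fixed-point
+// iteration converges in a few steps instead of creeping one value at
+// a time.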
+func (g *Graph) widen(c Constraint, consts []Z) bool {
+ setRange := func(i Range) {
+ g.SetRange(c.Y(), i)
+ }
+ widenIntInterval := func(oi, ni IntInterval) (IntInterval, bool) {
+ if !ni.IsKnown() {
+ return oi, false
+ }
+ nlc := NInfinity
+ nuc := PInfinity
+ for _, co := range consts {
+ if co.Cmp(ni.Lower) <= 0 {
+ nlc = co
+ break
+ }
+ }
+ for _, co := range consts {
+ if co.Cmp(ni.Upper) >= 0 {
+ nuc = co
+ break
+ }
+ }
+
+ if !oi.IsKnown() {
+ return ni, true
+ }
+ if ni.Lower.Cmp(oi.Lower) == -1 && ni.Upper.Cmp(oi.Upper) == 1 {
+ return NewIntInterval(nlc, nuc), true
+ }
+ if ni.Lower.Cmp(oi.Lower) == -1 {
+ return NewIntInterval(nlc, oi.Upper), true
+ }
+ if ni.Upper.Cmp(oi.Upper) == 1 {
+ return NewIntInterval(oi.Lower, nuc), true
+ }
+ return oi, false
+ }
+ switch oi := g.Range(c.Y()).(type) {
+ case IntInterval:
+ ni := c.Eval(g).(IntInterval)
+ si, changed := widenIntInterval(oi, ni)
+ if changed {
+ setRange(si)
+ return true
+ }
+ return false
+ case StringInterval:
+ ni := c.Eval(g).(StringInterval)
+ si, changed := widenIntInterval(oi.Length, ni.Length)
+ if changed {
+ setRange(StringInterval{si})
+ return true
+ }
+ return false
+ case SliceInterval:
+ ni := c.Eval(g).(SliceInterval)
+ si, changed := widenIntInterval(oi.Length, ni.Length)
+ if changed {
+ setRange(SliceInterval{si})
+ return true
+ }
+ return false
+ default:
+ return false
+ }
+}
+
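+// narrow adjusts a range after widening toward a fresh evaluation of
+// the constraint: infinite bounds become the evaluated finite bounds,
+// and remaining bounds are moved to agree with the evaluation.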
+func (g *Graph) narrow(c Constraint) bool {
+ narrowIntInterval := func(oi, ni IntInterval) (IntInterval, bool) {
+ oLower := oi.Lower
+ oUpper := oi.Upper
+ nLower := ni.Lower
+ nUpper := ni.Upper
+
+ if oLower == NInfinity && nLower != NInfinity {
+ return NewIntInterval(nLower, oUpper), true
+ }
+ if oUpper == PInfinity && nUpper != PInfinity {
+ return NewIntInterval(oLower, nUpper), true
+ }
+ if oLower.Cmp(nLower) == 1 {
+ return NewIntInterval(nLower, oUpper), true
+ }
+ if oUpper.Cmp(nUpper) == -1 {
+ return NewIntInterval(oLower, nUpper), true
+ }
+ return oi, false
+ }
+ switch oi := g.Range(c.Y()).(type) {
+ case IntInterval:
+ ni := c.Eval(g).(IntInterval)
+ si, changed := narrowIntInterval(oi, ni)
+ if changed {
+ g.SetRange(c.Y(), si)
+ return true
+ }
+ return false
+ case StringInterval:
+ ni := c.Eval(g).(StringInterval)
+ si, changed := narrowIntInterval(oi.Length, ni.Length)
+ if changed {
+ g.SetRange(c.Y(), StringInterval{si})
+ return true
+ }
+ return false
+ case SliceInterval:
+ ni := c.Eval(g).(SliceInterval)
+ si, changed := narrowIntInterval(oi.Length, ni.Length)
+ if changed {
+ g.SetRange(c.Y(), SliceInterval{si})
+ return true
+ }
+ return false
+ default:
+ return false
+ }
+}
+
+func (g *Graph) resolveFutures(scc int) {
+ for _, c := range g.futures[scc] {
+ c.Resolve()
+ }
+}
+
+func (g *Graph) entries(scc int) []ssa.Value {
+ var entries []ssa.Value
+ for _, n := range g.Vertices {
+ if n.SCC != scc {
+ continue
+ }
+ if v, ok := n.Value.(ssa.Value); ok {
+ // XXX avoid quadratic runtime
+ //
+ // XXX I cannot think of any code where the future and its
+ // variables aren't in the same SCC, in which case this
+ // code isn't very useful (the variables won't be resolved
+ // yet). Before we have a cross-SCC example, however, we
+ // can't really verify that this code is working
+ // correctly, or indeed doing anything useful.
+ for _, on := range g.Vertices {
+ if c, ok := on.Value.(Future); ok {
+ if c.Y() == v {
+ if !c.IsResolved() {
+ g.SetRange(c.Y(), c.Eval(g))
+ c.MarkResolved()
+ }
+ break
+ }
+ }
+ }
+ if g.Range(v).IsKnown() {
+ entries = append(entries, v)
+ }
+ }
+ }
+ return entries
+}
+
+func (g *Graph) uses(scc int) map[ssa.Value][]Constraint {
+ m := map[ssa.Value][]Constraint{}
+ for _, e := range g.sccEdges[scc] {
+ if e.control {
+ continue
+ }
+ if v, ok := e.From.Value.(ssa.Value); ok {
+ c := e.To.Value.(Constraint)
+ sink := c.Y()
+ if g.Vertices[sink].SCC == scc {
+ m[v] = append(m[v], c)
+ }
+ }
+ }
+ return m
+}
+
+func (g *Graph) actives(scc int) []ssa.Value {
+ var actives []ssa.Value
+ for _, n := range g.Vertices {
+ if n.SCC != scc {
+ continue
+ }
+ if v, ok := n.Value.(ssa.Value); ok {
+ if _, ok := v.(*ssa.Const); !ok {
+ actives = append(actives, v)
+ }
+ }
+ }
+ return actives
+}
+
+func (g *Graph) AddEdge(from, to interface{}, ctrl bool) {
+ vf, ok := g.Vertices[from]
+ if !ok {
+ vf = &Vertex{Value: from}
+ g.Vertices[from] = vf
+ }
+ vt, ok := g.Vertices[to]
+ if !ok {
+ vt = &Vertex{Value: to}
+ g.Vertices[to] = vt
+ }
+ e := Edge{From: vf, To: vt, control: ctrl}
+ g.Edges = append(g.Edges, e)
+ vf.Succs = append(vf.Succs, e)
+}
+
+type Edge struct {
+ From, To *Vertex
+ control bool
+}
+
+func (e Edge) String() string {
+ return fmt.Sprintf("%s -> %s", VertexString(e.From), VertexString(e.To))
+}
+
+func (g *Graph) FindSCCs() {
+ // use Tarjan to find the SCCs
+
+ index := 1
+ var s []*Vertex
+
+ scc := 0
+ var strongconnect func(v *Vertex)
+ strongconnect = func(v *Vertex) {
+ // set the depth index for v to the smallest unused index
+ v.index = index
+ v.lowlink = index
+ index++
+ s = append(s, v)
+ v.stack = true
+
+ for _, e := range v.Succs {
+ w := e.To
+ if w.index == 0 {
+ // successor w has not yet been visited; recurse on it
+ strongconnect(w)
+ if w.lowlink < v.lowlink {
+ v.lowlink = w.lowlink
+ }
+ } else if w.stack {
+ // successor w is in stack s and hence in the current scc
+ if w.index < v.lowlink {
+ v.lowlink = w.index
+ }
+ }
+ }
+
+ if v.lowlink == v.index {
+ for {
+ w := s[len(s)-1]
+ s = s[:len(s)-1]
+ w.stack = false
+ w.SCC = scc
+ if w == v {
+ break
+ }
+ }
+ scc++
+ }
+ }
+ for _, v := range g.Vertices {
+ if v.index == 0 {
+ strongconnect(v)
+ }
+ }
+
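+ // Tarjan completes SCCs in reverse topological order; renumbering
+ // them here puts g.SCCs in topological order, which Solve relies on
+ // when propagating ranges from one SCC to the next.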
+ g.SCCs = make([][]*Vertex, scc)
+ for _, n := range g.Vertices {
+ n.SCC = scc - n.SCC - 1
+ g.SCCs[n.SCC] = append(g.SCCs[n.SCC], n)
+ }
+}
+
+func invertToken(tok token.Token) token.Token {
+ switch tok {
+ case token.LSS:
+ return token.GEQ
+ case token.GTR:
+ return token.LEQ
+ case token.EQL:
+ return token.NEQ
+ case token.NEQ:
+ return token.EQL
+ case token.GEQ:
+ return token.LSS
+ case token.LEQ:
+ return token.GTR
+ default:
+ panic(fmt.Sprintf("unsupported token %s", tok))
+ }
+}
+
+func flipToken(tok token.Token) token.Token {
+ switch tok {
+ case token.LSS:
+ return token.GTR
+ case token.GTR:
+ return token.LSS
+ case token.EQL:
+ return token.EQL
+ case token.NEQ:
+ return token.NEQ
+ case token.GEQ:
+ return token.LEQ
+ case token.LEQ:
+ return token.GEQ
+ default:
+ panic(fmt.Sprintf("unsupported token %s", tok))
+ }
+}
+
+type CopyConstraint struct {
+ aConstraint
+ X ssa.Value
+}
+
+func (c *CopyConstraint) String() string {
+ return fmt.Sprintf("%s = copy(%s)", c.Y().Name(), c.X.Name())
+}
+
+func (c *CopyConstraint) Eval(g *Graph) Range {
+ return g.Range(c.X)
+}
+
+func (c *CopyConstraint) Operands() []ssa.Value {
+ return []ssa.Value{c.X}
+}
+
+func NewCopyConstraint(x, y ssa.Value) Constraint {
+ return &CopyConstraint{
+ aConstraint: aConstraint{
+ y: y,
+ },
+ X: x,
+ }
+}
diff --git a/vendor/honnef.co/go/tools/stylecheck/lint.go b/vendor/honnef.co/go/tools/stylecheck/lint.go
new file mode 100644
index 000000000..e0c683ce3
--- /dev/null
+++ b/vendor/honnef.co/go/tools/stylecheck/lint.go
@@ -0,0 +1,618 @@
+package stylecheck // import "honnef.co/go/tools/stylecheck"
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+ "honnef.co/go/tools/ssa"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+type Checker struct {
+ CheckGenerated bool
+}
+
+func NewChecker() *Checker {
+ return &Checker{}
+}
+
+func (*Checker) Name() string { return "stylecheck" }
+func (*Checker) Prefix() string { return "ST" }
+func (c *Checker) Init(prog *lint.Program) {}
+
+func (c *Checker) Checks() []lint.Check {
+ return []lint.Check{
+ {ID: "ST1000", FilterGenerated: false, Fn: c.CheckPackageComment},
+ {ID: "ST1001", FilterGenerated: true, Fn: c.CheckDotImports},
+ // {ID: "ST1002", FilterGenerated: true, Fn: c.CheckBlankImports},
+ {ID: "ST1003", FilterGenerated: true, Fn: c.CheckNames},
+ // {ID: "ST1004", FilterGenerated: false, Fn: nil, },
+ {ID: "ST1005", FilterGenerated: false, Fn: c.CheckErrorStrings},
+ {ID: "ST1006", FilterGenerated: false, Fn: c.CheckReceiverNames},
+ // {ID: "ST1007", FilterGenerated: true, Fn: c.CheckIncDec},
+ {ID: "ST1008", FilterGenerated: false, Fn: c.CheckErrorReturn},
+ // {ID: "ST1009", FilterGenerated: false, Fn: c.CheckUnexportedReturn},
+ // {ID: "ST1010", FilterGenerated: false, Fn: c.CheckContextFirstArg},
+ {ID: "ST1011", FilterGenerated: false, Fn: c.CheckTimeNames},
+ {ID: "ST1012", FilterGenerated: false, Fn: c.CheckErrorVarNames},
+ {ID: "ST1013", FilterGenerated: true, Fn: c.CheckHTTPStatusCodes},
+ {ID: "ST1015", FilterGenerated: true, Fn: c.CheckDefaultCaseOrder},
+ {ID: "ST1016", FilterGenerated: false, Fn: c.CheckReceiverNamesIdentical},
+ }
+}
+
+func (c *Checker) CheckPackageComment(j *lint.Job) {
+ // - At least one file in a non-main package should have a package comment
+ //
+ // - The comment should be of the form
+ // "Package x ...". This has a slight potential for false
+ // positives, as multiple files can have package comments, in
+ // which case they get appended. But that doesn't happen a lot in
+ // the real world.
+
+ for _, pkg := range j.Program.InitialPackages {
+ if pkg.Name == "main" {
+ continue
+ }
+ hasDocs := false
+ for _, f := range pkg.Syntax {
+ if IsInTest(j, f) {
+ continue
+ }
+ if f.Doc != nil && len(f.Doc.List) > 0 {
+ hasDocs = true
+ prefix := "Package " + f.Name.Name + " "
+ if !strings.HasPrefix(strings.TrimSpace(f.Doc.Text()), prefix) {
+ j.Errorf(f.Doc, `package comment should be of the form "%s..."`, prefix)
+ }
+ }
+ }
+
+ if !hasDocs {
+ for _, f := range pkg.Syntax {
+ if IsInTest(j, f) {
+ continue
+ }
+ j.Errorf(f, "at least one file in a package should have a package comment")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckDotImports(j *lint.Job) {
+ for _, pkg := range j.Program.InitialPackages {
+ for _, f := range pkg.Syntax {
+ imports:
+ for _, imp := range f.Imports {
+ path := imp.Path.Value
+ path = path[1 : len(path)-1]
+ for _, w := range pkg.Config.DotImportWhitelist {
+ if w == path {
+ continue imports
+ }
+ }
+
+ if imp.Name != nil && imp.Name.Name == "." && !IsInTest(j, f) {
+ j.Errorf(imp, "should not use dot imports")
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckBlankImports(j *lint.Job) {
+ fset := j.Program.Fset()
+ for _, f := range j.Program.Files {
+ if IsInMain(j, f) || IsInTest(j, f) {
+ continue
+ }
+
+ // Collect imports of the form `import _ "foo"`, i.e. with no
+ // parentheses, as their comment will be associated with the
+ // (paren-free) GenDecl, not the import spec itself.
+ //
+ // We don't directly process the GenDecl so that we can
+ // correctly handle the following:
+ //
+ // import _ "foo"
+ // import _ "bar"
+ //
+ // where only the first import should get flagged.
+ skip := map[ast.Spec]bool{}
+ ast.Inspect(f, func(node ast.Node) bool {
+ switch node := node.(type) {
+ case *ast.File:
+ return true
+ case *ast.GenDecl:
+ if node.Tok != token.IMPORT {
+ return false
+ }
+ if node.Lparen == token.NoPos && node.Doc != nil {
+ skip[node.Specs[0]] = true
+ }
+ return false
+ }
+ return false
+ })
+ for i, imp := range f.Imports {
+ pos := fset.Position(imp.Pos())
+
+ if !IsBlank(imp.Name) {
+ continue
+ }
+ // Only flag the first blank import in a group of imports,
+ // or don't flag any of them, if the first one is
+ // commented
+ if i > 0 {
+ prev := f.Imports[i-1]
+ prevPos := fset.Position(prev.Pos())
+ if pos.Line-1 == prevPos.Line && IsBlank(prev.Name) {
+ continue
+ }
+ }
+
+ if imp.Doc == nil && imp.Comment == nil && !skip[imp] {
+ j.Errorf(imp, "a blank import should be only in a main or test package, or have a comment justifying it")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckIncDec(j *lint.Job) {
+ // TODO(dh): this can be noisy for function bodies that look like this:
+ // x += 3
+ // ...
+ // x += 2
+ // ...
+ // x += 1
+ fn := func(node ast.Node) bool {
+ assign, ok := node.(*ast.AssignStmt)
+ if !ok || (assign.Tok != token.ADD_ASSIGN && assign.Tok != token.SUB_ASSIGN) {
+ return true
+ }
+ if (len(assign.Lhs) != 1 || len(assign.Rhs) != 1) ||
+ !IsIntLiteral(assign.Rhs[0], "1") {
+ return true
+ }
+
+ suffix := ""
+ switch assign.Tok {
+ case token.ADD_ASSIGN:
+ suffix = "++"
+ case token.SUB_ASSIGN:
+ suffix = "--"
+ }
+
+ j.Errorf(assign, "should replace %s with %s%s", Render(j, assign), Render(j, assign.Lhs[0]), suffix)
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
+
+func (c *Checker) CheckErrorReturn(j *lint.Job) {
+fnLoop:
+ for _, fn := range j.Program.InitialFunctions {
+ sig := fn.Type().(*types.Signature)
+ rets := sig.Results()
+ if rets == nil || rets.Len() < 2 {
+ continue
+ }
+
+ if rets.At(rets.Len()-1).Type() == types.Universe.Lookup("error").Type() {
+ // Last return type is error. If the function also returns
+ // errors in other positions, that's fine.
+ continue
+ }
+ for i := rets.Len() - 2; i >= 0; i-- {
+ if rets.At(i).Type() == types.Universe.Lookup("error").Type() {
+ j.Errorf(rets.At(i), "error should be returned as the last argument")
+ continue fnLoop
+ }
+ }
+ }
+}
+
+// CheckUnexportedReturn checks that exported functions on exported
+// types do not return unexported types.
+func (c *Checker) CheckUnexportedReturn(j *lint.Job) {
+ for _, fn := range j.Program.InitialFunctions {
+ if fn.Synthetic != "" || fn.Parent() != nil {
+ continue
+ }
+ if !ast.IsExported(fn.Name()) || IsInMain(j, fn) || IsInTest(j, fn) {
+ continue
+ }
+ sig := fn.Type().(*types.Signature)
+ if sig.Recv() != nil && !ast.IsExported(Dereference(sig.Recv().Type()).(*types.Named).Obj().Name()) {
+ continue
+ }
+ res := sig.Results()
+ for i := 0; i < res.Len(); i++ {
+ if named, ok := DereferenceR(res.At(i).Type()).(*types.Named); ok &&
+ !ast.IsExported(named.Obj().Name()) &&
+ named != types.Universe.Lookup("error").Type() {
+ j.Errorf(fn, "should not return unexported type")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckReceiverNames(j *lint.Job) {
+ for _, pkg := range j.Program.InitialPackages {
+ for _, m := range pkg.SSA.Members {
+ if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() {
+ ms := typeutil.IntuitiveMethodSet(T.Type(), nil)
+ for _, sel := range ms {
+ fn := sel.Obj().(*types.Func)
+ recv := fn.Type().(*types.Signature).Recv()
+ if Dereference(recv.Type()) != T.Type() {
+ // skip embedded methods
+ continue
+ }
+ if recv.Name() == "self" || recv.Name() == "this" {
+ j.Errorf(recv, `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`)
+ }
+ if recv.Name() == "_" {
+ j.Errorf(recv, "receiver name should not be an underscore, omit the name if it is unused")
+ }
+ }
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckReceiverNamesIdentical(j *lint.Job) {
+ for _, pkg := range j.Program.InitialPackages {
+ for _, m := range pkg.SSA.Members {
+ names := map[string]int{}
+
+ var firstFn *types.Func
+ if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() {
+ ms := typeutil.IntuitiveMethodSet(T.Type(), nil)
+ for _, sel := range ms {
+ fn := sel.Obj().(*types.Func)
+ recv := fn.Type().(*types.Signature).Recv()
+ if Dereference(recv.Type()) != T.Type() {
+ // skip embedded methods
+ continue
+ }
+ if firstFn == nil {
+ firstFn = fn
+ }
+ if recv.Name() != "" && recv.Name() != "_" {
+ names[recv.Name()]++
+ }
+ }
+ }
+
+ if len(names) > 1 {
+ var seen []string
+ for name, count := range names {
+ seen = append(seen, fmt.Sprintf("%dx %q", count, name))
+ }
+
+ j.Errorf(firstFn, "methods on the same type should have the same receiver name (seen %s)", strings.Join(seen, ", "))
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckContextFirstArg(j *lint.Job) {
+ // TODO(dh): this check doesn't apply to test helpers. Example from the stdlib:
+ // func helperCommandContext(t *testing.T, ctx context.Context, s ...string) (cmd *exec.Cmd) {
+fnLoop:
+ for _, fn := range j.Program.InitialFunctions {
+ if fn.Synthetic != "" || fn.Parent() != nil {
+ continue
+ }
+ params := fn.Signature.Params()
+ if params.Len() < 2 {
+ continue
+ }
+ if types.TypeString(params.At(0).Type(), nil) == "context.Context" {
+ continue
+ }
+ for i := 1; i < params.Len(); i++ {
+ param := params.At(i)
+ if types.TypeString(param.Type(), nil) == "context.Context" {
+ j.Errorf(param, "context.Context should be the first argument of a function")
+ continue fnLoop
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckErrorStrings(j *lint.Job) {
+ fnNames := map[*ssa.Package]map[string]bool{}
+ for _, fn := range j.Program.InitialFunctions {
+ m := fnNames[fn.Package()]
+ if m == nil {
+ m = map[string]bool{}
+ fnNames[fn.Package()] = m
+ }
+ m[fn.Name()] = true
+ }
+
+ for _, fn := range j.Program.InitialFunctions {
+ if IsInTest(j, fn) {
+ // We don't care about malformed error messages in tests;
+ // they're usually for direct human consumption, not part
+ // of an API
+ continue
+ }
+ for _, block := range fn.Blocks {
+ instrLoop:
+ for _, ins := range block.Instrs {
+ call, ok := ins.(*ssa.Call)
+ if !ok {
+ continue
+ }
+ if !IsCallTo(call.Common(), "errors.New") && !IsCallTo(call.Common(), "fmt.Errorf") {
+ continue
+ }
+
+ k, ok := call.Common().Args[0].(*ssa.Const)
+ if !ok {
+ continue
+ }
+
+ s := constant.StringVal(k.Value)
+ if len(s) == 0 {
+ continue
+ }
+ switch s[len(s)-1] {
+ case '.', ':', '!', '\n':
+ j.Errorf(call, "error strings should not end with punctuation or a newline")
+ }
+ idx := strings.IndexByte(s, ' ')
+ if idx == -1 {
+ // single word error message, probably not a real
+ // error but something used in tests or during
+ // debugging
+ continue
+ }
+ word := s[:idx]
+ first, n := utf8.DecodeRuneInString(word)
+ if !unicode.IsUpper(first) {
+ continue
+ }
+ for _, c := range word[n:] {
+ if unicode.IsUpper(c) {
+ // Word is probably an initialism or
+ // multi-word function name
+ continue instrLoop
+ }
+ }
+
+ word = strings.TrimRightFunc(word, func(r rune) bool { return unicode.IsPunct(r) })
+ if fnNames[fn.Package()][word] {
+ // Word is probably the name of a function in this package
+ continue
+ }
+ // First word in error starts with a capital
+ // letter, and the word doesn't contain any other
+ // capitals, making it unlikely to be an
+ // initialism or multi-word function name.
+ //
+ // It could still be a proper noun, though.
+
+ j.Errorf(call, "error strings should not be capitalized")
+ }
+ }
+ }
+}
+
+func (c *Checker) CheckTimeNames(j *lint.Job) {
+ suffixes := []string{
+ "Sec", "Secs", "Seconds",
+ "Msec", "Msecs",
+ "Milli", "Millis", "Milliseconds",
+ "Usec", "Usecs", "Microseconds",
+ "MS", "Ms",
+ }
+ fn := func(T types.Type, names []*ast.Ident) {
+ if !IsType(T, "time.Duration") && !IsType(T, "*time.Duration") {
+ return
+ }
+ for _, name := range names {
+ for _, suffix := range suffixes {
+ if strings.HasSuffix(name.Name, suffix) {
+ j.Errorf(name, "var %s is of type %v; don't use unit-specific suffix %q", name.Name, T, suffix)
+ break
+ }
+ }
+ }
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, func(node ast.Node) bool {
+ switch node := node.(type) {
+ case *ast.ValueSpec:
+ T := TypeOf(j, node.Type)
+ fn(T, node.Names)
+ case *ast.FieldList:
+ for _, field := range node.List {
+ T := TypeOf(j, field.Type)
+ fn(T, field.Names)
+ }
+ }
+ return true
+ })
+ }
+}
+
+func (c *Checker) CheckErrorVarNames(j *lint.Job) {
+ for _, f := range j.Program.Files {
+ for _, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok || gen.Tok != token.VAR {
+ continue
+ }
+ for _, spec := range gen.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Names) != len(spec.Values) {
+ continue
+ }
+
+ for i, name := range spec.Names {
+ val := spec.Values[i]
+ if !IsCallToAST(j, val, "errors.New") && !IsCallToAST(j, val, "fmt.Errorf") {
+ continue
+ }
+
+ prefix := "err"
+ if name.IsExported() {
+ prefix = "Err"
+ }
+ if !strings.HasPrefix(name.Name, prefix) {
+ j.Errorf(name, "error var %s should have name of the form %sFoo", name.Name, prefix)
+ }
+ }
+ }
+ }
+ }
+}
+
+var httpStatusCodes = map[int]string{
+ 100: "StatusContinue",
+ 101: "StatusSwitchingProtocols",
+ 102: "StatusProcessing",
+ 200: "StatusOK",
+ 201: "StatusCreated",
+ 202: "StatusAccepted",
+ 203: "StatusNonAuthoritativeInfo",
+ 204: "StatusNoContent",
+ 205: "StatusResetContent",
+ 206: "StatusPartialContent",
+ 207: "StatusMultiStatus",
+ 208: "StatusAlreadyReported",
+ 226: "StatusIMUsed",
+ 300: "StatusMultipleChoices",
+ 301: "StatusMovedPermanently",
+ 302: "StatusFound",
+ 303: "StatusSeeOther",
+ 304: "StatusNotModified",
+ 305: "StatusUseProxy",
+ 307: "StatusTemporaryRedirect",
+ 308: "StatusPermanentRedirect",
+ 400: "StatusBadRequest",
+ 401: "StatusUnauthorized",
+ 402: "StatusPaymentRequired",
+ 403: "StatusForbidden",
+ 404: "StatusNotFound",
+ 405: "StatusMethodNotAllowed",
+ 406: "StatusNotAcceptable",
+ 407: "StatusProxyAuthRequired",
+ 408: "StatusRequestTimeout",
+ 409: "StatusConflict",
+ 410: "StatusGone",
+ 411: "StatusLengthRequired",
+ 412: "StatusPreconditionFailed",
+ 413: "StatusRequestEntityTooLarge",
+ 414: "StatusRequestURITooLong",
+ 415: "StatusUnsupportedMediaType",
+ 416: "StatusRequestedRangeNotSatisfiable",
+ 417: "StatusExpectationFailed",
+ 418: "StatusTeapot",
+ 422: "StatusUnprocessableEntity",
+ 423: "StatusLocked",
+ 424: "StatusFailedDependency",
+ 426: "StatusUpgradeRequired",
+ 428: "StatusPreconditionRequired",
+ 429: "StatusTooManyRequests",
+ 431: "StatusRequestHeaderFieldsTooLarge",
+ 451: "StatusUnavailableForLegalReasons",
+ 500: "StatusInternalServerError",
+ 501: "StatusNotImplemented",
+ 502: "StatusBadGateway",
+ 503: "StatusServiceUnavailable",
+ 504: "StatusGatewayTimeout",
+ 505: "StatusHTTPVersionNotSupported",
+ 506: "StatusVariantAlsoNegotiates",
+ 507: "StatusInsufficientStorage",
+ 508: "StatusLoopDetected",
+ 510: "StatusNotExtended",
+ 511: "StatusNetworkAuthenticationRequired",
+}
+
+func (c *Checker) CheckHTTPStatusCodes(j *lint.Job) {
+ for _, pkg := range j.Program.InitialPackages {
+ whitelist := map[string]bool{}
+ for _, code := range pkg.Config.HTTPStatusCodeWhitelist {
+ whitelist[code] = true
+ }
+ fn := func(node ast.Node) bool {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+
+ var arg int
+ switch CallNameAST(j, call) {
+ case "net/http.Error":
+ arg = 2
+ case "net/http.Redirect":
+ arg = 3
+ case "net/http.StatusText":
+ arg = 0
+ case "net/http.RedirectHandler":
+ arg = 1
+ default:
+ return true
+ }
+ lit, ok := call.Args[arg].(*ast.BasicLit)
+ if !ok {
+ return true
+ }
+ if whitelist[lit.Value] {
+ return true
+ }
+
+ n, err := strconv.Atoi(lit.Value)
+ if err != nil {
+ return true
+ }
+ s, ok := httpStatusCodes[n]
+ if !ok {
+ return true
+ }
+ j.Errorf(lit, "should use constant http.%s instead of numeric literal %d", s, n)
+ return true
+ }
+ for _, f := range pkg.Syntax {
+ ast.Inspect(f, fn)
+ }
+ }
+}
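+
+// For illustration (hypothetical call): CheckHTTPStatusCodes reports
+//
+//    http.Error(w, "not found", 404)
+//
+// as "should use constant http.StatusNotFound instead of numeric literal
+// 404", unless "404" appears in the package's HTTPStatusCodeWhitelist.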
+
+func (c *Checker) CheckDefaultCaseOrder(j *lint.Job) {
+ fn := func(node ast.Node) bool {
+ stmt, ok := node.(*ast.SwitchStmt)
+ if !ok {
+ return true
+ }
+ list := stmt.Body.List
+ for i, c := range list {
+ if c.(*ast.CaseClause).List == nil && i != 0 && i != len(list)-1 {
+ j.Errorf(c, "default case should be first or last in switch statement")
+ break
+ }
+ }
+ return true
+ }
+ for _, f := range j.Program.Files {
+ ast.Inspect(f, fn)
+ }
+}
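+
+// For illustration (hypothetical switch): CheckDefaultCaseOrder flags
+//
+//    switch x {
+//    case 1:
+//    default: // neither first nor last case
+//    case 2:
+//    }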
diff --git a/vendor/honnef.co/go/tools/stylecheck/names.go b/vendor/honnef.co/go/tools/stylecheck/names.go
new file mode 100644
index 000000000..e855590f6
--- /dev/null
+++ b/vendor/honnef.co/go/tools/stylecheck/names.go
@@ -0,0 +1,263 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+// Copyright (c) 2018 Dominik Honnef. All rights reserved.
+
+package stylecheck
+
+import (
+ "go/ast"
+ "go/token"
+ "strings"
+ "unicode"
+
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+)
+
+// knownNameExceptions is a set of names that are known to be exempt from naming checks.
+// This is usually because they are constrained by having to match names in the
+// standard library.
+var knownNameExceptions = map[string]bool{
+ "LastInsertId": true, // must match database/sql
+ "kWh": true,
+}
+
+func (c *Checker) CheckNames(j *lint.Job) {
+ // A large part of this function is copied from
+ // github.com/golang/lint, Copyright (c) 2013 The Go Authors,
+ // licensed under the BSD 3-clause license.
+
+ allCaps := func(s string) bool {
+ for _, r := range s {
+ if !((r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_') {
+ return false
+ }
+ }
+ return true
+ }
+
+ check := func(id *ast.Ident, thing string, initialisms map[string]bool) {
+ if id.Name == "_" {
+ return
+ }
+ if knownNameExceptions[id.Name] {
+ return
+ }
+
+ // Handle two common styles from other languages that don't belong in Go.
+ if len(id.Name) >= 5 && allCaps(id.Name) && strings.Contains(id.Name, "_") {
+ j.Errorf(id, "should not use ALL_CAPS in Go names; use CamelCase instead")
+ return
+ }
+
+ should := lintName(id.Name, initialisms)
+ if id.Name == should {
+ return
+ }
+
+ if len(id.Name) > 2 && strings.Contains(id.Name[1:len(id.Name)-1], "_") {
+ j.Errorf(id, "should not use underscores in Go names; %s %s should be %s", thing, id.Name, should)
+ return
+ }
+ j.Errorf(id, "%s %s should be %s", thing, id.Name, should)
+ }
+ checkList := func(fl *ast.FieldList, thing string, initialisms map[string]bool) {
+ if fl == nil {
+ return
+ }
+ for _, f := range fl.List {
+ for _, id := range f.Names {
+ check(id, thing, initialisms)
+ }
+ }
+ }
+
+ for _, pkg := range j.Program.InitialPackages {
+ initialisms := make(map[string]bool, len(pkg.Config.Initialisms))
+ for _, word := range pkg.Config.Initialisms {
+ initialisms[word] = true
+ }
+ for _, f := range pkg.Syntax {
+ // Package names need slightly different handling than other names.
+ if !strings.HasSuffix(f.Name.Name, "_test") && strings.Contains(f.Name.Name, "_") {
+ j.Errorf(f, "should not use underscores in package names")
+ }
+ if strings.IndexFunc(f.Name.Name, unicode.IsUpper) != -1 {
+ j.Errorf(f, "should not use MixedCaps in package name; %s should be %s", f.Name.Name, strings.ToLower(f.Name.Name))
+ }
+
+ ast.Inspect(f, func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.AssignStmt:
+ if v.Tok != token.DEFINE {
+ return true
+ }
+ for _, exp := range v.Lhs {
+ if id, ok := exp.(*ast.Ident); ok {
+ check(id, "var", initialisms)
+ }
+ }
+ case *ast.FuncDecl:
+ // Functions with no body are defined elsewhere (in
+ // assembly, or via go:linkname). These are likely to
+ // be something very low level (such as the runtime),
+ // where our rules don't apply.
+ if v.Body == nil {
+ return true
+ }
+
+ if IsInTest(j, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
+ return true
+ }
+
+ thing := "func"
+ if v.Recv != nil {
+ thing = "method"
+ }
+
+ if !isTechnicallyExported(v) {
+ check(v.Name, thing, initialisms)
+ }
+
+ checkList(v.Type.Params, thing+" parameter", initialisms)
+ checkList(v.Type.Results, thing+" result", initialisms)
+ case *ast.GenDecl:
+ if v.Tok == token.IMPORT {
+ return true
+ }
+ var thing string
+ switch v.Tok {
+ case token.CONST:
+ thing = "const"
+ case token.TYPE:
+ thing = "type"
+ case token.VAR:
+ thing = "var"
+ }
+ for _, spec := range v.Specs {
+ switch s := spec.(type) {
+ case *ast.TypeSpec:
+ check(s.Name, thing, initialisms)
+ case *ast.ValueSpec:
+ for _, id := range s.Names {
+ check(id, thing, initialisms)
+ }
+ }
+ }
+ case *ast.InterfaceType:
+ // Do not check interface method names.
+ // They are often constrained by the method names of concrete types.
+ for _, x := range v.Methods.List {
+ ft, ok := x.Type.(*ast.FuncType)
+ if !ok { // might be an embedded interface name
+ continue
+ }
+ checkList(ft.Params, "interface method parameter", initialisms)
+ checkList(ft.Results, "interface method result", initialisms)
+ }
+ case *ast.RangeStmt:
+ if v.Tok == token.ASSIGN {
+ return true
+ }
+ if id, ok := v.Key.(*ast.Ident); ok {
+ check(id, "range var", initialisms)
+ }
+ if id, ok := v.Value.(*ast.Ident); ok {
+ check(id, "range var", initialisms)
+ }
+ case *ast.StructType:
+ for _, f := range v.Fields.List {
+ for _, id := range f.Names {
+ check(id, "struct field", initialisms)
+ }
+ }
+ }
+ return true
+ })
+ }
+ }
+}
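+
+// For illustration (hypothetical identifiers): assuming "URL" and "HTTP"
+// are among the configured initialisms, CheckNames reports
+//
+//    MAX_SIZE  // should not use ALL_CAPS in Go names; use CamelCase instead
+//    parse_url // should be parseURL
+//    HttpPort  // should be HTTPPort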
+
+// lintName returns a different name if it should be different.
+func lintName(name string, initialisms map[string]bool) (should string) {
+ // A large part of this function is copied from
+ // github.com/golang/lint, Copyright (c) 2013 The Go Authors,
+ // licensed under the BSD 3-clause license.
+
+ // Fast path for simple cases: "_" and all lowercase.
+ if name == "_" {
+ return name
+ }
+ if strings.IndexFunc(name, func(r rune) bool { return !unicode.IsLower(r) }) == -1 {
+ return name
+ }
+
+ // Split camelCase at any lower->upper transition, and split on underscores.
+ // Check each word for common initialisms.
+ runes := []rune(name)
+ w, i := 0, 0 // index of start of word, scan
+ for i+1 <= len(runes) {
+ eow := false // whether we hit the end of a word
+ if i+1 == len(runes) {
+ eow = true
+ } else if runes[i+1] == '_' && i+1 != len(runes)-1 {
+ // underscore; shift the remainder forward over any run of underscores
+ eow = true
+ n := 1
+ for i+n+1 < len(runes) && runes[i+n+1] == '_' {
+ n++
+ }
+
+ // Leave at most one underscore if the underscore is between two digits
+ if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
+ n--
+ }
+
+ copy(runes[i+1:], runes[i+n+1:])
+ runes = runes[:len(runes)-n]
+ } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
+ // lower->non-lower
+ eow = true
+ }
+ i++
+ if !eow {
+ continue
+ }
+
+ // [w,i) is a word.
+ word := string(runes[w:i])
+ if u := strings.ToUpper(word); initialisms[u] {
+ // Keep consistent case, which is lowercase only at the start.
+ if w == 0 && unicode.IsLower(runes[w]) {
+ u = strings.ToLower(u)
+ }
+ // All the common initialisms are ASCII,
+ // so we can replace the bytes exactly.
+ // TODO(dh): this won't be true once we allow custom initialisms
+ copy(runes[w:], []rune(u))
+ } else if w > 0 && strings.ToLower(word) == word {
+ // already all lowercase, and not the first word, so uppercase the first character.
+ runes[w] = unicode.ToUpper(runes[w])
+ }
+ w = i
+ }
+ return string(runes)
+}
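+
+// For illustration (hypothetical inputs), lintName behaves roughly like:
+//
+//    lintName("foo_bar", nil)                       == "fooBar"
+//    lintName("fooId", map[string]bool{"ID": true}) == "fooID"
+//    lintName("foo2_3", nil)                        == "foo2_3" // underscore between digits is kept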
+
+func isTechnicallyExported(f *ast.FuncDecl) bool {
+ if f.Recv != nil || f.Doc == nil {
+ return false
+ }
+
+ const export = "//export "
+ const linkname = "//go:linkname "
+ for _, c := range f.Doc.List {
+ if strings.HasPrefix(c.Text, export) && len(c.Text) == len(export)+len(f.Name.Name) && c.Text[len(export):] == f.Name.Name {
+ return true
+ }
+
+ if strings.HasPrefix(c.Text, linkname) {
+ return true
+ }
+ }
+ return false
+}
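+
+// For illustration (hypothetical function): a lowercase name is still
+// treated as exported when a matching cgo export directive precedes it:
+//
+//    //export frobnicate
+//    func frobnicate() {}
+//
+// The same applies to any function carrying a //go:linkname directive.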
diff --git a/vendor/honnef.co/go/tools/unused/implements.go b/vendor/honnef.co/go/tools/unused/implements.go
new file mode 100644
index 000000000..78a545639
--- /dev/null
+++ b/vendor/honnef.co/go/tools/unused/implements.go
@@ -0,0 +1,79 @@
+package unused
+
+import "go/types"
+
+ // lookupMethod returns the index of, and the method with, a matching package and name, or (-1, nil).
+func lookupMethod(T *types.Interface, pkg *types.Package, name string) (int, *types.Func) {
+ if name != "_" {
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ if sameId(m, pkg, name) {
+ return i, m
+ }
+ }
+ }
+ return -1, nil
+}
+
+func sameId(obj types.Object, pkg *types.Package, name string) bool {
+ // spec:
+ // "Two identifiers are different if they are spelled differently,
+ // or if they appear in different packages and are not exported.
+ // Otherwise, they are the same."
+ if name != obj.Name() {
+ return false
+ }
+ // obj.Name == name
+ if obj.Exported() {
+ return true
+ }
+ // not exported, so packages must be the same (pkg == nil for
+ // fields in Universe scope; this can only happen for types
+ // introduced via Eval)
+ if pkg == nil || obj.Pkg() == nil {
+ return pkg == obj.Pkg()
+ }
+ // pkg != nil && obj.pkg != nil
+ return pkg.Path() == obj.Pkg().Path()
+}
+
+func (c *Checker) implements(V types.Type, T *types.Interface) bool {
+ // fast path for common case
+ if T.Empty() {
+ return true
+ }
+
+ if ityp, _ := V.Underlying().(*types.Interface); ityp != nil {
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ _, obj := lookupMethod(ityp, m.Pkg(), m.Name())
+ switch {
+ case obj == nil:
+ return false
+ case !types.Identical(obj.Type(), m.Type()):
+ return false
+ }
+ }
+ return true
+ }
+
+ // A concrete type implements T if it implements all methods of T.
+ ms := c.msCache.MethodSet(V)
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ sel := ms.Lookup(m.Pkg(), m.Name())
+ if sel == nil {
+ return false
+ }
+
+ f, _ := sel.Obj().(*types.Func)
+ if f == nil {
+ return false
+ }
+
+ if !types.Identical(f.Type(), m.Type()) {
+ return false
+ }
+ }
+ return true
+}
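+
+// For illustration (hypothetical types): given
+//
+//    type Reader interface{ Read(p []byte) (int, error) }
+//    type file struct{}
+//    func (file) Read(p []byte) (int, error) { return 0, nil }
+//
+// c.implements reports that file satisfies Reader, using the checker's
+// shared method-set cache rather than recomputing method sets per query.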
diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go
new file mode 100644
index 000000000..b1dbd6f54
--- /dev/null
+++ b/vendor/honnef.co/go/tools/unused/unused.go
@@ -0,0 +1,1100 @@
+package unused // import "honnef.co/go/tools/unused"
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+ "path/filepath"
+ "strings"
+
+ "honnef.co/go/tools/lint"
+ . "honnef.co/go/tools/lint/lintdsl"
+
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+func NewLintChecker(c *Checker) *LintChecker {
+ l := &LintChecker{
+ c: c,
+ }
+ return l
+}
+
+type LintChecker struct {
+ c *Checker
+}
+
+func (*LintChecker) Name() string { return "unused" }
+func (*LintChecker) Prefix() string { return "U" }
+
+func (l *LintChecker) Init(*lint.Program) {}
+func (l *LintChecker) Checks() []lint.Check {
+ return []lint.Check{
+ {ID: "U1000", FilterGenerated: true, Fn: l.Lint},
+ }
+}
+
+func typString(obj types.Object) string {
+ switch obj := obj.(type) {
+ case *types.Func:
+ return "func"
+ case *types.Var:
+ if obj.IsField() {
+ return "field"
+ }
+ return "var"
+ case *types.Const:
+ return "const"
+ case *types.TypeName:
+ return "type"
+ default:
+ // log.Printf("%T", obj)
+ return "identifier"
+ }
+}
+
+func (l *LintChecker) Lint(j *lint.Job) {
+ unused := l.c.Check(j.Program)
+ for _, u := range unused {
+ name := u.Obj.Name()
+ if sig, ok := u.Obj.Type().(*types.Signature); ok && sig.Recv() != nil {
+ switch sig.Recv().Type().(type) {
+ case *types.Named, *types.Pointer:
+ typ := types.TypeString(sig.Recv().Type(), func(*types.Package) string { return "" })
+ if len(typ) > 0 && typ[0] == '*' {
+ name = fmt.Sprintf("(%s).%s", typ, u.Obj.Name())
+ } else if len(typ) > 0 {
+ name = fmt.Sprintf("%s.%s", typ, u.Obj.Name())
+ }
+ }
+ }
+ j.Errorf(u.Obj, "%s %s is unused", typString(u.Obj), name)
+ }
+}
+
+type graph struct {
+ roots []*graphNode
+ nodes map[interface{}]*graphNode
+}
+
+func (g *graph) markUsedBy(obj, usedBy interface{}) {
+ objNode := g.getNode(obj)
+ usedByNode := g.getNode(usedBy)
+ if objNode.obj == usedByNode.obj {
+ return
+ }
+ usedByNode.uses[objNode] = struct{}{}
+}
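+
+// Note that edges run from the user to the object it uses: markNodesUsed
+// (below) starts at the roots and follows node.uses, so exactly the
+// objects transitively reachable from a root end up marked as used.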
+
+var labelCounter = 1
+
+func (g *graph) getNode(obj interface{}) *graphNode {
+ for {
+ if pt, ok := obj.(*types.Pointer); ok {
+ obj = pt.Elem()
+ } else {
+ break
+ }
+ }
+ _, ok := g.nodes[obj]
+ if !ok {
+ g.addObj(obj)
+ }
+
+ return g.nodes[obj]
+}
+
+func (g *graph) addObj(obj interface{}) {
+ if pt, ok := obj.(*types.Pointer); ok {
+ obj = pt.Elem()
+ }
+ node := &graphNode{obj: obj, uses: make(map[*graphNode]struct{}), n: labelCounter}
+ g.nodes[obj] = node
+ labelCounter++
+
+ if obj, ok := obj.(*types.Struct); ok {
+ n := obj.NumFields()
+ for i := 0; i < n; i++ {
+ field := obj.Field(i)
+ g.markUsedBy(obj, field)
+ }
+ }
+}
+
+type graphNode struct {
+ obj interface{}
+ uses map[*graphNode]struct{}
+ used bool
+ quiet bool
+ n int
+}
+
+type CheckMode int
+
+const (
+ CheckConstants CheckMode = 1 << iota
+ CheckFields
+ CheckFunctions
+ CheckTypes
+ CheckVariables
+
+ CheckAll = CheckConstants | CheckFields | CheckFunctions | CheckTypes | CheckVariables
+)
+
+type Unused struct {
+ Obj types.Object
+ Position token.Position
+}
+
+type Checker struct {
+ Mode CheckMode
+ WholeProgram bool
+ ConsiderReflection bool
+ Debug io.Writer
+
+ graph *graph
+
+ msCache typeutil.MethodSetCache
+ prog *lint.Program
+ topmostCache map[*types.Scope]*types.Scope
+ interfaces []*types.Interface
+}
+
+func NewChecker(mode CheckMode) *Checker {
+ return &Checker{
+ Mode: mode,
+ graph: &graph{
+ nodes: make(map[interface{}]*graphNode),
+ },
+ topmostCache: make(map[*types.Scope]*types.Scope),
+ }
+}
+
+func (c *Checker) checkConstants() bool { return (c.Mode & CheckConstants) > 0 }
+func (c *Checker) checkFields() bool { return (c.Mode & CheckFields) > 0 }
+func (c *Checker) checkFunctions() bool { return (c.Mode & CheckFunctions) > 0 }
+func (c *Checker) checkTypes() bool { return (c.Mode & CheckTypes) > 0 }
+func (c *Checker) checkVariables() bool { return (c.Mode & CheckVariables) > 0 }
+
+func (c *Checker) markFields(typ types.Type) {
+ structType, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return
+ }
+ n := structType.NumFields()
+ for i := 0; i < n; i++ {
+ field := structType.Field(i)
+ c.graph.markUsedBy(field, typ)
+ }
+}
+
+type Error struct {
+ Errors map[string][]error
+}
+
+func (e Error) Error() string {
+ return fmt.Sprintf("errors in %d packages", len(e.Errors))
+}
+
+func (c *Checker) Check(prog *lint.Program) []Unused {
+ var unused []Unused
+ c.prog = prog
+ if c.WholeProgram {
+ c.findExportedInterfaces()
+ }
+ for _, pkg := range prog.InitialPackages {
+ c.processDefs(pkg)
+ c.processUses(pkg)
+ c.processTypes(pkg)
+ c.processSelections(pkg)
+ c.processAST(pkg)
+ }
+
+ for _, node := range c.graph.nodes {
+ obj, ok := node.obj.(types.Object)
+ if !ok {
+ continue
+ }
+ typNode, ok := c.graph.nodes[obj.Type()]
+ if !ok {
+ continue
+ }
+ node.uses[typNode] = struct{}{}
+ }
+
+ roots := map[*graphNode]struct{}{}
+ for _, root := range c.graph.roots {
+ roots[root] = struct{}{}
+ }
+ markNodesUsed(roots)
+ c.markNodesQuiet()
+ c.deduplicate()
+
+ if c.Debug != nil {
+ c.printDebugGraph(c.Debug)
+ }
+
+ for _, node := range c.graph.nodes {
+ if node.used || node.quiet {
+ continue
+ }
+ obj, ok := node.obj.(types.Object)
+ if !ok {
+ continue
+ }
+ found := false
+ for _, pkg := range prog.InitialPackages {
+ if pkg.Types == obj.Pkg() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ continue
+ }
+
+ pos := c.prog.Fset().Position(obj.Pos())
+ if pos.Filename == "" || filepath.Base(pos.Filename) == "C" {
+ continue
+ }
+
+ unused = append(unused, Unused{Obj: obj, Position: pos})
+ }
+
+ return unused
+}
+
+// isNoCopyType reports whether a type represents the NoCopy sentinel
+// type. The NoCopy type is a named struct with no fields and exactly
+// one method `func Lock()` that is empty.
+//
+// FIXME(dh): currently we're not checking that the function body is
+// empty.
+func isNoCopyType(typ types.Type) bool {
+ st, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return false
+ }
+ if st.NumFields() != 0 {
+ return false
+ }
+
+ named, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ if named.NumMethods() != 1 {
+ return false
+ }
+ meth := named.Method(0)
+ if meth.Name() != "Lock" {
+ return false
+ }
+ sig := meth.Type().(*types.Signature)
+ if sig.Params().Len() != 0 || sig.Results().Len() != 0 {
+ return false
+ }
+ return true
+}
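+
+// For illustration: the shape recognized here matches sentinels like
+// sync's unexported noCopy type:
+//
+//    type noCopy struct{}
+//    func (*noCopy) Lock() {}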
+
+func (c *Checker) useNoCopyFields(typ types.Type) {
+ if st, ok := typ.Underlying().(*types.Struct); ok {
+ n := st.NumFields()
+ for i := 0; i < n; i++ {
+ field := st.Field(i)
+ if isNoCopyType(field.Type()) {
+ c.graph.markUsedBy(field, typ)
+ c.graph.markUsedBy(field.Type().(*types.Named).Method(0), field.Type())
+ }
+ }
+ }
+}
+
+func (c *Checker) useExportedFields(typ types.Type, by types.Type) bool {
+ any := false
+ if st, ok := typ.Underlying().(*types.Struct); ok {
+ n := st.NumFields()
+ for i := 0; i < n; i++ {
+ field := st.Field(i)
+ if field.Anonymous() {
+ if c.useExportedFields(field.Type(), typ) {
+ c.graph.markUsedBy(field, typ)
+ }
+ }
+ if field.Exported() {
+ c.graph.markUsedBy(field, by)
+ any = true
+ }
+ }
+ }
+ return any
+}
+
+func (c *Checker) useExportedMethods(typ types.Type) {
+ named, ok := typ.(*types.Named)
+ if !ok {
+ return
+ }
+ ms := typeutil.IntuitiveMethodSet(named, &c.msCache)
+ for i := 0; i < len(ms); i++ {
+ meth := ms[i].Obj()
+ if meth.Exported() {
+ c.graph.markUsedBy(meth, typ)
+ }
+ }
+
+ st, ok := named.Underlying().(*types.Struct)
+ if !ok {
+ return
+ }
+ n := st.NumFields()
+ for i := 0; i < n; i++ {
+ field := st.Field(i)
+ if !field.Anonymous() {
+ continue
+ }
+ ms := typeutil.IntuitiveMethodSet(field.Type(), &c.msCache)
+ for j := 0; j < len(ms); j++ {
+ if ms[j].Obj().Exported() {
+ c.graph.markUsedBy(field, typ)
+ break
+ }
+ }
+ }
+}
+
+func (c *Checker) processDefs(pkg *lint.Pkg) {
+ for _, obj := range pkg.TypesInfo.Defs {
+ if obj == nil {
+ continue
+ }
+ c.graph.getNode(obj)
+
+ if obj, ok := obj.(*types.TypeName); ok {
+ c.graph.markUsedBy(obj.Type().Underlying(), obj.Type())
+ c.graph.markUsedBy(obj.Type(), obj) // TODO is this needed?
+ c.graph.markUsedBy(obj, obj.Type())
+
+ // We mark all exported fields as used. For normal
+ // operation, we have to. The user may use these fields
+ // without us knowing.
+ //
+ // TODO(dh): In whole-program mode, however, we mark them
+ // as used because of reflection (such as JSON
+ // marshaling). Strictly speaking, we would only need to
+ // mark them used if an instance of the type was
+ // accessible via an interface value.
+ if !c.WholeProgram || c.ConsiderReflection {
+ c.useExportedFields(obj.Type(), obj.Type())
+ }
+
+ // TODO(dh): Traditionally we have not marked all exported
+ // methods as used, even though they're strictly
+ // speaking accessible through reflection. We've done that
+ // because using methods just via reflection is rare, and
+ // not worth the false negatives. With the new -reflect
+ // flag, however, we should reconsider that choice.
+ if !c.WholeProgram {
+ c.useExportedMethods(obj.Type())
+ }
+ }
+
+ switch obj := obj.(type) {
+ case *types.Var, *types.Const, *types.Func, *types.TypeName:
+ if obj.Exported() {
+ // Exported variables and constants use their types,
+ // even if there's no expression using them in the
+ // checked program.
+ //
+ // Also operates on funcs and type names, but that's
+ // irrelevant/redundant.
+ c.graph.markUsedBy(obj.Type(), obj)
+ }
+ if obj.Name() == "_" {
+ node := c.graph.getNode(obj)
+ node.quiet = true
+ scope := c.topmostScope(pkg.Types.Scope().Innermost(obj.Pos()), pkg.Types)
+ if scope == pkg.Types.Scope() {
+ c.graph.roots = append(c.graph.roots, node)
+ } else {
+ c.graph.markUsedBy(obj, scope)
+ }
+ } else {
+ // Variables declared in functions are used. This is
+ // done so that arguments and return parameters are
+ // always marked as used.
+ if _, ok := obj.(*types.Var); ok {
+ if obj.Parent() != obj.Pkg().Scope() && obj.Parent() != nil {
+ c.graph.markUsedBy(obj, c.topmostScope(obj.Parent(), obj.Pkg()))
+ c.graph.markUsedBy(obj.Type(), obj)
+ }
+ }
+ }
+ }
+
+ if fn, ok := obj.(*types.Func); ok {
+ // A function uses its signature
+ c.graph.markUsedBy(fn, fn.Type())
+
+ // A function uses its return types
+ sig := fn.Type().(*types.Signature)
+ res := sig.Results()
+ n := res.Len()
+ for i := 0; i < n; i++ {
+ c.graph.markUsedBy(res.At(i).Type(), fn)
+ }
+ }
+
+ if obj, ok := obj.(interface {
+ Scope() *types.Scope
+ Pkg() *types.Package
+ }); ok {
+ scope := obj.Scope()
+ c.graph.markUsedBy(c.topmostScope(scope, obj.Pkg()), obj)
+ }
+
+ if c.isRoot(obj) {
+ node := c.graph.getNode(obj)
+ c.graph.roots = append(c.graph.roots, node)
+ if obj, ok := obj.(*types.PkgName); ok {
+ scope := obj.Pkg().Scope()
+ c.graph.markUsedBy(scope, obj)
+ }
+ }
+ }
+}
+
+func (c *Checker) processUses(pkg *lint.Pkg) {
+ for ident, usedObj := range pkg.TypesInfo.Uses {
+ if _, ok := usedObj.(*types.PkgName); ok {
+ continue
+ }
+ pos := ident.Pos()
+ scope := pkg.Types.Scope().Innermost(pos)
+ scope = c.topmostScope(scope, pkg.Types)
+ if scope != pkg.Types.Scope() {
+ c.graph.markUsedBy(usedObj, scope)
+ }
+
+ switch usedObj.(type) {
+ case *types.Var, *types.Const:
+ c.graph.markUsedBy(usedObj.Type(), usedObj)
+ }
+ }
+}
+
+func (c *Checker) findExportedInterfaces() {
+ c.interfaces = []*types.Interface{types.Universe.Lookup("error").Type().(*types.Named).Underlying().(*types.Interface)}
+ var pkgs []*packages.Package
+ if c.WholeProgram {
+ pkgs = append(pkgs, c.prog.AllPackages...)
+ } else {
+ for _, pkg := range c.prog.InitialPackages {
+ pkgs = append(pkgs, pkg.Package)
+ }
+ }
+
+ for _, pkg := range pkgs {
+ for _, tv := range pkg.TypesInfo.Types {
+ iface, ok := tv.Type.(*types.Interface)
+ if !ok {
+ continue
+ }
+ if iface.NumMethods() == 0 {
+ continue
+ }
+ c.interfaces = append(c.interfaces, iface)
+ }
+ }
+}
+
+func (c *Checker) processTypes(pkg *lint.Pkg) {
+ named := map[*types.Named]*types.Pointer{}
+ var interfaces []*types.Interface
+ for _, tv := range pkg.TypesInfo.Types {
+ if typ, ok := tv.Type.(interface {
+ Elem() types.Type
+ }); ok {
+ c.graph.markUsedBy(typ.Elem(), typ)
+ }
+
+ switch obj := tv.Type.(type) {
+ case *types.Named:
+ named[obj] = types.NewPointer(obj)
+ c.graph.markUsedBy(obj, obj.Underlying())
+ c.graph.markUsedBy(obj.Underlying(), obj)
+ case *types.Interface:
+ if obj.NumMethods() > 0 {
+ interfaces = append(interfaces, obj)
+ }
+ case *types.Struct:
+ c.useNoCopyFields(obj)
+ if pkg.Types.Name() != "main" && !c.WholeProgram {
+ c.useExportedFields(obj, obj)
+ }
+ }
+ }
+
+ // Pretend that all types are meant to implement as many
+ // interfaces as possible.
+ //
+ // TODO(dh): For normal operations, that's the best we can do, as
+ // we have no idea what external users will do with our types. In
+ // whole-program mode, we could be more precise, in two ways:
+ // 1) Only consider interfaces if a type has been assigned to one
+ // 2) Use SSA and flow analysis and determine the exact set of
+ // interfaces that is relevant.
+ fn := func(iface *types.Interface) {
+ for i := 0; i < iface.NumEmbeddeds(); i++ {
+ c.graph.markUsedBy(iface.Embedded(i), iface)
+ }
+ namedLoop:
+ for obj, objPtr := range named {
+ switch obj.Underlying().(type) {
+ case *types.Interface:
+ // pointers to interfaces have no methods, only checking non-pointer
+ if !c.implements(obj, iface) {
+ continue namedLoop
+ }
+ default:
+ // pointer receivers include the method set of non-pointer receivers,
+ // only checking pointer
+ if !c.implements(objPtr, iface) {
+ continue namedLoop
+ }
+ }
+
+ ifaceMethods := make(map[string]struct{}, iface.NumMethods())
+ n := iface.NumMethods()
+ for i := 0; i < n; i++ {
+ meth := iface.Method(i)
+ ifaceMethods[meth.Name()] = struct{}{}
+ }
+ for _, obj := range []types.Type{obj, objPtr} {
+ ms := c.msCache.MethodSet(obj)
+ n := ms.Len()
+ for i := 0; i < n; i++ {
+ sel := ms.At(i)
+ meth := sel.Obj().(*types.Func)
+ _, found := ifaceMethods[meth.Name()]
+ if !found {
+ continue
+ }
+ c.graph.markUsedBy(meth.Type().(*types.Signature).Recv().Type(), obj) // embedded receiver
+ if len(sel.Index()) > 1 {
+ f := getField(obj, sel.Index()[0])
+ c.graph.markUsedBy(f, obj) // embedded receiver
+ }
+ c.graph.markUsedBy(meth, obj)
+ }
+ }
+ }
+ }
+
+ for _, iface := range interfaces {
+ fn(iface)
+ }
+ for _, iface := range c.interfaces {
+ fn(iface)
+ }
+}
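+
+// For illustration (hypothetical): if any reachable interface declares
+// Close() error, then every named type with a compatible Close method
+// keeps that method (and any embedded fields on the selection path)
+// marked as used, even if the type is never assigned to the interface.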
+
+func (c *Checker) processSelections(pkg *lint.Pkg) {
+ fn := func(expr *ast.SelectorExpr, sel *types.Selection, offset int) {
+ scope := pkg.Types.Scope().Innermost(expr.Pos())
+ c.graph.markUsedBy(sel, c.topmostScope(scope, pkg.Types))
+ c.graph.markUsedBy(sel.Obj(), sel)
+ if len(sel.Index()) > 1 {
+ typ := sel.Recv()
+ indices := sel.Index()
+ for _, idx := range indices[:len(indices)-offset] {
+ obj := getField(typ, idx)
+ typ = obj.Type()
+ c.graph.markUsedBy(obj, sel)
+ }
+ }
+ }
+
+ for expr, sel := range pkg.TypesInfo.Selections {
+ switch sel.Kind() {
+ case types.FieldVal:
+ fn(expr, sel, 0)
+ case types.MethodVal:
+ fn(expr, sel, 1)
+ }
+ }
+}
+
+func dereferenceType(typ types.Type) types.Type {
+ if typ, ok := typ.(*types.Pointer); ok {
+ return typ.Elem()
+ }
+ return typ
+}
+
+// processConversion marks fields as used if they're part of a type conversion.
+func (c *Checker) processConversion(pkg *lint.Pkg, node ast.Node) {
+ if node, ok := node.(*ast.CallExpr); ok {
+ callTyp := pkg.TypesInfo.TypeOf(node.Fun)
+ var typDst *types.Struct
+ var ok bool
+ switch typ := callTyp.(type) {
+ case *types.Named:
+ typDst, ok = typ.Underlying().(*types.Struct)
+ case *types.Pointer:
+ typDst, ok = typ.Elem().Underlying().(*types.Struct)
+ default:
+ return
+ }
+ if !ok {
+ return
+ }
+
+ if typ, ok := pkg.TypesInfo.TypeOf(node.Args[0]).(*types.Basic); ok && typ.Kind() == types.UnsafePointer {
+ // This is an unsafe conversion. Assume that all the
+ // fields are relevant (they are, because of memory
+ // layout)
+ n := typDst.NumFields()
+ for i := 0; i < n; i++ {
+ c.graph.markUsedBy(typDst.Field(i), typDst)
+ }
+ return
+ }
+
+ typSrc, ok := dereferenceType(pkg.TypesInfo.TypeOf(node.Args[0])).Underlying().(*types.Struct)
+ if !ok {
+ return
+ }
+
+ // When we convert from type t1 to t2, where t1 and t2 are
+ // structs, all fields are relevant, as otherwise the
+ // conversion would fail.
+ //
+ // We mark t2's fields as used by t1's fields, and vice
+ // versa. That way, if no code actually refers to a field
+ // in either type, it's still correctly marked as unused.
+ // If a field is used in either struct, it's implicitly
+ // relevant in the other one, too.
+ //
+ // It works in a similar way for conversions between types
+ // of two packages, except that the extra information in the
+ // graph is redundant unless we're in whole program mode.
+ n := typDst.NumFields()
+ for i := 0; i < n; i++ {
+ fDst := typDst.Field(i)
+ fSrc := typSrc.Field(i)
+ c.graph.markUsedBy(fDst, fSrc)
+ c.graph.markUsedBy(fSrc, fDst)
+ }
+ }
+}
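+
+// For illustration (hypothetical types): in
+//
+//    type a struct{ f int }
+//    type b struct{ f int }
+//    _ = b(a{f: 1})
+//
+// the conversion links a.f and b.f in both directions, so a use of either
+// field keeps both alive.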
+
+// processCompositeLiteral marks fields as used if the struct is used
+// in a composite literal.
+func (c *Checker) processCompositeLiteral(pkg *lint.Pkg, node ast.Node) {
+ // XXX how does this actually work? wouldn't it match t{}?
+ if node, ok := node.(*ast.CompositeLit); ok {
+ typ := pkg.TypesInfo.TypeOf(node)
+ if _, ok := typ.(*types.Named); ok {
+ typ = typ.Underlying()
+ }
+ if _, ok := typ.(*types.Struct); !ok {
+ return
+ }
+
+ if isBasicStruct(node.Elts) {
+ c.markFields(typ)
+ }
+ }
+}
+
+// processCgoExported marks functions as used if they're being
+// exported to cgo.
+func (c *Checker) processCgoExported(pkg *lint.Pkg, node ast.Node) {
+ if node, ok := node.(*ast.FuncDecl); ok {
+ if node.Doc == nil {
+ return
+ }
+ for _, cmt := range node.Doc.List {
+ if !strings.HasPrefix(cmt.Text, "//go:cgo_export_") {
+ return
+ }
+ obj := pkg.TypesInfo.ObjectOf(node.Name)
+ c.graph.roots = append(c.graph.roots, c.graph.getNode(obj))
+ }
+ }
+}
+
+func (c *Checker) processVariableDeclaration(pkg *lint.Pkg, node ast.Node) {
+ if decl, ok := node.(*ast.GenDecl); ok {
+ for _, spec := range decl.Specs {
+ spec, ok := spec.(*ast.ValueSpec)
+ if !ok {
+ continue
+ }
+ for i, name := range spec.Names {
+ if i >= len(spec.Values) {
+ break
+ }
+ value := spec.Values[i]
+ fn := func(node ast.Node) bool {
+ if node3, ok := node.(*ast.Ident); ok {
+ obj := pkg.TypesInfo.ObjectOf(node3)
+ if _, ok := obj.(*types.PkgName); ok {
+ return true
+ }
+ c.graph.markUsedBy(obj, pkg.TypesInfo.ObjectOf(name))
+ }
+ return true
+ }
+ ast.Inspect(value, fn)
+ }
+ }
+ }
+}
+
+func (c *Checker) processArrayConstants(pkg *lint.Pkg, node ast.Node) {
+ if decl, ok := node.(*ast.ArrayType); ok {
+ ident, ok := decl.Len.(*ast.Ident)
+ if !ok {
+ return
+ }
+ c.graph.markUsedBy(pkg.TypesInfo.ObjectOf(ident), pkg.TypesInfo.TypeOf(decl))
+ }
+}
+
+func (c *Checker) processKnownReflectMethodCallers(pkg *lint.Pkg, node ast.Node) {
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return
+ }
+ if !IsType(pkg.TypesInfo.TypeOf(sel.X), "*net/rpc.Server") {
+ x, ok := sel.X.(*ast.Ident)
+ if !ok {
+ return
+ }
+ pkgname, ok := pkg.TypesInfo.ObjectOf(x).(*types.PkgName)
+ if !ok {
+ return
+ }
+ if pkgname.Imported().Path() != "net/rpc" {
+ return
+ }
+ }
+
+ var arg ast.Expr
+ switch sel.Sel.Name {
+ case "Register":
+ if len(call.Args) != 1 {
+ return
+ }
+ arg = call.Args[0]
+ case "RegisterName":
+ if len(call.Args) != 2 {
+ return
+ }
+ arg = call.Args[1]
+ default:
+ // Neither Register nor RegisterName: nothing is registered for
+ // reflective calls, and arg would remain nil below.
+ return
+ }
+ typ := pkg.TypesInfo.TypeOf(arg)
+ ms := types.NewMethodSet(typ)
+ for i := 0; i < ms.Len(); i++ {
+ c.graph.markUsedBy(ms.At(i).Obj(), typ)
+ }
+}
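+
+// For illustration (hypothetical receiver type): after
+//
+//    srv := rpc.NewServer()
+//    srv.Register(&Arith{})
+//
+// every method in (*Arith)'s method set is marked as used, since net/rpc
+// will invoke them via reflection.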
+
+func (c *Checker) processAST(pkg *lint.Pkg) {
+ fn := func(node ast.Node) bool {
+ c.processConversion(pkg, node)
+ c.processKnownReflectMethodCallers(pkg, node)
+ c.processCompositeLiteral(pkg, node)
+ c.processCgoExported(pkg, node)
+ c.processVariableDeclaration(pkg, node)
+ c.processArrayConstants(pkg, node)
+ return true
+ }
+ for _, file := range pkg.Syntax {
+ ast.Inspect(file, fn)
+ }
+}
+
+func isBasicStruct(elts []ast.Expr) bool {
+ for _, elt := range elts {
+ if _, ok := elt.(*ast.KeyValueExpr); !ok {
+ return true
+ }
+ }
+ return false
+}
+
+func isPkgScope(obj types.Object) bool {
+ return obj.Parent() == obj.Pkg().Scope()
+}
+
+func isMain(obj types.Object) bool {
+ if obj.Pkg().Name() != "main" {
+ return false
+ }
+ if obj.Name() != "main" {
+ return false
+ }
+ if !isPkgScope(obj) {
+ return false
+ }
+ if !isFunction(obj) {
+ return false
+ }
+ if isMethod(obj) {
+ return false
+ }
+ return true
+}
+
+func isFunction(obj types.Object) bool {
+ _, ok := obj.(*types.Func)
+ return ok
+}
+
+func isMethod(obj types.Object) bool {
+ if !isFunction(obj) {
+ return false
+ }
+ return obj.(*types.Func).Type().(*types.Signature).Recv() != nil
+}
+
+func isVariable(obj types.Object) bool {
+ _, ok := obj.(*types.Var)
+ return ok
+}
+
+func isConstant(obj types.Object) bool {
+ _, ok := obj.(*types.Const)
+ return ok
+}
+
+func isType(obj types.Object) bool {
+ _, ok := obj.(*types.TypeName)
+ return ok
+}
+
+func isField(obj types.Object) bool {
+ if obj, ok := obj.(*types.Var); ok && obj.IsField() {
+ return true
+ }
+ return false
+}
+
+func (c *Checker) checkFlags(v interface{}) bool {
+ obj, ok := v.(types.Object)
+ if !ok {
+ return false
+ }
+ if isFunction(obj) && !c.checkFunctions() {
+ return false
+ }
+ if isVariable(obj) && !c.checkVariables() {
+ return false
+ }
+ if isConstant(obj) && !c.checkConstants() {
+ return false
+ }
+ if isType(obj) && !c.checkTypes() {
+ return false
+ }
+ if isField(obj) && !c.checkFields() {
+ return false
+ }
+ return true
+}
+
+func (c *Checker) isRoot(obj types.Object) bool {
+ // - in local mode, main, init, tests, and non-test, non-main exported identifiers are roots
+ // - in global mode (not yet implemented), main, init and tests are roots
+
+ if _, ok := obj.(*types.PkgName); ok {
+ return true
+ }
+
+ if isMain(obj) || (isFunction(obj) && !isMethod(obj) && obj.Name() == "init") {
+ return true
+ }
+ if obj.Exported() {
+ f := c.prog.Fset().Position(obj.Pos()).Filename
+ if strings.HasSuffix(f, "_test.go") {
+ return strings.HasPrefix(obj.Name(), "Test") ||
+ strings.HasPrefix(obj.Name(), "Benchmark") ||
+ strings.HasPrefix(obj.Name(), "Example")
+ }
+
+ // Package-level identifiers are used, except in package main
+ if isPkgScope(obj) && obj.Pkg().Name() != "main" && !c.WholeProgram {
+ return true
+ }
+ }
+ return false
+}
+
+func markNodesUsed(nodes map[*graphNode]struct{}) {
+ for node := range nodes {
+ wasUsed := node.used
+ node.used = true
+ if !wasUsed {
+ markNodesUsed(node.uses)
+ }
+ }
+}
+
+// deduplicate merges objects based on their positions. This is done
+// to work around packages existing multiple times in go/packages.
+func (c *Checker) deduplicate() {
+ m := map[token.Position]struct{ used, quiet bool }{}
+ for _, node := range c.graph.nodes {
+ obj, ok := node.obj.(types.Object)
+ if !ok {
+ continue
+ }
+ pos := c.prog.Fset().Position(obj.Pos())
+ m[pos] = struct{ used, quiet bool }{
+ m[pos].used || node.used,
+ m[pos].quiet || node.quiet,
+ }
+ }
+
+ for _, node := range c.graph.nodes {
+ obj, ok := node.obj.(types.Object)
+ if !ok {
+ continue
+ }
+ pos := c.prog.Fset().Position(obj.Pos())
+ node.used = m[pos].used
+ node.quiet = m[pos].quiet
+ }
+}
+
+func (c *Checker) markNodesQuiet() {
+ for _, node := range c.graph.nodes {
+ if node.used {
+ continue
+ }
+ if obj, ok := node.obj.(types.Object); ok && !c.checkFlags(obj) {
+ node.quiet = true
+ continue
+ }
+ c.markObjQuiet(node.obj)
+ }
+}
+
+func (c *Checker) markObjQuiet(obj interface{}) {
+ switch obj := obj.(type) {
+ case *types.Named:
+ n := obj.NumMethods()
+ for i := 0; i < n; i++ {
+ meth := obj.Method(i)
+ node := c.graph.getNode(meth)
+ node.quiet = true
+ c.markObjQuiet(meth.Scope())
+ }
+ case *types.Struct:
+ n := obj.NumFields()
+ for i := 0; i < n; i++ {
+ field := obj.Field(i)
+ c.graph.nodes[field].quiet = true
+ }
+ case *types.Func:
+ c.markObjQuiet(obj.Scope())
+ case *types.Scope:
+ if obj == nil {
+ return
+ }
+ if obj.Parent() == types.Universe {
+ return
+ }
+ for _, name := range obj.Names() {
+ v := obj.Lookup(name)
+ if n, ok := c.graph.nodes[v]; ok {
+ n.quiet = true
+ }
+ }
+ n := obj.NumChildren()
+ for i := 0; i < n; i++ {
+ c.markObjQuiet(obj.Child(i))
+ }
+ }
+}
+
+func getField(typ types.Type, idx int) *types.Var {
+ switch obj := typ.(type) {
+ case *types.Pointer:
+ return getField(obj.Elem(), idx)
+ case *types.Named:
+ switch v := obj.Underlying().(type) {
+ case *types.Struct:
+ return v.Field(idx)
+ case *types.Pointer:
+ return getField(v.Elem(), idx)
+ default:
+ panic(fmt.Sprintf("unexpected type %s", typ))
+ }
+ case *types.Struct:
+ return obj.Field(idx)
+ }
+ return nil
+}
+
+func (c *Checker) topmostScope(scope *types.Scope, pkg *types.Package) (ret *types.Scope) {
+ if top, ok := c.topmostCache[scope]; ok {
+ return top
+ }
+ defer func() {
+ c.topmostCache[scope] = ret
+ }()
+ if scope == pkg.Scope() {
+ return scope
+ }
+ if scope.Parent().Parent() == pkg.Scope() {
+ return scope
+ }
+ return c.topmostScope(scope.Parent(), pkg)
+}
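+
+// For illustration: for a variable declared in a nested block, topmostScope
+// climbs to the enclosing function scope (the scope whose grandparent is
+// the package scope); results are memoized in topmostCache.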
+
+func (c *Checker) printDebugGraph(w io.Writer) {
+ fmt.Fprintln(w, "digraph {")
+ fmt.Fprintln(w, "n0 [label = roots]")
+ for _, node := range c.graph.nodes {
+ s := fmt.Sprintf("%s (%T)", node.obj, node.obj)
+ s = strings.Replace(s, "\n", "", -1)
+ s = strings.Replace(s, `"`, "", -1)
+ fmt.Fprintf(w, `n%d [label = %q]`, node.n, s)
+ color := "black"
+ switch {
+ case node.used:
+ color = "green"
+ case node.quiet:
+ color = "orange"
+ case !c.checkFlags(node.obj):
+ color = "purple"
+ default:
+ color = "red"
+ }
+ fmt.Fprintf(w, "[color = %s]", color)
+ fmt.Fprintln(w)
+ }
+
+ for _, node1 := range c.graph.nodes {
+ for node2 := range node1.uses {
+ fmt.Fprintf(w, "n%d -> n%d\n", node1.n, node2.n)
+ }
+ }
+ for _, root := range c.graph.roots {
+ fmt.Fprintf(w, "n0 -> n%d\n", root.n)
+ }
+ fmt.Fprintln(w, "}")
+}
diff --git a/vendor/honnef.co/go/tools/version/version.go b/vendor/honnef.co/go/tools/version/version.go
new file mode 100644
index 000000000..511fb0bda
--- /dev/null
+++ b/vendor/honnef.co/go/tools/version/version.go
@@ -0,0 +1,17 @@
+package version
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+const Version = "2019.1.1"
+
+func Print() {
+ if Version == "devel" {
+ fmt.Printf("%s (no version)\n", filepath.Base(os.Args[0]))
+ } else {
+ fmt.Printf("%s %s\n", filepath.Base(os.Args[0]), Version)
+ }
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 019f4b6b2..6af46dcb6 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -3,6 +3,12 @@
"ignore": "test",
"package": [
{
+ "checksumSHA1": "sn0Cf33yl52eVs1B4wNSTetnUTI=",
+ "path": "github.com/Bowery/prompt",
+ "revision": "8a1d5376df1cbec3468f2138fecc44dd8b48e342",
+ "revisionTime": "2018-08-17T13:42:58Z"
+ },
+ {
"checksumSHA1": "Pc2ORQp+VY3Un/dkh4QwLC7R6lE=",
"path": "github.com/BurntSushi/toml",
"revision": "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005",
@@ -41,6 +47,12 @@
"versionExact": "v1.1.1"
},
{
+ "checksumSHA1": "ULnk7ggN82JFO0ZdBCmSsQH3Vh8=",
+ "path": "github.com/dchest/safefile",
+ "revision": "855e8d98f1852d48dde521e0522408d1fe7e836a",
+ "revisionTime": "2015-10-22T10:31:44Z"
+ },
+ {
"checksumSHA1": "hL8smC/vjdkuE1twM8TKpuTiOmw=",
"path": "github.com/getsentry/raven-go",
"revision": "3033899c76deb3fb6570d9c4074d00443aeab88f",
@@ -97,6 +109,12 @@
"revisionTime": "2019-02-05T22:20:52Z"
},
{
+ "checksumSHA1": "c0Z2sKLKi+IKRVzq0IzNvqvfCrQ=",
+ "path": "github.com/google/shlex",
+ "revision": "c34317bd91bf98fab745d77b03933cf8769299fe",
+ "revisionTime": "2018-11-06T13:46:48Z"
+ },
+ {
"checksumSHA1": "ZRhE1BjkcaROD1NZMZwICtPemTs=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware",
"revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
@@ -159,6 +177,84 @@
"versionExact": "v1.2.0"
},
{
+ "checksumSHA1": "eHdL3LdmUx5+M6RDV1LLQuqCwYg=",
+ "path": "github.com/kardianos/govendor",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "EDbwTD6uQOx+c13If7Jxo9hFbs4=",
+ "path": "github.com/kardianos/govendor/cliprompt",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "bkOWuhW8enzlEtJMCxYj1fFBVoY=",
+ "path": "github.com/kardianos/govendor/context",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "zNEnQheihh/BKnZl16KiIcG86F8=",
+ "path": "github.com/kardianos/govendor/help",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "SOMQHVCs36zWP0cZAEtSEU8H2bQ=",
+ "path": "github.com/kardianos/govendor/internal/pathos",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "Bl6I6yMiK60dzOAfyO7As6MSPIk=",
+ "path": "github.com/kardianos/govendor/internal/vfilepath",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "2Vg+J79rEhmtnprErQ7fTZdneIk=",
+ "path": "github.com/kardianos/govendor/internal/vos",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "3h5OMPOj5oIkObUzLDJjE9DnFPs=",
+ "path": "github.com/kardianos/govendor/migrate",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "wL4SaLS/HTn32Gmq8kpYRr/cn68=",
+ "path": "github.com/kardianos/govendor/pkgspec",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "B+3eGc34fJuF5dHsOSCa9dSdHQY=",
+ "path": "github.com/kardianos/govendor/prompt",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "QjnTESUuxrrcMLwutBkaxF4pLBw=",
+ "path": "github.com/kardianos/govendor/run",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "A9cS/HSXJjmExS3T3bpXA/iO6ok=",
+ "path": "github.com/kardianos/govendor/vcs",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
+ "checksumSHA1": "qAEXdxLQRkP1i8KhgyndRsVUzrw=",
+ "path": "github.com/kardianos/govendor/vendorfile",
+ "revision": "e07957427183a9892f35634ffc9ea48dedc6bbb4",
+ "revisionTime": "2018-11-14T07:15:11Z"
+ },
+ {
"checksumSHA1": "4szkOYHAJsnVGtjdfE41n8x+iuE=",
"path": "github.com/kelseyhightower/envconfig",
"revision": "f611eb38b3875cc3bd991ca91c51d06446afa14c",
@@ -491,6 +587,12 @@
"revisionTime": "2018-11-02T16:30:54Z"
},
{
+ "checksumSHA1": "ih4CCYD19rjjF9fjid+l7w/+cIg=",
+ "path": "github.com/wadey/gocovmerge",
+ "revision": "b5bfa59ec0adc420475f97f89b58045c721d761c",
+ "revisionTime": "2016-03-31T18:18:00Z"
+ },
+ {
"checksumSHA1": "GfvhruEiAgylKnkURvEDwtA2tK8=",
"path": "gitlab.com/gitlab-org/gitaly-proto/go/gitalypb",
"revision": "290be0f5b5f18fba1d5438464ac6308e20c14149",
@@ -553,6 +655,18 @@
"revisionTime": "2018-10-23T16:52:47Z"
},
{
+ "checksumSHA1": "RGFEeJVsaq+s5qGwd0MzXNMMFSU=",
+ "path": "golang.org/x/lint",
+ "revision": "959b441ac422379a43da2230f62be024250818b0",
+ "revisionTime": "2019-04-09T20:23:51Z"
+ },
+ {
+ "checksumSHA1": "543dY7RKJCSNiqYONDu1O2aD8s8=",
+ "path": "golang.org/x/lint/golint",
+ "revision": "959b441ac422379a43da2230f62be024250818b0",
+ "revisionTime": "2019-04-09T20:23:51Z"
+ },
+ {
"checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=",
"path": "golang.org/x/net/context",
"revision": "c44066c5c816ec500d459a2a324a753f78531ae0",
@@ -637,6 +751,114 @@
"revisionTime": "2018-10-29T18:00:05Z"
},
{
+ "checksumSHA1": "1AaBS00YoGPd0fAjH1soKkj12J0=",
+ "path": "golang.org/x/tools/cmd/goimports",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "w/OExaIcqB7E5IG5097RPHtOlzc=",
+ "path": "golang.org/x/tools/cover",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "LHHkpIoYHpQiBc7y0JaTmf3VbVQ=",
+ "path": "golang.org/x/tools/go/ast/astutil",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "oLxfhYYiQoXoC63nQfZ6JGYc33o=",
+ "path": "golang.org/x/tools/go/ast/inspector",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "/UCuUGzk9/QJTUcTkYDRdCas3Tc=",
+ "path": "golang.org/x/tools/go/buildutil",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "6lzLm5eIswP0fziPs74p+lb0QUo=",
+ "path": "golang.org/x/tools/go/gcexportdata",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "VQ0sTJYKkzzAvWA6zvaKD5lar0o=",
+ "path": "golang.org/x/tools/go/internal/cgo",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "cEqToMNtE7HuQ2rmaDHxLjz4Fg0=",
+ "path": "golang.org/x/tools/go/internal/gcimporter",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "0eGV7/fQ1iQbU5tB7MewaXJdEjA=",
+ "path": "golang.org/x/tools/go/internal/packagesdriver",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "wr2oXQWRoxvsHoc349wVfTzpXfM=",
+ "path": "golang.org/x/tools/go/loader",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "ni65bJo/oo6Cj008r9WBAlSWUX0=",
+ "path": "golang.org/x/tools/go/packages",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "oT4r2iJVt4b9X0hqK2pN8qzAwoU=",
+ "path": "golang.org/x/tools/go/types/typeutil",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "4YVF0nbGWFYLyh75C7dD1Dgpyug=",
+ "path": "golang.org/x/tools/go/vcs",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "gSRO+5x4H8KnYwmh3PvHAFfGQZA=",
+ "path": "golang.org/x/tools/imports",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "dC7MpahR/U0T8CRcTypgrwO1Lg0=",
+ "path": "golang.org/x/tools/internal/fastwalk",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "Irm7ntYQFWovUjCUwExQu7Gbhi4=",
+ "path": "golang.org/x/tools/internal/gopathwalk",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "jOeksgcnpvOhYXbU84r8/yvE+rI=",
+ "path": "golang.org/x/tools/internal/module",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
+ "checksumSHA1": "Lcm5y2VtSqdVuOjhBFEeAHNI+ug=",
+ "path": "golang.org/x/tools/internal/semver",
+ "revision": "2538eef75904eff384a2551359968e40c207d9d2",
+ "revisionTime": "2019-03-30T20:18:22Z"
+ },
+ {
"checksumSHA1": "YNqziavfZHurG6wrwR5Uf9SnI4s=",
"path": "google.golang.org/genproto/googleapis/api/annotations",
"revision": "bd91e49a0898e27abb88c339b432fa53d7497ac0",
@@ -951,6 +1173,144 @@
"path": "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer",
"revision": "5a8139286014811d2bccd5ef67ec4844ea0998a7",
"revisionTime": "2018-12-07T15:03:43Z"
+ },
+ {
+ "checksumSHA1": "QqDq2x8XOU7IoOR98Cx1eiV5QY8=",
+ "path": "gopkg.in/yaml.v2",
+ "revision": "51d6538a90f86fe93ac480b35f37b2be17fef232",
+ "revisionTime": "2018-11-15T11:05:04Z"
+ },
+ {
+ "checksumSHA1": "oGRlLuA0CvzLQ3utUM9r18HVxg4=",
+ "path": "honnef.co/go/tools/arg",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "FG8LnaSRTHBnrPHwa0zW4zX9K7M=",
+ "path": "honnef.co/go/tools/callgraph",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "fR7Q7BVwKHUEsUNGn6Q2zygAvTU=",
+ "path": "honnef.co/go/tools/callgraph/static",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "9GNh6/rWaXKcRs9Agbbctdvhldo=",
+ "path": "honnef.co/go/tools/cmd/staticcheck",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "G7eJP+BGE+I1j7xiMoqgGD4hhaE=",
+ "path": "honnef.co/go/tools/config",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "smQXvyCgi0lsTRk7edZNx/z44rc=",
+ "path": "honnef.co/go/tools/deprecated",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "3yOlSVvgQlavY+xmmiJNXGNthaQ=",
+ "path": "honnef.co/go/tools/functions",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "qKxjxPLB3eHR68rR6E3JkiqcVvA=",
+ "path": "honnef.co/go/tools/go/types/typeutil",
+ "revision": "0e5f7e4d37794123e77f619a0d455574585124f0",
+ "revisionTime": "2019-04-10T22:08:42Z"
+ },
+ {
+ "checksumSHA1": "4+4lxKUu1GCGzacYEeKTM1DBae4=",
+ "path": "honnef.co/go/tools/internal/sharedcheck",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "NFDvAr8IE+1sWU7gJy7yQBdvQeU=",
+ "path": "honnef.co/go/tools/lint",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "V+ywLC+UoTh/X1sMp9pOUXUt0lc=",
+ "path": "honnef.co/go/tools/lint/lintdsl",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "zCSL1aqBSvmgO2MNarqatRx1eA0=",
+ "path": "honnef.co/go/tools/lint/lintutil",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "LBjb8HSNfwCGXZLNqDikfmeN7+U=",
+ "path": "honnef.co/go/tools/lint/lintutil/format",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "OW2q8+BJBAf1aoRNg1N8jmrEpas=",
+ "path": "honnef.co/go/tools/simple",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "t1kAVlyBNAMyLooYDd6IB6QSR2g=",
+ "path": "honnef.co/go/tools/ssa",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "3dS5xh04DHB+HN3TUJ9O9XlZTbo=",
+ "path": "honnef.co/go/tools/ssa/ssautil",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "v8jS9vuH/Psf4M7AtBQHcgV01wg=",
+ "path": "honnef.co/go/tools/ssautil",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "xKrbQZtIIsDI8D0HeY6x5/IGfms=",
+ "path": "honnef.co/go/tools/staticcheck",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "nHc585tyewC/dLuNeTg4sDHbyKU=",
+ "path": "honnef.co/go/tools/staticcheck/vrp",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "bSrR02M8KFnrqHlk60WTy5XXYNs=",
+ "path": "honnef.co/go/tools/stylecheck",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "3Mg4N7LDh4jL2EwhTgNlH5nGjZE=",
+ "path": "honnef.co/go/tools/unused",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
+ },
+ {
+ "checksumSHA1": "znlVTn4nc5RPUOrP8E7d8k6cSkU=",
+ "path": "honnef.co/go/tools/version",
+ "revision": "95959eaf5e3c41c66151dcfd91779616b84077a8",
+ "revisionTime": "2019-03-15T11:34:50Z"
}
],
"rootPath": "gitlab.com/gitlab-org/gitaly"