summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNaveen Sriram <naveen.sriram@huawei.com>2019-05-03 19:02:47 +0530
committerNaveen Sriram <naveen.sriram@huawei.com>2019-05-03 20:26:43 +0530
commitac2d9bf9045a56fc9630e5026b68fccf68fbbb96 (patch)
treea64f593ff6071ba44d9075cecf51b9642a3a70c6
parentImplemented for cloud init & reset w.r.t PR#342 (diff)
downloadkubeedge-ac2d9bf9045a56fc9630e5026b68fccf68fbbb96.tar.gz
For KubeEdge installer required vendor updates for PR#342
-rw-r--r--Gopkg.lock99
-rw-r--r--Gopkg.toml4
-rw-r--r--external-dependency.md16
-rw-r--r--keadm/app/cmd/util/cloudinstaller.go30
-rw-r--r--vendor/github.com/Azure/go-ansiterm/LICENSE21
-rw-r--r--vendor/github.com/Azure/go-ansiterm/README.md12
-rw-r--r--vendor/github.com/Azure/go-ansiterm/constants.go188
-rw-r--r--vendor/github.com/Azure/go-ansiterm/context.go7
-rw-r--r--vendor/github.com/Azure/go-ansiterm/csi_entry_state.go49
-rw-r--r--vendor/github.com/Azure/go-ansiterm/csi_param_state.go38
-rw-r--r--vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go36
-rw-r--r--vendor/github.com/Azure/go-ansiterm/escape_state.go47
-rw-r--r--vendor/github.com/Azure/go-ansiterm/event_handler.go90
-rw-r--r--vendor/github.com/Azure/go-ansiterm/ground_state.go24
-rw-r--r--vendor/github.com/Azure/go-ansiterm/osc_string_state.go31
-rw-r--r--vendor/github.com/Azure/go-ansiterm/parser.go151
-rw-r--r--vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go99
-rw-r--r--vendor/github.com/Azure/go-ansiterm/parser_actions.go119
-rw-r--r--vendor/github.com/Azure/go-ansiterm/states.go71
-rw-r--r--vendor/github.com/Azure/go-ansiterm/utilities.go21
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/ansi.go182
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/api.go327
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go100
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go101
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go84
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go118
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/utilities.go9
-rw-r--r--vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go743
-rw-r--r--vendor/github.com/JeffAshton/win_pdh/AUTHORS14
-rw-r--r--vendor/github.com/JeffAshton/win_pdh/LICENSE23
-rw-r--r--vendor/github.com/JeffAshton/win_pdh/README.mdown15
-rw-r--r--vendor/github.com/JeffAshton/win_pdh/pdh.go453
-rw-r--r--vendor/github.com/Microsoft/go-winio/.gitignore1
-rw-r--r--vendor/github.com/Microsoft/go-winio/LICENSE22
-rw-r--r--vendor/github.com/Microsoft/go-winio/README.md22
-rw-r--r--vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE27
-rw-r--r--vendor/github.com/Microsoft/go-winio/backup.go280
-rw-r--r--vendor/github.com/Microsoft/go-winio/ea.go137
-rw-r--r--vendor/github.com/Microsoft/go-winio/file.go307
-rw-r--r--vendor/github.com/Microsoft/go-winio/fileinfo.go61
-rw-r--r--vendor/github.com/Microsoft/go-winio/pipe.go421
-rw-r--r--vendor/github.com/Microsoft/go-winio/privilege.go202
-rw-r--r--vendor/github.com/Microsoft/go-winio/reparse.go128
-rw-r--r--vendor/github.com/Microsoft/go-winio/sd.go98
-rw-r--r--vendor/github.com/Microsoft/go-winio/syscall.go3
-rw-r--r--vendor/github.com/Microsoft/go-winio/zsyscall_windows.go520
-rw-r--r--vendor/github.com/Nvveen/Gotty/gotty.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go38
-rw-r--r--vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go18
-rw-r--r--vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go8
-rw-r--r--vendor/github.com/coreos/go-systemd/dbus/properties.go2
-rw-r--r--vendor/github.com/docker/spdystream/connection.go6
-rw-r--r--vendor/github.com/eclipse/paho.mqtt.golang/filestore.go2
-rw-r--r--vendor/github.com/eclipse/paho.mqtt.golang/trace.go2
-rwxr-xr-xvendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/reconfigurable_sink.go5
-rwxr-xr-xvendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/writer_sink.go1
-rw-r--r--vendor/github.com/godbus/dbus/default_handler.go4
-rw-r--r--vendor/github.com/godbus/dbus/transport_generic.go2
-rw-r--r--vendor/github.com/golang/glog/glog.go2
-rw-r--r--vendor/github.com/google/cadvisor/pages/containers.go2
-rw-r--r--vendor/github.com/google/cadvisor/pages/docker.go2
-rw-r--r--vendor/github.com/gorilla/context/.travis.yml19
-rw-r--r--vendor/github.com/gorilla/context/LICENSE27
-rw-r--r--vendor/github.com/gorilla/context/README.md10
-rw-r--r--vendor/github.com/gorilla/context/context.go143
-rw-r--r--vendor/github.com/gorilla/context/doc.go88
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/LICENSE13
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/README.md23
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/trap_others.go15
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/trap_windows.go98
-rw-r--r--vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go46
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE9
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/README.md41
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod1
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go36
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go11
-rw-r--r--vendor/github.com/kr/pty/ztypes_openbsd_386.go8
-rw-r--r--vendor/github.com/modern-go/concurrent/log.go6
-rw-r--r--vendor/github.com/modern-go/concurrent/unbounded_executor.go2
-rw-r--r--vendor/github.com/modern-go/reflect2/reflect2.go6
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec/spec.go10
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go32
-rw-r--r--vendor/github.com/pmezard/go-difflib/difflib/difflib.go8
-rw-r--r--vendor/github.com/rkt/rkt/LICENSE201
-rw-r--r--vendor/github.com/rkt/rkt/api/v1alpha/README.md25
-rw-r--r--vendor/github.com/rkt/rkt/api/v1alpha/api.pb.go1775
-rw-r--r--vendor/github.com/rkt/rkt/api/v1alpha/api.proto487
-rw-r--r--vendor/github.com/rkt/rkt/api/v1alpha/client_example.go154
-rw-r--r--vendor/github.com/rkt/rkt/pkg/acl/LICENSE.MIT22
l---------vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/bash.manifest1
l---------vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/journald.manifest1
l---------vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/systemd.manifest1
l---------vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/bash.manifest1
l---------vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/journald.manifest1
l---------vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/systemd.manifest1
-rw-r--r--vendor/github.com/rkt/rkt/store/imagestore/LICENSE.BSD30
l---------vendor/github.com/rkt/rkt/tests/cloudinit/fedora-rawhide.cloudinit1
l---------vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1510.cloudinit1
l---------vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1604.cloudinit1
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/.gitignore4
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/CHANGELOG6
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/LICENSE22
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/Makefile26
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/README51
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES112
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp.go902
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go514
-rw-r--r--vendor/github.com/sirupsen/logrus/appveyor.yml28
-rw-r--r--vendor/github.com/spf13/afero/.travis.yml42
-rw-r--r--vendor/github.com/spf13/afero/memmap.go2
-rw-r--r--vendor/github.com/spf13/cobra/.gitignore36
-rw-r--r--vendor/github.com/spf13/cobra/.mailmap3
-rw-r--r--vendor/github.com/spf13/cobra/.travis.yml21
-rw-r--r--vendor/github.com/spf13/cobra/LICENSE.txt174
-rw-r--r--vendor/github.com/spf13/cobra/README.md736
-rw-r--r--vendor/github.com/spf13/cobra/args.go89
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.go584
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.md221
-rw-r--r--vendor/github.com/spf13/cobra/cobra.go200
-rw-r--r--vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden202
-rw-r--r--vendor/github.com/spf13/cobra/command.go1517
-rw-r--r--vendor/github.com/spf13/cobra/command_notwin.go5
-rw-r--r--vendor/github.com/spf13/cobra/command_win.go20
-rw-r--r--vendor/github.com/spf13/cobra/zsh_completions.go126
-rw-r--r--vendor/github.com/ugorji/go/codec/0doc.go2
-rw-r--r--vendor/github.com/ugorji/go/codec/fast-path.generated.go512
-rw-r--r--vendor/golang.org/x/text/encoding/unicode/unicode.go4
-rw-r--r--vendor/golang.org/x/text/language/gen.go2
-rw-r--r--vendor/golang.org/x/text/language/lookup.go56
-rw-r--r--vendor/golang.org/x/text/language/tables.go6
-rw-r--r--vendor/golang.org/x/text/unicode/cldr/cldr.go2
-rw-r--r--vendor/golang.org/x/text/unicode/cldr/resolve.go4
-rw-r--r--vendor/golang.org/x/text/unicode/cldr/slice.go2
-rw-r--r--vendor/golang.org/x/text/unicode/norm/maketables.go2
-rw-r--r--vendor/golang.org/x/tools/AUTHORS3
-rw-r--r--vendor/golang.org/x/tools/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/tools/LICENSE27
-rw-r--r--vendor/golang.org/x/tools/PATENTS22
-rw-r--r--vendor/golang.org/x/tools/cmd/getgo/LICENSE27
-rw-r--r--vendor/golang.org/x/tools/go/ast/astutil/enclosing.go627
-rw-r--r--vendor/golang.org/x/tools/go/ast/astutil/imports.go481
-rw-r--r--vendor/golang.org/x/tools/go/ast/astutil/rewrite.go477
-rw-r--r--vendor/golang.org/x/tools/go/ast/astutil/util.go14
-rw-r--r--vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go109
-rw-r--r--vendor/golang.org/x/tools/go/gcexportdata/importer.go73
-rw-r--r--vendor/golang.org/x/tools/go/gcexportdata/main.go99
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go852
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go1036
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go93
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go1078
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go723
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go606
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go21
-rw-r--r--vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go13
-rw-r--r--vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go160
-rw-r--r--vendor/golang.org/x/tools/go/packages/doc.go222
-rw-r--r--vendor/golang.org/x/tools/go/packages/external.go79
-rw-r--r--vendor/golang.org/x/tools/go/packages/golist.go832
-rw-r--r--vendor/golang.org/x/tools/go/packages/golist_overlay.go138
-rw-r--r--vendor/golang.org/x/tools/go/packages/packages.go1084
-rw-r--r--vendor/golang.org/x/tools/go/packages/visit.go55
-rw-r--r--vendor/golang.org/x/tools/go/types/typeutil/callee.go46
-rw-r--r--vendor/golang.org/x/tools/go/types/typeutil/imports.go31
-rw-r--r--vendor/golang.org/x/tools/go/types/typeutil/map.go313
-rw-r--r--vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go72
-rw-r--r--vendor/golang.org/x/tools/go/types/typeutil/ui.go52
-rw-r--r--vendor/golang.org/x/tools/imports/fix.go1259
-rw-r--r--vendor/golang.org/x/tools/imports/imports.go315
-rw-r--r--vendor/golang.org/x/tools/imports/mkindex.go173
-rw-r--r--vendor/golang.org/x/tools/imports/mkstdlib.go132
-rw-r--r--vendor/golang.org/x/tools/imports/mod.go355
-rw-r--r--vendor/golang.org/x/tools/imports/sortimports.go230
-rw-r--r--vendor/golang.org/x/tools/imports/zstdlib.go10325
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go196
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go13
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go14
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go13
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go29
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go37
-rw-r--r--vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go127
-rw-r--r--vendor/golang.org/x/tools/internal/gopathwalk/walk.go250
-rw-r--r--vendor/golang.org/x/tools/internal/module/module.go540
-rw-r--r--vendor/golang.org/x/tools/internal/semver/semver.go388
-rw-r--r--vendor/gopkg.in/inf.v0/dec.go2
-rw-r--r--vendor/gopkg.in/tomb.v1/tomb.go10
-rw-r--r--vendor/gopkg.in/yaml.v2/readerc.go2
-rw-r--r--vendor/gopkg.in/yaml.v2/resolve.go2
-rw-r--r--vendor/gopkg.in/yaml.v2/sorter.go2
-rw-r--r--vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go8
-rw-r--r--vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go8
-rw-r--r--vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go36
-rw-r--r--vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/meta/meta.go12
-rw-r--r--vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go2
-rw-r--r--vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go2
-rw-r--r--vendor/k8s.io/client-go/tools/cache/shared_informer.go2
-rw-r--r--vendor/k8s.io/client-go/util/cert/cert.go4
-rw-r--r--vendor/k8s.io/kubernetes/Godeps/LICENSES444
-rw-r--r--vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go12
-rw-r--r--vendor/k8s.io/kubernetes/pkg/features/kube_features.go4
-rw-r--r--vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go8
-rw-r--r--vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go4
-rw-r--r--vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go6
-rw-r--r--vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go8
211 files changed, 38963 insertions, 732 deletions
diff --git a/Gopkg.lock b/Gopkg.lock
index 952348c2f..fe10f2985 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -32,6 +32,33 @@
[[projects]]
branch = "master"
+ digest = "1:6da51e5ec493ad2b44cb04129e2d0a068c8fb9bd6cb5739d199573558696bb94"
+ name = "github.com/Azure/go-ansiterm"
+ packages = [
+ ".",
+ "winterm",
+ ]
+ pruneopts = "UT"
+ revision = "d6e3b3328b783f23731bc4d058875b0371ff8109"
+
+[[projects]]
+ branch = "master"
+ digest = "1:2608e06730dcdf85183940e6b5ff068b5e6fc0777a6b4a99cd0182594b030e00"
+ name = "github.com/JeffAshton/win_pdh"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "76bb4ee9f0ab50f77826f2a2ee7fb9d3880d6ec2"
+
+[[projects]]
+ digest = "1:f9ae348e1f793dcf9ed930ed47136a67343dbd6809c5c91391322267f4476892"
+ name = "github.com/Microsoft/go-winio"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "1a8911d1ed007260465c3bfbbc785ac6915a0bb8"
+ version = "v0.4.12"
+
+[[projects]]
+ branch = "master"
digest = "1:3721a10686511b80c052323423f0de17a8c06d417dbdd3b392b1578432a33aae"
name = "github.com/Nvveen/Gotty"
packages = ["."]
@@ -383,6 +410,7 @@
[[projects]]
digest = "1:a81c1bc427850b9f040713c61b9df6af36edadf17297b926da0bb2f88af4b6cb"
+ name = "github.com/go-chassis/go-archaius"
packages = [
"core",
"core/cast",
@@ -406,7 +434,6 @@
]
pruneopts = "UT"
revision = "eff93e5e67dbf7eab20c058db4e37e8739bf8df5"
- version = "v1.0"
[[projects]]
branch = "master"
@@ -554,6 +581,14 @@
version = "v0.2.0"
[[projects]]
+ digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1"
+ name = "github.com/gorilla/context"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
+ version = "v1.1.1"
+
+[[projects]]
digest = "1:e73f5b0152105f18bc131fba127d9949305c8693f8a762588a82a48f61756f5f"
name = "github.com/gorilla/mux"
packages = ["."]
@@ -610,6 +645,14 @@
version = "v0.3.7"
[[projects]]
+ digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
+ name = "github.com/inconshreveable/mousetrap"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
+ version = "v1.0"
+
+[[projects]]
digest = "1:e22af8c7518e1eab6f2eab2b7d7558927f816262586cd6ed9f349c97a6c285c4"
name = "github.com/jmespath/go-jmespath"
packages = ["."]
@@ -624,6 +667,14 @@
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
+ digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de"
+ name = "github.com/konsorten/go-windows-terminal-sequences"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
+ version = "v1.0.2"
+
+[[projects]]
digest = "1:29d28429a93b076908a36420a7ecb2bf429d24c15a4b5e2c220ea00fc84b71af"
name = "github.com/kr/pty"
packages = ["."]
@@ -906,6 +957,14 @@
version = "v1.0.0"
[[projects]]
+ digest = "1:026648b32e84057740dde0a1b309b9b90f19afcc6ab2faf98998d0ac518bfe3a"
+ name = "github.com/rkt/rkt"
+ packages = ["api/v1alpha"]
+ pruneopts = "UT"
+ revision = "e04dd994baa1051f1205578d12d69eec83dbb905"
+ version = "v1.30.0"
+
+[[projects]]
digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925"
name = "github.com/satori/go.uuid"
packages = ["."]
@@ -914,6 +973,14 @@
version = "v1.2.0"
[[projects]]
+ digest = "1:76b02b3f516299ddfed21a4e7ce26cda0b9a57f14575bfe4aa2d644ea0e8e24c"
+ name = "github.com/seccomp/libseccomp-golang"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "e3496e3a417d1dc9ecdceca5af2513271fed37a0"
+ version = "v0.9.0"
+
+[[projects]]
digest = "1:3f53e9e4dfbb664cd62940c9c4b65a2171c66acd0b7621a1a6b8e78513525a52"
name = "github.com/sirupsen/logrus"
packages = ["."]
@@ -941,6 +1008,14 @@
version = "v1.2.0"
[[projects]]
+ digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939"
+ name = "github.com/spf13/cobra"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
+ version = "v0.0.3"
+
+[[projects]]
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
@@ -1101,6 +1176,26 @@
[[projects]]
branch = "master"
+ digest = "1:93f3f17886abf8d17ff07ece88d6e1974226f5638d216041bd2234ca03ef848c"
+ name = "golang.org/x/tools"
+ packages = [
+ "go/ast/astutil",
+ "go/gcexportdata",
+ "go/internal/gcimporter",
+ "go/internal/packagesdriver",
+ "go/packages",
+ "go/types/typeutil",
+ "imports",
+ "internal/fastwalk",
+ "internal/gopathwalk",
+ "internal/module",
+ "internal/semver",
+ ]
+ pruneopts = "UT"
+ revision = "5cec639030af3a6ada2732d9bfa1d3731ed55106"
+
+[[projects]]
+ branch = "master"
digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
@@ -1756,6 +1851,8 @@
"github.com/paypal/gatt",
"github.com/paypal/gatt/examples/option",
"github.com/satori/go.uuid",
+ "github.com/spf13/cobra",
+ "github.com/spf13/pflag",
"github.com/stretchr/testify/assert",
"golang.org/x/net/context",
"gopkg.in/yaml.v2",
diff --git a/Gopkg.toml b/Gopkg.toml
index 50d1f3ea9..9663d68cc 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -99,6 +99,10 @@ required = [
name = "github.com/256dpi/gomqtt"
revision = "adb21d1b0cbac79463b51d1a9b234ee1e743a4c2"
+[[override]]
+ name = "github.com/go-chassis/paas-lager"
+revision = "eff93e5e67dbf7eab20c058db4e37e8739bf8df5"
+
[prune]
go-tests = true
unused-packages = true
diff --git a/external-dependency.md b/external-dependency.md
index 4d560bff2..c4103701e 100644
--- a/external-dependency.md
+++ b/external-dependency.md
@@ -112,3 +112,19 @@
|k8s.io/kubernetes | Apache License 2.0 |https://github.com/kubernetes/kubernetes
|k8s.io/utils | Apache License 2.0 |https://github.com/kubernetes/utils
|paypal/gatt |BSD-3-Clause |https://github.com/paypal/gatt
+|go-ansiterm |MIT |https://github.com/Azure/go-ansiterm
+|win_pdh | |github.com/JeffAshton/win_pdh
+|go-winio |MIT |github.com/Microsoft/go-winio
+|chacha20 |MIT |github.com/aead/chacha20
+|mint |MIT |github.com/bifurcation/mint
+|genny |MIT |github.com/cheekybits/genny
+|context |BSD-3-Clause |https://github.com/gorilla/context
+|go-windows-terminal-sequences |MIT |github.com/konsorten/go-windows-terminal-sequences
+|viaduct |Apache License 2.0 |https://github.com/kubeedge/viaduct
+|aes12 |MIT |github.com/lucas-clemente/aes12
+|quic-go |MIT |github.com/lucas-clemente/quic-go
+|quic-go-certificates |MIT |github.com/lucas-clemente/quic-go-certificates
+|libseccomp-golang |BSD 2-Clause |https://github.com/seccomp/libseccomp-golang
+|cobra |Apache License 2.0 |https://github.com/spf13/cobra
+|pflag |BSD 3-Clause "New" or "Revised" License |https://github.com/spf13/pflag
+
diff --git a/keadm/app/cmd/util/cloudinstaller.go b/keadm/app/cmd/util/cloudinstaller.go
index 177b23b8c..7d28d59c7 100644
--- a/keadm/app/cmd/util/cloudinstaller.go
+++ b/keadm/app/cmd/util/cloudinstaller.go
@@ -56,34 +56,34 @@ func (cu *KubeCloudInstTool) InstallTools() error {
}
//Create controller.yaml
- _, err = os.Stat(KubeEdgeControllerYaml)
- if err != nil {
+ //_, err = os.Stat(KubeEdgeControllerYaml)
+ //if err != nil {
if err = ioutil.WriteFile(KubeEdgeControllerYaml, ControllerYaml, 0666); err != nil {
return err
}
- } else {
- fmt.Println(KubeEdgeControllerYaml, "is already available, hence not overwriting it")
- }
+ //} else {
+ // fmt.Println(KubeEdgeControllerYaml, "is already available, hence not overwriting it")
+ //}
//Create logger.yaml
- _, err = os.Stat(KubeEdgeControllerLoggingYaml)
- if err != nil {
+ //_, err = os.Stat(KubeEdgeControllerLoggingYaml)
+ //if err != nil {
if err = ioutil.WriteFile(KubeEdgeControllerLoggingYaml, ControllerLoggingYaml, 0666); err != nil {
return err
}
- } else {
- fmt.Println(KubeEdgeControllerLoggingYaml, "is already available, hence not overwriting it")
- }
+ //} else {
+ // fmt.Println(KubeEdgeControllerLoggingYaml, "is already available, hence not overwriting it")
+ //}
//Create modules.yaml
- _, err = os.Stat(KubeEdgeControllerModulesYaml)
- if err != nil {
+ //_, err = os.Stat(KubeEdgeControllerModulesYaml)
+ //if err != nil {
if err = ioutil.WriteFile(KubeEdgeControllerModulesYaml, ControllerModulesYaml, 0666); err != nil {
return err
}
- } else {
- fmt.Println(KubeEdgeControllerLoggingYaml, "is already available, hence not overwriting it")
- }
+ //} else {
+ // fmt.Println(KubeEdgeControllerLoggingYaml, "is already available, hence not overwriting it")
+ //}
time.Sleep(1 * time.Second)
diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 000000000..e3d9a64d1
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 000000000..261c041e7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.
+
+For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
new file mode 100644
index 000000000..96504a33b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/constants.go
@@ -0,0 +1,188 @@
+package ansiterm
+
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+ // ECMA-48 Set Graphics Rendition
+ // Note:
+ // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+ // -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+ // -- Windows does not expose the per-window cursor (i.e., caret) blink times
+ ANSI_SGR_RESET = 0
+ ANSI_SGR_BOLD = 1
+ ANSI_SGR_DIM = 2
+ _ANSI_SGR_ITALIC = 3
+ ANSI_SGR_UNDERLINE = 4
+ _ANSI_SGR_BLINKSLOW = 5
+ _ANSI_SGR_BLINKFAST = 6
+ ANSI_SGR_REVERSE = 7
+ _ANSI_SGR_INVISIBLE = 8
+ _ANSI_SGR_LINETHROUGH = 9
+ _ANSI_SGR_FONT_00 = 10
+ _ANSI_SGR_FONT_01 = 11
+ _ANSI_SGR_FONT_02 = 12
+ _ANSI_SGR_FONT_03 = 13
+ _ANSI_SGR_FONT_04 = 14
+ _ANSI_SGR_FONT_05 = 15
+ _ANSI_SGR_FONT_06 = 16
+ _ANSI_SGR_FONT_07 = 17
+ _ANSI_SGR_FONT_08 = 18
+ _ANSI_SGR_FONT_09 = 19
+ _ANSI_SGR_FONT_10 = 20
+ _ANSI_SGR_DOUBLEUNDERLINE = 21
+ ANSI_SGR_BOLD_DIM_OFF = 22
+ _ANSI_SGR_ITALIC_OFF = 23
+ ANSI_SGR_UNDERLINE_OFF = 24
+ _ANSI_SGR_BLINK_OFF = 25
+ _ANSI_SGR_RESERVED_00 = 26
+ ANSI_SGR_REVERSE_OFF = 27
+ _ANSI_SGR_INVISIBLE_OFF = 28
+ _ANSI_SGR_LINETHROUGH_OFF = 29
+ ANSI_SGR_FOREGROUND_BLACK = 30
+ ANSI_SGR_FOREGROUND_RED = 31
+ ANSI_SGR_FOREGROUND_GREEN = 32
+ ANSI_SGR_FOREGROUND_YELLOW = 33
+ ANSI_SGR_FOREGROUND_BLUE = 34
+ ANSI_SGR_FOREGROUND_MAGENTA = 35
+ ANSI_SGR_FOREGROUND_CYAN = 36
+ ANSI_SGR_FOREGROUND_WHITE = 37
+ _ANSI_SGR_RESERVED_01 = 38
+ ANSI_SGR_FOREGROUND_DEFAULT = 39
+ ANSI_SGR_BACKGROUND_BLACK = 40
+ ANSI_SGR_BACKGROUND_RED = 41
+ ANSI_SGR_BACKGROUND_GREEN = 42
+ ANSI_SGR_BACKGROUND_YELLOW = 43
+ ANSI_SGR_BACKGROUND_BLUE = 44
+ ANSI_SGR_BACKGROUND_MAGENTA = 45
+ ANSI_SGR_BACKGROUND_CYAN = 46
+ ANSI_SGR_BACKGROUND_WHITE = 47
+ _ANSI_SGR_RESERVED_02 = 48
+ ANSI_SGR_BACKGROUND_DEFAULT = 49
+ // 50 - 65: Unsupported
+
+ ANSI_MAX_CMD_LENGTH = 4096
+
+ MAX_INPUT_EVENTS = 128
+ DEFAULT_WIDTH = 80
+ DEFAULT_HEIGHT = 24
+
+ ANSI_BEL = 0x07
+ ANSI_BACKSPACE = 0x08
+ ANSI_TAB = 0x09
+ ANSI_LINE_FEED = 0x0A
+ ANSI_VERTICAL_TAB = 0x0B
+ ANSI_FORM_FEED = 0x0C
+ ANSI_CARRIAGE_RETURN = 0x0D
+ ANSI_ESCAPE_PRIMARY = 0x1B
+ ANSI_ESCAPE_SECONDARY = 0x5B
+ ANSI_OSC_STRING_ENTRY = 0x5D
+ ANSI_COMMAND_FIRST = 0x40
+ ANSI_COMMAND_LAST = 0x7E
+ DCS_ENTRY = 0x90
+ CSI_ENTRY = 0x9B
+ OSC_STRING = 0x9D
+ ANSI_PARAMETER_SEP = ";"
+ ANSI_CMD_G0 = '('
+ ANSI_CMD_G1 = ')'
+ ANSI_CMD_G2 = '*'
+ ANSI_CMD_G3 = '+'
+ ANSI_CMD_DECPNM = '>'
+ ANSI_CMD_DECPAM = '='
+ ANSI_CMD_OSC = ']'
+ ANSI_CMD_STR_TERM = '\\'
+
+ KEY_CONTROL_PARAM_2 = ";2"
+ KEY_CONTROL_PARAM_3 = ";3"
+ KEY_CONTROL_PARAM_4 = ";4"
+ KEY_CONTROL_PARAM_5 = ";5"
+ KEY_CONTROL_PARAM_6 = ";6"
+ KEY_CONTROL_PARAM_7 = ";7"
+ KEY_CONTROL_PARAM_8 = ";8"
+ KEY_ESC_CSI = "\x1B["
+ KEY_ESC_N = "\x1BN"
+ KEY_ESC_O = "\x1BO"
+
+ FILL_CHARACTER = ' '
+)
+
+func getByteRange(start byte, end byte) []byte {
+ bytes := make([]byte, 0, 32)
+ for i := start; i <= end; i++ {
+ bytes = append(bytes, byte(i))
+ }
+
+ return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE 20+A0 hex Always and everywhere a blank space
+// Intermediate 20-2F hex !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters 30-3F hex 0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics 40-7E hex (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+ escapeToGroundBytes := getByteRange(0x30, 0x4F)
+ escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+ escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+ return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+ executeBytes := getByteRange(0x00, 0x17)
+ executeBytes = append(executeBytes, 0x19)
+ executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+ return executeBytes
+}
+
+func getToGroundBytes() []byte {
+ groundBytes := []byte{0x18}
+ groundBytes = append(groundBytes, 0x1A)
+ groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+ groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+ groundBytes = append(groundBytes, 0x99)
+ groundBytes = append(groundBytes, 0x9A)
+ groundBytes = append(groundBytes, 0x9C)
+ return groundBytes
+}
+
+// Delete 7F hex Always and everywhere ignored
+// C1 Control 80-9F hex 32 additional control characters
+// G1 Displayable A1-FE hex 94 additional displayable characters
+// Special A0+FF hex Same as SPACE and DELETE
diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go
new file mode 100644
index 000000000..8d66e777c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/context.go
@@ -0,0 +1,7 @@
+package ansiterm
+
// ansiContext carries the mutable state accumulated while parsing a
// single ANSI escape sequence; it is reset by AnsiParser.clear().
type ansiContext struct {
	currentChar byte   // the byte most recently fed to the parser
	paramBuffer []byte // collected parameter bytes (e.g. "1;24" of CSI 1;24H)
	interBuffer []byte // collected intermediate bytes
}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
new file mode 100644
index 000000000..bcbe00d0c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
@@ -0,0 +1,49 @@
+package ansiterm
+
// csiEntryState is entered right after a CSI (Control Sequence
// Introducer) is recognized, before any parameter or intermediate
// bytes have been collected.
type csiEntryState struct {
	baseState
}

// Handle processes one byte in the CSI-entry state: an alphabetic byte
// terminates the sequence (back to ground), a collectable byte moves to
// parameter collection, and an executor byte is executed in place.
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
	csiState.parser.logf("CsiEntry::Handle %#x", b)

	// Shared base-state handling (new ESC/CSI/OSC introducers and the
	// unconditional to-ground bytes) takes precedence.
	nextState, err := csiState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(alphabetics, b):
		return csiState.parser.ground, nil
	case sliceContains(csiCollectables, b):
		return csiState.parser.csiParam, nil
	case sliceContains(executors, b):
		return csiState, csiState.parser.execute()
	}

	return csiState, nil
}

// Transition performs the action tied to leaving this state: dispatch
// the completed CSI command when returning to ground, or stash the
// current byte as a parameter/intermediate when moving to csiParam.
func (csiState csiEntryState) Transition(s state) error {
	csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
	csiState.baseState.Transition(s)

	switch s {
	case csiState.parser.ground:
		return csiState.parser.csiDispatch()
	case csiState.parser.csiParam:
		switch {
		case sliceContains(csiParams, csiState.parser.context.currentChar):
			csiState.parser.collectParam()
		case sliceContains(intermeds, csiState.parser.context.currentChar):
			csiState.parser.collectInter()
		}
	}

	return nil
}

// Enter resets the parser's per-sequence buffers.
func (csiState csiEntryState) Enter() error {
	csiState.parser.clear()
	return nil
}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
new file mode 100644
index 000000000..7ed5e01c3
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
@@ -0,0 +1,38 @@
+package ansiterm
+
// csiParamState collects the parameter/intermediate bytes of a CSI
// sequence until a final (alphabetic) byte terminates it.
type csiParamState struct {
	baseState
}

// Handle processes one byte while collecting CSI parameters: a final
// byte ends the sequence (back to ground), collectable bytes are
// buffered, and executor bytes run in place.
func (csiState csiParamState) Handle(b byte) (s state, e error) {
	csiState.parser.logf("CsiParam::Handle %#x", b)

	nextState, err := csiState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(alphabetics, b):
		return csiState.parser.ground, nil
	case sliceContains(csiCollectables, b):
		csiState.parser.collectParam()
		return csiState, nil
	case sliceContains(executors, b):
		return csiState, csiState.parser.execute()
	}

	return csiState, nil
}

// Transition dispatches the completed CSI command when the machine
// returns to ground.
func (csiState csiParamState) Transition(s state) error {
	csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
	csiState.baseState.Transition(s)

	switch s {
	case csiState.parser.ground:
		return csiState.parser.csiDispatch()
	}

	return nil
}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
new file mode 100644
index 000000000..1c719db9e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
@@ -0,0 +1,36 @@
+package ansiterm
+
// escapeIntermediateState is entered when an intermediate byte follows
// ESC; it collects further intermediates until a final byte (30-7E)
// completes the sequence.
type escapeIntermediateState struct {
	baseState
}

// Handle processes one byte: intermediates are collected, executor
// bytes run in place, and a final byte returns the parser to ground.
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
	escState.parser.logf("escapeIntermediateState::Handle %#x", b)
	nextState, err := escState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(intermeds, b):
		return escState, escState.parser.collectInter()
	case sliceContains(executors, b):
		return escState, escState.parser.execute()
	case sliceContains(escapeIntermediateToGroundBytes, b):
		return escState.parser.ground, nil
	}

	return escState, nil
}

// Transition dispatches the completed escape sequence when returning
// to ground.
func (escState escapeIntermediateState) Transition(s state) error {
	escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
	escState.baseState.Transition(s)

	switch s {
	case escState.parser.ground:
		return escState.parser.escDispatch()
	}

	return nil
}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
new file mode 100644
index 000000000..6390abd23
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go
@@ -0,0 +1,47 @@
+package ansiterm
+
// escapeState is entered after a bare ESC byte and decides what kind of
// sequence follows: a secondary escape byte starts a CSI, an OSC entry
// byte starts an OSC string, an intermediate byte moves to
// escapeIntermediate, and a final byte completes a two-byte escape.
type escapeState struct {
	baseState
}

// Handle processes the byte that follows ESC.
func (escState escapeState) Handle(b byte) (s state, e error) {
	escState.parser.logf("escapeState::Handle %#x", b)
	nextState, err := escState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case b == ANSI_ESCAPE_SECONDARY:
		return escState.parser.csiEntry, nil
	case b == ANSI_OSC_STRING_ENTRY:
		return escState.parser.oscString, nil
	case sliceContains(executors, b):
		return escState, escState.parser.execute()
	case sliceContains(escapeToGroundBytes, b):
		return escState.parser.ground, nil
	case sliceContains(intermeds, b):
		return escState.parser.escapeIntermediate, nil
	}

	return escState, nil
}

// Transition dispatches a completed two-byte escape on return to
// ground, or records the current byte as an intermediate when moving to
// escapeIntermediate.
func (escState escapeState) Transition(s state) error {
	escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
	escState.baseState.Transition(s)

	switch s {
	case escState.parser.ground:
		return escState.parser.escDispatch()
	case escState.parser.escapeIntermediate:
		return escState.parser.collectInter()
	}

	return nil
}

// Enter resets the parser's per-sequence buffers.
func (escState escapeState) Enter() error {
	escState.parser.clear()
	return nil
}
diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go
new file mode 100644
index 000000000..98087b38c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go
@@ -0,0 +1,90 @@
+package ansiterm
+
// AnsiEventHandler receives the decoded terminal actions produced by an
// AnsiParser. Method names follow the standard VT100/ANSI command
// mnemonics (the capitalized letters in each comment spell the name).
type AnsiEventHandler interface {
	// Print displays one printable character
	Print(b byte) error

	// Execute C0 commands
	Execute(b byte) error

	// CUrsor Up
	CUU(int) error

	// CUrsor Down
	CUD(int) error

	// CUrsor Forward
	CUF(int) error

	// CUrsor Backward
	CUB(int) error

	// Cursor to Next Line
	CNL(int) error

	// Cursor to Previous Line
	CPL(int) error

	// Cursor Horizontal position Absolute
	CHA(int) error

	// Vertical line Position Absolute
	VPA(int) error

	// CUrsor Position
	CUP(int, int) error

	// Horizontal and Vertical Position (depends on PUM)
	HVP(int, int) error

	// Text Cursor Enable Mode
	DECTCEM(bool) error

	// Origin Mode
	DECOM(bool) error

	// 132 Column Mode
	DECCOLM(bool) error

	// Erase in Display
	ED(int) error

	// Erase in Line
	EL(int) error

	// Insert Line
	IL(int) error

	// Delete Line
	DL(int) error

	// Insert Character
	ICH(int) error

	// Delete Character
	DCH(int) error

	// Set Graphics Rendition
	SGR([]int) error

	// Pan Down
	SU(int) error

	// Pan Up
	SD(int) error

	// Device Attributes
	DA([]string) error

	// Set Top and Bottom Margins
	DECSTBM(int, int) error

	// Index
	IND() error

	// Reverse Index
	RI() error

	// Flush updates from previous commands
	Flush() error
}
diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go
new file mode 100644
index 000000000..52451e946
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go
@@ -0,0 +1,24 @@
+package ansiterm
+
// groundState is the default state when no escape sequence is in
// progress: printable bytes are printed, executor bytes are executed,
// and escape introducers are handled by the embedded baseState.
type groundState struct {
	baseState
}

// Handle processes one byte of ordinary (non-sequence) input.
func (gs groundState) Handle(b byte) (s state, e error) {
	// Record the byte so print()/execute() can read it from the context.
	gs.parser.context.currentChar = b

	nextState, err := gs.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(printables, b):
		return gs, gs.parser.print()

	case sliceContains(executors, b):
		return gs, gs.parser.execute()
	}

	return gs, nil
}
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
new file mode 100644
index 000000000..593b10ab6
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
@@ -0,0 +1,31 @@
+package ansiterm
+
// oscStringState consumes an Operating System Command string until a
// terminator byte is seen; the string's contents are discarded (no
// collection happens in this state).
type oscStringState struct {
	baseState
}

// Handle processes one byte of an OSC string, returning to ground when
// a terminator is reached.
func (oscState oscStringState) Handle(b byte) (s state, e error) {
	oscState.parser.logf("OscString::Handle %#x", b)
	nextState, err := oscState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case isOscStringTerminator(b):
		return oscState.parser.ground, nil
	}

	return oscState, nil
}
+
+// See below for OSC string terminators for linux
+// http://man7.org/linux/man-pages/man4/console_codes.4.html
+func isOscStringTerminator(b byte) bool {
+
+ if b == ANSI_BEL || b == 0x5C {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
new file mode 100644
index 000000000..03cec7ada
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser.go
@@ -0,0 +1,151 @@
+package ansiterm
+
+import (
+ "errors"
+ "log"
+ "os"
+)
+
// AnsiParser is a state machine for ANSI/VT100 escape sequences; each
// decoded action is forwarded to the configured AnsiEventHandler.
// The per-state fields hold the shared singleton instance of each state.
type AnsiParser struct {
	currState          state // the state currently consuming input
	eventHandler       AnsiEventHandler
	context            *ansiContext // buffers for the sequence in progress
	csiEntry           state
	csiParam           state
	dcsEntry           state
	escape             state
	escapeIntermediate state
	error              state
	ground             state
	oscString          state
	stateMap           []state

	// logf is the debug logging sink; CreateParser guarantees it is
	// non-nil (a no-op when logging is disabled).
	logf func(string, ...interface{})
}
+
// Option customizes an AnsiParser during CreateParser.
type Option func(*AnsiParser)

// WithLogf installs f as the parser's debug logging function.
func WithLogf(f func(string, ...interface{})) Option {
	return func(ap *AnsiParser) {
		ap.logf = f
	}
}
+
+func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
+ ap := &AnsiParser{
+ eventHandler: evtHandler,
+ context: &ansiContext{},
+ }
+ for _, o := range opts {
+ o(ap)
+ }
+
+ if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+ logFile, _ := os.Create("ansiParser.log")
+ logger := log.New(logFile, "", log.LstdFlags)
+ if ap.logf != nil {
+ l := ap.logf
+ ap.logf = func(s string, v ...interface{}) {
+ l(s, v...)
+ logger.Printf(s, v...)
+ }
+ } else {
+ ap.logf = logger.Printf
+ }
+ }
+
+ if ap.logf == nil {
+ ap.logf = func(string, ...interface{}) {}
+ }
+
+ ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+ ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+ ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+ ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+ ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+ ap.error = errorState{baseState{name: "Error", parser: ap}}
+ ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+ ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+ ap.stateMap = []state{
+ ap.csiEntry,
+ ap.csiParam,
+ ap.dcsEntry,
+ ap.escape,
+ ap.escapeIntermediate,
+ ap.error,
+ ap.ground,
+ ap.oscString,
+ }
+
+ ap.currState = getState(initialState, ap.stateMap)
+
+ ap.logf("CreateParser: parser %p", ap)
+ return ap
+}
+
+func getState(name string, states []state) state {
+ for _, el := range states {
+ if el.Name() == name {
+ return el
+ }
+ }
+
+ return nil
+}
+
// Parse feeds bytes through the state machine one at a time. It returns
// the number of bytes successfully handled and the first error
// encountered; when all bytes succeed, the event handler is flushed.
func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
	for i, b := range bytes {
		if err := ap.handle(b); err != nil {
			return i, err
		}
	}

	return len(bytes), ap.eventHandler.Flush()
}
+
// handle runs one byte through the current state and performs the
// resulting state change, if any. A nil next state is a programming
// error in a state implementation and is reported as an error.
func (ap *AnsiParser) handle(b byte) error {
	ap.context.currentChar = b
	newState, err := ap.currState.Handle(b)
	if err != nil {
		return err
	}

	if newState == nil {
		ap.logf("WARNING: newState is nil")
		return errors.New("New state of 'nil' is invalid.")
	}

	if newState != ap.currState {
		if err := ap.changeState(newState); err != nil {
			return err
		}
	}

	return nil
}
+
+func (ap *AnsiParser) changeState(newState state) error {
+ ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+ // Exit old state
+ if err := ap.currState.Exit(); err != nil {
+ ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+ return err
+ }
+
+ // Perform transition action
+ if err := ap.currState.Transition(newState); err != nil {
+ ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
+ return err
+ }
+
+ // Enter new state
+ if err := newState.Enter(); err != nil {
+ ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
+ return err
+ }
+
+ ap.currState = newState
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 000000000..de0a1f9cd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,99 @@
+package ansiterm
+
+import (
+ "strconv"
+)
+
// parseParams splits the raw parameter bytes of a CSI sequence on ';'
// and returns the non-empty parameters as strings. Empty fields
// (";;") are dropped; the error result is always nil and exists for
// interface symmetry.
func parseParams(bytes []byte) ([]string, error) {
	params := []string{}
	var current []byte

	flush := func() {
		if len(current) > 0 {
			params = append(params, string(current))
			current = nil
		}
	}

	for _, b := range bytes {
		if b == ';' {
			flush()
			continue
		}
		current = append(current, b)
	}

	// The final parameter may not be ';'-terminated.
	flush()

	return params, nil
}
+
// parseCmd returns the sequence's final (command) byte as a
// one-character string; the error result is always nil.
func parseCmd(context ansiContext) (string, error) {
	return string(context.currentChar), nil
}
+
+func getInt(params []string, dflt int) int {
+ i := getInts(params, 1, dflt)[0]
+ return i
+}
+
// getInts converts params to ints and pads the result with dflt until
// it holds at least minCount values. Zero values (including
// unparseable strings, which Atoi maps to 0) are replaced with dflt,
// matching VT100 semantics where 0 means "use the default".
func getInts(params []string, minCount int, dflt int) []int {
	ints := make([]int, 0, len(params))

	for _, p := range params {
		n, _ := strconv.Atoi(p)
		if n == 0 {
			// Zero is mapped to the default value in VT100.
			n = dflt
		}
		ints = append(ints, n)
	}

	for len(ints) < minCount {
		ints = append(ints, dflt)
	}

	return ints
}
+
// modeDispatch routes a DEC private mode parameter (from CSI ? Pm h/l)
// to the event handler. Only DECCOLM ("?3"), DECOM ("?6") and DECTCEM
// ("?25") are supported; other modes are silently ignored.
func (ap *AnsiParser) modeDispatch(param string, set bool) error {
	switch param {
	case "?3":
		return ap.eventHandler.DECCOLM(set)
	case "?6":
		return ap.eventHandler.DECOM(set)
	case "?25":
		return ap.eventHandler.DECTCEM(set)
	}
	return nil
}

// hDispatch handles CSI ... h (set mode). Sequences with anything other
// than exactly one parameter are ignored.
func (ap *AnsiParser) hDispatch(params []string) error {
	if len(params) == 1 {
		return ap.modeDispatch(params[0], true)
	}

	return nil
}

// lDispatch handles CSI ... l (reset mode). Sequences with anything
// other than exactly one parameter are ignored.
func (ap *AnsiParser) lDispatch(params []string) error {
	if len(params) == 1 {
		return ap.modeDispatch(params[0], false)
	}

	return nil
}
+
+func getEraseParam(params []string) int {
+ param := getInt(params, 0)
+ if param < 0 || 3 < param {
+ param = 0
+ }
+
+ return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 000000000..0bb5e51e9
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,119 @@
+package ansiterm
+
// collectParam appends the current byte to the parameter buffer (the
// "1;24" portion of a sequence such as CSI 1;24H). Always returns nil.
func (ap *AnsiParser) collectParam() error {
	currChar := ap.context.currentChar
	ap.logf("collectParam %#x", currChar)
	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
	return nil
}
+
+func (ap *AnsiParser) collectInter() error {
+ currChar := ap.context.currentChar
+ ap.logf("collectInter %#x", currChar)
+ ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
+ return nil
+}
+
// escDispatch executes a completed two-byte escape sequence (ESC plus a
// final byte). Only IND (ESC D), NEL (ESC E) and RI (ESC M) are
// implemented; all other finals are silently ignored.
func (ap *AnsiParser) escDispatch() error {
	cmd, _ := parseCmd(*ap.context)
	// NOTE: this local shadows the package-level 'intermeds' byte range;
	// here it holds the intermediate bytes collected for this sequence.
	intermeds := ap.context.interBuffer
	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
	ap.logf("escDispatch: %v(%v)", cmd, intermeds)

	switch cmd {
	case "D": // IND
		return ap.eventHandler.IND()
	case "E": // NEL, equivalent to CRLF
		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
		if err == nil {
			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
		}
		return err
	case "M": // RI
		return ap.eventHandler.RI()
	}

	return nil
}
+
// csiDispatch executes a completed CSI sequence by mapping its final
// byte to the matching AnsiEventHandler method. Missing or zero numeric
// parameters default to 1 (0 for the erase commands J/K, per
// getEraseParam). Unsupported finals are logged and ignored.
func (ap *AnsiParser) csiDispatch() error {
	cmd, _ := parseCmd(*ap.context)
	params, _ := parseParams(ap.context.paramBuffer)
	ap.logf("Parsed params: %v with length: %d", params, len(params))

	ap.logf("csiDispatch: %v(%v)", cmd, params)

	switch cmd {
	case "@":
		return ap.eventHandler.ICH(getInt(params, 1))
	case "A":
		return ap.eventHandler.CUU(getInt(params, 1))
	case "B":
		return ap.eventHandler.CUD(getInt(params, 1))
	case "C":
		return ap.eventHandler.CUF(getInt(params, 1))
	case "D":
		return ap.eventHandler.CUB(getInt(params, 1))
	case "E":
		return ap.eventHandler.CNL(getInt(params, 1))
	case "F":
		return ap.eventHandler.CPL(getInt(params, 1))
	case "G":
		return ap.eventHandler.CHA(getInt(params, 1))
	case "H":
		// Row and column, both defaulting to 1 (home position).
		ints := getInts(params, 2, 1)
		x, y := ints[0], ints[1]
		return ap.eventHandler.CUP(x, y)
	case "J":
		param := getEraseParam(params)
		return ap.eventHandler.ED(param)
	case "K":
		param := getEraseParam(params)
		return ap.eventHandler.EL(param)
	case "L":
		return ap.eventHandler.IL(getInt(params, 1))
	case "M":
		return ap.eventHandler.DL(getInt(params, 1))
	case "P":
		return ap.eventHandler.DCH(getInt(params, 1))
	case "S":
		return ap.eventHandler.SU(getInt(params, 1))
	case "T":
		return ap.eventHandler.SD(getInt(params, 1))
	case "c":
		return ap.eventHandler.DA(params)
	case "d":
		return ap.eventHandler.VPA(getInt(params, 1))
	case "f":
		ints := getInts(params, 2, 1)
		x, y := ints[0], ints[1]
		return ap.eventHandler.HVP(x, y)
	case "h":
		return ap.hDispatch(params)
	case "l":
		return ap.lDispatch(params)
	case "m":
		// SGR: zero/absent attributes mean "reset" and are kept as 0.
		return ap.eventHandler.SGR(getInts(params, 1, 0))
	case "r":
		ints := getInts(params, 2, 1)
		top, bottom := ints[0], ints[1]
		return ap.eventHandler.DECSTBM(top, bottom)
	default:
		ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
		return nil
	}

}
+
// print forwards the current printable byte to the event handler.
func (ap *AnsiParser) print() error {
	return ap.eventHandler.Print(ap.context.currentChar)
}

// clear resets the per-sequence context (current char and both
// collection buffers). Always returns nil.
func (ap *AnsiParser) clear() error {
	ap.context = &ansiContext{}
	return nil
}

// execute forwards the current control byte to the event handler.
func (ap *AnsiParser) execute() error {
	return ap.eventHandler.Execute(ap.context.currentChar)
}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
new file mode 100644
index 000000000..f2ea1fcd1
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/states.go
@@ -0,0 +1,71 @@
+package ansiterm
+
// stateID identifies a parser state. It appears unused in the visible
// code; kept for API stability.
type stateID int

// state is one node of the ANSI escape-sequence state machine.
// Enter/Exit run when the machine moves into/out of the state, Handle
// consumes one byte and returns the next state, and Transition runs the
// action associated with moving from this state to the given one.
type state interface {
	Enter() error
	Exit() error
	Handle(byte) (state, error)
	Name() string
	Transition(state) error
}

// baseState supplies the default behavior shared by all concrete
// states via embedding.
type baseState struct {
	name   string
	parser *AnsiParser
}

// Enter is a no-op by default.
func (base baseState) Enter() error {
	return nil
}

// Exit is a no-op by default.
func (base baseState) Exit() error {
	return nil
}
+
// Handle implements the transitions that apply from any state: the CSI,
// DCS, ESC and OSC introducers switch to their entry states, and the
// unconditional to-ground bytes abort the sequence in progress. A nil
// state result means "not handled here" and lets the concrete state's
// own Handle continue.
func (base baseState) Handle(b byte) (s state, e error) {

	switch {
	case b == CSI_ENTRY:
		return base.parser.csiEntry, nil
	case b == DCS_ENTRY:
		return base.parser.dcsEntry, nil
	case b == ANSI_ESCAPE_PRIMARY:
		return base.parser.escape, nil
	case b == OSC_STRING:
		return base.parser.oscString, nil
	case sliceContains(toGroundBytes, b):
		return base.parser.ground, nil
	}

	return nil, nil
}
+
// Name returns the state's display name, used for lookups and logging.
func (base baseState) Name() string {
	return base.name
}

// Transition runs the default transition action: when moving to ground
// because of an interrupting control byte, that byte itself is
// executed.
func (base baseState) Transition(s state) error {
	if s == base.parser.ground {
		// NOTE(review): this list is rebuilt on every transition and,
		// unlike getToGroundBytes, omits 0x9C (ST) -- presumably because a
		// string terminator should end a sequence without being executed;
		// confirm against the vt500 parser diagram.
		execBytes := []byte{0x18}
		execBytes = append(execBytes, 0x1A)
		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
		execBytes = append(execBytes, 0x99)
		execBytes = append(execBytes, 0x9A)

		if sliceContains(execBytes, base.parser.context.currentChar) {
			return base.parser.execute()
		}
	}

	return nil
}
+
// dcsEntryState is entered after a DCS introducer; it relies entirely
// on the default baseState behavior.
type dcsEntryState struct {
	baseState
}

// errorState is the parser's error state; it relies entirely on the
// default baseState behavior.
type errorState struct {
	baseState
}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
new file mode 100644
index 000000000..392114493
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/utilities.go
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+ "strconv"
+)
+
// sliceContains reports whether b occurs anywhere in bytes.
func sliceContains(bytes []byte, b byte) bool {
	for i := 0; i < len(bytes); i++ {
		if bytes[i] == b {
			return true
		}
	}

	return false
}
+
// convertBytesToInteger interprets bytes as a base-10 ASCII number,
// returning 0 when the bytes do not parse as an integer.
func convertBytesToInteger(bytes []byte) int {
	value, _ := strconv.Atoi(string(bytes))
	return value
}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
new file mode 100644
index 000000000..a67327972
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -0,0 +1,182 @@
+// +build windows
+
+package winterm
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/Azure/go-ansiterm"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const (
+ VK_PRIOR = 0x21 // PAGE UP key
+ VK_NEXT = 0x22 // PAGE DOWN key
+ VK_END = 0x23 // END key
+ VK_HOME = 0x24 // HOME key
+ VK_LEFT = 0x25 // LEFT ARROW key
+ VK_UP = 0x26 // UP ARROW key
+ VK_RIGHT = 0x27 // RIGHT ARROW key
+ VK_DOWN = 0x28 // DOWN ARROW key
+ VK_SELECT = 0x29 // SELECT key
+ VK_PRINT = 0x2A // PRINT key
+ VK_EXECUTE = 0x2B // EXECUTE key
+ VK_SNAPSHOT = 0x2C // PRINT SCREEN key
+ VK_INSERT = 0x2D // INS key
+ VK_DELETE = 0x2E // DEL key
+ VK_HELP = 0x2F // HELP key
+ VK_F1 = 0x70 // F1 key
+ VK_F2 = 0x71 // F2 key
+ VK_F3 = 0x72 // F3 key
+ VK_F4 = 0x73 // F4 key
+ VK_F5 = 0x74 // F5 key
+ VK_F6 = 0x75 // F6 key
+ VK_F7 = 0x76 // F7 key
+ VK_F8 = 0x77 // F8 key
+ VK_F9 = 0x78 // F9 key
+ VK_F10 = 0x79 // F10 key
+ VK_F11 = 0x7A // F11 key
+ VK_F12 = 0x7B // F12 key
+
+ RIGHT_ALT_PRESSED = 0x0001
+ LEFT_ALT_PRESSED = 0x0002
+ RIGHT_CTRL_PRESSED = 0x0004
+ LEFT_CTRL_PRESSED = 0x0008
+ SHIFT_PRESSED = 0x0010
+ NUMLOCK_ON = 0x0020
+ SCROLLLOCK_ON = 0x0040
+ CAPSLOCK_ON = 0x0080
+ ENHANCED_KEY = 0x0100
+)
+
// ansiCommand is a decoded ANSI escape sequence: the raw bytes, the
// final command character (or the whole sequence for character-set
// selection commands), and any ';'-separated parameters.
type ansiCommand struct {
	CommandBytes []byte
	Command      string
	Parameters   []string
	IsSpecial    bool
}

// newAnsiCommand decodes the raw escape sequence in command.
// NOTE(review): command[1] is read unconditionally, so callers must
// pass at least two bytes -- confirm at the call sites.
func newAnsiCommand(command []byte) *ansiCommand {

	if isCharacterSelectionCmdChar(command[1]) {
		// Is Character Set Selection commands
		return &ansiCommand{
			CommandBytes: command,
			Command:      string(command),
			IsSpecial:    true,
		}
	}

	// last char is command character
	lastCharIndex := len(command) - 1

	ac := &ansiCommand{
		CommandBytes: command,
		Command:      string(command[lastCharIndex]),
		IsSpecial:    false,
	}

	// more than a single escape
	if lastCharIndex != 0 {
		start := 1
		// skip if double char escape sequence
		if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
			start++
		}
		// convert this to GetNextParam method
		ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
	}

	return ac
}
+
// paramAsSHORT returns parameter index as an int16 (a Windows SHORT),
// or defaultValue when the index is out of range or the parameter does
// not parse as a 16-bit decimal integer.
func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
	if index < 0 || index >= len(ac.Parameters) {
		return defaultValue
	}

	param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
	if err != nil {
		return defaultValue
	}

	return int16(param)
}

// String renders the command for debugging: hex bytes, the command
// character, and the quoted parameter list.
func (ac *ansiCommand) String() string {
	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
		bytesToHex(ac.CommandBytes),
		ac.Command,
		strings.Join(ac.Parameters, "\",\""))
}
+
// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
func isAnsiCommandChar(b byte) bool {
	switch {
	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
		return true
	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
		// non-CSI escape sequence terminator
		return true
	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
		// String escape sequence terminator
		return true
	}
	return false
}

// isXtermOscSequence reports whether command is an unterminated xterm
// OSC (ESC ]) sequence, i.e. current is not yet the BEL terminator.
func isXtermOscSequence(command []byte, current byte) bool {
	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
}

// isCharacterSelectionCmdChar reports whether b is a character-set
// designation command (G0-G3).
func isCharacterSelectionCmdChar(b byte) bool {
	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
}
+
// bytesToHex converts a slice of bytes to a human-readable hex string.
// Each byte is rendered with %X, so values below 0x10 contribute a
// single hex digit (no zero padding).
func bytesToHex(b []byte) string {
	var sb strings.Builder
	for _, ch := range b {
		fmt.Fprintf(&sb, "%X", ch)
	}
	return sb.String()
}
+
// ensureInRange clamps n to the inclusive [min, max] range.
func ensureInRange(n int16, min int16, max int16) int16 {
	switch {
	case n < min:
		return min
	case n > max:
		return max
	default:
		return n
	}
}
+
// GetStdFile returns the *os.File and Windows handle for one of the
// standard streams. nFile must be syscall.STD_INPUT_HANDLE,
// STD_OUTPUT_HANDLE or STD_ERROR_HANDLE; any other value (or a failure
// to obtain the handle) panics.
func GetStdFile(nFile int) (*os.File, uintptr) {
	var file *os.File
	switch nFile {
	case syscall.STD_INPUT_HANDLE:
		file = os.Stdin
	case syscall.STD_OUTPUT_HANDLE:
		file = os.Stdout
	case syscall.STD_ERROR_HANDLE:
		file = os.Stderr
	default:
		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
	}

	fd, err := syscall.GetStdHandle(nFile)
	if err != nil {
		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
	}

	return file, uintptr(fd)
}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 000000000..6055e33b9
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,327 @@
+// +build windows
+
+package winterm
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+// variables) the pointers reference *before* the API completes.
+//
+// As a result, in those cases, the code must hint that the variables remain in active by invoking the
+// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+// require unsafe pointers.
+//
+// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
+// the garbage collector the variables remain in use if:
+//
+// -- The value is not a pointer (e.g., int32, struct)
+// -- The value is not referenced by the method after passing the pointer to Windows
+//
+// See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+ kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+ getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
+ setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
+ setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
+ setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
+ getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+ setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+ scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
+ setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
+ setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
+ writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
+ readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
+ waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+ // Console modes
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+ ENABLE_PROCESSED_INPUT = 0x0001
+ ENABLE_LINE_INPUT = 0x0002
+ ENABLE_ECHO_INPUT = 0x0004
+ ENABLE_WINDOW_INPUT = 0x0008
+ ENABLE_MOUSE_INPUT = 0x0010
+ ENABLE_INSERT_MODE = 0x0020
+ ENABLE_QUICK_EDIT_MODE = 0x0040
+ ENABLE_EXTENDED_FLAGS = 0x0080
+ ENABLE_AUTO_POSITION = 0x0100
+ ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
+
+ ENABLE_PROCESSED_OUTPUT = 0x0001
+ ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+ DISABLE_NEWLINE_AUTO_RETURN = 0x0008
+ ENABLE_LVB_GRID_WORLDWIDE = 0x0010
+
+ // Character attributes
+ // Note:
+ // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+ // Clearing all foreground or background colors results in black; setting all creates white.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+ FOREGROUND_BLUE uint16 = 0x0001
+ FOREGROUND_GREEN uint16 = 0x0002
+ FOREGROUND_RED uint16 = 0x0004
+ FOREGROUND_INTENSITY uint16 = 0x0008
+ FOREGROUND_MASK uint16 = 0x000F
+
+ BACKGROUND_BLUE uint16 = 0x0010
+ BACKGROUND_GREEN uint16 = 0x0020
+ BACKGROUND_RED uint16 = 0x0040
+ BACKGROUND_INTENSITY uint16 = 0x0080
+ BACKGROUND_MASK uint16 = 0x00F0
+
+ COMMON_LVB_MASK uint16 = 0xFF00
+ COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+ COMMON_LVB_UNDERSCORE uint16 = 0x8000
+
+ // Input event types
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+ KEY_EVENT = 0x0001
+ MOUSE_EVENT = 0x0002
+ WINDOW_BUFFER_SIZE_EVENT = 0x0004
+ MENU_EVENT = 0x0008
+ FOCUS_EVENT = 0x0010
+
+ // WaitForSingleObject return codes
+ WAIT_ABANDONED = 0x00000080
+ WAIT_FAILED = 0xFFFFFFFF
+ WAIT_SIGNALED = 0x0000000
+ WAIT_TIMEOUT = 0x00000102
+
+ // WaitForSingleObject wait duration
+ WAIT_INFINITE = 0xFFFFFFFF
+ WAIT_ONE_SECOND = 1000
+ WAIT_HALF_SECOND = 500
+ WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+ CHAR_INFO struct {
+ UnicodeChar uint16
+ Attributes uint16
+ }
+
+ CONSOLE_CURSOR_INFO struct {
+ Size uint32
+ Visible int32
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO struct {
+ Size COORD
+ CursorPosition COORD
+ Attributes uint16
+ Window SMALL_RECT
+ MaximumWindowSize COORD
+ }
+
+ COORD struct {
+ X int16
+ Y int16
+ }
+
+ SMALL_RECT struct {
+ Left int16
+ Top int16
+ Right int16
+ Bottom int16
+ }
+
+ // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+ INPUT_RECORD struct {
+ EventType uint16
+ KeyEvent KEY_EVENT_RECORD
+ }
+
+ KEY_EVENT_RECORD struct {
+ KeyDown int32
+ RepeatCount uint16
+ VirtualKeyCode uint16
+ VirtualScanCode uint16
+ UnicodeChar uint16
+ ControlKeyState uint32
+ }
+
+ WINDOW_BUFFER_SIZE struct {
+ Size COORD
+ }
+)
+
// boolToBOOL converts a Go bool into a Windows BOOL-style int32
// (1 for true, 0 for false).
func boolToBOOL(f bool) int32 {
	if f {
		return 1
	}

	return 0
}
+
// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
	return checkError(r1, r2, err)
}

// SetConsoleCursorInfo sets the size and visibility of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
	return checkError(r1, r2, err)
}

// SetConsoleCursorPosition sets the location of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
	// use() keeps coord live for the GC until the call returns (see the
	// note at the top of this file).
	use(coord)
	return checkError(r1, r2, err)
}
+
+// GetConsoleMode gets the console mode for given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+ err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+ return mode, err
+}
+
// SetConsoleMode sets the console mode for the given file descriptor.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
func SetConsoleMode(handle uintptr, mode uint32) error {
	r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
	use(mode)
	return checkError(r1, r2, err)
}

// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
	info := CONSOLE_SCREEN_BUFFER_INFO{}
	err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
	if err != nil {
		return nil, err
	}
	return &info, nil
}
+
// ScrollConsoleScreenBuffer moves a rectangle of character data (scrollRect,
// clipped to clipRect) to destOrigin, filling the vacated cells with char.
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
	r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
	use(scrollRect)
	use(clipRect)
	use(destOrigin)
	use(char)
	return checkError(r1, r2, err)
}

// SetConsoleScreenBufferSize sets the size of the console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
	r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
	use(coord)
	return checkError(r1, r2, err)
}
+
// SetConsoleTextAttribute sets the attributes of characters written to the
// console screen buffer by the WriteFile or WriteConsole function.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
	r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
	use(attribute)
	return checkError(r1, r2, err)
}

// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
// Note that the size and location must be within and no larger than the backing console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
	r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
	use(isAbsolute)
	use(rect)
	return checkError(r1, r2, err)
}
+
// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
// buffer must be non-empty: &buffer[0] panics on a zero-length slice.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
	r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
	use(buffer)
	use(bufferSize)
	use(bufferCoord)
	return checkError(r1, r2, err)
}

// ReadConsoleInput reads (and removes) data from the console input buffer.
// buffer must be non-empty: &buffer[0] panics on a zero-length slice.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
	r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
	use(buffer)
	return checkError(r1, r2, err)
}
+
// WaitForSingleObject waits for the passed handle to be signaled.
// It returns true if the handle was signaled; false otherwise.
// Any status other than the three listed below (e.g. WAIT_FAILED) falls
// through to the final return, surfacing err from the syscall.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
	r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
	switch r1 {
	case WAIT_ABANDONED, WAIT_TIMEOUT:
		return false, nil
	case WAIT_SIGNALED:
		return true, nil
	}
	use(msWait) // legacy keep-alive no-op; only reached for unhandled statuses
	return false, err
}
+
// String helpers

// String renders the screen-buffer info as "Size(..) Cursor(..) Window(..) Max(..)" for debug logging.
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
	return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
}

// String renders the coordinate as "X,Y".
func (coord COORD) String() string {
	return fmt.Sprintf("%v,%v", coord.X, coord.Y)
}

// String renders the rectangle as "(Left,Top),(Right,Bottom)".
func (rect SMALL_RECT) String() string {
	return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
}
+
// checkError interprets the result triple of a Windows API call made via
// proc.Call. A non-zero r1 means success (nil). On failure the syscall's
// own error is preferred; EINVAL is the fallback when none was supplied.
// r2 is accepted for call-site convenience but carries no information here.
func checkError(r1, r2 uintptr, err error) error {
	switch {
	case r1 != 0:
		// Windows APIs return non-zero to indicate success.
		return nil
	case err != nil:
		return err
	default:
		return syscall.EINVAL
	}
}
+
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
// The Win32 ABI passes COORD by value as a packed 32-bit quantity, so the two
// int16 fields are reinterpreted as one uint32 via unsafe.Pointer.
func coordToPointer(c COORD) uintptr {
	// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
	return uintptr(*((*uint32)(unsafe.Pointer(&c))))
}

// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
// NOTE(review): modern Go expresses this with runtime.KeepAlive; kept as-is
// since this is a faithful vendored copy.
func use(p interface{}) {}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
new file mode 100644
index 000000000..cbec8f728
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
const (
	// FOREGROUND_COLOR_MASK selects only the RGB foreground bits (intensity excluded).
	FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
	// BACKGROUND_COLOR_MASK selects only the RGB background bits (intensity excluded).
	BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
)
+
// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
// request represented by the passed ANSI mode.
//
// windowsMode is the current attribute word, baseMode the attribute word to
// fall back to for "default" colors, and inverted the current reverse-video
// flag. The updated attribute word and inversion flag are returned; inversion
// itself is not baked into the attributes here (see invertAttributes).
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
	switch ansiMode {

	// Mode styles
	case ansiterm.ANSI_SGR_BOLD:
		windowsMode = windowsMode | FOREGROUND_INTENSITY

	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
		windowsMode &^= FOREGROUND_INTENSITY

	case ansiterm.ANSI_SGR_UNDERLINE:
		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE

	case ansiterm.ANSI_SGR_REVERSE:
		inverted = true

	case ansiterm.ANSI_SGR_REVERSE_OFF:
		inverted = false

	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
		windowsMode &^= COMMON_LVB_UNDERSCORE

	// Foreground colors: clear the RGB foreground bits, then set the new color.
	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)

	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)

	case ansiterm.ANSI_SGR_FOREGROUND_RED:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED

	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN

	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN

	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE

	// Background colors: clear the RGB background bits, then set the new color.
	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
		// Black with no intensity
		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)

	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)

	case ansiterm.ANSI_SGR_BACKGROUND_RED:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED

	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN

	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN

	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
	}

	return windowsMode, inverted
}
+
// invertAttributes inverts the foreground and background colors of a Windows attributes value
// by swapping the low (foreground) and high (background) nibbles while preserving the
// COMMON_LVB_* bits.
func invertAttributes(windowsMode uint16) uint16 {
	return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
new file mode 100644
index 000000000..3ee06ea72
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
@@ -0,0 +1,101 @@
+// +build windows
+
+package winterm
+
// Axis selectors for moveCursor.
const (
	horizontal = iota
	vertical
)
+
+func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
+ if h.originMode {
+ sr := h.effectiveSr(info.Window)
+ return SMALL_RECT{
+ Top: sr.top,
+ Bottom: sr.bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+ } else {
+ return SMALL_RECT{
+ Top: info.Window.Top,
+ Bottom: info.Window.Bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+ }
+}
+
+// setCursorPosition sets the cursor to the specified position, bounded to the screen size
+func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
+ position.X = ensureInRange(position.X, window.Left, window.Right)
+ position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
+ err := SetConsoleCursorPosition(h.fd, position)
+ if err != nil {
+ return err
+ }
+ h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
+ return err
+}
+
// moveCursorVertical moves the cursor param rows (positive = down).
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
	return h.moveCursor(vertical, param)
}

// moveCursorHorizontal moves the cursor param columns (positive = right).
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
	return h.moveCursor(horizontal, param)
}
+
+func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ switch moveMode {
+ case horizontal:
+ position.X += int16(param)
+ case vertical:
+ position.Y += int16(param)
+ }
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ position.X = 0
+ position.Y += int16(param)
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ position.X = int16(param) - 1
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
new file mode 100644
index 000000000..244b5fa25
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
@@ -0,0 +1,84 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
// clearRange clears the inclusive span [fromCoord, toCoord] in up to three
// passes: a partial first line, the middle rows, and the final line.
// NOTE(review): each pass clears columns up to toCoord.X only; for the middle
// rows this is the full width only when toCoord.X is the last column —
// confirm intended semantics against callers (ED/EL pass Size.X-derived ends).
func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
	// Ignore an invalid (negative area) request
	if toCoord.Y < fromCoord.Y {
		return nil
	}

	var err error

	var coordStart = COORD{}
	var coordEnd = COORD{}

	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
	xEnd, yEnd := toCoord.X, toCoord.Y

	// Clear any partial initial line
	if xCurrent > 0 {
		coordStart.X, coordStart.Y = xCurrent, yCurrent
		coordEnd.X, coordEnd.Y = xEnd, yCurrent

		err = h.clearRect(attributes, coordStart, coordEnd)
		if err != nil {
			return err
		}

		xCurrent = 0
		yCurrent += 1
	}

	// Clear intervening rectangular section
	if yCurrent < yEnd {
		coordStart.X, coordStart.Y = xCurrent, yCurrent
		coordEnd.X, coordEnd.Y = xEnd, yEnd-1

		err = h.clearRect(attributes, coordStart, coordEnd)
		if err != nil {
			return err
		}

		xCurrent = 0
		yCurrent = yEnd
	}

	// Clear remaining partial ending line
	coordStart.X, coordStart.Y = xCurrent, yCurrent
	coordEnd.X, coordEnd.Y = xEnd, yEnd

	err = h.clearRect(attributes, coordStart, coordEnd)
	if err != nil {
		return err
	}

	return nil
}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+ region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+ width := toCoord.X - fromCoord.X + 1
+ height := toCoord.Y - fromCoord.Y + 1
+ size := uint32(width) * uint32(height)
+
+ if size <= 0 {
+ return nil
+ }
+
+ buffer := make([]CHAR_INFO, size)
+
+ char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+ for i := 0; i < int(size); i++ {
+ buffer[i] = char
+ }
+
+ err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 000000000..2d27fa1d0
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
// effectiveSr gets the current effective scroll region in buffer coordinates.
// The stored region (h.sr) is window-relative; it is rebased onto window.Top
// and clamped to the window. A degenerate region (top >= bottom) falls back
// to the full window.
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
	if top >= bottom {
		top = window.Top
		bottom = window.Bottom
	}
	return scrollRegion{top: top, bottom: bottom}
}
+
// scrollUp scrolls the effective scroll region up by param lines.
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}

	sr := h.effectiveSr(info.Window)
	return h.scroll(param, sr, info)
}

// scrollDown scrolls the effective scroll region down, implemented as a
// negative-direction scrollUp.
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
	return h.scrollUp(-param)
}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ start := info.CursorPosition.Y
+ sr := h.effectiveSr(info.Window)
+ // Lines cannot be inserted or deleted outside the scrolling region.
+ if start >= sr.top && start <= sr.bottom {
+ sr.top = start
+ return h.scroll(param, sr, info)
+ } else {
+ return nil
+ }
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+ return h.deleteLines(-param)
+}
+
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
// Positive param moves content up (vacating lines at the bottom); vacated
// cells are filled with spaces in the current attributes.
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)

	// Copy from and clip to the scroll region (full buffer width)
	scrollRect := SMALL_RECT{
		Top:    sr.top,
		Bottom: sr.bottom,
		Left:   0,
		Right:  info.Size.X - 1,
	}

	// Origin to which area should be copied
	destOrigin := COORD{
		X: 0,
		Y: sr.top - int16(param),
	}

	char := CHAR_INFO{
		UnicodeChar: ' ',
		Attributes:  h.attributes,
	}

	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
		return err
	}
	return nil
}
+
// deleteCharacters removes param characters at the cursor by shifting the
// rest of the line left.
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	return h.scrollLine(param, info.CursorPosition, info)
}

// insertCharacters inserts param blank characters at the cursor, implemented
// as a negative-direction delete.
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
	return h.deleteCharacters(-param)
}
+
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
// Positive columns shift content left (delete); vacated cells are filled
// with spaces in the current attributes.
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
	// Copy from and clip to the scroll region (full buffer width)
	scrollRect := SMALL_RECT{
		Top:    position.Y,
		Bottom: position.Y,
		Left:   position.X,
		Right:  info.Size.X - 1,
	}

	// Origin to which area should be copied
	destOrigin := COORD{
		X: position.X - int16(columns),
		Y: position.Y,
	}

	char := CHAR_INFO{
		UnicodeChar: ' ',
		Attributes:  h.attributes,
	}

	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
		return err
	}
	return nil
}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 000000000..afa7635d7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
// addInRange increments a value by the passed quantity while ensuring the values
// always remain within the supplied min / max range.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
	return ensureInRange(n+increment, min, max)
}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
new file mode 100644
index 000000000..2d40fb75a
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
@@ -0,0 +1,743 @@
+// +build windows
+
+package winterm
+
+import (
+ "bytes"
+ "log"
+ "os"
+ "strconv"
+
+ "github.com/Azure/go-ansiterm"
+)
+
// windowsAnsiEventHandler translates parsed ANSI escape-sequence events into
// Win32 console API calls. Printable output accumulates in buffer and is
// flushed before any cursor/attribute manipulation.
type windowsAnsiEventHandler struct {
	fd             uintptr                       // console handle used for all Win32 calls
	file           *os.File                      // file backing fd
	infoReset      *CONSOLE_SCREEN_BUFFER_INFO   // buffer state captured at creation; source of "default" attributes
	sr             scrollRegion                  // window-relative scroll region (see effectiveSr)
	buffer         bytes.Buffer                  // pending printable bytes not yet written to the console
	attributes     uint16                        // current text attributes (pre-inversion)
	inverted       bool                          // reverse-video active
	wrapNext       bool                          // a byte was printed in the last column; wrap is deferred
	drewMarginByte bool
	originMode     bool // DECOM: cursor addressing relative to the scroll region
	marginByte     byte // the byte held back while wrapNext is set
	curInfo        *CONSOLE_SCREEN_BUFFER_INFO   // cached screen info, if valid
	curPos         COORD                         // cached cursor position, if curInfo is valid
	logf           func(string, ...interface{}) // debug logger; never nil after construction
}
+
// Option configures a windowsAnsiEventHandler at construction time.
type Option func(*windowsAnsiEventHandler)

// WithLogf directs the handler's debug logging to f.
func WithLogf(f func(string, ...interface{})) Option {
	return func(w *windowsAnsiEventHandler) {
		w.logf = f
	}
}
+
+func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
+ infoReset, err := GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil
+ }
+
+ h := &windowsAnsiEventHandler{
+ fd: fd,
+ file: file,
+ infoReset: infoReset,
+ attributes: infoReset.Attributes,
+ }
+ for _, o := range opts {
+ o(h)
+ }
+
+ if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+ logFile, _ := os.Create("winEventHandler.log")
+ logger := log.New(logFile, "", log.LstdFlags)
+ if h.logf != nil {
+ l := h.logf
+ h.logf = func(s string, v ...interface{}) {
+ l(s, v...)
+ logger.Printf(s, v...)
+ }
+ } else {
+ h.logf = logger.Printf
+ }
+ }
+
+ if h.logf == nil {
+ h.logf = func(string, ...interface{}) {}
+ }
+
+ return h
+}
+
// scrollRegion is an inclusive [top, bottom] row range; stored window-relative
// in the handler and rebased to buffer coordinates by effectiveSr.
type scrollRegion struct {
	top    int16
	bottom int16
}
+
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
// current cursor position and scroll region settings, in which case it returns
// true. If no special handling is necessary, then it does nothing and returns
// false.
//
// In the false case, the caller should ensure that a carriage return
// and line feed are inserted or that the text is otherwise wrapped.
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
	if h.wrapNext {
		// A deferred wrap is pending; flush it before moving the cursor.
		if err := h.Flush(); err != nil {
			return false, err
		}
		h.clearWrap()
	}
	pos, info, err := h.getCurrentInfo()
	if err != nil {
		return false, err
	}
	sr := h.effectiveSr(info.Window)
	if pos.Y == sr.bottom {
		// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
		// is the full window.
		if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
			if includeCR {
				pos.X = 0
				h.updatePos(pos)
			}
			return false, nil
		}

		// A custom scroll region is active. Scroll the window manually to simulate
		// the LF.
		if err := h.Flush(); err != nil {
			return false, err
		}
		h.logf("Simulating LF inside scroll region")
		if err := h.scrollUp(1); err != nil {
			return false, err
		}
		if includeCR {
			pos.X = 0
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return false, err
			}
		}
		return true, nil

	} else if pos.Y < info.Window.Bottom {
		// Let Windows handle the LF.
		pos.Y++
		if includeCR {
			pos.X = 0
		}
		h.updatePos(pos)
		return false, nil
	} else {
		// The cursor is at the bottom of the screen but outside the scroll
		// region. Skip the LF.
		h.logf("Simulating LF outside scroll region")
		if includeCR {
			if err := h.Flush(); err != nil {
				return false, err
			}
			pos.X = 0
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return false, err
			}
		}
		return true, nil
	}
}
+
// executeLF executes a LF without a CR.
// If simulateLF did not fully handle it, the raw LF byte is buffered and —
// because the Windows console resets the column on LF — the cursor X position
// is restored afterwards when it was non-zero.
func (h *windowsAnsiEventHandler) executeLF() error {
	handled, err := h.simulateLF(false)
	if err != nil {
		return err
	}
	if !handled {
		// Windows LF will reset the cursor column position. Write the LF
		// and restore the cursor position.
		pos, _, err := h.getCurrentInfo()
		if err != nil {
			return err
		}
		h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
		if pos.X != 0 {
			if err := h.Flush(); err != nil {
				return err
			}
			h.logf("Resetting cursor position for LF without CR")
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return err
			}
		}
	}
	return nil
}
+
// Print buffers one printable byte, implementing deferred (VT100-style)
// auto-wrap: a byte destined for the last column is held in marginByte until
// the next byte arrives, at which point the held byte is emitted and the
// wrap (CR+LF) actually happens.
func (h *windowsAnsiEventHandler) Print(b byte) error {
	if h.wrapNext {
		h.buffer.WriteByte(h.marginByte)
		h.clearWrap()
		if _, err := h.simulateLF(true); err != nil {
			return err
		}
	}
	pos, info, err := h.getCurrentInfo()
	if err != nil {
		return err
	}
	if pos.X == info.Size.X-1 {
		// Last column: defer the wrap.
		h.wrapNext = true
		h.marginByte = b
	} else {
		pos.X++
		h.updatePos(pos)
		h.buffer.WriteByte(b)
	}
	return nil
}
+
// Execute handles a C0 control byte: TAB, BEL, BS, VT/FF, LF and CR are
// implemented; all other control bytes are silently ignored.
func (h *windowsAnsiEventHandler) Execute(b byte) error {
	switch b {
	case ansiterm.ANSI_TAB:
		h.logf("Execute(TAB)")
		// Move to the next tab stop, but preserve auto-wrap if already set.
		if !h.wrapNext {
			pos, info, err := h.getCurrentInfo()
			if err != nil {
				return err
			}
			// Fixed 8-column tab stops, clamped to the last column.
			pos.X = (pos.X + 8) - pos.X%8
			if pos.X >= info.Size.X {
				pos.X = info.Size.X - 1
			}
			if err := h.Flush(); err != nil {
				return err
			}
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return err
			}
		}
		return nil

	case ansiterm.ANSI_BEL:
		// Pass the bell through; the console produces the sound.
		h.buffer.WriteByte(ansiterm.ANSI_BEL)
		return nil

	case ansiterm.ANSI_BACKSPACE:
		if h.wrapNext {
			if err := h.Flush(); err != nil {
				return err
			}
			h.clearWrap()
		}
		pos, _, err := h.getCurrentInfo()
		if err != nil {
			return err
		}
		// Backspace stops at column 0; it never wraps to the previous line.
		if pos.X > 0 {
			pos.X--
			h.updatePos(pos)
			h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
		}
		return nil

	case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
		// Treat as true LF.
		return h.executeLF()

	case ansiterm.ANSI_LINE_FEED:
		// Simulate a CR and LF for now since there is no way in go-ansiterm
		// to tell if the LF should include CR (and more things break when it's
		// missing than when it's incorrectly added).
		handled, err := h.simulateLF(true)
		if handled || err != nil {
			return err
		}
		return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)

	case ansiterm.ANSI_CARRIAGE_RETURN:
		if h.wrapNext {
			if err := h.Flush(); err != nil {
				return err
			}
			h.clearWrap()
		}
		pos, _, err := h.getCurrentInfo()
		if err != nil {
			return err
		}
		if pos.X != 0 {
			pos.X = 0
			h.updatePos(pos)
			h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
		}
		return nil

	default:
		return nil
	}
}
+
// CUU — Cursor Up: moves the cursor up param rows.
func (h *windowsAnsiEventHandler) CUU(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorVertical(-param)
}

// CUD — Cursor Down: moves the cursor down param rows.
func (h *windowsAnsiEventHandler) CUD(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorVertical(param)
}

// CUF — Cursor Forward: moves the cursor right param columns.
func (h *windowsAnsiEventHandler) CUF(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorHorizontal(param)
}

// CUB — Cursor Backward: moves the cursor left param columns.
func (h *windowsAnsiEventHandler) CUB(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorHorizontal(-param)
}

// CNL — Cursor Next Line: moves the cursor to column 0, param rows down.
func (h *windowsAnsiEventHandler) CNL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorLine(param)
}

// CPL — Cursor Previous Line: moves the cursor to column 0, param rows up.
func (h *windowsAnsiEventHandler) CPL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorLine(-param)
}

// CHA — Cursor Horizontal Absolute: moves the cursor to 1-based column param.
func (h *windowsAnsiEventHandler) CHA(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorColumn(param)
}
+
// VPA — Vertical Position Absolute: moves the cursor to the 1-based row param
// (relative to the cursor window), keeping the current column.
func (h *windowsAnsiEventHandler) VPA(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("VPA: [[%d]]", param)
	h.clearWrap()
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	window := h.getCursorWindow(info)
	position := info.CursorPosition
	position.Y = window.Top + int16(param) - 1
	return h.setCursorPosition(position, window)
}
+
// CUP — Cursor Position: moves the cursor to the 1-based (row, col), relative
// to the cursor window (which honors origin mode).
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUP: [[%d %d]]", row, col)
	h.clearWrap()
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}

	window := h.getCursorWindow(info)
	position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
	return h.setCursorPosition(position, window)
}
+
// HVP — Horizontal and Vertical Position: identical to CUP.
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("HVP: [[%d %d]]", row, col)
	h.clearWrap()
	return h.CUP(row, col)
}

// DECTCEM — Text Cursor Enable Mode. Logged only; cursor visibility is not
// changed on the Windows console here.
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
	h.clearWrap()
	return nil
}
+
// DECOM — Origin Mode: when enabled, cursor addressing becomes relative to
// the scroll region. The cursor is homed afterwards, per the DEC behavior.
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
	h.clearWrap()
	h.originMode = enable
	return h.CUP(1, 1)
}
+
// DECCOLM — 80/132 Column Mode: clears the screen, resizes the buffer/window
// to 80 or 132 columns, and homes the cursor. The buffer is grown before the
// window and shrunk after it, since the window must always fit the buffer.
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
	h.clearWrap()
	if err := h.ED(2); err != nil {
		return err
	}
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	targetWidth := int16(80)
	if use132 {
		targetWidth = 132
	}
	if info.Size.X < targetWidth {
		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
			h.logf("set buffer failed: %v", err)
			return err
		}
	}
	window := info.Window
	window.Left = 0
	window.Right = targetWidth - 1
	if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
		h.logf("set window failed: %v", err)
		return err
	}
	if info.Size.X > targetWidth {
		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
			h.logf("set buffer failed: %v", err)
			return err
		}
	}
	return SetConsoleCursorPosition(h.fd, COORD{0, 0})
}
+
// ED — Erase in Display.
func (h *windowsAnsiEventHandler) ED(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("ED: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()

	// [J  -- Erases from the cursor to the end of the screen, including the cursor position.
	// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
	// [2J -- Erases the complete display. The cursor does not move.
	// Notes:
	// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles

	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}

	var start COORD
	var end COORD

	switch param {
	case 0:
		start = info.CursorPosition
		end = COORD{info.Size.X - 1, info.Size.Y - 1}

	case 1:
		start = COORD{0, 0}
		end = info.CursorPosition

	case 2:
		start = COORD{0, 0}
		end = COORD{info.Size.X - 1, info.Size.Y - 1}
	}

	err = h.clearRange(h.attributes, start, end)
	if err != nil {
		return err
	}

	// If the whole buffer was cleared, move the window to the top while preserving
	// the window-relative cursor position.
	if param == 2 {
		pos := info.CursorPosition
		window := info.Window
		pos.Y -= window.Top
		window.Bottom -= window.Top
		window.Top = 0
		if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
			return err
		}
		if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
			return err
		}
	}

	return nil
}
+
// EL — Erase in Line.
func (h *windowsAnsiEventHandler) EL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("EL: [%v]", strconv.Itoa(param))
	h.clearWrap()

	// [K  -- Erases from the cursor to the end of the line, including the cursor position.
	// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
	// [2K -- Erases the complete line.

	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}

	var start COORD
	var end COORD

	switch param {
	case 0:
		start = info.CursorPosition
		end = COORD{info.Size.X, info.CursorPosition.Y}

	case 1:
		start = COORD{0, info.CursorPosition.Y}
		end = info.CursorPosition

	case 2:
		start = COORD{0, info.CursorPosition.Y}
		end = COORD{info.Size.X, info.CursorPosition.Y}
	}

	err = h.clearRange(h.attributes, start, end)
	if err != nil {
		return err
	}

	return nil
}
+
// IL — Insert Lines at the cursor row.
func (h *windowsAnsiEventHandler) IL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("IL: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.insertLines(param)
}

// DL — Delete Lines at the cursor row.
func (h *windowsAnsiEventHandler) DL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DL: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.deleteLines(param)
}

// ICH — Insert Characters at the cursor position.
func (h *windowsAnsiEventHandler) ICH(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("ICH: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.insertCharacters(param)
}

// DCH — Delete Characters at the cursor position.
func (h *windowsAnsiEventHandler) DCH(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DCH: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.deleteCharacters(param)
}
+
+func (h *windowsAnsiEventHandler) SGR(params []int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ strings := []string{}
+ for _, v := range params {
+ strings = append(strings, strconv.Itoa(v))
+ }
+
+ h.logf("SGR: [%v]", strings)
+
+ if len(params) <= 0 {
+ h.attributes = h.infoReset.Attributes
+ h.inverted = false
+ } else {
+ for _, attr := range params {
+
+ if attr == ansiterm.ANSI_SGR_RESET {
+ h.attributes = h.infoReset.Attributes
+ h.inverted = false
+ continue
+ }
+
+ h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
+ }
+ }
+
+ attributes := h.attributes
+ if h.inverted {
+ attributes = invertAttributes(attributes)
+ }
+ err := SetConsoleTextAttribute(h.fd, attributes)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) SU(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("SU: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.scrollUp(param)
+}
+
+func (h *windowsAnsiEventHandler) SD(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("SD: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.scrollDown(param)
+}
+
+func (h *windowsAnsiEventHandler) DA(params []string) error {
+ h.logf("DA: [%v]", params)
+ // DA cannot be implemented because it must send data on the VT100 input stream,
+ // which is not available to go-ansiterm.
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DECSTBM: [%d, %d]", top, bottom)
+
+ // Windows is 0 indexed, Linux is 1 indexed
+ h.sr.top = int16(top - 1)
+ h.sr.bottom = int16(bottom - 1)
+
+ // This command also moves the cursor to the origin.
+ h.clearWrap()
+ return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("RI: []")
+ h.clearWrap()
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ sr := h.effectiveSr(info.Window)
+ if info.CursorPosition.Y == sr.top {
+ return h.scrollDown(1)
+ }
+
+ return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+ h.logf("IND: []")
+ return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+ h.curInfo = nil
+ if h.buffer.Len() > 0 {
+ h.logf("Flush: [%s]", h.buffer.Bytes())
+ if _, err := h.buffer.WriteTo(h.file); err != nil {
+ return err
+ }
+ }
+
+ if h.wrapNext && !h.drewMarginByte {
+ h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+ size := COORD{1, 1}
+ position := COORD{0, 0}
+ region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+ if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+ return err
+ }
+ h.drewMarginByte = true
+ }
+ return nil
+}
+
+// cacheConsoleInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+ if h.curInfo == nil {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return COORD{}, nil, err
+ }
+ h.curInfo = info
+ h.curPos = info.CursorPosition
+ }
+ return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+ if h.curInfo == nil {
+ panic("failed to call getCurrentInfo before calling updatePos")
+ }
+ h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+ h.wrapNext = false
+ h.drewMarginByte = false
+}
diff --git a/vendor/github.com/JeffAshton/win_pdh/AUTHORS b/vendor/github.com/JeffAshton/win_pdh/AUTHORS
new file mode 100644
index 000000000..7129f3d73
--- /dev/null
+++ b/vendor/github.com/JeffAshton/win_pdh/AUTHORS
@@ -0,0 +1,14 @@
+# This is the official list of 'win_pdh' authors for copyright purposes.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+# Contributors
+# ============
+
+Alexander Neumann <an2048@googlemail.com>
+Joseph Watson <jtwatson@linux-consulting.us>
+Kevin Pors <krpors@gmail.com>
diff --git a/vendor/github.com/JeffAshton/win_pdh/LICENSE b/vendor/github.com/JeffAshton/win_pdh/LICENSE
new file mode 100644
index 000000000..5bf54be11
--- /dev/null
+++ b/vendor/github.com/JeffAshton/win_pdh/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2010 The win_pdh Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/JeffAshton/win_pdh/README.mdown b/vendor/github.com/JeffAshton/win_pdh/README.mdown
new file mode 100644
index 000000000..268cd9f84
--- /dev/null
+++ b/vendor/github.com/JeffAshton/win_pdh/README.mdown
@@ -0,0 +1,15 @@
+About win_pdh
+=============
+
+win_pdh is a Windows Performance Data Helper wrapper package for Go.
+
+Originally part of [walk](https://github.com/lxn/walk) and [win](https://github.com/lxn/win), it is now a separate
+project.
+
+Setup
+=====
+
+Make sure you have a working Go installation.
+See [Getting Started](http://golang.org/doc/install.html)
+
+Now run `go get github.com/JeffAshton/win_pdh`
diff --git a/vendor/github.com/JeffAshton/win_pdh/pdh.go b/vendor/github.com/JeffAshton/win_pdh/pdh.go
new file mode 100644
index 000000000..56199001a
--- /dev/null
+++ b/vendor/github.com/JeffAshton/win_pdh/pdh.go
@@ -0,0 +1,453 @@
+// Copyright 2013 The win_pdh Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package win_pdh
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Error codes
+const (
+ ERROR_SUCCESS = 0
+ ERROR_INVALID_FUNCTION = 1
+)
+
+type (
+ HANDLE uintptr
+)
+
+// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h
+const (
+ PDH_CSTATUS_VALID_DATA = 0x00000000 // The returned data is valid.
+ PDH_CSTATUS_NEW_DATA = 0x00000001 // The return data value is valid and different from the last sample.
+ PDH_CSTATUS_NO_MACHINE = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline.
+ PDH_CSTATUS_NO_INSTANCE = 0x800007D1
+ PDH_MORE_DATA = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'.
+ PDH_CSTATUS_ITEM_NOT_VALIDATED = 0x800007D3
+ PDH_RETRY = 0x800007D4
+ PDH_NO_DATA = 0x800007D5 // The query does not currently contain any counters (for example, limited access)
+ PDH_CALC_NEGATIVE_DENOMINATOR = 0x800007D6
+ PDH_CALC_NEGATIVE_TIMEBASE = 0x800007D7
+ PDH_CALC_NEGATIVE_VALUE = 0x800007D8
+ PDH_DIALOG_CANCELLED = 0x800007D9
+ PDH_END_OF_LOG_FILE = 0x800007DA
+ PDH_ASYNC_QUERY_TIMEOUT = 0x800007DB
+ PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE = 0x800007DC
+ PDH_CSTATUS_NO_OBJECT = 0xC0000BB8
+ PDH_CSTATUS_NO_COUNTER = 0xC0000BB9 // The specified counter could not be found.
+ PDH_CSTATUS_INVALID_DATA = 0xC0000BBA // The counter was successfully found, but the data returned is not valid.
+ PDH_MEMORY_ALLOCATION_FAILURE = 0xC0000BBB
+ PDH_INVALID_HANDLE = 0xC0000BBC
+ PDH_INVALID_ARGUMENT = 0xC0000BBD // Required argument is missing or incorrect.
+ PDH_FUNCTION_NOT_FOUND = 0xC0000BBE
+ PDH_CSTATUS_NO_COUNTERNAME = 0xC0000BBF
+ PDH_CSTATUS_BAD_COUNTERNAME = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path.
+ PDH_INVALID_BUFFER = 0xC0000BC1
+ PDH_INSUFFICIENT_BUFFER = 0xC0000BC2
+ PDH_CANNOT_CONNECT_MACHINE = 0xC0000BC3
+ PDH_INVALID_PATH = 0xC0000BC4
+ PDH_INVALID_INSTANCE = 0xC0000BC5
+ PDH_INVALID_DATA = 0xC0000BC6 // specified counter does not contain valid data or a successful status code.
+ PDH_NO_DIALOG_DATA = 0xC0000BC7
+ PDH_CANNOT_READ_NAME_STRINGS = 0xC0000BC8
+ PDH_LOG_FILE_CREATE_ERROR = 0xC0000BC9
+ PDH_LOG_FILE_OPEN_ERROR = 0xC0000BCA
+ PDH_LOG_TYPE_NOT_FOUND = 0xC0000BCB
+ PDH_NO_MORE_DATA = 0xC0000BCC
+ PDH_ENTRY_NOT_IN_LOG_FILE = 0xC0000BCD
+ PDH_DATA_SOURCE_IS_LOG_FILE = 0xC0000BCE
+ PDH_DATA_SOURCE_IS_REAL_TIME = 0xC0000BCF
+ PDH_UNABLE_READ_LOG_HEADER = 0xC0000BD0
+ PDH_FILE_NOT_FOUND = 0xC0000BD1
+ PDH_FILE_ALREADY_EXISTS = 0xC0000BD2
+ PDH_NOT_IMPLEMENTED = 0xC0000BD3
+ PDH_STRING_NOT_FOUND = 0xC0000BD4
+ PDH_UNABLE_MAP_NAME_FILES = 0x80000BD5
+ PDH_UNKNOWN_LOG_FORMAT = 0xC0000BD6
+ PDH_UNKNOWN_LOGSVC_COMMAND = 0xC0000BD7
+ PDH_LOGSVC_QUERY_NOT_FOUND = 0xC0000BD8
+ PDH_LOGSVC_NOT_OPENED = 0xC0000BD9
+ PDH_WBEM_ERROR = 0xC0000BDA
+ PDH_ACCESS_DENIED = 0xC0000BDB
+ PDH_LOG_FILE_TOO_SMALL = 0xC0000BDC
+ PDH_INVALID_DATASOURCE = 0xC0000BDD
+ PDH_INVALID_SQLDB = 0xC0000BDE
+ PDH_NO_COUNTERS = 0xC0000BDF
+ PDH_SQL_ALLOC_FAILED = 0xC0000BE0
+ PDH_SQL_ALLOCCON_FAILED = 0xC0000BE1
+ PDH_SQL_EXEC_DIRECT_FAILED = 0xC0000BE2
+ PDH_SQL_FETCH_FAILED = 0xC0000BE3
+ PDH_SQL_ROWCOUNT_FAILED = 0xC0000BE4
+ PDH_SQL_MORE_RESULTS_FAILED = 0xC0000BE5
+ PDH_SQL_CONNECT_FAILED = 0xC0000BE6
+ PDH_SQL_BIND_FAILED = 0xC0000BE7
+ PDH_CANNOT_CONNECT_WMI_SERVER = 0xC0000BE8
+ PDH_PLA_COLLECTION_ALREADY_RUNNING = 0xC0000BE9
+ PDH_PLA_ERROR_SCHEDULE_OVERLAP = 0xC0000BEA
+ PDH_PLA_COLLECTION_NOT_FOUND = 0xC0000BEB
+ PDH_PLA_ERROR_SCHEDULE_ELAPSED = 0xC0000BEC
+ PDH_PLA_ERROR_NOSTART = 0xC0000BED
+ PDH_PLA_ERROR_ALREADY_EXISTS = 0xC0000BEE
+ PDH_PLA_ERROR_TYPE_MISMATCH = 0xC0000BEF
+ PDH_PLA_ERROR_FILEPATH = 0xC0000BF0
+ PDH_PLA_SERVICE_ERROR = 0xC0000BF1
+ PDH_PLA_VALIDATION_ERROR = 0xC0000BF2
+ PDH_PLA_VALIDATION_WARNING = 0x80000BF3
+ PDH_PLA_ERROR_NAME_TOO_LONG = 0xC0000BF4
+ PDH_INVALID_SQL_LOG_FORMAT = 0xC0000BF5
+ PDH_COUNTER_ALREADY_IN_QUERY = 0xC0000BF6
+ PDH_BINARY_LOG_CORRUPT = 0xC0000BF7
+ PDH_LOG_SAMPLE_TOO_SMALL = 0xC0000BF8
+ PDH_OS_LATER_VERSION = 0xC0000BF9
+ PDH_OS_EARLIER_VERSION = 0xC0000BFA
+ PDH_INCORRECT_APPEND_TIME = 0xC0000BFB
+ PDH_UNMATCHED_APPEND_COUNTER = 0xC0000BFC
+ PDH_SQL_ALTER_DETAIL_FAILED = 0xC0000BFD
+ PDH_QUERY_PERF_DATA_TIMEOUT = 0xC0000BFE
+)
+
+// Formatting options for GetFormattedCounterValue().
+const (
+ PDH_FMT_RAW = 0x00000010
+ PDH_FMT_ANSI = 0x00000020
+ PDH_FMT_UNICODE = 0x00000040
+ PDH_FMT_LONG = 0x00000100 // Return data as a long int.
+ PDH_FMT_DOUBLE = 0x00000200 // Return data as a double precision floating point real.
+ PDH_FMT_LARGE = 0x00000400 // Return data as a 64 bit integer.
+ PDH_FMT_NOSCALE = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor.
+ PDH_FMT_1000 = 0x00002000 // can be OR-ed: multiply the actual value by 1,000.
+ PDH_FMT_NODATA = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing.
+ PDH_FMT_NOCAP100 = 0x00008000 // can be OR-ed: do not cap values > 100.
+ PERF_DETAIL_COSTLY = 0x00010000
+ PERF_DETAIL_STANDARD = 0x0000FFFF
+)
+
+type (
+ PDH_HQUERY HANDLE // query handle
+ PDH_HCOUNTER HANDLE // counter handle
+)
+
+// Union specialization for double values
+type PDH_FMT_COUNTERVALUE_DOUBLE struct {
+ CStatus uint32
+ DoubleValue float64
+}
+
+// Union specialization for 64 bit integer values
+type PDH_FMT_COUNTERVALUE_LARGE struct {
+ CStatus uint32
+ LargeValue int64
+}
+
+// Union specialization for long values
+type PDH_FMT_COUNTERVALUE_LONG struct {
+ CStatus uint32
+ LongValue int32
+ padding [4]byte
+}
+
+// Union specialization for double values, used by PdhGetFormattedCounterArrayDouble()
+type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct {
+ SzName *uint16 // pointer to a string
+ FmtValue PDH_FMT_COUNTERVALUE_DOUBLE
+}
+
+// Union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge()
+type PDH_FMT_COUNTERVALUE_ITEM_LARGE struct {
+ SzName *uint16 // pointer to a string
+ FmtValue PDH_FMT_COUNTERVALUE_LARGE
+}
+
+// Union specialization for long values, used by PdhGetFormattedCounterArrayLong()
+type PDH_FMT_COUNTERVALUE_ITEM_LONG struct {
+ SzName *uint16 // pointer to a string
+ FmtValue PDH_FMT_COUNTERVALUE_LONG
+}
+
+var (
+ // Library
+ libpdhDll *syscall.DLL
+
+ // Functions
+ pdh_AddCounterW *syscall.Proc
+ pdh_AddEnglishCounterW *syscall.Proc
+ pdh_CloseQuery *syscall.Proc
+ pdh_CollectQueryData *syscall.Proc
+ pdh_GetFormattedCounterValue *syscall.Proc
+ pdh_GetFormattedCounterArrayW *syscall.Proc
+ pdh_OpenQuery *syscall.Proc
+ pdh_ValidatePathW *syscall.Proc
+)
+
+func init() {
+ // Library
+ libpdhDll = syscall.MustLoadDLL("pdh.dll")
+
+ // Functions
+ pdh_AddCounterW = libpdhDll.MustFindProc("PdhAddCounterW")
+ pdh_AddEnglishCounterW, _ = libpdhDll.FindProc("PdhAddEnglishCounterW") // XXX: only supported on versions > Vista.
+ pdh_CloseQuery = libpdhDll.MustFindProc("PdhCloseQuery")
+ pdh_CollectQueryData = libpdhDll.MustFindProc("PdhCollectQueryData")
+ pdh_GetFormattedCounterValue = libpdhDll.MustFindProc("PdhGetFormattedCounterValue")
+ pdh_GetFormattedCounterArrayW = libpdhDll.MustFindProc("PdhGetFormattedCounterArrayW")
+ pdh_OpenQuery = libpdhDll.MustFindProc("PdhOpenQuery")
+ pdh_ValidatePathW = libpdhDll.MustFindProc("PdhValidatePathW")
+}
+
+// Adds the specified counter to the query. This is the internationalized version. Preferably, use the
+// function PdhAddEnglishCounter instead. hQuery is the query handle, which has been fetched by PdhOpenQuery.
+// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version).
+// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value
+// later, call PdhGetCounterInfo() and access dwQueryUserData of the PDH_COUNTER_INFO structure.
+//
+// Examples of szFullCounterPath (in an English version of Windows):
+//
+// \\Processor(_Total)\\% Idle Time
+// \\Processor(_Total)\\% Processor Time
+// \\LogicalDisk(C:)\% Free Space
+//
+// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility,
+// the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a
+// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
+// interface to the available counters, and can be found at the following key:
+//
+// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage
+//
+// This registry key contains several values as follows:
+//
+// 1
+// 1847
+// 2
+// System
+// 4
+// Memory
+// 6
+// % Processor Time
+// ... many, many more
+//
+// Somehow, these numeric values can be used as szFullCounterPath too:
+//
+// \2\6 will correspond to \\System\% Processor Time
+//
+// The typeperf command may also be pretty easy. To find all performance counters, simply execute:
+//
+// typeperf -qx
+func PdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 {
+ ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
+ ret, _, _ := pdh_AddCounterW.Call(
+ uintptr(hQuery),
+ uintptr(unsafe.Pointer(ptxt)),
+ dwUserData,
+ uintptr(unsafe.Pointer(phCounter)))
+
+ return uint32(ret)
+}
+
+// Adds the specified language-neutral counter to the query. See the PdhAddCounter function. This function only exists on
+// Windows versions higher than Vista.
+func PdhAddEnglishCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 {
+ if pdh_AddEnglishCounterW == nil {
+ return ERROR_INVALID_FUNCTION
+ }
+
+ ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
+ ret, _, _ := pdh_AddEnglishCounterW.Call(
+ uintptr(hQuery),
+ uintptr(unsafe.Pointer(ptxt)),
+ dwUserData,
+ uintptr(unsafe.Pointer(phCounter)))
+
+ return uint32(ret)
+}
+
+// Closes all counters contained in the specified query, closes all handles related to the query,
+// and frees all memory associated with the query.
+func PdhCloseQuery(hQuery PDH_HQUERY) uint32 {
+ ret, _, _ := pdh_CloseQuery.Call(uintptr(hQuery))
+
+ return uint32(ret)
+}
+
+// Collects the current raw data value for all counters in the specified query and updates the status
+// code of each counter. With some counters, this function needs to be repeatedly called before the value
+// of the counter can be extracted with PdhGetFormattedCounterValue(). For example, the following code
+// requires at least two calls:
+//
+// var handle win.PDH_HQUERY
+// var counterHandle win.PDH_HCOUNTER
+// ret := win.PdhOpenQuery(0, 0, &handle)
+// ret = win.PdhAddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle)
+// var derp win.PDH_FMT_COUNTERVALUE_DOUBLE
+//
+// ret = win.PdhCollectQueryData(handle)
+// fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA
+// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
+//
+// ret = win.PdhCollectQueryData(handle)
+// fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS
+// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
+//
+// The PdhCollectQueryData will return an error in the first call because it needs two values for
+// displaying the correct data for the processor idle time. The second call will have a 0 return code.
+func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 {
+ ret, _, _ := pdh_CollectQueryData.Call(uintptr(hQuery))
+
+ return uint32(ret)
+}
+
+// Formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue.
+// This function does not directly translate to a Windows counterpart due to union specialization tricks.
+func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 {
+ ret, _, _ := pdh_GetFormattedCounterValue.Call(
+ uintptr(hCounter),
+ uintptr(PDH_FMT_DOUBLE),
+ uintptr(unsafe.Pointer(lpdwType)),
+ uintptr(unsafe.Pointer(pValue)))
+
+ return uint32(ret)
+}
+
+// Formats the given hCounter using a large int (int64). The result is set into the specialized union struct pValue.
+// This function does not directly translate to a Windows counterpart due to union specialization tricks.
+func PdhGetFormattedCounterValueLarge(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_LARGE) uint32 {
+ ret, _, _ := pdh_GetFormattedCounterValue.Call(
+ uintptr(hCounter),
+ uintptr(PDH_FMT_LARGE),
+ uintptr(unsafe.Pointer(lpdwType)),
+ uintptr(unsafe.Pointer(pValue)))
+
+ return uint32(ret)
+}
+
+// Formats the given hCounter using a 'long'. The result is set into the specialized union struct pValue.
+// This function does not directly translate to a Windows counterpart due to union specialization tricks.
+//
+// BUG(krpors): Testing this function on multiple systems yielded inconsistent results. For instance,
+// the pValue.LongValue kept the value '192' on test system A, but on B this was '0', while the padding
+// bytes of the struct got the correct value. Until someone can figure out this behaviour, prefer to use
+// the Double or Large counterparts instead. These functions provide actually the same data, except in
+// a different, working format.
+func PdhGetFormattedCounterValueLong(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_LONG) uint32 {
+ ret, _, _ := pdh_GetFormattedCounterValue.Call(
+ uintptr(hCounter),
+ uintptr(PDH_FMT_LONG),
+ uintptr(unsafe.Pointer(lpdwType)),
+ uintptr(unsafe.Pointer(pValue)))
+
+ return uint32(ret)
+}
+
+// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
+// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE.
+// An example of how this function can be used:
+//
+// okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character
+//
+// // ommitted all necessary stuff ...
+//
+// var bufSize uint32
+// var bufCount uint32
+// var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
+// var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
+//
+// for {
+// // collect
+// ret := win.PdhCollectQueryData(queryHandle)
+// if ret == win.ERROR_SUCCESS {
+// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
+// if ret == win.PDH_MORE_DATA {
+// filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
+// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0])
+// for i := 0; i < int(bufCount); i++ {
+// c := filledBuf[i]
+// var s string = win.UTF16PtrToString(c.SzName)
+// fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue)
+// }
+//
+// filledBuf = nil
+// // Need to at least set bufSize to zero, because if not, the function will not
+// // return PDH_MORE_DATA and will not set the bufSize.
+// bufCount = 0
+// bufSize = 0
+// }
+//
+// time.Sleep(2000 * time.Millisecond)
+// }
+// }
+func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
+ ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
+ uintptr(hCounter),
+ uintptr(PDH_FMT_DOUBLE),
+ uintptr(unsafe.Pointer(lpdwBufferSize)),
+ uintptr(unsafe.Pointer(lpdwBufferCount)),
+ uintptr(unsafe.Pointer(itemBuffer)))
+
+ return uint32(ret)
+}
+
+// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
+// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_LARGE.
+// For an example usage, see PdhGetFormattedCounterArrayDouble.
+func PdhGetFormattedCounterArrayLarge(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_LARGE) uint32 {
+ ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
+ uintptr(hCounter),
+ uintptr(PDH_FMT_LARGE),
+ uintptr(unsafe.Pointer(lpdwBufferSize)),
+ uintptr(unsafe.Pointer(lpdwBufferCount)),
+ uintptr(unsafe.Pointer(itemBuffer)))
+
+ return uint32(ret)
+}
+
+// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
+// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_LONG.
+// For an example usage, see PdhGetFormattedCounterArrayDouble.
+//
+// BUG(krpors): See description of PdhGetFormattedCounterValueLong().
+func PdhGetFormattedCounterArrayLong(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_LONG) uint32 {
+ ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
+ uintptr(hCounter),
+ uintptr(PDH_FMT_LONG),
+ uintptr(unsafe.Pointer(lpdwBufferSize)),
+ uintptr(unsafe.Pointer(lpdwBufferCount)),
+ uintptr(unsafe.Pointer(itemBuffer)))
+
+ return uint32(ret)
+}
+
+// Creates a new query that is used to manage the collection of performance data.
+// szDataSource is a null terminated string that specifies the name of the log file from which to
+// retrieve the performance data. If 0, performance data is collected from a real-time data source.
+// dwUserData is a user-defined value to associate with this query. To retrieve the user data later,
+// call PdhGetCounterInfo and access dwQueryUserData of the PDH_COUNTER_INFO structure. phQuery is
+// the handle to the query, and must be used in subsequent calls. This function returns a PDH_
+// constant error code, or ERROR_SUCCESS if the call succeeded.
+func PdhOpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *PDH_HQUERY) uint32 {
+ ret, _, _ := pdh_OpenQuery.Call(
+ szDataSource,
+ dwUserData,
+ uintptr(unsafe.Pointer(phQuery)))
+
+ return uint32(ret)
+}
+
+// Validates a path. Will return ERROR_SUCCESS when ok, or PDH_CSTATUS_BAD_COUNTERNAME when the path is
+// erroneous.
+func PdhValidatePath(path string) uint32 {
+ ptxt, _ := syscall.UTF16PtrFromString(path)
+ ret, _, _ := pdh_ValidatePathW.Call(uintptr(unsafe.Pointer(ptxt)))
+
+ return uint32(ret)
+}
+
+func UTF16PtrToString(s *uint16) string {
+ if s == nil {
+ return ""
+ }
+ return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:])
+}
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 000000000..b883f1fdc
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1 @@
+*.exe
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 000000000..b8b569d77
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
new file mode 100644
index 000000000..568001057
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -0,0 +1,22 @@
+# go-winio
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+for using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
+
+Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
+for another named pipe implementation.
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
new file mode 100644
index 000000000..2be34af43
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -0,0 +1,280 @@
+// +build windows
+
+package winio
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "syscall"
+ "unicode/utf16"
+)
+
+//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+
+const (
+ BackupData = uint32(iota + 1)
+ BackupEaData
+ BackupSecurity
+ BackupAlternateData
+ BackupLink
+ BackupPropertyData
+ BackupObjectId
+ BackupReparseData
+ BackupSparseBlock
+ BackupTxfsData
+)
+
+const (
+ StreamSparseAttributes = uint32(8)
+)
+
+const (
+ WRITE_DAC = 0x40000
+ WRITE_OWNER = 0x80000
+ ACCESS_SYSTEM_SECURITY = 0x1000000
+)
+
+// BackupHeader represents a backup stream of a file.
+type BackupHeader struct {
+ Id uint32 // The backup stream ID
+ Attributes uint32 // Stream attributes
+ Size int64 // The size of the stream in bytes
+ Name string // The name of the stream (for BackupAlternateData only).
+ Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamId struct {
+ StreamId uint32
+ Attributes uint32
+ Size uint64
+ NameSize uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+ r io.Reader
+ bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+ return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
+func (r *BackupStreamReader) Next() (*BackupHeader, error) {
+ if r.bytesLeft > 0 {
+ if s, ok := r.r.(io.Seeker); ok {
+ // Make sure Seek on io.SeekCurrent sometimes succeeds
+ // before trying the actual seek.
+ if _, err := s.Seek(0, io.SeekCurrent); err == nil {
+ if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ r.bytesLeft = 0
+ }
+ }
+ if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ return nil, err
+ }
+ }
+ var wsi win32StreamId
+ if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
+ return nil, err
+ }
+ hdr := &BackupHeader{
+ Id: wsi.StreamId,
+ Attributes: wsi.Attributes,
+ Size: int64(wsi.Size),
+ }
+ if wsi.NameSize != 0 {
+ name := make([]uint16, int(wsi.NameSize/2))
+ if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
+ return nil, err
+ }
+ hdr.Name = syscall.UTF16ToString(name)
+ }
+ if wsi.StreamId == BackupSparseBlock {
+ if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
+ return nil, err
+ }
+ hdr.Size -= 8
+ }
+ r.bytesLeft = hdr.Size
+ return hdr, nil
+}
+
+// Read reads from the current backup stream.
+func (r *BackupStreamReader) Read(b []byte) (int, error) {
+ if r.bytesLeft == 0 {
+ return 0, io.EOF
+ }
+ if int64(len(b)) > r.bytesLeft {
+ b = b[:r.bytesLeft]
+ }
+ n, err := r.r.Read(b)
+ r.bytesLeft -= int64(n)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if r.bytesLeft == 0 && err == nil {
+ err = io.EOF
+ }
+ return n, err
+}
+
+// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
+type BackupStreamWriter struct {
+ w io.Writer
+ bytesLeft int64
+}
+
+// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
+func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
+ return &BackupStreamWriter{w, 0}
+}
+
+// WriteHeader writes the next backup stream header and prepares for calls to Write().
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
+ if w.bytesLeft != 0 {
+ return fmt.Errorf("missing %d bytes", w.bytesLeft)
+ }
+ name := utf16.Encode([]rune(hdr.Name))
+ wsi := win32StreamId{
+ StreamId: hdr.Id,
+ Attributes: hdr.Attributes,
+ Size: uint64(hdr.Size),
+ NameSize: uint32(len(name) * 2),
+ }
+ if hdr.Id == BackupSparseBlock {
+ // Include space for the int64 block offset
+ wsi.Size += 8
+ }
+ if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
+ return err
+ }
+ if len(name) != 0 {
+ if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
+ return err
+ }
+ }
+ if hdr.Id == BackupSparseBlock {
+ if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
+ return err
+ }
+ }
+ w.bytesLeft = hdr.Size
+ return nil
+}
+
+// Write writes to the current backup stream.
+func (w *BackupStreamWriter) Write(b []byte) (int, error) {
+ if w.bytesLeft < int64(len(b)) {
+ return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
+ }
+ n, err := w.w.Write(b)
+ w.bytesLeft -= int64(n)
+ return n, err
+}
+
+// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
+type BackupFileReader struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
+// Read will attempt to read the security descriptor of the file.
+func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
+ r := &BackupFileReader{f, includeSecurity, 0}
+ return r
+}
+
+// Read reads a backup stream from the file by calling the Win32 API BackupRead().
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+ var bytesRead uint32
+ err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+ if err != nil {
+ return 0, &os.PathError{"BackupRead", r.f.Name(), err}
+ }
+ runtime.KeepAlive(r.f)
+ if bytesRead == 0 {
+ return 0, io.EOF
+ }
+ return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+ if r.ctx != 0 {
+ backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+ runtime.KeepAlive(r.f)
+ r.ctx = 0
+ }
+ return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+ w := &BackupFileWriter{f, includeSecurity, 0}
+ return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+ var bytesWritten uint32
+ err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+ if err != nil {
+ return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
+ }
+ runtime.KeepAlive(w.f)
+ if int(bytesWritten) != len(b) {
+ return int(bytesWritten), errors.New("not all bytes could be written")
+ }
+ return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+ if w.ctx != 0 {
+ backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+ runtime.KeepAlive(w.f)
+ w.ctx = 0
+ }
+ return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+ winPath, err := syscall.UTF16FromString(path)
+ if err != nil {
+ return nil, err
+ }
+ h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
+ if err != nil {
+ err = &os.PathError{Op: "open", Path: path, Err: err}
+ return nil, err
+ }
+ return os.NewFile(uintptr(h), path), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
new file mode 100644
index 000000000..4051c1b33
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -0,0 +1,137 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+)
+
+type fileFullEaInformation struct {
+ NextEntryOffset uint32
+ Flags uint8
+ NameLength uint8
+ ValueLength uint16
+}
+
+var (
+ fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+ errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+ errEaNameTooLarge = errors.New("extended attribute name too large")
+ errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
+type ExtendedAttribute struct {
+ Name string
+ Value []byte
+ Flags uint8
+}
+
+func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
+ var info fileFullEaInformation
+ err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
+ if err != nil {
+ err = errInvalidEaBuffer
+ return
+ }
+
+ nameOffset := fileFullEaInformationSize
+ nameLen := int(info.NameLength)
+ valueOffset := nameOffset + int(info.NameLength) + 1
+ valueLen := int(info.ValueLength)
+ nextOffset := int(info.NextEntryOffset)
+ if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
+ err = errInvalidEaBuffer
+ return
+ }
+
+ ea.Name = string(b[nameOffset : nameOffset+nameLen])
+ ea.Value = b[valueOffset : valueOffset+valueLen]
+ ea.Flags = info.Flags
+ if info.NextEntryOffset != 0 {
+ nb = b[info.NextEntryOffset:]
+ }
+ return
+}
+
+// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
+// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
+ for len(b) != 0 {
+ ea, nb, err := parseEa(b)
+ if err != nil {
+ return nil, err
+ }
+
+ eas = append(eas, ea)
+ b = nb
+ }
+ return
+}
+
+func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
+ if int(uint8(len(ea.Name))) != len(ea.Name) {
+ return errEaNameTooLarge
+ }
+ if int(uint16(len(ea.Value))) != len(ea.Value) {
+ return errEaValueTooLarge
+ }
+ entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
+ withPadding := (entrySize + 3) &^ 3
+ nextOffset := uint32(0)
+ if !last {
+ nextOffset = withPadding
+ }
+ info := fileFullEaInformation{
+ NextEntryOffset: nextOffset,
+ Flags: ea.Flags,
+ NameLength: uint8(len(ea.Name)),
+ ValueLength: uint16(len(ea.Value)),
+ }
+
+ err := binary.Write(buf, binary.LittleEndian, &info)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write([]byte(ea.Name))
+ if err != nil {
+ return err
+ }
+
+ err = buf.WriteByte(0)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write(ea.Value)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
+// buffer for use with BackupWrite, ZwSetEaFile, etc.
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
+ var buf bytes.Buffer
+ for i := range eas {
+ last := false
+ if i == len(eas)-1 {
+ last = true
+ }
+
+ err := writeEa(&buf, &eas[i], last)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
new file mode 100644
index 000000000..4334ff1cb
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -0,0 +1,307 @@
+// +build windows
+
+package winio
+
+import (
+ "errors"
+ "io"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+)
+
+//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+
+type atomicBool int32
+
+func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
+func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
+func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
+func (b *atomicBool) swap(new bool) bool {
+ var newInt int32
+ if new {
+ newInt = 1
+ }
+ return atomic.SwapInt32((*int32)(b), newInt) == 1
+}
+
+const (
+ cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
+ cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
+)
+
+var (
+ ErrFileClosed = errors.New("file has already been closed")
+ ErrTimeout = &timeoutError{}
+)
+
+type timeoutError struct{}
+
+func (e *timeoutError) Error() string { return "i/o timeout" }
+func (e *timeoutError) Timeout() bool { return true }
+func (e *timeoutError) Temporary() bool { return true }
+
+type timeoutChan chan struct{}
+
+var ioInitOnce sync.Once
+var ioCompletionPort syscall.Handle
+
+// ioResult contains the result of an asynchronous IO operation
+type ioResult struct {
+ bytes uint32
+ err error
+}
+
+// ioOperation represents an outstanding asynchronous Win32 IO
+type ioOperation struct {
+ o syscall.Overlapped
+ ch chan ioResult
+}
+
+func initIo() {
+ h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
+ if err != nil {
+ panic(err)
+ }
+ ioCompletionPort = h
+ go ioCompletionProcessor(h)
+}
+
+// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
+// It takes ownership of this handle and will close it if it is garbage collected.
+type win32File struct {
+ handle syscall.Handle
+ wg sync.WaitGroup
+ wgLock sync.RWMutex
+ closing atomicBool
+ readDeadline deadlineHandler
+ writeDeadline deadlineHandler
+}
+
+type deadlineHandler struct {
+ setLock sync.Mutex
+ channel timeoutChan
+ channelLock sync.RWMutex
+ timer *time.Timer
+ timedout atomicBool
+}
+
+// makeWin32File makes a new win32File from an existing file handle
+func makeWin32File(h syscall.Handle) (*win32File, error) {
+ f := &win32File{handle: h}
+ ioInitOnce.Do(initIo)
+ _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
+ if err != nil {
+ return nil, err
+ }
+ err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
+ if err != nil {
+ return nil, err
+ }
+ f.readDeadline.channel = make(timeoutChan)
+ f.writeDeadline.channel = make(timeoutChan)
+ return f, nil
+}
+
+func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+ return makeWin32File(h)
+}
+
+// closeHandle closes the resources associated with a Win32 handle
+func (f *win32File) closeHandle() {
+ f.wgLock.Lock()
+ // Atomically set that we are closing, releasing the resources only once.
+ if !f.closing.swap(true) {
+ f.wgLock.Unlock()
+ // cancel all IO and wait for it to complete
+ cancelIoEx(f.handle, nil)
+ f.wg.Wait()
+ // at this point, no new IO can start
+ syscall.Close(f.handle)
+ f.handle = 0
+ } else {
+ f.wgLock.Unlock()
+ }
+}
+
+// Close closes a win32File.
+func (f *win32File) Close() error {
+ f.closeHandle()
+ return nil
+}
+
+// prepareIo prepares for a new IO operation.
+// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
+func (f *win32File) prepareIo() (*ioOperation, error) {
+ f.wgLock.RLock()
+ if f.closing.isSet() {
+ f.wgLock.RUnlock()
+ return nil, ErrFileClosed
+ }
+ f.wg.Add(1)
+ f.wgLock.RUnlock()
+ c := &ioOperation{}
+ c.ch = make(chan ioResult)
+ return c, nil
+}
+
+// ioCompletionProcessor processes completed async IOs forever
+func ioCompletionProcessor(h syscall.Handle) {
+ for {
+ var bytes uint32
+ var key uintptr
+ var op *ioOperation
+ err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
+ if op == nil {
+ panic(err)
+ }
+ op.ch <- ioResult{bytes, err}
+ }
+}
+
+// asyncIo processes the return value from ReadFile or WriteFile, blocking until
+// the operation has actually completed.
+func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+ if err != syscall.ERROR_IO_PENDING {
+ return int(bytes), err
+ }
+
+ if f.closing.isSet() {
+ cancelIoEx(f.handle, &c.o)
+ }
+
+ var timeout timeoutChan
+ if d != nil {
+ d.channelLock.Lock()
+ timeout = d.channel
+ d.channelLock.Unlock()
+ }
+
+ var r ioResult
+ select {
+ case r = <-c.ch:
+ err = r.err
+ if err == syscall.ERROR_OPERATION_ABORTED {
+ if f.closing.isSet() {
+ err = ErrFileClosed
+ }
+ }
+ case <-timeout:
+ cancelIoEx(f.handle, &c.o)
+ r = <-c.ch
+ err = r.err
+ if err == syscall.ERROR_OPERATION_ABORTED {
+ err = ErrTimeout
+ }
+ }
+
+ // runtime.KeepAlive is needed, as c is passed via native
+ // code to ioCompletionProcessor, c must remain alive
+ // until the channel read is complete.
+ runtime.KeepAlive(c)
+ return int(r.bytes), err
+}
+
+// Read reads from a file handle.
+func (f *win32File) Read(b []byte) (int, error) {
+ c, err := f.prepareIo()
+ if err != nil {
+ return 0, err
+ }
+ defer f.wg.Done()
+
+ if f.readDeadline.timedout.isSet() {
+ return 0, ErrTimeout
+ }
+
+ var bytes uint32
+ err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
+ runtime.KeepAlive(b)
+
+ // Handle EOF conditions.
+ if err == nil && n == 0 && len(b) != 0 {
+ return 0, io.EOF
+ } else if err == syscall.ERROR_BROKEN_PIPE {
+ return 0, io.EOF
+ } else {
+ return n, err
+ }
+}
+
+// Write writes to a file handle.
+func (f *win32File) Write(b []byte) (int, error) {
+ c, err := f.prepareIo()
+ if err != nil {
+ return 0, err
+ }
+ defer f.wg.Done()
+
+ if f.writeDeadline.timedout.isSet() {
+ return 0, ErrTimeout
+ }
+
+ var bytes uint32
+ err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
+ runtime.KeepAlive(b)
+ return n, err
+}
+
+func (f *win32File) SetReadDeadline(deadline time.Time) error {
+ return f.readDeadline.set(deadline)
+}
+
+func (f *win32File) SetWriteDeadline(deadline time.Time) error {
+ return f.writeDeadline.set(deadline)
+}
+
+func (f *win32File) Flush() error {
+ return syscall.FlushFileBuffers(f.handle)
+}
+
+func (d *deadlineHandler) set(deadline time.Time) error {
+ d.setLock.Lock()
+ defer d.setLock.Unlock()
+
+ if d.timer != nil {
+ if !d.timer.Stop() {
+ <-d.channel
+ }
+ d.timer = nil
+ }
+ d.timedout.setFalse()
+
+ select {
+ case <-d.channel:
+ d.channelLock.Lock()
+ d.channel = make(chan struct{})
+ d.channelLock.Unlock()
+ default:
+ }
+
+ if deadline.IsZero() {
+ return nil
+ }
+
+ timeoutIO := func() {
+ d.timedout.setTrue()
+ close(d.channel)
+ }
+
+ now := time.Now()
+ duration := deadline.Sub(now)
+ if deadline.After(now) {
+ // Deadline is in the future, set a timer to wait
+ d.timer = time.AfterFunc(duration, timeoutIO)
+ } else {
+ // Deadline is in the past. Cancel all pending IO now.
+ timeoutIO()
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
new file mode 100644
index 000000000..ada2fbab6
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -0,0 +1,61 @@
+// +build windows
+
+package winio
+
+import (
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
+//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
+
+const (
+ fileBasicInfo = 0
+ fileIDInfo = 0x12
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+ CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
+ FileAttributes uint32
+ pad uint32 // padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+ bi := &FileBasicInfo{}
+ if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return bi, nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+ if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+ VolumeSerialNumber uint64
+ FileID [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
+func GetFileID(f *os.File) (*FileIDInfo, error) {
+ fileID := &FileIDInfo{}
+ if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return fileID, nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go
new file mode 100644
index 000000000..d99eedb64
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pipe.go
@@ -0,0 +1,421 @@
+// +build windows
+
+package winio
+
+import (
+ "errors"
+ "io"
+ "net"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
+//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
+//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
+
+const (
+ cERROR_PIPE_BUSY = syscall.Errno(231)
+ cERROR_NO_DATA = syscall.Errno(232)
+ cERROR_PIPE_CONNECTED = syscall.Errno(535)
+ cERROR_SEM_TIMEOUT = syscall.Errno(121)
+
+ cPIPE_ACCESS_DUPLEX = 0x3
+ cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
+ cSECURITY_SQOS_PRESENT = 0x100000
+ cSECURITY_ANONYMOUS = 0
+
+ cPIPE_REJECT_REMOTE_CLIENTS = 0x8
+
+ cPIPE_UNLIMITED_INSTANCES = 255
+
+ cNMPWAIT_USE_DEFAULT_WAIT = 0
+ cNMPWAIT_NOWAIT = 1
+
+ cPIPE_TYPE_MESSAGE = 4
+
+ cPIPE_READMODE_MESSAGE = 2
+)
+
+var (
+ // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
+ // This error should match net.errClosing since docker takes a dependency on its text.
+ ErrPipeListenerClosed = errors.New("use of closed network connection")
+
+ errPipeWriteClosed = errors.New("pipe has been closed for write")
+)
+
+type win32Pipe struct {
+ *win32File
+ path string
+}
+
+type win32MessageBytePipe struct {
+ win32Pipe
+ writeClosed bool
+ readEOF bool
+}
+
+type pipeAddress string
+
+func (f *win32Pipe) LocalAddr() net.Addr {
+ return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) RemoteAddr() net.Addr {
+ return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) SetDeadline(t time.Time) error {
+ f.SetReadDeadline(t)
+ f.SetWriteDeadline(t)
+ return nil
+}
+
+// CloseWrite closes the write side of a message pipe in byte mode.
+func (f *win32MessageBytePipe) CloseWrite() error {
+ if f.writeClosed {
+ return errPipeWriteClosed
+ }
+ err := f.win32File.Flush()
+ if err != nil {
+ return err
+ }
+ _, err = f.win32File.Write(nil)
+ if err != nil {
+ return err
+ }
+ f.writeClosed = true
+ return nil
+}
+
+// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
+// they are used to implement CloseWrite().
+func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
+ if f.writeClosed {
+ return 0, errPipeWriteClosed
+ }
+ if len(b) == 0 {
+ return 0, nil
+ }
+ return f.win32File.Write(b)
+}
+
+// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
+// mode pipe will return io.EOF, as will all subsequent reads.
+func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
+ if f.readEOF {
+ return 0, io.EOF
+ }
+ n, err := f.win32File.Read(b)
+ if err == io.EOF {
+ // If this was the result of a zero-byte read, then
+ // it is possible that the read was due to a zero-size
+ // message. Since we are simulating CloseWrite with a
+ // zero-byte message, ensure that all future Read() calls
+ // also return EOF.
+ f.readEOF = true
+ } else if err == syscall.ERROR_MORE_DATA {
+ // ERROR_MORE_DATA indicates that the pipe's read mode is message mode
+ // and the message still has more bytes. Treat this as a success, since
+ // this package presents all named pipes as byte streams.
+ err = nil
+ }
+ return n, err
+}
+
+func (s pipeAddress) Network() string {
+ return "pipe"
+}
+
+func (s pipeAddress) String() string {
+ return string(s)
+}
+
+// DialPipe connects to a named pipe by path, timing out if the connection
+// takes longer than the specified duration. If timeout is nil, then we use
+// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
+ var absTimeout time.Time
+ if timeout != nil {
+ absTimeout = time.Now().Add(*timeout)
+ } else {
+ absTimeout = time.Now().Add(time.Second * 2)
+ }
+ var err error
+ var h syscall.Handle
+ for {
+ h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ if err != cERROR_PIPE_BUSY {
+ break
+ }
+ if time.Now().After(absTimeout) {
+ return nil, ErrTimeout
+ }
+
+ // Wait 10 msec and try again. This is a rather simplistic
+ // view, as we always try each 10 milliseconds.
+ time.Sleep(time.Millisecond * 10)
+ }
+ if err != nil {
+ return nil, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+
+ var flags uint32
+ err = getNamedPipeInfo(h, &flags, nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := makeWin32File(h)
+ if err != nil {
+ syscall.Close(h)
+ return nil, err
+ }
+
+ // If the pipe is in message mode, return a message byte pipe, which
+ // supports CloseWrite().
+ if flags&cPIPE_TYPE_MESSAGE != 0 {
+ return &win32MessageBytePipe{
+ win32Pipe: win32Pipe{win32File: f, path: path},
+ }, nil
+ }
+ return &win32Pipe{win32File: f, path: path}, nil
+}
+
+type acceptResponse struct {
+ f *win32File
+ err error
+}
+
+type win32PipeListener struct {
+ firstHandle syscall.Handle
+ path string
+ securityDescriptor []byte
+ config PipeConfig
+ acceptCh chan (chan acceptResponse)
+ closeCh chan int
+ doneCh chan int
+}
+
+func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
+ var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
+ if first {
+ flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
+ }
+
+ var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
+ if c.MessageMode {
+ mode |= cPIPE_TYPE_MESSAGE
+ }
+
+ sa := &syscall.SecurityAttributes{}
+ sa.Length = uint32(unsafe.Sizeof(*sa))
+ if securityDescriptor != nil {
+ len := uint32(len(securityDescriptor))
+ sa.SecurityDescriptor = localAlloc(0, len)
+ defer localFree(sa.SecurityDescriptor)
+ copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
+ }
+ h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
+ if err != nil {
+ return 0, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+ return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+ h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
+ if err != nil {
+ return nil, err
+ }
+ f, err := makeWin32File(h)
+ if err != nil {
+ syscall.Close(h)
+ return nil, err
+ }
+ return f, nil
+}
+
+func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
+ p, err := l.makeServerPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ // Wait for the client to connect.
+ ch := make(chan error)
+ go func(p *win32File) {
+ ch <- connectPipe(p)
+ }(p)
+
+ select {
+ case err = <-ch:
+ if err != nil {
+ p.Close()
+ p = nil
+ }
+ case <-l.closeCh:
+ // Abort the connect request by closing the handle.
+ p.Close()
+ p = nil
+ err = <-ch
+ if err == nil || err == ErrFileClosed {
+ err = ErrPipeListenerClosed
+ }
+ }
+ return p, err
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+ closed := false
+ for !closed {
+ select {
+ case <-l.closeCh:
+ closed = true
+ case responseCh := <-l.acceptCh:
+ var (
+ p *win32File
+ err error
+ )
+ for {
+ p, err = l.makeConnectedServerPipe()
+ // If the connection was immediately closed by the client, try
+ // again.
+ if err != cERROR_NO_DATA {
+ break
+ }
+ }
+ responseCh <- acceptResponse{p, err}
+ closed = err == ErrPipeListenerClosed
+ }
+ }
+ syscall.Close(l.firstHandle)
+ l.firstHandle = 0
+ // Notify Close() and Accept() callers that the handle has been closed.
+ close(l.doneCh)
+}
+
+// PipeConfig contain configuration for the pipe listener.
+type PipeConfig struct {
+ // SecurityDescriptor contains a Windows security descriptor in SDDL format.
+ SecurityDescriptor string
+
+ // MessageMode determines whether the pipe is in byte or message mode. In either
+ // case the pipe is read in byte mode by default. The only practical difference in
+ // this implementation is that CloseWrite() is only supported for message mode pipes;
+ // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+ // transferred to the reader (and returned as io.EOF in this implementation)
+ // when the pipe is in message mode.
+ MessageMode bool
+
+ // InputBufferSize specifies the size the input buffer, in bytes.
+ InputBufferSize int32
+
+ // OutputBufferSize specifies the size the input buffer, in bytes.
+ OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+ var (
+ sd []byte
+ err error
+ )
+ if c == nil {
+ c = &PipeConfig{}
+ }
+ if c.SecurityDescriptor != "" {
+ sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+ if err != nil {
+ return nil, err
+ }
+ }
+ h, err := makeServerPipeHandle(path, sd, c, true)
+ if err != nil {
+ return nil, err
+ }
+ // Create a client handle and connect it. This results in the pipe
+ // instance always existing, so that clients see ERROR_PIPE_BUSY
+ // rather than ERROR_FILE_NOT_FOUND. This ties the first instance
+ // up so that no other instances can be used. This would have been
+ // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
+ // instead of CreateNamedPipe. (Apparently created named pipes are
+ // considered to be in listening state regardless of whether any
+ // active calls to ConnectNamedPipe are outstanding.)
+ h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ if err != nil {
+ syscall.Close(h)
+ return nil, err
+ }
+ // Close the client handle. The server side of the instance will
+ // still be busy, leading to ERROR_PIPE_BUSY instead of
+ // ERROR_NOT_FOUND, as long as we don't close the server handle,
+ // or disconnect the client with DisconnectNamedPipe.
+ syscall.Close(h2)
+ l := &win32PipeListener{
+ firstHandle: h,
+ path: path,
+ securityDescriptor: sd,
+ config: *c,
+ acceptCh: make(chan (chan acceptResponse)),
+ closeCh: make(chan int),
+ doneCh: make(chan int),
+ }
+ go l.listenerRoutine()
+ return l, nil
+}
+
+func connectPipe(p *win32File) error {
+ c, err := p.prepareIo()
+ if err != nil {
+ return err
+ }
+ defer p.wg.Done()
+
+ err = connectNamedPipe(p.handle, &c.o)
+ _, err = p.asyncIo(c, nil, 0, err)
+ if err != nil && err != cERROR_PIPE_CONNECTED {
+ return err
+ }
+ return nil
+}
+
+func (l *win32PipeListener) Accept() (net.Conn, error) {
+ ch := make(chan acceptResponse)
+ select {
+ case l.acceptCh <- ch:
+ response := <-ch
+ err := response.err
+ if err != nil {
+ return nil, err
+ }
+ if l.config.MessageMode {
+ return &win32MessageBytePipe{
+ win32Pipe: win32Pipe{win32File: response.f, path: l.path},
+ }, nil
+ }
+ return &win32Pipe{win32File: response.f, path: l.path}, nil
+ case <-l.doneCh:
+ return nil, ErrPipeListenerClosed
+ }
+}
+
+func (l *win32PipeListener) Close() error {
+ select {
+ case l.closeCh <- 1:
+ <-l.doneCh
+ case <-l.doneCh:
+ }
+ return nil
+}
+
+func (l *win32PipeListener) Addr() net.Addr {
+ return pipeAddress(l.path)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go
new file mode 100644
index 000000000..9c83d36fe
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/privilege.go
@@ -0,0 +1,202 @@
+// +build windows
+
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "runtime"
+ "sync"
+ "syscall"
+ "unicode/utf16"
+
+ "golang.org/x/sys/windows"
+)
+
+//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
+//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
+//sys revertToSelf() (err error) = advapi32.RevertToSelf
+//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
+//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
+//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
+//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
+
+const (
+ SE_PRIVILEGE_ENABLED = 2
+
+ ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
+
+ SeBackupPrivilege = "SeBackupPrivilege"
+ SeRestorePrivilege = "SeRestorePrivilege"
+)
+
+const (
+ securityAnonymous = iota
+ securityIdentification
+ securityImpersonation
+ securityDelegation
+)
+
+var (
+ privNames = make(map[string]uint64)
+ privNameMutex sync.Mutex
+)
+
+// PrivilegeError represents an error enabling privileges.
+type PrivilegeError struct {
+ privileges []uint64
+}
+
+func (e *PrivilegeError) Error() string {
+ s := ""
+ if len(e.privileges) > 1 {
+ s = "Could not enable privileges "
+ } else {
+ s = "Could not enable privilege "
+ }
+ for i, p := range e.privileges {
+ if i != 0 {
+ s += ", "
+ }
+ s += `"`
+ s += getPrivilegeName(p)
+ s += `"`
+ }
+ return s
+}
+
+// RunWithPrivilege enables a single privilege for a function call.
+func RunWithPrivilege(name string, fn func() error) error {
+ return RunWithPrivileges([]string{name}, fn)
+}
+
+// RunWithPrivileges enables privileges for a function call.
+func RunWithPrivileges(names []string, fn func() error) error {
+ privileges, err := mapPrivileges(names)
+ if err != nil {
+ return err
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ token, err := newThreadToken()
+ if err != nil {
+ return err
+ }
+ defer releaseThreadToken(token)
+ err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
+ if err != nil {
+ return err
+ }
+ return fn()
+}
+
+func mapPrivileges(names []string) ([]uint64, error) {
+ var privileges []uint64
+ privNameMutex.Lock()
+ defer privNameMutex.Unlock()
+ for _, name := range names {
+ p, ok := privNames[name]
+ if !ok {
+ err := lookupPrivilegeValue("", name, &p)
+ if err != nil {
+ return nil, err
+ }
+ privNames[name] = p
+ }
+ privileges = append(privileges, p)
+ }
+ return privileges, nil
+}
+
+// EnableProcessPrivileges enables privileges globally for the process.
+func EnableProcessPrivileges(names []string) error {
+ return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+ return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+ privileges, err := mapPrivileges(names)
+ if err != nil {
+ return err
+ }
+
+ p, _ := windows.GetCurrentProcess()
+ var token windows.Token
+ err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+ if err != nil {
+ return err
+ }
+
+ defer token.Close()
+ return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+ var b bytes.Buffer
+ binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+ for _, p := range privileges {
+ binary.Write(&b, binary.LittleEndian, p)
+ binary.Write(&b, binary.LittleEndian, action)
+ }
+ prevState := make([]byte, b.Len())
+ reqSize := uint32(0)
+ success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+ if !success {
+ return err
+ }
+ if err == ERROR_NOT_ALL_ASSIGNED {
+ return &PrivilegeError{privileges}
+ }
+ return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+ var nameBuffer [256]uint16
+ bufSize := uint32(len(nameBuffer))
+ err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %d>", luid)
+ }
+
+ var displayNameBuffer [256]uint16
+ displayBufSize := uint32(len(displayNameBuffer))
+ var langID uint32
+ err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+ }
+
+ return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+ err := impersonateSelf(securityImpersonation)
+ if err != nil {
+ return 0, err
+ }
+
+ var token windows.Token
+ err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+ if err != nil {
+ rerr := revertToSelf()
+ if rerr != nil {
+ panic(rerr)
+ }
+ return 0, err
+ }
+ return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+ err := revertToSelf()
+ if err != nil {
+ panic(err)
+ }
+ h.Close()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 000000000..fc1ee4d3a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,128 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ reparseTagMountPoint = 0xA0000003
+ reparseTagSymlink = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+ ReparseTag uint32
+ ReparseDataLength uint16
+ Reserved uint16
+ SubstituteNameOffset uint16
+ SubstituteNameLength uint16
+ PrintNameOffset uint16
+ PrintNameLength uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+ Target string
+ IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+ Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+ return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
+ tag := binary.LittleEndian.Uint32(b[0:4])
+ return DecodeReparsePointData(tag, b[8:])
+}
+
+func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
+ isMountPoint := false
+ switch tag {
+ case reparseTagMountPoint:
+ isMountPoint = true
+ case reparseTagSymlink:
+ default:
+ return nil, &UnsupportedReparsePointError{tag}
+ }
+ nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
+ if !isMountPoint {
+ nameOffset += 4
+ }
+ nameLength := binary.LittleEndian.Uint16(b[6:8])
+ name := make([]uint16, nameLength/2)
+ err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
+ if err != nil {
+ return nil, err
+ }
+ return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
+}
+
+func isDriveLetter(c byte) bool {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
+// mount point.
+func EncodeReparsePoint(rp *ReparsePoint) []byte {
+ // Generate an NT path and determine if this is a relative path.
+ var ntTarget string
+ relative := false
+ if strings.HasPrefix(rp.Target, `\\?\`) {
+ ntTarget = `\??\` + rp.Target[4:]
+ } else if strings.HasPrefix(rp.Target, `\\`) {
+ ntTarget = `\??\UNC\` + rp.Target[2:]
+ } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
+ ntTarget = `\??\` + rp.Target
+ } else {
+ ntTarget = rp.Target
+ relative = true
+ }
+
+ // The paths must be NUL-terminated even though they are counted strings.
+ target16 := utf16.Encode([]rune(rp.Target + "\x00"))
+ ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
+
+ size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
+ size += len(ntTarget16)*2 + len(target16)*2
+
+ tag := uint32(reparseTagMountPoint)
+ if !rp.IsMountPoint {
+ tag = reparseTagSymlink
+ size += 4 // Add room for symlink flags
+ }
+
+ data := reparseDataBuffer{
+ ReparseTag: tag,
+ ReparseDataLength: uint16(size),
+ SubstituteNameOffset: 0,
+ SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
+ PrintNameOffset: uint16(len(ntTarget16) * 2),
+ PrintNameLength: uint16((len(target16) - 1) * 2),
+ }
+
+ var b bytes.Buffer
+ binary.Write(&b, binary.LittleEndian, &data)
+ if !rp.IsMountPoint {
+ flags := uint32(0)
+ if relative {
+ flags |= 1
+ }
+ binary.Write(&b, binary.LittleEndian, flags)
+ }
+
+ binary.Write(&b, binary.LittleEndian, ntTarget16)
+ binary.Write(&b, binary.LittleEndian, target16)
+ return b.Bytes()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go
new file mode 100644
index 000000000..db1b370a1
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/sd.go
@@ -0,0 +1,98 @@
+// +build windows
+
+package winio
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
+//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
+//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
+//sys localFree(mem uintptr) = LocalFree
+//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
+
+const (
+ cERROR_NONE_MAPPED = syscall.Errno(1332)
+)
+
+type AccountLookupError struct {
+ Name string
+ Err error
+}
+
+func (e *AccountLookupError) Error() string {
+ if e.Name == "" {
+ return "lookup account: empty account name specified"
+ }
+ var s string
+ switch e.Err {
+ case cERROR_NONE_MAPPED:
+ s = "not found"
+ default:
+ s = e.Err.Error()
+ }
+ return "lookup account " + e.Name + ": " + s
+}
+
+type SddlConversionError struct {
+ Sddl string
+ Err error
+}
+
+func (e *SddlConversionError) Error() string {
+ return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+// LookupSidByName looks up the SID of an account by name
+func LookupSidByName(name string) (sid string, err error) {
+ if name == "" {
+ return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+ }
+
+ var sidSize, sidNameUse, refDomainSize uint32
+ err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+ return "", &AccountLookupError{name, err}
+ }
+ sidBuffer := make([]byte, sidSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ var strBuffer *uint16
+ err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+ localFree(uintptr(unsafe.Pointer(strBuffer)))
+ return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+ var sdBuffer uintptr
+ err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+ if err != nil {
+ return nil, &SddlConversionError{sddl, err}
+ }
+ defer localFree(sdBuffer)
+ sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+ copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+ return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+ var sddl *uint16
+ // The returned string length seems to include an arbitrary number of terminating NULs.
+ // Don't use it.
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
+ if err != nil {
+ return "", err
+ }
+ defer localFree(uintptr(unsafe.Pointer(sddl)))
+ return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
new file mode 100644
index 000000000..20d64cf41
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -0,0 +1,3 @@
+package winio
+
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
new file mode 100644
index 000000000..3f527639a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -0,0 +1,520 @@
+// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+
+package winio
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+
+ procCancelIoEx = modkernel32.NewProc("CancelIoEx")
+ procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
+ procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+ procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+ procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
+ procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
+ procCreateFileW = modkernel32.NewProc("CreateFileW")
+ procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
+ procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
+ procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+ procLocalAlloc = modkernel32.NewProc("LocalAlloc")
+ procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+ procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
+ procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
+ procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
+ procLocalFree = modkernel32.NewProc("LocalFree")
+ procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
+ procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
+ procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+ procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+ procBackupRead = modkernel32.NewProc("BackupRead")
+ procBackupWrite = modkernel32.NewProc("BackupWrite")
+)
+
+func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
+ newport = syscall.Handle(r0)
+ if newport == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
+}
+
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func waitNamedPipe(name string, timeout uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _waitNamedPipe(_p0, timeout)
+}
+
+func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
+ r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
+ ptr = uintptr(r0)
+ return
+}
+
+func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(accountName)
+ if err != nil {
+ return
+ }
+ return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
+}
+
+func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func convertSidToStringSid(sid *byte, str **uint16) (err error) {
+ r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(str)
+ if err != nil {
+ return
+ }
+ return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
+}
+
+func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func localFree(mem uintptr) {
+ syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
+ return
+}
+
+func getSecurityDescriptorLength(sd uintptr) (len uint32) {
+ r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
+ len = uint32(r0)
+ return
+}
+
+func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
+ var _p0 uint32
+ if releaseAll {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+ success = r0 != 0
+ if true {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func impersonateSelf(level uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func revertToSelf() (err error) {
+ r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+ var _p0 uint32
+ if openAsSelf {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getCurrentThread() (h syscall.Handle) {
+ r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
+ h = syscall.Handle(r0)
+ return
+}
+
+func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeValue(_p0, _p1, luid)
+}
+
+func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
+ r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeName(_p0, luid, buffer, size)
+}
+
+func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
+}
+
+func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ } else {
+ _p1 = 0
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ } else {
+ _p2 = 0
+ }
+ r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ } else {
+ _p1 = 0
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ } else {
+ _p2 = 0
+ }
+ r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go
index a4e483026..093cbf37e 100644
--- a/vendor/github.com/Nvveen/Gotty/gotty.go
+++ b/vendor/github.com/Nvveen/Gotty/gotty.go
@@ -110,7 +110,7 @@ func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
return term.GetAttribute(tc)
}
-// A utility function that finds and returns the termcap equivalent of a
+// A utility function that finds and returns the termcap equivalent of a
// variable name.
func GetTermcapName(name string) string {
// Termcap name
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 815407642..5b52ab221 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -98,25 +98,25 @@ var ignoredHeaders = rules{
var requiredSignedHeaders = rules{
whitelist{
mapRule{
- "Cache-Control": struct{}{},
- "Content-Disposition": struct{}{},
- "Content-Encoding": struct{}{},
- "Content-Language": struct{}{},
- "Content-Md5": struct{}{},
- "Content-Type": struct{}{},
- "Expires": struct{}{},
- "If-Match": struct{}{},
- "If-Modified-Since": struct{}{},
- "If-None-Match": struct{}{},
- "If-Unmodified-Since": struct{}{},
- "Range": struct{}{},
- "X-Amz-Acl": struct{}{},
- "X-Amz-Copy-Source": struct{}{},
- "X-Amz-Copy-Source-If-Match": struct{}{},
- "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
- "X-Amz-Copy-Source-If-None-Match": struct{}{},
- "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
- "X-Amz-Copy-Source-Range": struct{}{},
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
index 615573d84..3bf439028 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -38,7 +38,7 @@ const (
// parseTable is a state machine to dictate the grammar above.
var parseTable = map[ASTKind]map[TokenType]int{
- ASTKindStart: {
+ ASTKindStart: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
@@ -46,7 +46,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: TerminalState,
},
- ASTKindCommentStatement: {
+ ASTKindCommentStatement: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
@@ -54,7 +54,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindExpr: {
+ ASTKindExpr: map[TokenType]int{
TokenOp: StatementPrimeState,
TokenLit: ValueState,
TokenSep: OpenScopeState,
@@ -63,12 +63,12 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindEqualExpr: {
+ ASTKindEqualExpr: map[TokenType]int{
TokenLit: ValueState,
TokenWS: SkipTokenState,
TokenNL: SkipState,
},
- ASTKindStatement: {
+ ASTKindStatement: map[TokenType]int{
TokenLit: SectionState,
TokenSep: CloseScopeState,
TokenWS: SkipTokenState,
@@ -76,7 +76,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindExprStatement: {
+ ASTKindExprStatement: map[TokenType]int{
TokenLit: ValueState,
TokenSep: OpenScopeState,
TokenOp: ValueState,
@@ -86,13 +86,13 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenNone: TerminalState,
TokenComma: SkipState,
},
- ASTKindSectionStatement: {
+ ASTKindSectionStatement: map[TokenType]int{
TokenLit: SectionState,
TokenSep: CloseScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
},
- ASTKindCompletedSectionStatement: {
+ ASTKindCompletedSectionStatement: map[TokenType]int{
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenLit: StatementState,
@@ -100,7 +100,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
- ASTKindSkipStatement: {
+ ASTKindSkipStatement: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
index eee6d3950..90d309903 100644
--- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
@@ -460,7 +460,7 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], m.ID)
}
if len(m.Labels) > 0 {
- for k := range m.Labels {
+ for k, _ := range m.Labels {
dAtA[i] = 0x12
i++
v := m.Labels[k]
@@ -531,7 +531,7 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
}
i += n4
if len(m.Extensions) > 0 {
- for k := range m.Extensions {
+ for k, _ := range m.Extensions {
dAtA[i] = 0x52
i++
v := m.Extensions[k]
@@ -1042,7 +1042,7 @@ func (this *Container) String() string {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
- for k := range this.Labels {
+ for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
@@ -1052,7 +1052,7 @@ func (this *Container) String() string {
}
mapStringForLabels += "}"
keysForExtensions := make([]string, 0, len(this.Extensions))
- for k := range this.Extensions {
+ for k, _ := range this.Extensions {
keysForExtensions = append(keysForExtensions, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForExtensions)
diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go
index ef0021733..6c8189587 100644
--- a/vendor/github.com/coreos/go-systemd/dbus/properties.go
+++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go
@@ -56,7 +56,7 @@ type execStart struct {
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
func PropExecStart(command []string, uncleanIsFailure bool) Property {
execStarts := []execStart{
- {
+ execStart{
Path: command[0],
Args: command,
UncleanIsFailure: uncleanIsFailure,
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
index 53ab12d57..6031a0db1 100644
--- a/vendor/github.com/docker/spdystream/connection.go
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -103,12 +103,12 @@ Loop:
//
// See https://github.com/docker/spdystream/issues/49 for more details.
go func() {
- for range resetChan {
+ for _ = range resetChan {
}
}()
go func() {
- for range setTimeoutChan {
+ for _ = range setTimeoutChan {
}
}()
@@ -127,7 +127,7 @@ Loop:
}
// Drain resetChan
- for range resetChan {
+ for _ = range resetChan {
}
}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
index 344c3951e..daf3a9e40 100644
--- a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
@@ -160,7 +160,7 @@ func (store *FileStore) all() []string {
for _, f := range files {
DEBUG.Println(STR, "file in All():", f.Name())
name := f.Name()
- if name[len(name)-4:] != msgExt {
+ if name[len(name)-4:len(name)] != msgExt {
DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name)
continue
}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
index 904a66468..195c8173d 100644
--- a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
@@ -27,7 +27,7 @@ type (
NOOPLogger struct{}
)
-func (NOOPLogger) Println(v ...interface{}) {}
+func (NOOPLogger) Println(v ...interface{}) {}
func (NOOPLogger) Printf(format string, v ...interface{}) {}
// Internal levels of library output that are initialised to not print
diff --git a/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/reconfigurable_sink.go b/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/reconfigurable_sink.go
index ab08830a4..5a6693e1f 100755
--- a/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/reconfigurable_sink.go
+++ b/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/reconfigurable_sink.go
@@ -1,14 +1,12 @@
package lager
import "sync/atomic"
-
//ReconfigurableSink is a struct
type ReconfigurableSink struct {
sink Sink
minLogLevel int32
}
-
//NewReconfigurableSink is a function which returns struct object
func NewReconfigurableSink(sink Sink, initialMinLogLevel LogLevel) *ReconfigurableSink {
return &ReconfigurableSink{
@@ -17,7 +15,6 @@ func NewReconfigurableSink(sink Sink, initialMinLogLevel LogLevel) *Reconfigurab
minLogLevel: int32(initialMinLogLevel),
}
}
-
//Log is a method which returns log level and log
func (sink *ReconfigurableSink) Log(level LogLevel, log []byte) {
minLogLevel := LogLevel(atomic.LoadInt32(&sink.minLogLevel))
@@ -28,12 +25,10 @@ func (sink *ReconfigurableSink) Log(level LogLevel, log []byte) {
sink.sink.Log(level, log)
}
-
//SetMinLevel is a function which sets minimum log level
func (sink *ReconfigurableSink) SetMinLevel(level LogLevel) {
atomic.StoreInt32(&sink.minLogLevel, int32(level))
}
-
//GetMinLevel is a method which gets minimum log level
func (sink *ReconfigurableSink) GetMinLevel() LogLevel {
return LogLevel(atomic.LoadInt32(&sink.minLogLevel))
diff --git a/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/writer_sink.go b/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/writer_sink.go
index 07dd74d7a..08cd85b8c 100755
--- a/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/writer_sink.go
+++ b/vendor/github.com/go-chassis/paas-lager/third_party/forked/cloudfoundry/lager/writer_sink.go
@@ -22,7 +22,6 @@ type writerSink struct {
name string
writeL *sync.Mutex
}
-
//NewWriterSink is function which returns new struct object
func NewWriterSink(name string, writer io.Writer, minLogLevel LogLevel) Sink {
return &writerSink{
diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go
index 25c387e67..e81f73ac5 100644
--- a/vendor/github.com/godbus/dbus/default_handler.go
+++ b/vendor/github.com/godbus/dbus/default_handler.go
@@ -45,7 +45,7 @@ func (h *defaultHandler) introspectPath(path ObjectPath) string {
subpath := make(map[string]struct{})
var xml bytes.Buffer
xml.WriteString("<node>")
- for obj := range h.objects {
+ for obj, _ := range h.objects {
p := string(path)
if p != "/" {
p += "/"
@@ -55,7 +55,7 @@ func (h *defaultHandler) introspectPath(path ObjectPath) string {
subpath[node_name] = struct{}{}
}
}
- for s := range subpath {
+ for s, _ := range subpath {
xml.WriteString("\n\t<node name=\"" + s + "\"/>")
}
xml.WriteString("\n</node>")
diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go
index 718a1ff02..3fad859a6 100644
--- a/vendor/github.com/godbus/dbus/transport_generic.go
+++ b/vendor/github.com/godbus/dbus/transport_generic.go
@@ -11,7 +11,7 @@ var nativeEndian binary.ByteOrder
func detectEndianness() binary.ByteOrder {
var x uint32 = 0x01020304
- if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
return binary.BigEndian
}
return binary.LittleEndian
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
index 1af55e6fd..3e63fffd5 100644
--- a/vendor/github.com/golang/glog/glog.go
+++ b/vendor/github.com/golang/glog/glog.go
@@ -876,7 +876,7 @@ const flushInterval = 30 * time.Second
// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
- for range time.NewTicker(flushInterval).C {
+ for _ = range time.NewTicker(flushInterval).C {
l.lockAndFlushAll()
}
}
diff --git a/vendor/github.com/google/cadvisor/pages/containers.go b/vendor/github.com/google/cadvisor/pages/containers.go
index 29626c98d..8f2981522 100644
--- a/vendor/github.com/google/cadvisor/pages/containers.go
+++ b/vendor/github.com/google/cadvisor/pages/containers.go
@@ -235,7 +235,7 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) {
FsAvailable: cont.Spec.HasFilesystem,
CustomMetricsAvailable: cont.Spec.HasCustomMetrics,
SubcontainersAvailable: len(subcontainerLinks) > 0,
- Root: rootDir,
+ Root: rootDir,
}
err = pageTemplate.Execute(w, data)
if err != nil {
diff --git a/vendor/github.com/google/cadvisor/pages/docker.go b/vendor/github.com/google/cadvisor/pages/docker.go
index f939d46f9..4fb2f6bc7 100644
--- a/vendor/github.com/google/cadvisor/pages/docker.go
+++ b/vendor/github.com/google/cadvisor/pages/docker.go
@@ -148,7 +148,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) {
NetworkAvailable: cont.Spec.HasNetwork,
FsAvailable: cont.Spec.HasFilesystem,
CustomMetricsAvailable: cont.Spec.HasCustomMetrics,
- Root: rootDir,
+ Root: rootDir,
}
}
diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml
new file mode 100644
index 000000000..6f440f1e4
--- /dev/null
+++ b/vendor/github.com/gorilla/context/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: 1.3
+ - go: 1.4
+ - go: 1.5
+ - go: 1.6
+ - go: 1.7
+ - go: tip
+ allow_failures:
+ - go: tip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - go vet $(go list ./... | grep -v /vendor/)
+ - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE
new file mode 100644
index 000000000..0e5fb8728
--- /dev/null
+++ b/vendor/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
new file mode 100644
index 000000000..08f86693b
--- /dev/null
+++ b/vendor/github.com/gorilla/context/README.md
@@ -0,0 +1,10 @@
+context
+=======
+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general purpose registry for global request variables.
+
+> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
+> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
new file mode 100644
index 000000000..81cb128b1
--- /dev/null
+++ b/vendor/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "net/http"
+ "sync"
+ "time"
+)
+
+var (
+ mutex sync.RWMutex
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+ mutex.Lock()
+ if data[r] == nil {
+ data[r] = make(map[interface{}]interface{})
+ datat[r] = time.Now().Unix()
+ }
+ data[r][key] = val
+ mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+ mutex.RLock()
+ if ctx := data[r]; ctx != nil {
+ value := ctx[key]
+ mutex.RUnlock()
+ return value
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetOk returns stored value and presence state like multi-value return of map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+ mutex.RLock()
+ if _, ok := data[r]; ok {
+ value, ok := data[r][key]
+ mutex.RUnlock()
+ return value, ok
+ }
+ mutex.RUnlock()
+ return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+ mutex.RLock()
+ if context, ok := data[r]; ok {
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+ mutex.RLock()
+ context, ok := data[r]
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+ mutex.Lock()
+ if data[r] != nil {
+ delete(data[r], key)
+ }
+ mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+ mutex.Lock()
+ clear(r)
+ mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+ delete(data, r)
+ delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the amount of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used for sanity check: in case context cleaning was not
+// properly set some request data can be kept forever, consuming an increasing
+// amount of memory. In case this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+ mutex.Lock()
+ count := 0
+ if maxAge <= 0 {
+ count = len(data)
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+ } else {
+ min := time.Now().Unix() - int64(maxAge)
+ for r := range data {
+ if datat[r] < min {
+ clear(r)
+ count++
+ }
+ }
+ }
+ mutex.Unlock()
+ return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer Clear(r)
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 000000000..448d1bfca
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,88 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+Note: gorilla/context, having been born well before `context.Context` existed,
+does not play well > with the shallow copying of the request that
+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
+(added to net/http Go 1.7 onwards) performs. You should either use *just*
+gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+sessions values to be saved at the end of a request. There are several
+others common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+ http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+ package foo
+
+ import (
+ "github.com/gorilla/context"
+ )
+
+ type key int
+
+ const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+ context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+ func MyHandler(w http.ResponseWriter, r *http.Request) {
+ // val is "bar".
+ val := context.Get(r, foo.MyKey)
+
+ // returns ("bar", true)
+ val, ok := context.GetOk(r, foo.MyKey)
+ // ...
+ }
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+ type key int
+
+ const mykey key = 0
+
+ // GetMyKey returns a value for this package from the request values.
+ func GetMyKey(r *http.Request) SomeType {
+ if rv := context.Get(r, mykey); rv != nil {
+ return rv.(SomeType)
+ }
+ return nil
+ }
+
+ // SetMyKey sets a value for this package in the request values.
+ func SetMyKey(r *http.Request, val SomeType) {
+ context.Set(r, mykey, val)
+ }
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+ context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear()
+so if you are using either of them you don't need to clear the context manually.
+*/
+package context
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f0d1fb6a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 000000000..7a950d177
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 000000000..9d2d8a4ba
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 000000000..336142a5e
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 000000000..9a28e57c3
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 000000000..14127cd83
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 000000000..195333e51
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,41 @@
+# Windows Terminal Sequences
+
+This library allow for enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+ sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+* Nicolas Perraut
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 000000000..716c61312
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 000000000..ef18d8f97
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
+ setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+ const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+ var mode uint32
+ err := syscall.GetConsoleMode(syscall.Stdout, &mode)
+ if err != nil {
+ return err
+ }
+
+ if enable {
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ } else {
+ mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ }
+
+ ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
new file mode 100644
index 000000000..df61a6f2f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
@@ -0,0 +1,11 @@
+// +build linux darwin
+
+package sequences
+
+import (
+ "fmt"
+)
+
+func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
+ return fmt.Errorf("windows only package")
+}
diff --git a/vendor/github.com/kr/pty/ztypes_openbsd_386.go b/vendor/github.com/kr/pty/ztypes_openbsd_386.go
index e67051688..ccb3aab9a 100644
--- a/vendor/github.com/kr/pty/ztypes_openbsd_386.go
+++ b/vendor/github.com/kr/pty/ztypes_openbsd_386.go
@@ -4,10 +4,10 @@
package pty
type ptmget struct {
- Cfd int32
- Sfd int32
- Cn [16]int8
- Sn [16]int8
+ Cfd int32
+ Sfd int32
+ Cn [16]int8
+ Sn [16]int8
}
var ioctl_PTMGET = 0x40287401
diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go
index 4899eed02..9756fcc75 100644
--- a/vendor/github.com/modern-go/concurrent/log.go
+++ b/vendor/github.com/modern-go/concurrent/log.go
@@ -1,13 +1,13 @@
package concurrent
import (
- "io/ioutil"
- "log"
"os"
+ "log"
+ "io/ioutil"
)
// ErrorLogger is used to print out error, can be set to writer other than stderr
var ErrorLogger = log.New(os.Stderr, "", 0)
// InfoLogger is used to print informational message, default to off
-var InfoLogger = log.New(ioutil.Discard, "", 0)
+var InfoLogger = log.New(ioutil.Discard, "", 0) \ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
index 5ea18eb7b..05a77dceb 100644
--- a/vendor/github.com/modern-go/concurrent/unbounded_executor.go
+++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
@@ -3,11 +3,11 @@ package concurrent
import (
"context"
"fmt"
- "reflect"
"runtime"
"runtime/debug"
"sync"
"time"
+ "reflect"
)
// HandlePanic logs goroutine panic by default
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
index 4fe9a5965..63b49c799 100644
--- a/vendor/github.com/modern-go/reflect2/reflect2.go
+++ b/vendor/github.com/modern-go/reflect2/reflect2.go
@@ -136,7 +136,7 @@ type frozenConfig struct {
func (cfg Config) Froze() *frozenConfig {
return &frozenConfig{
useSafeImplementation: cfg.UseSafeImplementation,
- cache: concurrent.NewMap(),
+ cache: concurrent.NewMap(),
}
}
@@ -291,8 +291,8 @@ func UnsafeCastString(str string) []byte {
stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
sliceHeader := &reflect.SliceHeader{
Data: stringHeader.Data,
- Cap: stringHeader.Len,
- Len: stringHeader.Len,
+ Cap: stringHeader.Len,
+ Len: stringHeader.Len,
}
return *(*[]byte)(unsafe.Pointer(sliceHeader))
}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
index 6eef40a0e..7fd68ee8e 100644
--- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
@@ -107,11 +107,11 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
NumberOfSamples: spec.subject.Samples(),
ComponentTexts: componentTexts,
ComponentCodeLocations: componentCodeLocations,
- State: spec.getState(),
- RunTime: runTime,
- Failure: spec.failure,
- Measurements: spec.measurementsReport(),
- SuiteID: suiteID,
+ State: spec.getState(),
+ RunTime: runTime,
+ Failure: spec.failure,
+ Measurements: spec.measurementsReport(),
+ SuiteID: suiteID,
}
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
index 81fc76cda..108800923 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
@@ -663,22 +663,22 @@ func (c consoleColor) backgroundAttr() (attr word) {
}
var color16 = []consoleColor{
- {0x000000, false, false, false, false},
- {0x000080, false, false, true, false},
- {0x008000, false, true, false, false},
- {0x008080, false, true, true, false},
- {0x800000, true, false, false, false},
- {0x800080, true, false, true, false},
- {0x808000, true, true, false, false},
- {0xc0c0c0, true, true, true, false},
- {0x808080, false, false, false, true},
- {0x0000ff, false, false, true, true},
- {0x00ff00, false, true, false, true},
- {0x00ffff, false, true, true, true},
- {0xff0000, true, false, false, true},
- {0xff00ff, true, false, true, true},
- {0xffff00, true, true, false, true},
- {0xffffff, true, true, true, true},
+ consoleColor{0x000000, false, false, false, false},
+ consoleColor{0x000080, false, false, true, false},
+ consoleColor{0x008000, false, true, false, false},
+ consoleColor{0x008080, false, true, true, false},
+ consoleColor{0x800000, true, false, false, false},
+ consoleColor{0x800080, true, false, true, false},
+ consoleColor{0x808000, true, true, false, false},
+ consoleColor{0xc0c0c0, true, true, true, false},
+ consoleColor{0x808080, false, false, false, true},
+ consoleColor{0x0000ff, false, false, true, true},
+ consoleColor{0x00ff00, false, true, false, true},
+ consoleColor{0x00ffff, false, true, true, true},
+ consoleColor{0xff0000, true, false, false, true},
+ consoleColor{0xff00ff, true, false, true, true},
+ consoleColor{0xffff00, true, true, false, true},
+ consoleColor{0xffffff, true, true, true, true},
}
type hsv struct {
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
index 3cfbc1f7e..003e99fad 100644
--- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -161,12 +161,12 @@ func (m *SequenceMatcher) chainB() {
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
- for s := range b2j {
+ for s, _ := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
- for s := range junk {
+ for s, _ := range junk {
delete(b2j, s)
}
}
@@ -181,7 +181,7 @@ func (m *SequenceMatcher) chainB() {
popular[s] = struct{}{}
}
}
- for s := range popular {
+ for s, _ := range popular {
delete(b2j, s)
}
}
@@ -416,7 +416,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
}
codes := m.GetOpCodes()
if len(codes) == 0 {
- codes = []OpCode{{'e', 0, 1, 0, 1}}
+ codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {
diff --git a/vendor/github.com/rkt/rkt/LICENSE b/vendor/github.com/rkt/rkt/LICENSE
new file mode 100644
index 000000000..5c304d1a4
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/rkt/rkt/api/v1alpha/README.md b/vendor/github.com/rkt/rkt/api/v1alpha/README.md
new file mode 100644
index 000000000..1c0e2a7cb
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/api/v1alpha/README.md
@@ -0,0 +1,25 @@
+# WARNING
+
+The API defined here is proposed, experimental, and (for now) subject to change at any time.
+
+If you think you want to use it, or for any other queries, contact <rkt-dev@googlegroups.com> or file an [issue](https://github.com/rkt/rkt/issues/new)
+
+For more information, see:
+- #1208
+- #1359
+- #1468
+- [API Service Subcommand](../../Documentation/subcommands/api-service.md)
+
+## Protobuf
+
+The rkt gRPC API uses Protocol Buffers for its services.
+In order to rebuild the generated code make sure you have protobuf 3.0.0 installed (https://github.com/google/protobuf)
+and execute from the top-level directory:
+
+```
+$ make protobuf
+```
+
+## Documentation
+
+HTML and Markdown Protobuf documentation is automatically generated and placed in [docs](docs/).
diff --git a/vendor/github.com/rkt/rkt/api/v1alpha/api.pb.go b/vendor/github.com/rkt/rkt/api/v1alpha/api.pb.go
new file mode 100644
index 000000000..3f1274999
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/api/v1alpha/api.pb.go
@@ -0,0 +1,1775 @@
+// Code generated by protoc-gen-go.
+// source: api.proto
+// DO NOT EDIT!
+
+/*
+Package v1alpha is a generated protocol buffer package.
+
+It is generated from these files:
+ api.proto
+
+It has these top-level messages:
+ ImageFormat
+ Image
+ Network
+ App
+ Pod
+ KeyValue
+ PodFilter
+ ImageFilter
+ GlobalFlags
+ Info
+ Event
+ EventFilter
+ GetInfoRequest
+ GetInfoResponse
+ ListPodsRequest
+ ListPodsResponse
+ InspectPodRequest
+ InspectPodResponse
+ ListImagesRequest
+ ListImagesResponse
+ InspectImageRequest
+ InspectImageResponse
+ ListenEventsRequest
+ ListenEventsResponse
+ GetLogsRequest
+ GetLogsResponse
+*/
+package v1alpha
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// ImageType defines the supported image type.
+type ImageType int32
+
+const (
+ ImageType_IMAGE_TYPE_UNDEFINED ImageType = 0
+ ImageType_IMAGE_TYPE_APPC ImageType = 1
+ ImageType_IMAGE_TYPE_DOCKER ImageType = 2
+ ImageType_IMAGE_TYPE_OCI ImageType = 3
+)
+
+var ImageType_name = map[int32]string{
+ 0: "IMAGE_TYPE_UNDEFINED",
+ 1: "IMAGE_TYPE_APPC",
+ 2: "IMAGE_TYPE_DOCKER",
+ 3: "IMAGE_TYPE_OCI",
+}
+var ImageType_value = map[string]int32{
+ "IMAGE_TYPE_UNDEFINED": 0,
+ "IMAGE_TYPE_APPC": 1,
+ "IMAGE_TYPE_DOCKER": 2,
+ "IMAGE_TYPE_OCI": 3,
+}
+
+func (x ImageType) String() string {
+ return proto.EnumName(ImageType_name, int32(x))
+}
+func (ImageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+// AppState defines the possible states of the app.
+type AppState int32
+
+const (
+ AppState_APP_STATE_UNDEFINED AppState = 0
+ AppState_APP_STATE_RUNNING AppState = 1
+ AppState_APP_STATE_EXITED AppState = 2
+)
+
+var AppState_name = map[int32]string{
+ 0: "APP_STATE_UNDEFINED",
+ 1: "APP_STATE_RUNNING",
+ 2: "APP_STATE_EXITED",
+}
+var AppState_value = map[string]int32{
+ "APP_STATE_UNDEFINED": 0,
+ "APP_STATE_RUNNING": 1,
+ "APP_STATE_EXITED": 2,
+}
+
+func (x AppState) String() string {
+ return proto.EnumName(AppState_name, int32(x))
+}
+func (AppState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+// PodState defines the possible states of the pod.
+// See https://github.com/rkt/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed
+// explanation of each state.
+type PodState int32
+
+const (
+ PodState_POD_STATE_UNDEFINED PodState = 0
+ // States before the pod is running.
+ PodState_POD_STATE_EMBRYO PodState = 1
+ PodState_POD_STATE_PREPARING PodState = 2
+ PodState_POD_STATE_PREPARED PodState = 3
+ // State that indicates the pod is running.
+ PodState_POD_STATE_RUNNING PodState = 4
+ // States that indicates the pod is exited, and will never run.
+ PodState_POD_STATE_ABORTED_PREPARE PodState = 5
+ PodState_POD_STATE_EXITED PodState = 6
+ PodState_POD_STATE_DELETING PodState = 7
+ PodState_POD_STATE_GARBAGE PodState = 8
+)
+
+var PodState_name = map[int32]string{
+ 0: "POD_STATE_UNDEFINED",
+ 1: "POD_STATE_EMBRYO",
+ 2: "POD_STATE_PREPARING",
+ 3: "POD_STATE_PREPARED",
+ 4: "POD_STATE_RUNNING",
+ 5: "POD_STATE_ABORTED_PREPARE",
+ 6: "POD_STATE_EXITED",
+ 7: "POD_STATE_DELETING",
+ 8: "POD_STATE_GARBAGE",
+}
+var PodState_value = map[string]int32{
+ "POD_STATE_UNDEFINED": 0,
+ "POD_STATE_EMBRYO": 1,
+ "POD_STATE_PREPARING": 2,
+ "POD_STATE_PREPARED": 3,
+ "POD_STATE_RUNNING": 4,
+ "POD_STATE_ABORTED_PREPARE": 5,
+ "POD_STATE_EXITED": 6,
+ "POD_STATE_DELETING": 7,
+ "POD_STATE_GARBAGE": 8,
+}
+
+func (x PodState) String() string {
+ return proto.EnumName(PodState_name, int32(x))
+}
+func (PodState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+// EventType defines the type of the events that will be received via ListenEvents().
+type EventType int32
+
+const (
+ EventType_EVENT_TYPE_UNDEFINED EventType = 0
+ // Pod events.
+ EventType_EVENT_TYPE_POD_PREPARED EventType = 1
+ EventType_EVENT_TYPE_POD_PREPARE_ABORTED EventType = 2
+ EventType_EVENT_TYPE_POD_STARTED EventType = 3
+ EventType_EVENT_TYPE_POD_EXITED EventType = 4
+ EventType_EVENT_TYPE_POD_GARBAGE_COLLECTED EventType = 5
+ // App events.
+ EventType_EVENT_TYPE_APP_STARTED EventType = 6
+ EventType_EVENT_TYPE_APP_EXITED EventType = 7
+ // Image events.
+ EventType_EVENT_TYPE_IMAGE_IMPORTED EventType = 8
+ EventType_EVENT_TYPE_IMAGE_REMOVED EventType = 9
+)
+
+var EventType_name = map[int32]string{
+ 0: "EVENT_TYPE_UNDEFINED",
+ 1: "EVENT_TYPE_POD_PREPARED",
+ 2: "EVENT_TYPE_POD_PREPARE_ABORTED",
+ 3: "EVENT_TYPE_POD_STARTED",
+ 4: "EVENT_TYPE_POD_EXITED",
+ 5: "EVENT_TYPE_POD_GARBAGE_COLLECTED",
+ 6: "EVENT_TYPE_APP_STARTED",
+ 7: "EVENT_TYPE_APP_EXITED",
+ 8: "EVENT_TYPE_IMAGE_IMPORTED",
+ 9: "EVENT_TYPE_IMAGE_REMOVED",
+}
+var EventType_value = map[string]int32{
+ "EVENT_TYPE_UNDEFINED": 0,
+ "EVENT_TYPE_POD_PREPARED": 1,
+ "EVENT_TYPE_POD_PREPARE_ABORTED": 2,
+ "EVENT_TYPE_POD_STARTED": 3,
+ "EVENT_TYPE_POD_EXITED": 4,
+ "EVENT_TYPE_POD_GARBAGE_COLLECTED": 5,
+ "EVENT_TYPE_APP_STARTED": 6,
+ "EVENT_TYPE_APP_EXITED": 7,
+ "EVENT_TYPE_IMAGE_IMPORTED": 8,
+ "EVENT_TYPE_IMAGE_REMOVED": 9,
+}
+
+func (x EventType) String() string {
+ return proto.EnumName(EventType_name, int32(x))
+}
+func (EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+// ImageFormat defines the format of the image.
+type ImageFormat struct {
+ // Type of the image, required.
+ Type ImageType `protobuf:"varint,1,opt,name=type,enum=v1alpha.ImageType" json:"type,omitempty"`
+ // Version of the image format, required.
+ Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+}
+
+func (m *ImageFormat) Reset() { *m = ImageFormat{} }
+func (m *ImageFormat) String() string { return proto.CompactTextString(m) }
+func (*ImageFormat) ProtoMessage() {}
+func (*ImageFormat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *ImageFormat) GetType() ImageType {
+ if m != nil {
+ return m.Type
+ }
+ return ImageType_IMAGE_TYPE_UNDEFINED
+}
+
+func (m *ImageFormat) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+// Image describes the image's information.
+type Image struct {
+ // Base format of the image, required. This indicates the original format
+ // for the image as nowadays all the image formats will be transformed to
+ // ACI.
+ BaseFormat *ImageFormat `protobuf:"bytes,1,opt,name=base_format,json=baseFormat" json:"base_format,omitempty"`
+ // ID of the image, a string that can be used to uniquely identify the image,
+ // e.g. sha512 hash of the ACIs, required.
+ Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
+ // Name of the image in the image manifest, e.g. 'coreos.com/etcd', optional.
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+ // Version of the image, e.g. 'latest', '2.0.10', optional.
+ Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"`
+ // Timestamp of when the image is imported, it is the seconds since epoch, optional.
+ ImportTimestamp int64 `protobuf:"varint,5,opt,name=import_timestamp,json=importTimestamp" json:"import_timestamp,omitempty"`
+ // JSON-encoded byte array that represents the image manifest, optional.
+ Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"`
+ // Size is the size in bytes of this image in the store.
+ Size int64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"`
+ // Annotations on this image.
+ Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"`
+ // Labels of this image.
+ Labels []*KeyValue `protobuf:"bytes,9,rep,name=labels" json:"labels,omitempty"`
+}
+
+func (m *Image) Reset() { *m = Image{} }
+func (m *Image) String() string { return proto.CompactTextString(m) }
+func (*Image) ProtoMessage() {}
+func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Image) GetBaseFormat() *ImageFormat {
+ if m != nil {
+ return m.BaseFormat
+ }
+ return nil
+}
+
+func (m *Image) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *Image) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Image) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *Image) GetImportTimestamp() int64 {
+ if m != nil {
+ return m.ImportTimestamp
+ }
+ return 0
+}
+
+func (m *Image) GetManifest() []byte {
+ if m != nil {
+ return m.Manifest
+ }
+ return nil
+}
+
+func (m *Image) GetSize() int64 {
+ if m != nil {
+ return m.Size
+ }
+ return 0
+}
+
+func (m *Image) GetAnnotations() []*KeyValue {
+ if m != nil {
+ return m.Annotations
+ }
+ return nil
+}
+
+func (m *Image) GetLabels() []*KeyValue {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+// Network describes the network information of a pod.
+type Network struct {
+ // Name of the network that a pod belongs to, required.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Pod's IPv4 address within the network, optional if IPv6 address is given.
+ Ipv4 string `protobuf:"bytes,2,opt,name=ipv4" json:"ipv4,omitempty"`
+ // Pod's IPv6 address within the network, optional if IPv4 address is given.
+ Ipv6 string `protobuf:"bytes,3,opt,name=ipv6" json:"ipv6,omitempty"`
+}
+
+func (m *Network) Reset() { *m = Network{} }
+func (m *Network) String() string { return proto.CompactTextString(m) }
+func (*Network) ProtoMessage() {}
+func (*Network) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *Network) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Network) GetIpv4() string {
+ if m != nil {
+ return m.Ipv4
+ }
+ return ""
+}
+
+func (m *Network) GetIpv6() string {
+ if m != nil {
+ return m.Ipv6
+ }
+ return ""
+}
+
+// App describes the information of an app that's running in a pod.
+type App struct {
+ // Name of the app, required.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Image used by the app, required. However, this may only contain the image id
+ // if it is returned by ListPods().
+ Image *Image `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"`
+ // State of the app. optional, non-empty only if it's returned by InspectPod().
+ State AppState `protobuf:"varint,3,opt,name=state,enum=v1alpha.AppState" json:"state,omitempty"`
+ // Exit code of the app. optional, only valid if it's returned by InspectPod() and
+ // the app has already exited.
+ ExitCode int32 `protobuf:"zigzag32,4,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"`
+ // Annotations for this app.
+ Annotations []*KeyValue `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty"`
+}
+
+func (m *App) Reset() { *m = App{} }
+func (m *App) String() string { return proto.CompactTextString(m) }
+func (*App) ProtoMessage() {}
+func (*App) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *App) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *App) GetImage() *Image {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *App) GetState() AppState {
+ if m != nil {
+ return m.State
+ }
+ return AppState_APP_STATE_UNDEFINED
+}
+
+func (m *App) GetExitCode() int32 {
+ if m != nil {
+ return m.ExitCode
+ }
+ return 0
+}
+
+func (m *App) GetAnnotations() []*KeyValue {
+ if m != nil {
+ return m.Annotations
+ }
+ return nil
+}
+
+// Pod describes a pod's information.
+// If a pod is in Embryo, Preparing, AbortedPrepare state,
+// only id and state will be returned.
+//
+// If a pod is in other states, the pod manifest and
+// apps will be returned when 'detailed' is true in the request.
+//
+// A valid pid of the stage1 process of the pod will be returned
+// if the pod is Running has run once.
+//
+// Networks are only returned when a pod is in Running.
+type Pod struct {
+ // ID of the pod, in the form of a UUID.
+ Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ // PID of the stage1 process of the pod.
+ Pid int32 `protobuf:"zigzag32,2,opt,name=pid" json:"pid,omitempty"`
+ // State of the pod.
+ State PodState `protobuf:"varint,3,opt,name=state,enum=v1alpha.PodState" json:"state,omitempty"`
+ // List of apps in the pod.
+ Apps []*App `protobuf:"bytes,4,rep,name=apps" json:"apps,omitempty"`
+ // Network information of the pod.
+ // Note that a pod can be in multiple networks.
+ Networks []*Network `protobuf:"bytes,5,rep,name=networks" json:"networks,omitempty"`
+ // JSON-encoded byte array that represents the pod manifest of the pod.
+ Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"`
+ // Annotations on this pod.
+ Annotations []*KeyValue `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty"`
+ // Cgroup of the pod, empty if the pod is not running.
+ Cgroup string `protobuf:"bytes,8,opt,name=cgroup" json:"cgroup,omitempty"`
+ // Timestamp of when the pod is created, nanoseconds since epoch.
+ // Zero if the pod is not created.
+ CreatedAt int64 `protobuf:"varint,9,opt,name=created_at,json=createdAt" json:"created_at,omitempty"`
+ // Timestamp of when the pod is started, nanoseconds since epoch.
+ // Zero if the pod is not started.
+ StartedAt int64 `protobuf:"varint,10,opt,name=started_at,json=startedAt" json:"started_at,omitempty"`
+ // Timestamp of when the pod is moved to exited-garbage/garbage,
+ // in nanoseconds since epoch.
+ // Zero if the pod is not moved to exited-garbage/garbage yet.
+ GcMarkedAt int64 `protobuf:"varint,11,opt,name=gc_marked_at,json=gcMarkedAt" json:"gc_marked_at,omitempty"`
+}
+
+func (m *Pod) Reset() { *m = Pod{} }
+func (m *Pod) String() string { return proto.CompactTextString(m) }
+func (*Pod) ProtoMessage() {}
+func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *Pod) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *Pod) GetPid() int32 {
+ if m != nil {
+ return m.Pid
+ }
+ return 0
+}
+
+func (m *Pod) GetState() PodState {
+ if m != nil {
+ return m.State
+ }
+ return PodState_POD_STATE_UNDEFINED
+}
+
+func (m *Pod) GetApps() []*App {
+ if m != nil {
+ return m.Apps
+ }
+ return nil
+}
+
+func (m *Pod) GetNetworks() []*Network {
+ if m != nil {
+ return m.Networks
+ }
+ return nil
+}
+
+func (m *Pod) GetManifest() []byte {
+ if m != nil {
+ return m.Manifest
+ }
+ return nil
+}
+
+func (m *Pod) GetAnnotations() []*KeyValue {
+ if m != nil {
+ return m.Annotations
+ }
+ return nil
+}
+
+func (m *Pod) GetCgroup() string {
+ if m != nil {
+ return m.Cgroup
+ }
+ return ""
+}
+
+func (m *Pod) GetCreatedAt() int64 {
+ if m != nil {
+ return m.CreatedAt
+ }
+ return 0
+}
+
+func (m *Pod) GetStartedAt() int64 {
+ if m != nil {
+ return m.StartedAt
+ }
+ return 0
+}
+
+func (m *Pod) GetGcMarkedAt() int64 {
+ if m != nil {
+ return m.GcMarkedAt
+ }
+ return 0
+}
+
+type KeyValue struct {
+ // Key part of the key-value pair.
+ Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
+ // Value part of the key-value pair.
+ Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *KeyValue) Reset() { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage() {}
+func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *KeyValue) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *KeyValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// PodFilter defines the condition that the returned pods need to satisfy in ListPods().
+// The conditions are combined by 'AND', and different filters are combined by 'OR'.
+type PodFilter struct {
+ // If not empty, the pods that have any of the ids will be returned.
+ Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
+ // If not empty, the pods that have any of the states will be returned.
+ States []PodState `protobuf:"varint,2,rep,packed,name=states,enum=v1alpha.PodState" json:"states,omitempty"`
+ // If not empty, the pods that all of the apps will be returned.
+ AppNames []string `protobuf:"bytes,3,rep,name=app_names,json=appNames" json:"app_names,omitempty"`
+ // If not empty, the pods that have all of the images(in the apps) will be returned
+ ImageIds []string `protobuf:"bytes,4,rep,name=image_ids,json=imageIds" json:"image_ids,omitempty"`
+ // If not empty, the pods that are in all of the networks will be returned.
+ NetworkNames []string `protobuf:"bytes,5,rep,name=network_names,json=networkNames" json:"network_names,omitempty"`
+ // If not empty, the pods that have all of the annotations will be returned.
+ Annotations []*KeyValue `protobuf:"bytes,6,rep,name=annotations" json:"annotations,omitempty"`
+ // If not empty, the pods whose cgroup are listed will be returned.
+ Cgroups []string `protobuf:"bytes,7,rep,name=cgroups" json:"cgroups,omitempty"`
+ // If not empty, the pods whose these cgroup belong to will be returned.
+ // i.e. the pod's cgroup is a prefix of the specified cgroup
+ PodSubCgroups []string `protobuf:"bytes,8,rep,name=pod_sub_cgroups,json=podSubCgroups" json:"pod_sub_cgroups,omitempty"`
+}
+
+func (m *PodFilter) Reset() { *m = PodFilter{} }
+func (m *PodFilter) String() string { return proto.CompactTextString(m) }
+func (*PodFilter) ProtoMessage() {}
+func (*PodFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *PodFilter) GetIds() []string {
+ if m != nil {
+ return m.Ids
+ }
+ return nil
+}
+
+func (m *PodFilter) GetStates() []PodState {
+ if m != nil {
+ return m.States
+ }
+ return nil
+}
+
+func (m *PodFilter) GetAppNames() []string {
+ if m != nil {
+ return m.AppNames
+ }
+ return nil
+}
+
+func (m *PodFilter) GetImageIds() []string {
+ if m != nil {
+ return m.ImageIds
+ }
+ return nil
+}
+
+func (m *PodFilter) GetNetworkNames() []string {
+ if m != nil {
+ return m.NetworkNames
+ }
+ return nil
+}
+
+func (m *PodFilter) GetAnnotations() []*KeyValue {
+ if m != nil {
+ return m.Annotations
+ }
+ return nil
+}
+
+func (m *PodFilter) GetCgroups() []string {
+ if m != nil {
+ return m.Cgroups
+ }
+ return nil
+}
+
+func (m *PodFilter) GetPodSubCgroups() []string {
+ if m != nil {
+ return m.PodSubCgroups
+ }
+ return nil
+}
+
+// ImageFilter defines the condition that the returned images need to satisfy in ListImages().
+// The conditions are combined by 'AND', and different filters are combined by 'OR'.
+type ImageFilter struct {
+ // If not empty, the images that have any of the ids will be returned.
+ Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
+ // if not empty, the images that have any of the prefixes in the name will be returned.
+ Prefixes []string `protobuf:"bytes,2,rep,name=prefixes" json:"prefixes,omitempty"`
+ // If not empty, the images that have any of the base names will be returned.
+ // For example, both 'coreos.com/etcd' and 'k8s.io/etcd' will be returned if 'etcd' is included,
+ // however 'k8s.io/etcd-backup' will not be returned.
+ BaseNames []string `protobuf:"bytes,3,rep,name=base_names,json=baseNames" json:"base_names,omitempty"`
+ // If not empty, the images that have any of the keywords in the name will be returned.
+ // For example, both 'kubernetes-etcd', 'etcd:latest' will be returned if 'etcd' is included,
+ Keywords []string `protobuf:"bytes,4,rep,name=keywords" json:"keywords,omitempty"`
+ // If not empty, the images that have all of the labels will be returned.
+ Labels []*KeyValue `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
+ // If set, the images that are imported after this timestamp will be returned.
+ ImportedAfter int64 `protobuf:"varint,6,opt,name=imported_after,json=importedAfter" json:"imported_after,omitempty"`
+ // If set, the images that are imported before this timestamp will be returned.
+ ImportedBefore int64 `protobuf:"varint,7,opt,name=imported_before,json=importedBefore" json:"imported_before,omitempty"`
+ // If not empty, the images that have all of the annotations will be returned.
+ Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"`
+ // If not empty, the images that have any of the exact full names will be returned.
+ FullNames []string `protobuf:"bytes,9,rep,name=full_names,json=fullNames" json:"full_names,omitempty"`
+}
+
+func (m *ImageFilter) Reset() { *m = ImageFilter{} }
+func (m *ImageFilter) String() string { return proto.CompactTextString(m) }
+func (*ImageFilter) ProtoMessage() {}
+func (*ImageFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *ImageFilter) GetIds() []string {
+ if m != nil {
+ return m.Ids
+ }
+ return nil
+}
+
+func (m *ImageFilter) GetPrefixes() []string {
+ if m != nil {
+ return m.Prefixes
+ }
+ return nil
+}
+
+func (m *ImageFilter) GetBaseNames() []string {
+ if m != nil {
+ return m.BaseNames
+ }
+ return nil
+}
+
+func (m *ImageFilter) GetKeywords() []string {
+ if m != nil {
+ return m.Keywords
+ }
+ return nil
+}
+
+func (m *ImageFilter) GetLabels() []*KeyValue {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *ImageFilter) GetImportedAfter() int64 {
+ if m != nil {
+ return m.ImportedAfter
+ }
+ return 0
+}
+
+func (m *ImageFilter) GetImportedBefore() int64 {
+ if m != nil {
+ return m.ImportedBefore
+ }
+ return 0
+}
+
+func (m *ImageFilter) GetAnnotations() []*KeyValue {
+ if m != nil {
+ return m.Annotations
+ }
+ return nil
+}
+
+func (m *ImageFilter) GetFullNames() []string {
+ if m != nil {
+ return m.FullNames
+ }
+ return nil
+}
+
+// GlobalFlags describes the flags that passed to rkt api service when it is launched.
+type GlobalFlags struct {
+ // Data directory.
+ Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
+ // System configuration directory.
+ SystemConfigDir string `protobuf:"bytes,2,opt,name=system_config_dir,json=systemConfigDir" json:"system_config_dir,omitempty"`
+ // Local configuration directory.
+ LocalConfigDir string `protobuf:"bytes,3,opt,name=local_config_dir,json=localConfigDir" json:"local_config_dir,omitempty"`
+ // User configuration directory.
+ UserConfigDir string `protobuf:"bytes,4,opt,name=user_config_dir,json=userConfigDir" json:"user_config_dir,omitempty"`
+ // Insecure flags configurates what security features to disable.
+ InsecureFlags string `protobuf:"bytes,5,opt,name=insecure_flags,json=insecureFlags" json:"insecure_flags,omitempty"`
+ // Whether to automatically trust gpg keys fetched from https
+ TrustKeysFromHttps bool `protobuf:"varint,6,opt,name=trust_keys_from_https,json=trustKeysFromHttps" json:"trust_keys_from_https,omitempty"`
+}
+
+func (m *GlobalFlags) Reset() { *m = GlobalFlags{} }
+func (m *GlobalFlags) String() string { return proto.CompactTextString(m) }
+func (*GlobalFlags) ProtoMessage() {}
+func (*GlobalFlags) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *GlobalFlags) GetDir() string {
+ if m != nil {
+ return m.Dir
+ }
+ return ""
+}
+
+func (m *GlobalFlags) GetSystemConfigDir() string {
+ if m != nil {
+ return m.SystemConfigDir
+ }
+ return ""
+}
+
+func (m *GlobalFlags) GetLocalConfigDir() string {
+ if m != nil {
+ return m.LocalConfigDir
+ }
+ return ""
+}
+
+func (m *GlobalFlags) GetUserConfigDir() string {
+ if m != nil {
+ return m.UserConfigDir
+ }
+ return ""
+}
+
+func (m *GlobalFlags) GetInsecureFlags() string {
+ if m != nil {
+ return m.InsecureFlags
+ }
+ return ""
+}
+
+func (m *GlobalFlags) GetTrustKeysFromHttps() bool {
+ if m != nil {
+ return m.TrustKeysFromHttps
+ }
+ return false
+}
+
+// Info describes the information of rkt on the machine.
+type Info struct {
+ // Version of rkt, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
+ RktVersion string `protobuf:"bytes,1,opt,name=rkt_version,json=rktVersion" json:"rkt_version,omitempty"`
+ // Version of appc, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
+ AppcVersion string `protobuf:"bytes,2,opt,name=appc_version,json=appcVersion" json:"appc_version,omitempty"`
+ // Latest version of the api that's supported by the service, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
+ ApiVersion string `protobuf:"bytes,3,opt,name=api_version,json=apiVersion" json:"api_version,omitempty"`
+ // The global flags that passed to the rkt api service when it's launched.
+ GlobalFlags *GlobalFlags `protobuf:"bytes,4,opt,name=global_flags,json=globalFlags" json:"global_flags,omitempty"`
+}
+
+func (m *Info) Reset() { *m = Info{} }
+func (m *Info) String() string { return proto.CompactTextString(m) }
+func (*Info) ProtoMessage() {}
+func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *Info) GetRktVersion() string {
+ if m != nil {
+ return m.RktVersion
+ }
+ return ""
+}
+
+func (m *Info) GetAppcVersion() string {
+ if m != nil {
+ return m.AppcVersion
+ }
+ return ""
+}
+
+func (m *Info) GetApiVersion() string {
+ if m != nil {
+ return m.ApiVersion
+ }
+ return ""
+}
+
+func (m *Info) GetGlobalFlags() *GlobalFlags {
+ if m != nil {
+ return m.GlobalFlags
+ }
+ return nil
+}
+
+// Event describes the events that will be received via ListenEvents().
+type Event struct {
+ // Type of the event, required.
+ Type EventType `protobuf:"varint,1,opt,name=type,enum=v1alpha.EventType" json:"type,omitempty"`
+ // ID of the subject that causes the event, required.
+ // If the event is a pod or app event, the id is the pod's uuid.
+ // If the event is an image event, the id is the image's id.
+ Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
+ // Name of the subject that causes the event, required.
+ // If the event is a pod event, the name is the pod's name.
+ // If the event is an app event, the name is the app's name.
+ // If the event is an image event, the name is the image's name.
+ From string `protobuf:"bytes,3,opt,name=from" json:"from,omitempty"`
+ // Timestamp of when the event happens, it is the seconds since epoch, required.
+ Time int64 `protobuf:"varint,4,opt,name=time" json:"time,omitempty"`
+ // Data of the event, in the form of key-value pairs, optional.
+ Data []*KeyValue `protobuf:"bytes,5,rep,name=data" json:"data,omitempty"`
+}
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *Event) GetType() EventType {
+ if m != nil {
+ return m.Type
+ }
+ return EventType_EVENT_TYPE_UNDEFINED
+}
+
+func (m *Event) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *Event) GetFrom() string {
+ if m != nil {
+ return m.From
+ }
+ return ""
+}
+
+func (m *Event) GetTime() int64 {
+ if m != nil {
+ return m.Time
+ }
+ return 0
+}
+
+func (m *Event) GetData() []*KeyValue {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+// EventFilter defines the condition that the returned events needs to satisfy in ListImages().
+// The condition are combined by 'AND'.
+type EventFilter struct {
+ // If not empty, then only returns the events that have the listed types.
+ Types []EventType `protobuf:"varint,1,rep,packed,name=types,enum=v1alpha.EventType" json:"types,omitempty"`
+ // If not empty, then only returns the events whose 'id' is included in the listed ids.
+ Ids []string `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"`
+ // If not empty, then only returns the events whose 'from' is included in the listed names.
+ Names []string `protobuf:"bytes,3,rep,name=names" json:"names,omitempty"`
+ // If set, then only returns the events after this timestamp.
+ // If the server starts after since_time, then only the events happened after the start of the server will be returned.
+ // If since_time is a future timestamp, then no events will be returned until that time.
+ SinceTime int64 `protobuf:"varint,4,opt,name=since_time,json=sinceTime" json:"since_time,omitempty"`
+ // If set, then only returns the events before this timestamp.
+ // If it is a future timestamp, then the event stream will be closed at that moment.
+ UntilTime int64 `protobuf:"varint,5,opt,name=until_time,json=untilTime" json:"until_time,omitempty"`
+}
+
+func (m *EventFilter) Reset() { *m = EventFilter{} }
+func (m *EventFilter) String() string { return proto.CompactTextString(m) }
+func (*EventFilter) ProtoMessage() {}
+func (*EventFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *EventFilter) GetTypes() []EventType {
+ if m != nil {
+ return m.Types
+ }
+ return nil
+}
+
+func (m *EventFilter) GetIds() []string {
+ if m != nil {
+ return m.Ids
+ }
+ return nil
+}
+
+func (m *EventFilter) GetNames() []string {
+ if m != nil {
+ return m.Names
+ }
+ return nil
+}
+
+func (m *EventFilter) GetSinceTime() int64 {
+ if m != nil {
+ return m.SinceTime
+ }
+ return 0
+}
+
+func (m *EventFilter) GetUntilTime() int64 {
+ if m != nil {
+ return m.UntilTime
+ }
+ return 0
+}
+
+// Request for GetInfo().
+type GetInfoRequest struct {
+}
+
+func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} }
+func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) }
+func (*GetInfoRequest) ProtoMessage() {}
+func (*GetInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+// Response for GetInfo().
+type GetInfoResponse struct {
+ Info *Info `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"`
+}
+
+func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} }
+func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*GetInfoResponse) ProtoMessage() {}
+func (*GetInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *GetInfoResponse) GetInfo() *Info {
+ if m != nil {
+ return m.Info
+ }
+ return nil
+}
+
+// Request for ListPods().
+type ListPodsRequest struct {
+ Filters []*PodFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+ Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"`
+}
+
+func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} }
+func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListPodsRequest) ProtoMessage() {}
+func (*ListPodsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *ListPodsRequest) GetFilters() []*PodFilter {
+ if m != nil {
+ return m.Filters
+ }
+ return nil
+}
+
+func (m *ListPodsRequest) GetDetail() bool {
+ if m != nil {
+ return m.Detail
+ }
+ return false
+}
+
+// Response for ListPods().
+type ListPodsResponse struct {
+ Pods []*Pod `protobuf:"bytes,1,rep,name=pods" json:"pods,omitempty"`
+}
+
+func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} }
+func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListPodsResponse) ProtoMessage() {}
+func (*ListPodsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *ListPodsResponse) GetPods() []*Pod {
+ if m != nil {
+ return m.Pods
+ }
+ return nil
+}
+
+// Request for InspectPod().
+type InspectPodRequest struct {
+ // ID of the pod which we are querying status for, required.
+ Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+}
+
+func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} }
+func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) }
+func (*InspectPodRequest) ProtoMessage() {}
+func (*InspectPodRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *InspectPodRequest) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+// Response for InspectPod().
+type InspectPodResponse struct {
+ Pod *Pod `protobuf:"bytes,1,opt,name=pod" json:"pod,omitempty"`
+}
+
+func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} }
+func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) }
+func (*InspectPodResponse) ProtoMessage() {}
+func (*InspectPodResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *InspectPodResponse) GetPod() *Pod {
+ if m != nil {
+ return m.Pod
+ }
+ return nil
+}
+
+// Request for ListImages().
+type ListImagesRequest struct {
+ Filters []*ImageFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+ Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"`
+}
+
+func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} }
+func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListImagesRequest) ProtoMessage() {}
+func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *ListImagesRequest) GetFilters() []*ImageFilter {
+ if m != nil {
+ return m.Filters
+ }
+ return nil
+}
+
+func (m *ListImagesRequest) GetDetail() bool {
+ if m != nil {
+ return m.Detail
+ }
+ return false
+}
+
+// Response for ListImages().
+type ListImagesResponse struct {
+ Images []*Image `protobuf:"bytes,1,rep,name=images" json:"images,omitempty"`
+}
+
+func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} }
+func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListImagesResponse) ProtoMessage() {}
+func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *ListImagesResponse) GetImages() []*Image {
+ if m != nil {
+ return m.Images
+ }
+ return nil
+}
+
+// Request for InspectImage().
+type InspectImageRequest struct {
+ Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+}
+
+func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} }
+func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) }
+func (*InspectImageRequest) ProtoMessage() {}
+func (*InspectImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *InspectImageRequest) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+// Response for InspectImage().
+type InspectImageResponse struct {
+ Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
+}
+
+func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} }
+func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) }
+func (*InspectImageResponse) ProtoMessage() {}
+func (*InspectImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+func (m *InspectImageResponse) GetImage() *Image {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+// Request for ListenEvents().
+type ListenEventsRequest struct {
+ Filter *EventFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"`
+}
+
+func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} }
+func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListenEventsRequest) ProtoMessage() {}
+func (*ListenEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *ListenEventsRequest) GetFilter() *EventFilter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+// Response for ListenEvents().
+type ListenEventsResponse struct {
+ // Aggregate multiple events to reduce round trips, optional as the response can contain no events.
+ Events []*Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
+}
+
+func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} }
+func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListenEventsResponse) ProtoMessage() {}
+func (*ListenEventsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *ListenEventsResponse) GetEvents() []*Event {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+// Request for GetLogs().
+type GetLogsRequest struct {
+ // ID of the pod which we will get logs from, required.
+ PodId string `protobuf:"bytes,1,opt,name=pod_id,json=podId" json:"pod_id,omitempty"`
+ // Name of the app within the pod which we will get logs
+ // from, optional. If not set, then the logs of all the
+ // apps within the pod will be returned.
+ AppName string `protobuf:"bytes,2,opt,name=app_name,json=appName" json:"app_name,omitempty"`
+ // Number of most recent lines to return, optional.
+ Lines int32 `protobuf:"varint,3,opt,name=lines" json:"lines,omitempty"`
+ // If true, then a response stream will not be closed,
+ // and new log response will be sent via the stream, default is false.
+ Follow bool `protobuf:"varint,4,opt,name=follow" json:"follow,omitempty"`
+ // If set, then only the logs after the timestamp will
+ // be returned, optional.
+ SinceTime int64 `protobuf:"varint,5,opt,name=since_time,json=sinceTime" json:"since_time,omitempty"`
+ // If set, then only the logs before the timestamp will
+ // be returned, optional.
+ UntilTime int64 `protobuf:"varint,6,opt,name=until_time,json=untilTime" json:"until_time,omitempty"`
+}
+
+func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} }
+func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetLogsRequest) ProtoMessage() {}
+func (*GetLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *GetLogsRequest) GetPodId() string {
+ if m != nil {
+ return m.PodId
+ }
+ return ""
+}
+
+func (m *GetLogsRequest) GetAppName() string {
+ if m != nil {
+ return m.AppName
+ }
+ return ""
+}
+
+func (m *GetLogsRequest) GetLines() int32 {
+ if m != nil {
+ return m.Lines
+ }
+ return 0
+}
+
+func (m *GetLogsRequest) GetFollow() bool {
+ if m != nil {
+ return m.Follow
+ }
+ return false
+}
+
+func (m *GetLogsRequest) GetSinceTime() int64 {
+ if m != nil {
+ return m.SinceTime
+ }
+ return 0
+}
+
+func (m *GetLogsRequest) GetUntilTime() int64 {
+ if m != nil {
+ return m.UntilTime
+ }
+ return 0
+}
+
+// Response for GetLogs().
+type GetLogsResponse struct {
+ // List of the log lines that returned, optional as the response can contain no logs.
+ Lines []string `protobuf:"bytes,1,rep,name=lines" json:"lines,omitempty"`
+}
+
+func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} }
+func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetLogsResponse) ProtoMessage() {}
+func (*GetLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *GetLogsResponse) GetLines() []string {
+ if m != nil {
+ return m.Lines
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*ImageFormat)(nil), "v1alpha.ImageFormat")
+ proto.RegisterType((*Image)(nil), "v1alpha.Image")
+ proto.RegisterType((*Network)(nil), "v1alpha.Network")
+ proto.RegisterType((*App)(nil), "v1alpha.App")
+ proto.RegisterType((*Pod)(nil), "v1alpha.Pod")
+ proto.RegisterType((*KeyValue)(nil), "v1alpha.KeyValue")
+ proto.RegisterType((*PodFilter)(nil), "v1alpha.PodFilter")
+ proto.RegisterType((*ImageFilter)(nil), "v1alpha.ImageFilter")
+ proto.RegisterType((*GlobalFlags)(nil), "v1alpha.GlobalFlags")
+ proto.RegisterType((*Info)(nil), "v1alpha.Info")
+ proto.RegisterType((*Event)(nil), "v1alpha.Event")
+ proto.RegisterType((*EventFilter)(nil), "v1alpha.EventFilter")
+ proto.RegisterType((*GetInfoRequest)(nil), "v1alpha.GetInfoRequest")
+ proto.RegisterType((*GetInfoResponse)(nil), "v1alpha.GetInfoResponse")
+ proto.RegisterType((*ListPodsRequest)(nil), "v1alpha.ListPodsRequest")
+ proto.RegisterType((*ListPodsResponse)(nil), "v1alpha.ListPodsResponse")
+ proto.RegisterType((*InspectPodRequest)(nil), "v1alpha.InspectPodRequest")
+ proto.RegisterType((*InspectPodResponse)(nil), "v1alpha.InspectPodResponse")
+ proto.RegisterType((*ListImagesRequest)(nil), "v1alpha.ListImagesRequest")
+ proto.RegisterType((*ListImagesResponse)(nil), "v1alpha.ListImagesResponse")
+ proto.RegisterType((*InspectImageRequest)(nil), "v1alpha.InspectImageRequest")
+ proto.RegisterType((*InspectImageResponse)(nil), "v1alpha.InspectImageResponse")
+ proto.RegisterType((*ListenEventsRequest)(nil), "v1alpha.ListenEventsRequest")
+ proto.RegisterType((*ListenEventsResponse)(nil), "v1alpha.ListenEventsResponse")
+ proto.RegisterType((*GetLogsRequest)(nil), "v1alpha.GetLogsRequest")
+ proto.RegisterType((*GetLogsResponse)(nil), "v1alpha.GetLogsResponse")
+ proto.RegisterEnum("v1alpha.ImageType", ImageType_name, ImageType_value)
+ proto.RegisterEnum("v1alpha.AppState", AppState_name, AppState_value)
+ proto.RegisterEnum("v1alpha.PodState", PodState_name, PodState_value)
+ proto.RegisterEnum("v1alpha.EventType", EventType_name, EventType_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for PublicAPI service
+
+type PublicAPIClient interface {
+ // GetInfo gets the rkt's information on the machine.
+ GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error)
+ // ListPods lists rkt pods on the machine.
+ ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*ListPodsResponse, error)
+ // InspectPod gets detailed pod information of the specified pod.
+ InspectPod(ctx context.Context, in *InspectPodRequest, opts ...grpc.CallOption) (*InspectPodResponse, error)
+ // ListImages lists the images on the machine.
+ ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error)
+ // InspectImage gets the detailed image information of the specified image.
+ InspectImage(ctx context.Context, in *InspectImageRequest, opts ...grpc.CallOption) (*InspectImageResponse, error)
+ // ListenEvents listens for the events, it will return a response stream
+ // that will contain event objects.
+ ListenEvents(ctx context.Context, in *ListenEventsRequest, opts ...grpc.CallOption) (PublicAPI_ListenEventsClient, error)
+ // GetLogs gets the logs for a pod, if the app is also specified, then only the logs
+ // of the app will be returned.
+ //
+ // If 'follow' in the 'GetLogsRequest' is set to 'true', then the response stream
+ // will not be closed after the first response, the future logs will be sent via
+ // the stream.
+ GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (PublicAPI_GetLogsClient, error)
+}
+
+type publicAPIClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewPublicAPIClient(cc *grpc.ClientConn) PublicAPIClient {
+ return &publicAPIClient{cc}
+}
+
+func (c *publicAPIClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) {
+ out := new(GetInfoResponse)
+ err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/GetInfo", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *publicAPIClient) ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*ListPodsResponse, error) {
+ out := new(ListPodsResponse)
+ err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/ListPods", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *publicAPIClient) InspectPod(ctx context.Context, in *InspectPodRequest, opts ...grpc.CallOption) (*InspectPodResponse, error) {
+ out := new(InspectPodResponse)
+ err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/InspectPod", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *publicAPIClient) ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) {
+ out := new(ListImagesResponse)
+ err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/ListImages", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *publicAPIClient) InspectImage(ctx context.Context, in *InspectImageRequest, opts ...grpc.CallOption) (*InspectImageResponse, error) {
+ out := new(InspectImageResponse)
+ err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/InspectImage", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *publicAPIClient) ListenEvents(ctx context.Context, in *ListenEventsRequest, opts ...grpc.CallOption) (PublicAPI_ListenEventsClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_PublicAPI_serviceDesc.Streams[0], c.cc, "/v1alpha.PublicAPI/ListenEvents", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &publicAPIListenEventsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type PublicAPI_ListenEventsClient interface {
+ Recv() (*ListenEventsResponse, error)
+ grpc.ClientStream
+}
+
+type publicAPIListenEventsClient struct {
+ grpc.ClientStream
+}
+
+func (x *publicAPIListenEventsClient) Recv() (*ListenEventsResponse, error) {
+ m := new(ListenEventsResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *publicAPIClient) GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (PublicAPI_GetLogsClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_PublicAPI_serviceDesc.Streams[1], c.cc, "/v1alpha.PublicAPI/GetLogs", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &publicAPIGetLogsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type PublicAPI_GetLogsClient interface {
+ Recv() (*GetLogsResponse, error)
+ grpc.ClientStream
+}
+
+type publicAPIGetLogsClient struct {
+ grpc.ClientStream
+}
+
+func (x *publicAPIGetLogsClient) Recv() (*GetLogsResponse, error) {
+ m := new(GetLogsResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// Server API for PublicAPI service
+
+type PublicAPIServer interface {
+ // GetInfo gets the rkt's information on the machine.
+ GetInfo(context.Context, *GetInfoRequest) (*GetInfoResponse, error)
+ // ListPods lists rkt pods on the machine.
+ ListPods(context.Context, *ListPodsRequest) (*ListPodsResponse, error)
+ // InspectPod gets detailed pod information of the specified pod.
+ InspectPod(context.Context, *InspectPodRequest) (*InspectPodResponse, error)
+ // ListImages lists the images on the machine.
+ ListImages(context.Context, *ListImagesRequest) (*ListImagesResponse, error)
+ // InspectImage gets the detailed image information of the specified image.
+ InspectImage(context.Context, *InspectImageRequest) (*InspectImageResponse, error)
+ // ListenEvents listens for the events, it will return a response stream
+ // that will contain event objects.
+ ListenEvents(*ListenEventsRequest, PublicAPI_ListenEventsServer) error
+ // GetLogs gets the logs for a pod, if the app is also specified, then only the logs
+ // of the app will be returned.
+ //
+ // If 'follow' in the 'GetLogsRequest' is set to 'true', then the response stream
+ // will not be closed after the first response, the future logs will be sent via
+ // the stream.
+ GetLogs(*GetLogsRequest, PublicAPI_GetLogsServer) error
+}
+
+func RegisterPublicAPIServer(s *grpc.Server, srv PublicAPIServer) {
+ s.RegisterService(&_PublicAPI_serviceDesc, srv)
+}
+
+func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetInfoRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicAPIServer).GetInfo(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v1alpha.PublicAPI/GetInfo",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PublicAPIServer).GetInfo(ctx, req.(*GetInfoRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListPodsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicAPIServer).ListPods(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v1alpha.PublicAPI/ListPods",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PublicAPIServer).ListPods(ctx, req.(*ListPodsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(InspectPodRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicAPIServer).InspectPod(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v1alpha.PublicAPI/InspectPod",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PublicAPIServer).InspectPod(ctx, req.(*InspectPodRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListImagesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicAPIServer).ListImages(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v1alpha.PublicAPI/ListImages",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PublicAPIServer).ListImages(ctx, req.(*ListImagesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(InspectImageRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicAPIServer).InspectImage(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v1alpha.PublicAPI/InspectImage",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PublicAPIServer).InspectImage(ctx, req.(*InspectImageRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PublicAPI_ListenEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(ListenEventsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(PublicAPIServer).ListenEvents(m, &publicAPIListenEventsServer{stream})
+}
+
+type PublicAPI_ListenEventsServer interface {
+ Send(*ListenEventsResponse) error
+ grpc.ServerStream
+}
+
+type publicAPIListenEventsServer struct {
+ grpc.ServerStream
+}
+
+func (x *publicAPIListenEventsServer) Send(m *ListenEventsResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _PublicAPI_GetLogs_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(GetLogsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(PublicAPIServer).GetLogs(m, &publicAPIGetLogsServer{stream})
+}
+
+type PublicAPI_GetLogsServer interface {
+ Send(*GetLogsResponse) error
+ grpc.ServerStream
+}
+
+type publicAPIGetLogsServer struct {
+ grpc.ServerStream
+}
+
+func (x *publicAPIGetLogsServer) Send(m *GetLogsResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+var _PublicAPI_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v1alpha.PublicAPI",
+ HandlerType: (*PublicAPIServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetInfo",
+ Handler: _PublicAPI_GetInfo_Handler,
+ },
+ {
+ MethodName: "ListPods",
+ Handler: _PublicAPI_ListPods_Handler,
+ },
+ {
+ MethodName: "InspectPod",
+ Handler: _PublicAPI_InspectPod_Handler,
+ },
+ {
+ MethodName: "ListImages",
+ Handler: _PublicAPI_ListImages_Handler,
+ },
+ {
+ MethodName: "InspectImage",
+ Handler: _PublicAPI_InspectImage_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "ListenEvents",
+ Handler: _PublicAPI_ListenEvents_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "GetLogs",
+ Handler: _PublicAPI_GetLogs_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "api.proto",
+}
+
+func init() { proto.RegisterFile("api.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 1800 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x4f, 0x73, 0xdb, 0xc6,
+ 0x15, 0x37, 0xf8, 0x17, 0x78, 0xd4, 0x1f, 0x68, 0x2d, 0xd9, 0x30, 0x1d, 0x27, 0x0c, 0x12, 0xdb,
+ 0x8a, 0x26, 0xe3, 0x69, 0x14, 0xb7, 0xbd, 0x64, 0x32, 0xa5, 0x49, 0x48, 0xe5, 0x58, 0x12, 0x39,
+ 0x6b, 0xc6, 0x6d, 0xa6, 0x07, 0x0c, 0x44, 0x2c, 0x19, 0x8c, 0x40, 0x00, 0x05, 0x96, 0x72, 0xd4,
+ 0x63, 0x3f, 0x40, 0xbf, 0x41, 0x7b, 0xea, 0xb9, 0xd7, 0xcc, 0xf4, 0xde, 0x8f, 0xd2, 0x43, 0x3f,
+ 0x41, 0xaf, 0x9d, 0xfd, 0x03, 0x60, 0x09, 0x51, 0xae, 0x27, 0xb7, 0xdd, 0xf7, 0xfb, 0xe1, 0xed,
+ 0xfb, 0xb7, 0xef, 0x2d, 0x09, 0x86, 0x97, 0x04, 0x2f, 0x92, 0x34, 0xa6, 0x31, 0x6a, 0x5f, 0x7f,
+ 0xe5, 0x85, 0xc9, 0x0f, 0x9e, 0x3d, 0x86, 0xce, 0x68, 0xe9, 0x2d, 0xc8, 0x49, 0x9c, 0x2e, 0x3d,
+ 0x8a, 0x9e, 0x41, 0x83, 0xde, 0x24, 0xc4, 0xd2, 0x7a, 0xda, 0xe1, 0xce, 0x31, 0x7a, 0x21, 0x69,
+ 0x2f, 0x38, 0x67, 0x7a, 0x93, 0x10, 0xcc, 0x71, 0x64, 0x41, 0xfb, 0x9a, 0xa4, 0x59, 0x10, 0x47,
+ 0x56, 0xad, 0xa7, 0x1d, 0x1a, 0x38, 0xdf, 0xda, 0x3f, 0xd5, 0xa0, 0xc9, 0xd9, 0xe8, 0x97, 0xd0,
+ 0xb9, 0xf4, 0x32, 0xe2, 0xce, 0xb9, 0x6a, 0xae, 0xb2, 0x73, 0xbc, 0xbf, 0xae, 0x52, 0x1c, 0x8b,
+ 0x81, 0x11, 0xa5, 0x09, 0x3b, 0x50, 0x0b, 0x7c, 0xa9, 0xb5, 0x16, 0xf8, 0x08, 0x41, 0x23, 0xf2,
+ 0x96, 0xc4, 0xaa, 0x73, 0x09, 0x5f, 0xab, 0xc7, 0x37, 0xd6, 0x8e, 0x47, 0x5f, 0x80, 0x19, 0x2c,
+ 0x93, 0x38, 0xa5, 0x2e, 0x0d, 0x96, 0x24, 0xa3, 0xde, 0x32, 0xb1, 0x9a, 0x3d, 0xed, 0xb0, 0x8e,
+ 0x77, 0x85, 0x7c, 0x9a, 0x8b, 0x51, 0x17, 0xf4, 0xa5, 0x17, 0x05, 0x73, 0x92, 0x51, 0xab, 0xd5,
+ 0xd3, 0x0e, 0xb7, 0x70, 0xb1, 0x67, 0x87, 0x66, 0xc1, 0x9f, 0x88, 0xd5, 0xe6, 0x9f, 0xf2, 0x35,
+ 0xfa, 0x1a, 0x3a, 0x5e, 0x14, 0xc5, 0xd4, 0xa3, 0x41, 0x1c, 0x65, 0x96, 0xde, 0xab, 0x1f, 0x76,
+ 0x8e, 0xf7, 0x0a, 0x7f, 0x5e, 0x93, 0x9b, 0xb7, 0x5e, 0xb8, 0x22, 0x58, 0x65, 0xa1, 0x2f, 0xa0,
+ 0x15, 0x7a, 0x97, 0x24, 0xcc, 0x2c, 0xe3, 0x2e, 0xbe, 0x24, 0xd8, 0x0e, 0xb4, 0x2f, 0x08, 0x7d,
+ 0x17, 0xa7, 0x57, 0x85, 0xcf, 0x9a, 0xe2, 0x33, 0x82, 0x46, 0x90, 0x5c, 0xbf, 0x94, 0x91, 0xe1,
+ 0x6b, 0x29, 0xfb, 0x55, 0x1e, 0x1b, 0xb6, 0xb6, 0xff, 0xa9, 0x41, 0xbd, 0x9f, 0x24, 0x1b, 0x75,
+ 0x7c, 0x0e, 0xcd, 0x80, 0x85, 0x9d, 0x2b, 0xe9, 0x1c, 0xef, 0xac, 0x27, 0x03, 0x0b, 0x10, 0x3d,
+ 0x87, 0x66, 0x46, 0x3d, 0x2a, 0x42, 0xbe, 0xa3, 0x98, 0xdc, 0x4f, 0x92, 0x37, 0x0c, 0xc0, 0x02,
+ 0x47, 0x8f, 0xc1, 0x20, 0x3f, 0x06, 0xd4, 0x9d, 0xc5, 0x3e, 0xe1, 0x89, 0xd8, 0xc3, 0x3a, 0x13,
+ 0x0c, 0x62, 0xff, 0x56, 0xb8, 0x9a, 0x1f, 0x12, 0x2e, 0xfb, 0x3f, 0x35, 0xa8, 0x4f, 0x62, 0x5f,
+ 0x16, 0x81, 0x56, 0x14, 0x81, 0x09, 0xf5, 0x44, 0x56, 0xc5, 0x1e, 0x66, 0xcb, 0xbb, 0x8d, 0x9c,
+ 0xc4, 0xfe, 0x9a, 0x91, 0x3d, 0x68, 0x78, 0x49, 0x92, 0x59, 0x0d, 0x6e, 0xc0, 0x96, 0xea, 0x0c,
+ 0xe6, 0x08, 0xfa, 0x12, 0xf4, 0x48, 0x04, 0x3e, 0x37, 0xd3, 0x2c, 0x58, 0x32, 0x23, 0xb8, 0x60,
+ 0xbc, 0xb7, 0x6c, 0x2a, 0x3e, 0xb7, 0x3f, 0xa8, 0x44, 0x1e, 0x40, 0x6b, 0xb6, 0x48, 0xe3, 0x55,
+ 0x62, 0xe9, 0xdc, 0x5f, 0xb9, 0x43, 0x4f, 0x00, 0x66, 0x29, 0xf1, 0x28, 0xf1, 0x5d, 0x8f, 0x5a,
+ 0x06, 0xaf, 0x44, 0x43, 0x4a, 0xfa, 0x94, 0xc1, 0x19, 0xf5, 0x52, 0x09, 0x83, 0x80, 0xa5, 0xa4,
+ 0x4f, 0x51, 0x0f, 0xb6, 0x16, 0x33, 0x77, 0xe9, 0xa5, 0x57, 0x82, 0xd0, 0xe1, 0x04, 0x58, 0xcc,
+ 0xce, 0xb9, 0xa8, 0x4f, 0xed, 0x63, 0xd0, 0x73, 0x83, 0x58, 0x7c, 0x5f, 0x93, 0x1b, 0x19, 0x70,
+ 0xb6, 0x44, 0xfb, 0xd0, 0xbc, 0x66, 0x90, 0xac, 0x37, 0xb1, 0xb1, 0xff, 0x56, 0x03, 0x63, 0x12,
+ 0xfb, 0x27, 0x41, 0x48, 0x49, 0xca, 0xbe, 0x0a, 0xfc, 0xcc, 0xd2, 0x7a, 0x75, 0xf6, 0x55, 0xe0,
+ 0xf3, 0x72, 0xe7, 0x51, 0xcf, 0xac, 0x5a, 0xaf, 0xbe, 0x39, 0x2d, 0x92, 0xc0, 0x8a, 0xc7, 0x4b,
+ 0x12, 0x97, 0xd5, 0x65, 0x66, 0xd5, 0xb9, 0x0a, 0xdd, 0x4b, 0x92, 0x0b, 0xb6, 0x67, 0x20, 0xaf,
+ 0x45, 0x97, 0xe9, 0x6f, 0x08, 0x90, 0x0b, 0x46, 0x7e, 0x86, 0x3e, 0x83, 0x6d, 0x99, 0x0d, 0xf9,
+ 0x75, 0x93, 0x13, 0xb6, 0xa4, 0x50, 0x68, 0xa8, 0xa4, 0xa2, 0xf5, 0x41, 0xa9, 0xb0, 0xa0, 0x2d,
+ 0x82, 0x2f, 0x72, 0x67, 0xe0, 0x7c, 0x8b, 0x9e, 0xc1, 0x6e, 0x12, 0xfb, 0x6e, 0xb6, 0xba, 0x74,
+ 0x73, 0x86, 0xce, 0x19, 0xdb, 0x49, 0xec, 0xbf, 0x59, 0x5d, 0x0e, 0x84, 0xd0, 0xfe, 0x57, 0x2d,
+ 0x6f, 0xa8, 0x77, 0x85, 0xa8, 0x0b, 0x7a, 0x92, 0x92, 0x79, 0xf0, 0xa3, 0x0c, 0x92, 0x81, 0x8b,
+ 0x3d, 0xcb, 0x29, 0x6f, 0x99, 0x6a, 0x50, 0x0c, 0x26, 0x11, 0x3e, 0x75, 0x41, 0xbf, 0x22, 0x37,
+ 0xef, 0xe2, 0xb4, 0x0c, 0x4a, 0xbe, 0x57, 0x1a, 0x4d, 0xf3, 0xff, 0x34, 0x1a, 0xf4, 0x14, 0x76,
+ 0x44, 0x2f, 0x64, 0x95, 0x31, 0xa7, 0x24, 0xe5, 0x75, 0x5c, 0xc7, 0xdb, 0xb9, 0xb4, 0xcf, 0x84,
+ 0xe8, 0x39, 0xec, 0x16, 0xb4, 0x4b, 0x32, 0x8f, 0xd3, 0xbc, 0x1d, 0x16, 0x5f, 0xbf, 0xe2, 0xd2,
+ 0x9f, 0xd7, 0x18, 0x9f, 0x00, 0xcc, 0x57, 0x61, 0x28, 0x5d, 0x35, 0x84, 0xab, 0x4c, 0xc2, 0x5d,
+ 0xb5, 0xff, 0xab, 0x41, 0xe7, 0x34, 0x8c, 0x2f, 0xbd, 0xf0, 0x24, 0xf4, 0x16, 0x19, 0x8b, 0xa3,
+ 0x1f, 0xa4, 0x79, 0x81, 0xfa, 0x41, 0x8a, 0x8e, 0x60, 0x2f, 0xbb, 0xc9, 0x28, 0x59, 0xba, 0xb3,
+ 0x38, 0x9a, 0x07, 0x0b, 0x97, 0xe1, 0xa2, 0x58, 0x77, 0x05, 0x30, 0xe0, 0xf2, 0x61, 0x90, 0xa2,
+ 0x43, 0x30, 0xc3, 0x78, 0xe6, 0x85, 0x2a, 0x55, 0xf4, 0xcc, 0x1d, 0x2e, 0x2f, 0x99, 0xcf, 0x60,
+ 0x77, 0x95, 0x91, 0x54, 0x25, 0x8a, 0x09, 0xb3, 0xcd, 0xc4, 0x25, 0x8f, 0xc5, 0x30, 0xca, 0xc8,
+ 0x6c, 0x95, 0x12, 0x77, 0xce, 0x2c, 0xe4, 0x53, 0xc6, 0xc0, 0xdb, 0xb9, 0x54, 0x98, 0xfd, 0x15,
+ 0x1c, 0xd0, 0x74, 0x95, 0x51, 0xf7, 0x8a, 0xdc, 0x64, 0xee, 0x3c, 0x8d, 0x97, 0xee, 0x0f, 0x94,
+ 0x26, 0x19, 0x8f, 0xb8, 0x8e, 0x11, 0x07, 0x5f, 0x93, 0x9b, 0xec, 0x24, 0x8d, 0x97, 0xbf, 0x65,
+ 0x88, 0xfd, 0x77, 0x0d, 0x1a, 0xa3, 0x68, 0x1e, 0xa3, 0x4f, 0xa0, 0x93, 0x5e, 0x51, 0x37, 0x1f,
+ 0x74, 0xc2, 0x75, 0x48, 0xaf, 0xe8, 0x5b, 0x39, 0xeb, 0x3e, 0x85, 0x2d, 0x2f, 0x49, 0x66, 0xee,
+ 0xfa, 0x24, 0xee, 0x30, 0x59, 0x4e, 0xf9, 0x04, 0x3a, 0x5e, 0x12, 0x14, 0x0c, 0xe1, 0x33, 0x78,
+ 0x49, 0x90, 0x13, 0x7e, 0x0d, 0x5b, 0x0b, 0x1e, 0x66, 0xe9, 0x45, 0xa3, 0x32, 0xa5, 0x95, 0x1c,
+ 0xe0, 0xce, 0xa2, 0xdc, 0xd8, 0x7f, 0xd1, 0xa0, 0xe9, 0x5c, 0x93, 0xe8, 0xee, 0x37, 0x03, 0x47,
+ 0x95, 0x37, 0xc3, 0x86, 0xc1, 0xce, 0x02, 0x92, 0x0f, 0x2f, 0xb6, 0x66, 0x32, 0x36, 0xb7, 0xb9,
+ 0x19, 0x75, 0xcc, 0xd7, 0xe8, 0x29, 0x34, 0x7c, 0x8f, 0x7a, 0x77, 0xd7, 0x35, 0x87, 0xed, 0xbf,
+ 0x6a, 0xd0, 0xe1, 0x47, 0xca, 0x9b, 0x77, 0x08, 0x4d, 0x76, 0xac, 0xb8, 0x7b, 0x9b, 0xed, 0x12,
+ 0x84, 0xfc, 0x8e, 0xd6, 0xca, 0x3b, 0xba, 0x0f, 0x4d, 0xf5, 0x0a, 0x8a, 0x0d, 0xef, 0xb8, 0x41,
+ 0x34, 0x23, 0xae, 0x62, 0xa2, 0xc1, 0x25, 0xec, 0x51, 0xc1, 0xe0, 0x55, 0x44, 0x83, 0x50, 0xc0,
+ 0xe2, 0xd1, 0x61, 0x70, 0x09, 0x83, 0x6d, 0x13, 0x76, 0x4e, 0x09, 0x65, 0x99, 0xc5, 0xe4, 0x8f,
+ 0x2b, 0x92, 0x51, 0xfb, 0x25, 0xec, 0x16, 0x92, 0x2c, 0x89, 0xa3, 0x8c, 0xa0, 0x4f, 0xa1, 0x11,
+ 0x44, 0xf3, 0x58, 0x3e, 0x96, 0xb6, 0xcb, 0xf9, 0xcc, 0x48, 0x1c, 0xb2, 0x7f, 0x07, 0xbb, 0x67,
+ 0x41, 0x46, 0x27, 0xb1, 0x9f, 0x49, 0x45, 0xe8, 0x4b, 0x68, 0xcf, 0xb9, 0xd3, 0xc2, 0xd9, 0x8e,
+ 0xe2, 0x6c, 0xd1, 0xac, 0x71, 0x4e, 0x61, 0xf3, 0xc6, 0x27, 0xd4, 0x0b, 0x42, 0x9e, 0x0b, 0x1d,
+ 0xcb, 0x9d, 0xfd, 0x12, 0xcc, 0x52, 0xb1, 0xb4, 0xa7, 0x07, 0x8d, 0x24, 0xf6, 0x73, 0xb5, 0x5b,
+ 0xaa, 0x5a, 0xcc, 0x11, 0xfb, 0x33, 0xd8, 0x1b, 0x45, 0x59, 0x42, 0x66, 0xec, 0xc3, 0xdc, 0xa0,
+ 0xca, 0xf8, 0xb6, 0x5f, 0x02, 0x52, 0x49, 0x52, 0xf9, 0xc7, 0x50, 0x4f, 0x62, 0x5f, 0xfa, 0xba,
+ 0xae, 0x9b, 0x01, 0xf6, 0x1f, 0x60, 0x8f, 0x19, 0xc4, 0xdb, 0x69, 0xe1, 0xeb, 0x8b, 0xaa, 0xaf,
+ 0xd5, 0x17, 0xe5, 0x07, 0x7a, 0xfb, 0x0d, 0x20, 0x55, 0xb9, 0x34, 0xe9, 0x19, 0xb4, 0xf8, 0x98,
+ 0xc9, 0x95, 0x57, 0x5f, 0x48, 0x12, 0xb5, 0x9f, 0xc2, 0x7d, 0xe9, 0x90, 0x90, 0xdf, 0xe1, 0xf7,
+ 0x37, 0xb0, 0xbf, 0x4e, 0x93, 0xc7, 0x14, 0xef, 0x30, 0xed, 0x3d, 0xef, 0x30, 0x7b, 0x00, 0xf7,
+ 0x99, 0x89, 0x24, 0xe2, 0x15, 0xab, 0x64, 0xbb, 0x25, 0x9c, 0xbb, 0xf5, 0xa4, 0x56, 0xca, 0x1f,
+ 0x4b, 0x8e, 0xfd, 0x2d, 0xec, 0xaf, 0x2b, 0x29, 0x3d, 0x25, 0x5c, 0x72, 0xcb, 0x53, 0x4e, 0xc4,
+ 0x12, 0xb5, 0xff, 0xa1, 0xf1, 0xba, 0x3d, 0x8b, 0x17, 0x85, 0x01, 0x07, 0xd0, 0x62, 0xb3, 0xb0,
+ 0xf0, 0xb4, 0x99, 0xc4, 0xfe, 0xc8, 0x47, 0x8f, 0x40, 0xcf, 0x07, 0x7a, 0xfe, 0xa3, 0x40, 0xce,
+ 0x73, 0x76, 0x9f, 0xc2, 0x20, 0xe2, 0xf7, 0x49, 0x3b, 0x6c, 0x62, 0xb1, 0x61, 0xa9, 0x99, 0xc7,
+ 0x61, 0x18, 0xbf, 0xe3, 0x77, 0x49, 0xc7, 0x72, 0x57, 0xb9, 0x67, 0xcd, 0xf7, 0xdf, 0xb3, 0x56,
+ 0xf5, 0x9e, 0x3d, 0xe7, 0xb7, 0x4a, 0xd8, 0x2b, 0x7d, 0x2d, 0x8e, 0x17, 0x63, 0x58, 0x6c, 0x8e,
+ 0x08, 0x18, 0xc5, 0xcf, 0x1a, 0x64, 0xc1, 0xfe, 0xe8, 0xbc, 0x7f, 0xea, 0xb8, 0xd3, 0xef, 0x27,
+ 0x8e, 0xfb, 0xdd, 0xc5, 0xd0, 0x39, 0x19, 0x5d, 0x38, 0x43, 0xf3, 0x1e, 0xba, 0x0f, 0xbb, 0x0a,
+ 0xd2, 0x9f, 0x4c, 0x06, 0xa6, 0x86, 0x0e, 0x60, 0x4f, 0x11, 0x0e, 0xc7, 0x83, 0xd7, 0x0e, 0x36,
+ 0x6b, 0x08, 0xc1, 0x8e, 0x22, 0x1e, 0x0f, 0x46, 0x66, 0xfd, 0x68, 0x02, 0x7a, 0xfe, 0x6e, 0x46,
+ 0x0f, 0xe1, 0x7e, 0x7f, 0x32, 0x71, 0xdf, 0x4c, 0xfb, 0xd3, 0xf5, 0x43, 0x0e, 0x60, 0xaf, 0x04,
+ 0xf0, 0x77, 0x17, 0x17, 0xa3, 0x8b, 0x53, 0x53, 0x43, 0xfb, 0x60, 0x96, 0x62, 0xe7, 0xf7, 0xa3,
+ 0xa9, 0x33, 0x34, 0x6b, 0x47, 0xff, 0xd6, 0x40, 0xcf, 0x9f, 0x53, 0x4c, 0xe5, 0x64, 0x3c, 0xdc,
+ 0xa0, 0x72, 0x1f, 0xcc, 0x12, 0x70, 0xce, 0x5f, 0xe1, 0xef, 0xc7, 0xa6, 0xb6, 0x4e, 0x9f, 0x60,
+ 0x67, 0xd2, 0xc7, 0xec, 0xa8, 0x1a, 0x7a, 0x00, 0xa8, 0x0a, 0x38, 0x43, 0xb3, 0xce, 0x2c, 0x2b,
+ 0xe5, 0xb9, 0x65, 0x0d, 0xf4, 0x04, 0x1e, 0x95, 0xe2, 0xfe, 0xab, 0x31, 0x9e, 0x3a, 0xc3, 0xfc,
+ 0x33, 0xb3, 0x59, 0x39, 0x5c, 0x18, 0xde, 0x5a, 0x3f, 0x63, 0xe8, 0x9c, 0x39, 0x53, 0xa6, 0xac,
+ 0xbd, 0x7e, 0xc6, 0x69, 0x1f, 0xbf, 0xea, 0x9f, 0x3a, 0xa6, 0x7e, 0xf4, 0x53, 0x0d, 0x8c, 0xa2,
+ 0x59, 0xb3, 0x0c, 0x39, 0x6f, 0x9d, 0x8b, 0xe9, 0xed, 0x0c, 0x3d, 0x86, 0x87, 0x0a, 0xc2, 0x34,
+ 0x15, 0xf6, 0x6b, 0xc8, 0x86, 0x8f, 0x37, 0x83, 0xb9, 0xd5, 0x66, 0x0d, 0x75, 0xe1, 0x41, 0x85,
+ 0xf3, 0x66, 0xda, 0xe7, 0x58, 0x1d, 0x3d, 0x82, 0x83, 0x0a, 0x26, 0xdd, 0x69, 0xa0, 0xcf, 0xa1,
+ 0x57, 0x81, 0xa4, 0xed, 0xee, 0x60, 0x7c, 0x76, 0xe6, 0x0c, 0x18, 0xab, 0x59, 0x51, 0x2e, 0xd3,
+ 0x89, 0x45, 0x40, 0xd6, 0x95, 0x33, 0x4c, 0x2a, 0x6f, 0xb3, 0x00, 0x2b, 0x90, 0xa8, 0xaa, 0xd1,
+ 0xf9, 0x44, 0x98, 0xac, 0xa3, 0x8f, 0xc0, 0xba, 0x05, 0x63, 0xe7, 0x7c, 0xfc, 0xd6, 0x19, 0x9a,
+ 0xc6, 0xf1, 0x9f, 0x1b, 0x60, 0x4c, 0x56, 0x97, 0x61, 0x30, 0xeb, 0x4f, 0x46, 0xe8, 0x5b, 0x68,
+ 0xcb, 0x39, 0x83, 0x1e, 0x96, 0x83, 0x7d, 0x6d, 0x16, 0x75, 0xad, 0xdb, 0x80, 0xb8, 0x3c, 0xf6,
+ 0x3d, 0xd4, 0x07, 0x3d, 0x1f, 0x0c, 0xa8, 0xe4, 0x55, 0x86, 0x50, 0xf7, 0xd1, 0x06, 0xa4, 0x50,
+ 0x71, 0x0a, 0x50, 0x0e, 0x00, 0xd4, 0x55, 0xe6, 0x5a, 0x65, 0x74, 0x74, 0x1f, 0x6f, 0xc4, 0x54,
+ 0x45, 0x65, 0xdb, 0x56, 0x14, 0xdd, 0x1a, 0x14, 0x8a, 0xa2, 0xdb, 0x7d, 0xde, 0xbe, 0x87, 0xce,
+ 0x61, 0x4b, 0x6d, 0xcd, 0xe8, 0xa3, 0xea, 0xb9, 0x6a, 0x63, 0xef, 0x3e, 0xb9, 0x03, 0x2d, 0xd4,
+ 0x8d, 0x61, 0x4b, 0x6d, 0xb3, 0x8a, 0xba, 0x0d, 0x2d, 0x5c, 0x51, 0xb7, 0xa9, 0x37, 0xdb, 0xf7,
+ 0x7e, 0xa1, 0xa1, 0xdf, 0xf0, 0xa4, 0xb1, 0x36, 0xb6, 0x9e, 0x34, 0xa5, 0x11, 0xaf, 0x27, 0x4d,
+ 0xed, 0x78, 0x4c, 0xc3, 0x65, 0x8b, 0xff, 0xd5, 0xf3, 0xf5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff,
+ 0x12, 0x12, 0x68, 0x59, 0xf7, 0x11, 0x00, 0x00,
+}
diff --git a/vendor/github.com/rkt/rkt/api/v1alpha/api.proto b/vendor/github.com/rkt/rkt/api/v1alpha/api.proto
new file mode 100644
index 000000000..51f817c12
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/api/v1alpha/api.proto
@@ -0,0 +1,487 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// *************************************************** //
+// ************ WARNING - HERE BE DRAGONS ************ //
+// //
+// The API defined here is proposed, experimental, //
+// and (for now) subject to change at any time. //
+// //
+// If you think you want to use it, or for any other //
+// queries, contact <rkt-dev@googlegroups.com> //
+// or file an issue on github.com/rkt/rkt //
+// //
+// *************************************************** //
+// ****************** END WARNING ******************** //
+
+syntax = "proto3";
+
+package v1alpha;
+
+// ImageType defines the supported image type.
+enum ImageType {
+ IMAGE_TYPE_UNDEFINED = 0;
+ IMAGE_TYPE_APPC = 1;
+ IMAGE_TYPE_DOCKER = 2;
+ IMAGE_TYPE_OCI = 3;
+}
+
+// ImageFormat defines the format of the image.
+message ImageFormat {
+ // Type of the image, required.
+ ImageType type = 1;
+
+ // Version of the image format, required.
+ string version = 2;
+}
+
+// Image describes the image's information.
+message Image {
+ // Base format of the image, required. This indicates the original format
+ // for the image as nowadays all the image formats will be transformed to
+ // ACI.
+ ImageFormat base_format = 1;
+
+ // ID of the image, a string that can be used to uniquely identify the image,
+ // e.g. sha512 hash of the ACIs, required.
+ string id = 2;
+
+ // Name of the image in the image manifest, e.g. 'coreos.com/etcd', optional.
+ string name = 3;
+
+ // Version of the image, e.g. 'latest', '2.0.10', optional.
+ string version = 4;
+
+ // Timestamp of when the image is imported, it is the seconds since epoch, optional.
+ int64 import_timestamp = 5;
+
+ // JSON-encoded byte array that represents the image manifest, optional.
+ bytes manifest = 6;
+
+ // Size is the size in bytes of this image in the store.
+ int64 size = 7;
+
+ // Annotations on this image.
+ repeated KeyValue annotations = 8;
+
+ // Labels of this image.
+ repeated KeyValue labels = 9;
+}
+
+// Network describes the network information of a pod.
+message Network {
+ // Name of the network that a pod belongs to, required.
+ string name = 1;
+
+ // Pod's IPv4 address within the network, optional if IPv6 address is given.
+ string ipv4 = 2;
+
+ // Pod's IPv6 address within the network, optional if IPv4 address is given.
+ string ipv6 = 3;
+}
+
+// AppState defines the possible states of the app.
+enum AppState {
+ APP_STATE_UNDEFINED = 0;
+ APP_STATE_RUNNING = 1;
+ APP_STATE_EXITED = 2;
+}
+
+// App describes the information of an app that's running in a pod.
+message App {
+ // Name of the app, required.
+ string name = 1;
+
+ // Image used by the app, required. However, this may only contain the image id
+ // if it is returned by ListPods().
+ Image image = 2;
+
+ // State of the app. optional, non-empty only if it's returned by InspectPod().
+ AppState state = 3;
+
+ // Exit code of the app. optional, only valid if it's returned by InspectPod() and
+ // the app has already exited.
+ sint32 exit_code = 4;
+
+ // Annotations for this app.
+ repeated KeyValue annotations = 5;
+}
+
+// PodState defines the possible states of the pod.
+// See https://github.com/rkt/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed
+// explanation of each state.
+enum PodState {
+ POD_STATE_UNDEFINED = 0;
+
+ // States before the pod is running.
+ POD_STATE_EMBRYO = 1; // Pod is created, ready to entering 'preparing' state.
+ POD_STATE_PREPARING = 2; // Pod is being prepared. On success it will become 'prepared', otherwise it will become 'aborted prepared'.
+ POD_STATE_PREPARED = 3; // Pod has been successfully prepared, ready to enter 'running' state. it can also enter 'deleting' if it's garbage collected before running.
+
+ // State that indicates the pod is running.
+ POD_STATE_RUNNING = 4; // Pod is running, when it exits, it will become 'exited'.
+
+ // States that indicates the pod is exited, and will never run.
+ POD_STATE_ABORTED_PREPARE = 5; // Pod failed to prepare, it will only be garbage collected and will never run again.
+ POD_STATE_EXITED = 6; // Pod has exited, it now can be garbage collected.
+ POD_STATE_DELETING = 7; // Pod is being garbage collected, after that it will enter 'garbage' state.
+ POD_STATE_GARBAGE = 8; // Pod is marked as garbage collected, it no longer exists on the machine.
+}
+
+// Pod describes a pod's information.
+// If a pod is in Embryo, Preparing, AbortedPrepare state,
+// only id and state will be returned.
+//
+// If a pod is in other states, the pod manifest and
+// apps will be returned when 'detailed' is true in the request.
+//
+// A valid pid of the stage1 process of the pod will be returned
+// if the pod is Running has run once.
+//
+// Networks are only returned when a pod is in Running.
+message Pod {
+ // ID of the pod, in the form of a UUID.
+ string id = 1;
+
+ // PID of the stage1 process of the pod.
+ sint32 pid = 2;
+
+ // State of the pod.
+ PodState state = 3;
+
+ // List of apps in the pod.
+ repeated App apps = 4;
+
+ // Network information of the pod.
+ // Note that a pod can be in multiple networks.
+ repeated Network networks = 5;
+
+ // JSON-encoded byte array that represents the pod manifest of the pod.
+ bytes manifest = 6;
+
+ // Annotations on this pod.
+ repeated KeyValue annotations = 7;
+
+ // Cgroup of the pod, empty if the pod is not running.
+ string cgroup = 8;
+
+ // Timestamp of when the pod is created, nanoseconds since epoch.
+ // Zero if the pod is not created.
+ int64 created_at = 9;
+
+ // Timestamp of when the pod is started, nanoseconds since epoch.
+ // Zero if the pod is not started.
+ int64 started_at = 10;
+
+ // Timestamp of when the pod is moved to exited-garbage/garbage,
+ // in nanoseconds since epoch.
+ // Zero if the pod is not moved to exited-garbage/garbage yet.
+ int64 gc_marked_at = 11;
+}
+
+message KeyValue {
+ // Key part of the key-value pair.
+ string Key = 1;
+ // Value part of the key-value pair.
+ string value = 2;
+}
+
+// PodFilter defines the condition that the returned pods need to satisfy in ListPods().
+// The conditions are combined by 'AND', and different filters are combined by 'OR'.
+message PodFilter {
+ // If not empty, the pods that have any of the ids will be returned.
+ repeated string ids = 1;
+
+ // If not empty, the pods that have any of the states will be returned.
+ repeated PodState states = 2;
+
+ // If not empty, the pods that all of the apps will be returned.
+ repeated string app_names = 3;
+
+ // If not empty, the pods that have all of the images(in the apps) will be returned
+ repeated string image_ids = 4;
+
+ // If not empty, the pods that are in all of the networks will be returned.
+ repeated string network_names = 5;
+
+ // If not empty, the pods that have all of the annotations will be returned.
+ repeated KeyValue annotations = 6;
+
+ // If not empty, the pods whose cgroup are listed will be returned.
+ repeated string cgroups = 7;
+
+ // If not empty, the pods whose these cgroup belong to will be returned.
+ // i.e. the pod's cgroup is a prefix of the specified cgroup
+ repeated string pod_sub_cgroups = 8;
+}
+
+// ImageFilter defines the condition that the returned images need to satisfy in ListImages().
+// The conditions are combined by 'AND', and different filters are combined by 'OR'.
+message ImageFilter {
+ // If not empty, the images that have any of the ids will be returned.
+ repeated string ids = 1;
+
+ // if not empty, the images that have any of the prefixes in the name will be returned.
+ repeated string prefixes = 2;
+
+ // If not empty, the images that have any of the base names will be returned.
+ // For example, both 'coreos.com/etcd' and 'k8s.io/etcd' will be returned if 'etcd' is included,
+ // however 'k8s.io/etcd-backup' will not be returned.
+ repeated string base_names = 3;
+
+ // If not empty, the images that have any of the keywords in the name will be returned.
+ // For example, both 'kubernetes-etcd', 'etcd:latest' will be returned if 'etcd' is included,
+ repeated string keywords = 4;
+
+ // If not empty, the images that have all of the labels will be returned.
+ repeated KeyValue labels = 5;
+
+ // If set, the images that are imported after this timestamp will be returned.
+ int64 imported_after = 6;
+
+ // If set, the images that are imported before this timestamp will be returned.
+ int64 imported_before = 7;
+
+ // If not empty, the images that have all of the annotations will be returned.
+ repeated KeyValue annotations = 8;
+
+ // If not empty, the images that have any of the exact full names will be returned.
+ repeated string full_names = 9;
+}
+
+// GlobalFlags describes the flags that passed to rkt api service when it is launched.
+message GlobalFlags {
+ // Data directory.
+ string dir = 1;
+
+ // System configuration directory.
+ string system_config_dir = 2;
+
+ // Local configuration directory.
+ string local_config_dir = 3;
+
+ // User configuration directory.
+ string user_config_dir = 4;
+
+ // Insecure flags configurates what security features to disable.
+ string insecure_flags = 5;
+
+ // Whether to automatically trust gpg keys fetched from https
+ bool trust_keys_from_https = 6;
+}
+
+// Info describes the information of rkt on the machine.
+message Info {
+ // Version of rkt, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
+ string rkt_version = 1;
+
+ // Version of appc, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
+ string appc_version = 2;
+
+ // Latest version of the api that's supported by the service, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
+ string api_version = 3;
+
+ // The global flags that passed to the rkt api service when it's launched.
+ GlobalFlags global_flags = 4;
+}
+
+// EventType defines the type of the events that will be received via ListenEvents().
+enum EventType {
+ EVENT_TYPE_UNDEFINED = 0;
+
+ // Pod events.
+ EVENT_TYPE_POD_PREPARED = 1;
+ EVENT_TYPE_POD_PREPARE_ABORTED = 2;
+ EVENT_TYPE_POD_STARTED = 3;
+ EVENT_TYPE_POD_EXITED = 4;
+ EVENT_TYPE_POD_GARBAGE_COLLECTED = 5;
+
+ // App events.
+ EVENT_TYPE_APP_STARTED = 6;
+ EVENT_TYPE_APP_EXITED = 7; // (XXX)yifan: Maybe also return exit code in the event object?
+
+ // Image events.
+ EVENT_TYPE_IMAGE_IMPORTED = 8;
+ EVENT_TYPE_IMAGE_REMOVED = 9;
+}
+
+// Event describes the events that will be received via ListenEvents().
+message Event {
+ // Type of the event, required.
+ EventType type = 1;
+
+ // ID of the subject that causes the event, required.
+ // If the event is a pod or app event, the id is the pod's uuid.
+ // If the event is an image event, the id is the image's id.
+ string id = 2;
+
+ // Name of the subject that causes the event, required.
+ // If the event is a pod event, the name is the pod's name.
+ // If the event is an app event, the name is the app's name.
+ // If the event is an image event, the name is the image's name.
+ string from = 3;
+
+ // Timestamp of when the event happens, it is the seconds since epoch, required.
+ int64 time = 4;
+
+ // Data of the event, in the form of key-value pairs, optional.
+ repeated KeyValue data = 5;
+}
+
+// EventFilter defines the condition that the returned events needs to satisfy in ListImages().
+// The condition are combined by 'AND'.
+message EventFilter {
+ // If not empty, then only returns the events that have the listed types.
+ repeated EventType types = 1;
+
+ // If not empty, then only returns the events whose 'id' is included in the listed ids.
+ repeated string ids = 2;
+
+ // If not empty, then only returns the events whose 'from' is included in the listed names.
+ repeated string names = 3;
+
+ // If set, then only returns the events after this timestamp.
+ // If the server starts after since_time, then only the events happened after the start of the server will be returned.
+ // If since_time is a future timestamp, then no events will be returned until that time.
+ int64 since_time = 4;
+
+ // If set, then only returns the events before this timestamp.
+ // If it is a future timestamp, then the event stream will be closed at that moment.
+ int64 until_time = 5;
+}
+
+// Request for GetInfo().
+message GetInfoRequest {}
+
+// Response for GetInfo().
+message GetInfoResponse {
+ Info info = 1; // Required.
+}
+
+// Request for ListPods().
+message ListPodsRequest {
+ repeated PodFilter filters = 1; // Optional.
+ bool detail = 2; // Optional.
+}
+
+// Response for ListPods().
+message ListPodsResponse {
+ repeated Pod pods = 1; // Required.
+}
+
+// Request for InspectPod().
+message InspectPodRequest {
+ // ID of the pod which we are querying status for, required.
+ string id = 1;
+}
+
+// Response for InspectPod().
+message InspectPodResponse {
+ Pod pod = 1; // Required.
+}
+
+// Request for ListImages().
+message ListImagesRequest {
+ repeated ImageFilter filters = 1; // Optional.
+ bool detail = 2; // Optional.
+}
+
+// Response for ListImages().
+message ListImagesResponse {
+ repeated Image images = 1; // Required.
+}
+
+// Request for InspectImage().
+message InspectImageRequest {
+ string id = 1; // Required.
+}
+
+// Response for InspectImage().
+message InspectImageResponse {
+ Image image = 1; // Required.
+}
+
+// Request for ListenEvents().
+message ListenEventsRequest {
+ EventFilter filter = 1; // Optional.
+}
+
+// Response for ListenEvents().
+message ListenEventsResponse {
+ // Aggregate multiple events to reduce round trips, optional as the response can contain no events.
+ repeated Event events = 1;
+}
+
+// Request for GetLogs().
+message GetLogsRequest {
+ // ID of the pod which we will get logs from, required.
+ string pod_id = 1;
+
+ // Name of the app within the pod which we will get logs
+ // from, optional. If not set, then the logs of all the
+ // apps within the pod will be returned.
+ string app_name = 2;
+
+ // Number of most recent lines to return, optional.
+ int32 lines = 3;
+
+ // If true, then a response stream will not be closed,
+ // and new log response will be sent via the stream, default is false.
+ bool follow = 4;
+
+ // If set, then only the logs after the timestamp will
+ // be returned, optional.
+ int64 since_time = 5;
+
+ // If set, then only the logs before the timestamp will
+ // be returned, optional.
+ int64 until_time = 6;
+}
+
+// Response for GetLogs().
+message GetLogsResponse {
+ // List of the log lines that returned, optional as the response can contain no logs.
+ repeated string lines = 1;
+}
+
+// PublicAPI defines the read-only APIs that will be supported.
+// These will be handled over TCP sockets.
+service PublicAPI {
+ // GetInfo gets the rkt's information on the machine.
+ rpc GetInfo (GetInfoRequest) returns (GetInfoResponse) {}
+
+ // ListPods lists rkt pods on the machine.
+ rpc ListPods (ListPodsRequest) returns (ListPodsResponse) {}
+
+ // InspectPod gets detailed pod information of the specified pod.
+ rpc InspectPod (InspectPodRequest) returns (InspectPodResponse) {}
+
+ // ListImages lists the images on the machine.
+ rpc ListImages (ListImagesRequest) returns (ListImagesResponse) {}
+
+ // InspectImage gets the detailed image information of the specified image.
+ rpc InspectImage (InspectImageRequest) returns (InspectImageResponse) {}
+
+ // ListenEvents listens for the events, it will return a response stream
+ // that will contain event objects.
+ rpc ListenEvents (ListenEventsRequest) returns (stream ListenEventsResponse) {}
+
+ // GetLogs gets the logs for a pod, if the app is also specified, then only the logs
+ // of the app will be returned.
+ //
+ // If 'follow' in the 'GetLogsRequest' is set to 'true', then the response stream
+ // will not be closed after the first response, the future logs will be sent via
+ // the stream.
+ rpc GetLogs(GetLogsRequest) returns (stream GetLogsResponse) {}
+}
diff --git a/vendor/github.com/rkt/rkt/api/v1alpha/client_example.go b/vendor/github.com/rkt/rkt/api/v1alpha/client_example.go
new file mode 100644
index 000000000..07ac0597d
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/api/v1alpha/client_example.go
@@ -0,0 +1,154 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build ignore
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/rkt/rkt/api/v1alpha"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+func getLogsWithoutFollow(c v1alpha.PublicAPIClient, p *v1alpha.Pod) {
+ if len(p.Apps) == 0 {
+ fmt.Printf("Pod %q has no apps\n", p.Id)
+ return
+ }
+
+ logsResp, err := c.GetLogs(context.Background(), &v1alpha.GetLogsRequest{
+ PodId: p.Id,
+ Follow: false,
+ AppName: p.Apps[0].Name,
+ SinceTime: time.Now().Add(-time.Second * 5).Unix(),
+ Lines: 10,
+ })
+
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(254)
+ }
+
+ logsRecvResp, err := logsResp.Recv()
+
+ if err == io.EOF {
+ return
+ }
+
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ for _, l := range logsRecvResp.Lines {
+ fmt.Println(l)
+ }
+}
+
+func getLogsWithFollow(c v1alpha.PublicAPIClient, p *v1alpha.Pod) {
+ if len(p.Apps) == 0 {
+ fmt.Printf("Pod %q has no apps\n", p.Id)
+ return
+ }
+
+ logsResp, err := c.GetLogs(context.Background(), &v1alpha.GetLogsRequest{
+ PodId: p.Id,
+ Follow: true,
+ AppName: p.Apps[0].Name,
+ })
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(254)
+ }
+
+ for {
+ logsRecvResp, err := logsResp.Recv()
+ if err == io.EOF {
+ return
+ }
+
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ for _, l := range logsRecvResp.Lines {
+ fmt.Println(l)
+ }
+ }
+}
+
+func main() {
+ followFlag := flag.Bool("follow", false, "enable 'follow' option on GetLogs")
+ flag.Parse()
+
+ conn, err := grpc.Dial("localhost:15441", grpc.WithInsecure())
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(254)
+ }
+ c := v1alpha.NewPublicAPIClient(conn)
+ defer conn.Close()
+
+ // List pods.
+ podResp, err := c.ListPods(context.Background(), &v1alpha.ListPodsRequest{
+ // Specify the request: Fetch and print only running pods and their details.
+ Detail: true,
+ Filters: []*v1alpha.PodFilter{
+ {
+ States: []v1alpha.PodState{v1alpha.PodState_POD_STATE_RUNNING},
+ },
+ },
+ })
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(254)
+ }
+
+ for _, p := range podResp.Pods {
+ if *followFlag {
+ fmt.Printf("Pod %q is running. Following logs:\n", p.Id)
+ getLogsWithFollow(c, p)
+ } else {
+ fmt.Printf("Pod %q is running.\n", p.Id)
+ getLogsWithoutFollow(c, p)
+ }
+ }
+
+ // List images.
+ imgResp, err := c.ListImages(context.Background(), &v1alpha.ListImagesRequest{
+ // In this request, we fetch the details of images whose names are prefixed with "coreos.com".
+ Detail: true,
+ Filters: []*v1alpha.ImageFilter{
+ {
+ Prefixes: []string{"coreos.com"},
+ },
+ },
+ })
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(254)
+ }
+
+ for _, im := range imgResp.Images {
+ fmt.Printf("Found image %q\n", im.Name)
+ }
+}
diff --git a/vendor/github.com/rkt/rkt/pkg/acl/LICENSE.MIT b/vendor/github.com/rkt/rkt/pkg/acl/LICENSE.MIT
new file mode 100644
index 000000000..ed21c8b02
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/pkg/acl/LICENSE.MIT
@@ -0,0 +1,22 @@
+This project includes code derived from the MIT licensed naegelejd/go-acl
+project. Here's a copy of its license:
+
+ Copyright (c) 2015 Joseph Naegele
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/bash.manifest b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/bash.manifest
new file mode 120000
index 000000000..5eac6843c
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/bash.manifest
@@ -0,0 +1 @@
+../../usr_from_coreos/manifest-amd64-usr.d/bash.manifest \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/journald.manifest b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/journald.manifest
new file mode 120000
index 000000000..cdc5df962
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/journald.manifest
@@ -0,0 +1 @@
+../../usr_from_coreos/manifest-amd64-usr.d/journald.manifest \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/systemd.manifest b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/systemd.manifest
new file mode 120000
index 000000000..d9346f64a
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-amd64-usr.d/systemd.manifest
@@ -0,0 +1 @@
+../../usr_from_coreos/manifest-amd64-usr.d/systemd.manifest \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/bash.manifest b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/bash.manifest
new file mode 120000
index 000000000..c3b0ee0f4
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/bash.manifest
@@ -0,0 +1 @@
+../../usr_from_coreos/manifest-arm64-usr.d/bash.manifest \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/journald.manifest b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/journald.manifest
new file mode 120000
index 000000000..8306b4af1
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/journald.manifest
@@ -0,0 +1 @@
+../../usr_from_coreos/manifest-arm64-usr.d/journald.manifest \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/systemd.manifest b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/systemd.manifest
new file mode 120000
index 000000000..4e1057657
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/stage1/usr_from_kvm/manifest-arm64-usr.d/systemd.manifest
@@ -0,0 +1 @@
+../../usr_from_coreos/manifest-arm64-usr.d/systemd.manifest \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/store/imagestore/LICENSE.BSD b/vendor/github.com/rkt/rkt/store/imagestore/LICENSE.BSD
new file mode 100644
index 000000000..6b4b6efb9
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/store/imagestore/LICENSE.BSD
@@ -0,0 +1,30 @@
+This project includes code derived from the BSD licensed golang/go project.
+Here's a copy of its license:
+
+ Copyright (c) 2012 The Go Authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/rkt/rkt/tests/cloudinit/fedora-rawhide.cloudinit b/vendor/github.com/rkt/rkt/tests/cloudinit/fedora-rawhide.cloudinit
new file mode 120000
index 000000000..322c58a77
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/tests/cloudinit/fedora-rawhide.cloudinit
@@ -0,0 +1 @@
+fedora-25.cloudinit \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1510.cloudinit b/vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1510.cloudinit
new file mode 120000
index 000000000..4ad53ad73
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1510.cloudinit
@@ -0,0 +1 @@
+ubuntu.cloudinit \ No newline at end of file
diff --git a/vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1604.cloudinit b/vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1604.cloudinit
new file mode 120000
index 000000000..4ad53ad73
--- /dev/null
+++ b/vendor/github.com/rkt/rkt/tests/cloudinit/ubuntu-1604.cloudinit
@@ -0,0 +1 @@
+ubuntu.cloudinit \ No newline at end of file
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.gitignore b/vendor/github.com/seccomp/libseccomp-golang/.gitignore
new file mode 100644
index 000000000..b4826968b
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/.gitignore
@@ -0,0 +1,4 @@
+*~
+*.swp
+*.orig
+tags
diff --git a/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG b/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
new file mode 100644
index 000000000..823aeb7f8
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
@@ -0,0 +1,6 @@
+libseccomp-golang: Releases
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+* Version 0.9.0 - January 5, 2017
+- Initial tagged release
diff --git a/vendor/github.com/seccomp/libseccomp-golang/LICENSE b/vendor/github.com/seccomp/libseccomp-golang/LICENSE
new file mode 100644
index 000000000..81cf60de2
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015 Matthew Heon <mheon@redhat.com>
+Copyright (c) 2015 Paul Moore <pmoore@redhat.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile
new file mode 100644
index 000000000..1ff4cc898
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile
@@ -0,0 +1,26 @@
+# libseccomp-golang
+
+.PHONY: all check check-build check-syntax fix-syntax vet test lint
+
+all: check-build
+
+check: vet test
+
+check-build:
+ go build
+
+check-syntax:
+ gofmt -d .
+
+fix-syntax:
+ gofmt -w .
+
+vet:
+ go vet -v
+
+test:
+ go test -v
+
+lint:
+ @$(if $(shell which golint),true,$(error "install golint and include it in your PATH"))
+ golint -set_exit_status
diff --git a/vendor/github.com/seccomp/libseccomp-golang/README b/vendor/github.com/seccomp/libseccomp-golang/README
new file mode 100644
index 000000000..66839a466
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/README
@@ -0,0 +1,51 @@
+libseccomp-golang: Go Language Bindings for the libseccomp Project
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+https://github.com/seccomp/libseccomp
+
+The libseccomp library provides an easy to use, platform independent, interface
+to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
+designed to abstract away the underlying BPF based syscall filter language and
+present a more conventional function-call based filtering interface that should
+be familiar to, and easily adopted by, application developers.
+
+The libseccomp-golang library provides a Go based interface to the libseccomp
+library.
+
+* Online Resources
+
+The library source repository currently lives on GitHub at the following URLs:
+
+ -> https://github.com/seccomp/libseccomp-golang
+ -> https://github.com/seccomp/libseccomp
+
+The project mailing list is currently hosted on Google Groups at the URL below,
+please note that a Google account is not required to subscribe to the mailing
+list.
+
+ -> https://groups.google.com/d/forum/libseccomp
+
+Documentation is also available at:
+
+ -> https://godoc.org/github.com/seccomp/libseccomp-golang
+
+* Installing the package
+
+The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4;
+earlier versions may yield unpredictable results. If you meet these
+requirements you can install this package using the command below:
+
+ $ go get github.com/seccomp/libseccomp-golang
+
+* Testing the Library
+
+A number of tests and lint related recipes are provided in the Makefile, if
+you want to run the standard regression tests, you can excute the following:
+
+ $ make check
+
+In order to execute the 'make lint' recipe the 'golint' tool is needed, it
+can be found at:
+
+ -> https://github.com/golang/lint
+
diff --git a/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES b/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES
new file mode 100644
index 000000000..744e5cd64
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES
@@ -0,0 +1,112 @@
+How to Submit Patches to the libseccomp Project
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+This document is intended to act as a guide to help you contribute to the
+libseccomp project. It is not perfect, and there will always be exceptions
+to the rules described here, but by following the instructions below you
+should have a much easier time getting your work merged with the upstream
+project.
+
+* Test Your Code
+
+There are two possible tests you can run to verify your code. The first test
+is used to check the formatting and coding style of your changes, you can run
+the test with the following command:
+
+ # make check-syntax
+
+... if there are any problems with your changes a diff/patch will be shown
+which indicates the problems and how to fix them.
+
+The second possible test is used to ensure the sanity of your code changes
+and to test these changes against the included tests. You can run the test
+with the following command:
+
+ # make check
+
+... if there are any faults or errors they will be displayed.
+
+* Generate the Patch(es)
+
+Depending on how you decided to work with the libseccomp code base and what
+tools you are using there are different ways to generate your patch(es).
+However, regardless of what tools you use, you should always generate your
+patches using the "unified" diff/patch format and the patches should always
+apply to the libseccomp source tree using the following command from the top
+directory of the libseccomp sources:
+
+ # patch -p1 < changes.patch
+
+If you are not using git, stacked git (stgit), or some other tool which can
+generate patch files for you automatically, you may find the following command
+helpful in generating patches, where "libseccomp.orig/" is the unmodified
+source code directory and "libseccomp/" is the source code directory with your
+changes:
+
+ # diff -purN libseccomp-golang.orig/ libseccomp-golang/
+
+When in doubt please generate your patch and try applying it to an unmodified
+copy of the libseccomp sources; if it fails for you, it will fail for the rest
+of us.
+
+* Explain Your Work
+
+At the top of every patch you should include a description of the problem you
+are trying to solve, how you solved it, and why you chose the solution you
+implemented. If you are submitting a bug fix, it is also incredibly helpful
+if you can describe/include a reproducer for the problem in the description as
+well as instructions on how to test for the bug and verify that it has been
+fixed.
+
+* Sign Your Work
+
+The sign-off is a simple line at the end of the patch description, which
+certifies that you wrote it or otherwise have the right to pass it on as an
+open-source patch. The "Developer's Certificate of Origin" pledge is taken
+from the Linux Kernel and the rules are pretty simple:
+
+ Developer's Certificate of Origin 1.1
+
+ By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+ (c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
+... then you just add a line to the bottom of your patch description, with
+your real name, saying:
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+
+* Email Your Patch(es)
+
+Finally, you will need to email your patches to the mailing list so they can
+be reviewed and potentially merged into the main libseccomp-golang repository.
+When sending patches to the mailing list it is important to send your email in
+text form, no HTML mail please, and ensure that your email client does not
+mangle your patches. It should be possible to save your raw email to disk and
+apply it directly to the libseccomp source code; if that fails then you likely
+have a problem with your email client. When in doubt try a test first by
+sending yourself an email with your patch and attempting to apply the emailed
+patch to the libseccomp-golang repository; if it fails for you, it will fail
+for the rest of us trying to test your patch and include it in the main
+libseccomp-golang repository.
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
new file mode 100644
index 000000000..53bcb024d
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -0,0 +1,902 @@
+// +build linux
+
+// Public API specification for libseccomp Go bindings
+// Contains public API for the bindings
+
+// Package seccomp provides bindings for libseccomp, a library wrapping the Linux
+// seccomp syscall. Seccomp enables an application to restrict system call use
+// for itself and its children.
+package seccomp
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// C wrapping code
+
+// #cgo pkg-config: libseccomp
+// #include <stdlib.h>
+// #include <seccomp.h>
+import "C"
+
+// Exported types
+
+// VersionError denotes that the system libseccomp version is incompatible
+// with this package.
+type VersionError struct {
+ message string
+ minimum string
+}
+
+func (e VersionError) Error() string {
+ format := "Libseccomp version too low: "
+ if e.message != "" {
+ format += e.message + ": "
+ }
+ format += "minimum supported is "
+ if e.minimum != "" {
+ format += e.minimum + ": "
+ } else {
+ format += "2.1.0: "
+ }
+ format += "detected %d.%d.%d"
+ return fmt.Sprintf(format, verMajor, verMinor, verMicro)
+}
+
// ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a
// per-architecture basis.
type ScmpArch uint

// ScmpAction represents an action to be taken on a filter rule match in
// libseccomp
type ScmpAction uint

// ScmpCompareOp represents a comparison operator which can be used in a filter
// rule
type ScmpCompareOp uint

// ScmpCondition represents a rule in a libseccomp filter context
type ScmpCondition struct {
	Argument uint `json:"argument,omitempty"` // zero-indexed syscall argument (0-5) to inspect
	Op ScmpCompareOp `json:"operator,omitempty"` // comparison applied to the argument's value
	Operand1 uint64 `json:"operand_one,omitempty"` // primary comparison operand
	Operand2 uint64 `json:"operand_two,omitempty"` // secondary operand; only meaningful for two-operand comparisons (e.g. masked equality)
}

// ScmpSyscall represents a Linux System Call
type ScmpSyscall int32
+
+// Exported Constants
+
+const (
+ // Valid architectures recognized by libseccomp
+ // ARM64 and all MIPS architectures are unsupported by versions of the
+ // library before v2.2 and will return errors if used
+
+ // ArchInvalid is a placeholder to ensure uninitialized ScmpArch
+ // variables are invalid
+ ArchInvalid ScmpArch = iota
+ // ArchNative is the native architecture of the kernel
+ ArchNative ScmpArch = iota
+ // ArchX86 represents 32-bit x86 syscalls
+ ArchX86 ScmpArch = iota
+ // ArchAMD64 represents 64-bit x86-64 syscalls
+ ArchAMD64 ScmpArch = iota
+ // ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers)
+ ArchX32 ScmpArch = iota
+ // ArchARM represents 32-bit ARM syscalls
+ ArchARM ScmpArch = iota
+ // ArchARM64 represents 64-bit ARM syscalls
+ ArchARM64 ScmpArch = iota
+ // ArchMIPS represents 32-bit MIPS syscalls
+ ArchMIPS ScmpArch = iota
+ // ArchMIPS64 represents 64-bit MIPS syscalls
+ ArchMIPS64 ScmpArch = iota
+ // ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers)
+ ArchMIPS64N32 ScmpArch = iota
+ // ArchMIPSEL represents 32-bit MIPS syscalls (little endian)
+ ArchMIPSEL ScmpArch = iota
+ // ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian)
+ ArchMIPSEL64 ScmpArch = iota
+ // ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian,
+ // 32-bit pointers)
+ ArchMIPSEL64N32 ScmpArch = iota
+ // ArchPPC represents 32-bit POWERPC syscalls
+ ArchPPC ScmpArch = iota
+ // ArchPPC64 represents 64-bit POWER syscalls (big endian)
+ ArchPPC64 ScmpArch = iota
+ // ArchPPC64LE represents 64-bit POWER syscalls (little endian)
+ ArchPPC64LE ScmpArch = iota
+ // ArchS390 represents 31-bit System z/390 syscalls
+ ArchS390 ScmpArch = iota
+ // ArchS390X represents 64-bit System z/390 syscalls
+ ArchS390X ScmpArch = iota
+)
+
+const (
+ // Supported actions on filter match
+
+ // ActInvalid is a placeholder to ensure uninitialized ScmpAction
+ // variables are invalid
+ ActInvalid ScmpAction = iota
+ // ActKill kills the process
+ ActKill ScmpAction = iota
+ // ActTrap throws SIGSYS
+ ActTrap ScmpAction = iota
+ // ActErrno causes the syscall to return a negative error code. This
+ // code can be set with the SetReturnCode method
+ ActErrno ScmpAction = iota
+ // ActTrace causes the syscall to notify tracing processes with the
+ // given error code. This code can be set with the SetReturnCode method
+ ActTrace ScmpAction = iota
+ // ActAllow permits the syscall to continue execution
+ ActAllow ScmpAction = iota
+)
+
+const (
+ // These are comparison operators used in conditional seccomp rules
+ // They are used to compare the value of a single argument of a syscall
+ // against a user-defined constant
+
+ // CompareInvalid is a placeholder to ensure uninitialized ScmpCompareOp
+ // variables are invalid
+ CompareInvalid ScmpCompareOp = iota
+ // CompareNotEqual returns true if the argument is not equal to the
+ // given value
+ CompareNotEqual ScmpCompareOp = iota
+ // CompareLess returns true if the argument is less than the given value
+ CompareLess ScmpCompareOp = iota
+ // CompareLessOrEqual returns true if the argument is less than or equal
+ // to the given value
+ CompareLessOrEqual ScmpCompareOp = iota
+ // CompareEqual returns true if the argument is equal to the given value
+ CompareEqual ScmpCompareOp = iota
+ // CompareGreaterEqual returns true if the argument is greater than or
+ // equal to the given value
+ CompareGreaterEqual ScmpCompareOp = iota
+ // CompareGreater returns true if the argument is greater than the given
+ // value
+ CompareGreater ScmpCompareOp = iota
+ // CompareMaskedEqual returns true if the argument is equal to the given
+ // value, when masked (bitwise &) against the second given value
+ CompareMaskedEqual ScmpCompareOp = iota
+)
+
+// Helpers for types
+
+// GetArchFromString returns an ScmpArch constant from a string representing an
+// architecture
+func GetArchFromString(arch string) (ScmpArch, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return ArchInvalid, err
+ }
+
+ switch strings.ToLower(arch) {
+ case "x86":
+ return ArchX86, nil
+ case "amd64", "x86-64", "x86_64", "x64":
+ return ArchAMD64, nil
+ case "x32":
+ return ArchX32, nil
+ case "arm":
+ return ArchARM, nil
+ case "arm64", "aarch64":
+ return ArchARM64, nil
+ case "mips":
+ return ArchMIPS, nil
+ case "mips64":
+ return ArchMIPS64, nil
+ case "mips64n32":
+ return ArchMIPS64N32, nil
+ case "mipsel":
+ return ArchMIPSEL, nil
+ case "mipsel64":
+ return ArchMIPSEL64, nil
+ case "mipsel64n32":
+ return ArchMIPSEL64N32, nil
+ case "ppc":
+ return ArchPPC, nil
+ case "ppc64":
+ return ArchPPC64, nil
+ case "ppc64le":
+ return ArchPPC64LE, nil
+ case "s390":
+ return ArchS390, nil
+ case "s390x":
+ return ArchS390X, nil
+ default:
+ return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %s", arch)
+ }
+}
+
+// String returns a string representation of an architecture constant
+func (a ScmpArch) String() string {
+ switch a {
+ case ArchX86:
+ return "x86"
+ case ArchAMD64:
+ return "amd64"
+ case ArchX32:
+ return "x32"
+ case ArchARM:
+ return "arm"
+ case ArchARM64:
+ return "arm64"
+ case ArchMIPS:
+ return "mips"
+ case ArchMIPS64:
+ return "mips64"
+ case ArchMIPS64N32:
+ return "mips64n32"
+ case ArchMIPSEL:
+ return "mipsel"
+ case ArchMIPSEL64:
+ return "mipsel64"
+ case ArchMIPSEL64N32:
+ return "mipsel64n32"
+ case ArchPPC:
+ return "ppc"
+ case ArchPPC64:
+ return "ppc64"
+ case ArchPPC64LE:
+ return "ppc64le"
+ case ArchS390:
+ return "s390"
+ case ArchS390X:
+ return "s390x"
+ case ArchNative:
+ return "native"
+ case ArchInvalid:
+ return "Invalid architecture"
+ default:
+ return "Unknown architecture"
+ }
+}
+
+// String returns a string representation of a comparison operator constant
+func (a ScmpCompareOp) String() string {
+ switch a {
+ case CompareNotEqual:
+ return "Not equal"
+ case CompareLess:
+ return "Less than"
+ case CompareLessOrEqual:
+ return "Less than or equal to"
+ case CompareEqual:
+ return "Equal"
+ case CompareGreaterEqual:
+ return "Greater than or equal to"
+ case CompareGreater:
+ return "Greater than"
+ case CompareMaskedEqual:
+ return "Masked equality"
+ case CompareInvalid:
+ return "Invalid comparison operator"
+ default:
+ return "Unrecognized comparison operator"
+ }
+}
+
+// String returns a string representation of a seccomp match action
+func (a ScmpAction) String() string {
+ switch a & 0xFFFF {
+ case ActKill:
+ return "Action: Kill Process"
+ case ActTrap:
+ return "Action: Send SIGSYS"
+ case ActErrno:
+ return fmt.Sprintf("Action: Return error code %d", (a >> 16))
+ case ActTrace:
+ return fmt.Sprintf("Action: Notify tracing processes with code %d",
+ (a >> 16))
+ case ActAllow:
+ return "Action: Allow system call"
+ default:
+ return "Unrecognized Action"
+ }
+}
+
+// SetReturnCode adds a return code to a supporting ScmpAction, clearing any
+// existing code Only valid on ActErrno and ActTrace. Takes no action otherwise.
+// Accepts 16-bit return code as argument.
+// Returns a valid ScmpAction of the original type with the new error code set.
+func (a ScmpAction) SetReturnCode(code int16) ScmpAction {
+ aTmp := a & 0x0000FFFF
+ if aTmp == ActErrno || aTmp == ActTrace {
+ return (aTmp | (ScmpAction(code)&0xFFFF)<<16)
+ }
+ return a
+}
+
+// GetReturnCode returns the return code of an ScmpAction
+func (a ScmpAction) GetReturnCode() int16 {
+ return int16(a >> 16)
+}
+
+// General utility functions
+
+// GetLibraryVersion returns the version of the library the bindings are built
+// against.
+// The version is formatted as follows: Major.Minor.Micro
+func GetLibraryVersion() (major, minor, micro int) {
+ return verMajor, verMinor, verMicro
+}
+
+// Syscall functions
+
// GetName retrieves the name of a syscall from its number.
// Acts on any syscall number.
// Returns either a string containing the name of the syscall, or an error.
func (s ScmpSyscall) GetName() (string, error) {
	// Thin wrapper: resolve against the kernel's native architecture.
	return s.GetNameByArch(ArchNative)
}
+
+// GetNameByArch retrieves the name of a syscall from its number for a given
+// architecture.
+// Acts on any syscall number.
+// Accepts a valid architecture constant.
+// Returns either a string containing the name of the syscall, or an error.
+// if the syscall is unrecognized or an issue occurred.
+func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
+ if err := sanitizeArch(arch); err != nil {
+ return "", err
+ }
+
+ cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
+ if cString == nil {
+ return "", fmt.Errorf("could not resolve syscall name")
+ }
+ defer C.free(unsafe.Pointer(cString))
+
+ finalStr := C.GoString(cString)
+ return finalStr, nil
+}
+
+// GetSyscallFromName returns the number of a syscall by name on the kernel's
+// native architecture.
+// Accepts a string containing the name of a syscall.
+// Returns the number of the syscall, or an error if no syscall with that name
+// was found.
+func GetSyscallFromName(name string) (ScmpSyscall, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return 0, err
+ }
+
+ cString := C.CString(name)
+ defer C.free(unsafe.Pointer(cString))
+
+ result := C.seccomp_syscall_resolve_name(cString)
+ if result == scmpError {
+ return 0, fmt.Errorf("could not resolve name to syscall")
+ }
+
+ return ScmpSyscall(result), nil
+}
+
+// GetSyscallFromNameByArch returns the number of a syscall by name for a given
+// architecture's ABI.
+// Accepts the name of a syscall and an architecture constant.
+// Returns the number of the syscall, or an error if an invalid architecture is
+// passed or a syscall with that name was not found.
+func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return 0, err
+ }
+ if err := sanitizeArch(arch); err != nil {
+ return 0, err
+ }
+
+ cString := C.CString(name)
+ defer C.free(unsafe.Pointer(cString))
+
+ result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
+ if result == scmpError {
+ return 0, fmt.Errorf("could not resolve name to syscall")
+ }
+
+ return ScmpSyscall(result), nil
+}
+
+// MakeCondition creates and returns a new condition to attach to a filter rule.
+// Associated rules will only match if this condition is true.
+// Accepts the number the argument we are checking, and a comparison operator
+// and value to compare to.
+// The rule will match if argument $arg (zero-indexed) of the syscall is
+// $COMPARE_OP the provided comparison value.
+// Some comparison operators accept two values. Masked equals, for example,
+// will mask $arg of the syscall with the second value provided (via bitwise
+// AND) and then compare against the first value provided.
+// For example, in the less than or equal case, if the syscall argument was
+// 0 and the value provided was 1, the condition would match, as 0 is less
+// than or equal to 1.
+// Return either an error on bad argument or a valid ScmpCondition struct.
+func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCondition, error) {
+ var condStruct ScmpCondition
+
+ if err := ensureSupportedVersion(); err != nil {
+ return condStruct, err
+ }
+
+ if comparison == CompareInvalid {
+ return condStruct, fmt.Errorf("invalid comparison operator")
+ } else if arg > 5 {
+ return condStruct, fmt.Errorf("syscalls only have up to 6 arguments")
+ } else if len(values) > 2 {
+ return condStruct, fmt.Errorf("conditions can have at most 2 arguments")
+ } else if len(values) == 0 {
+ return condStruct, fmt.Errorf("must provide at least one value to compare against")
+ }
+
+ condStruct.Argument = arg
+ condStruct.Op = comparison
+ condStruct.Operand1 = values[0]
+ if len(values) == 2 {
+ condStruct.Operand2 = values[1]
+ } else {
+ condStruct.Operand2 = 0 // Unused
+ }
+
+ return condStruct, nil
+}
+
+// Utility Functions
+
+// GetNativeArch returns architecture token representing the native kernel
+// architecture
+func GetNativeArch() (ScmpArch, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return ArchInvalid, err
+ }
+
+ arch := C.seccomp_arch_native()
+
+ return archFromNative(arch)
+}
+
+// Public Filter API
+
// ScmpFilter represents a filter context in libseccomp.
// A filter context is initially empty. Rules can be added to it, and it can
// then be loaded into the kernel.
type ScmpFilter struct {
	filterCtx C.scmp_filter_ctx // opaque libseccomp context handle
	valid bool // false once the context has been released or merged away
	lock sync.Mutex // guards filterCtx and valid across goroutines
}
+
+// NewFilter creates and returns a new filter context.
+// Accepts a default action to be taken for syscalls which match no rules in
+// the filter.
+// Returns a reference to a valid filter context, or nil and an error if the
+// filter context could not be created or an invalid default action was given.
+func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return nil, err
+ }
+
+ if err := sanitizeAction(defaultAction); err != nil {
+ return nil, err
+ }
+
+ fPtr := C.seccomp_init(defaultAction.toNative())
+ if fPtr == nil {
+ return nil, fmt.Errorf("could not create filter")
+ }
+
+ filter := new(ScmpFilter)
+ filter.filterCtx = fPtr
+ filter.valid = true
+ runtime.SetFinalizer(filter, filterFinalizer)
+
+ return filter, nil
+}
+
+// IsValid determines whether a filter context is valid to use.
+// Some operations (Release and Merge) render filter contexts invalid and
+// consequently prevent further use.
+func (f *ScmpFilter) IsValid() bool {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ return f.valid
+}
+
+// Reset resets a filter context, removing all its existing state.
+// Accepts a new default action to be taken for syscalls which do not match.
+// Returns an error if the filter or action provided are invalid.
+func (f *ScmpFilter) Reset(defaultAction ScmpAction) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeAction(defaultAction); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ retCode := C.seccomp_reset(f.filterCtx, defaultAction.toNative())
+ if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Release releases a filter context, freeing its memory. Should be called after
+// loading into the kernel, when the filter is no longer needed.
+// After calling this function, the given filter is no longer valid and cannot
+// be used.
+// Release() will be invoked automatically when a filter context is garbage
+// collected, but can also be called manually to free memory.
+func (f *ScmpFilter) Release() {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return
+ }
+
+ f.valid = false
+ C.seccomp_release(f.filterCtx)
+}
+
+// Merge merges two filter contexts.
+// The source filter src will be released as part of the process, and will no
+// longer be usable or valid after this call.
+// To be merged, filters must NOT share any architectures, and all their
+// attributes (Default Action, Bad Arch Action, No New Privs and TSync bools)
+// must match.
+// The filter src will be merged into the filter this is called on.
+// The architectures of the src filter not present in the destination, and all
+// associated rules, will be added to the destination.
+// Returns an error if merging the filters failed.
+func (f *ScmpFilter) Merge(src *ScmpFilter) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ src.lock.Lock()
+ defer src.lock.Unlock()
+
+ if !src.valid || !f.valid {
+ return fmt.Errorf("one or more of the filter contexts is invalid or uninitialized")
+ }
+
+ // Merge the filters
+ retCode := C.seccomp_merge(f.filterCtx, src.filterCtx)
+ if syscall.Errno(-1*retCode) == syscall.EINVAL {
+ return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
+ } else if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ src.valid = false
+
+ return nil
+}
+
+// IsArchPresent checks if an architecture is present in a filter.
+// If a filter contains an architecture, it uses its default action for
+// syscalls which do not match rules in it, and its rules can match syscalls
+// for that ABI.
+// If a filter does not contain an architecture, all syscalls made to that
+// kernel ABI will fail with the filter's default Bad Architecture Action
+// (by default, killing the process).
+// Accepts an architecture constant.
+// Returns true if the architecture is present in the filter, false otherwise,
+// and an error on an invalid filter context, architecture constant, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) IsArchPresent(arch ScmpArch) (bool, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return false, err
+ } else if !f.valid {
+ return false, errBadFilter
+ }
+
+ retCode := C.seccomp_arch_exist(f.filterCtx, arch.toNative())
+ if syscall.Errno(-1*retCode) == syscall.EEXIST {
+ // -EEXIST is "arch not present"
+ return false, nil
+ } else if retCode != 0 {
+ return false, syscall.Errno(-1 * retCode)
+ }
+
+ return true, nil
+}
+
+// AddArch adds an architecture to the filter.
+// Accepts an architecture constant.
+// Returns an error on invalid filter context or architecture token, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) AddArch(arch ScmpArch) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ // Libseccomp returns -EEXIST if the specified architecture is already
+ // present. Succeed silently in this case, as it's not fatal, and the
+ // architecture is present already.
+ retCode := C.seccomp_arch_add(f.filterCtx, arch.toNative())
+ if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// RemoveArch removes an architecture from the filter.
+// Accepts an architecture constant.
+// Returns an error on invalid filter context or architecture token, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) RemoveArch(arch ScmpArch) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ // Similar to AddArch, -EEXIST is returned if the arch is not present
+ // Succeed silently in that case, this is not fatal and the architecture
+ // is not present in the filter after RemoveArch
+ retCode := C.seccomp_arch_remove(f.filterCtx, arch.toNative())
+ if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Load loads a filter context into the kernel.
+// Returns an error if the filter context is invalid or the syscall failed.
+func (f *ScmpFilter) Load() error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_load(f.filterCtx); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// GetDefaultAction returns the default action taken on a syscall which does not
+// match a rule in the filter, or an error if an issue was encountered
+// retrieving the value.
+func (f *ScmpFilter) GetDefaultAction() (ScmpAction, error) {
+ action, err := f.getFilterAttr(filterAttrActDefault)
+ if err != nil {
+ return 0x0, err
+ }
+
+ return actionFromNative(action)
+}
+
+// GetBadArchAction returns the default action taken on a syscall for an
+// architecture not in the filter, or an error if an issue was encountered
+// retrieving the value.
+func (f *ScmpFilter) GetBadArchAction() (ScmpAction, error) {
+ action, err := f.getFilterAttr(filterAttrActBadArch)
+ if err != nil {
+ return 0x0, err
+ }
+
+ return actionFromNative(action)
+}
+
+// GetNoNewPrivsBit returns the current state the No New Privileges bit will be set
+// to on the filter being loaded, or an error if an issue was encountered
+// retrieving the value.
+// The No New Privileges bit tells the kernel that new processes run with exec()
+// cannot gain more privileges than the process that ran exec().
+// For example, a process with No New Privileges set would be unable to exec
+// setuid/setgid executables.
+func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
+ noNewPrivs, err := f.getFilterAttr(filterAttrNNP)
+ if err != nil {
+ return false, err
+ }
+
+ if noNewPrivs == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// GetTsyncBit returns whether Thread Synchronization will be enabled on the
+// filter being loaded, or an error if an issue was encountered retrieving the
+// value.
+// Thread Sync ensures that all members of the thread group of the calling
+// process will share the same Seccomp filter set.
+// Tsync is a fairly recent addition to the Linux kernel and older kernels
+// lack support. If the running kernel does not support Tsync and it is
+// requested in a filter, Libseccomp will not enable TSync support and will
+// proceed as normal.
+// This function is unavailable before v2.2 of libseccomp and will return an
+// error.
+func (f *ScmpFilter) GetTsyncBit() (bool, error) {
+ tSync, err := f.getFilterAttr(filterAttrTsync)
+ if err != nil {
+ return false, err
+ }
+
+ if tSync == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
// SetBadArchAction sets the default action taken on a syscall for an
// architecture not in the filter, or an error if an issue was encountered
// setting the value.
func (f *ScmpFilter) SetBadArchAction(action ScmpAction) error {
	// Reject invalid actions before touching the attribute.
	// NOTE(review): locking/validity checks appear to be delegated to
	// setFilterAttr — confirm against its definition.
	if err := sanitizeAction(action); err != nil {
		return err
	}

	return f.setFilterAttr(filterAttrActBadArch, action.toNative())
}
+
+// SetNoNewPrivsBit sets the state of the No New Privileges bit, which will be
+// applied on filter load, or an error if an issue was encountered setting the
+// value.
+// Filters with No New Privileges set to 0 can only be loaded if the process
+// has the CAP_SYS_ADMIN capability.
+func (f *ScmpFilter) SetNoNewPrivsBit(state bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if state {
+ toSet = 0x1
+ }
+
+ return f.setFilterAttr(filterAttrNNP, toSet)
+}
+
+// SetTsync sets whether Thread Synchronization will be enabled on the filter
+// being loaded. Returns an error if setting Tsync failed, or the filter is
+// invalid.
+// Thread Sync ensures that all members of the thread group of the calling
+// process will share the same Seccomp filter set.
+// Tsync is a fairly recent addition to the Linux kernel and older kernels
+// lack support. If the running kernel does not support Tsync and it is
+// requested in a filter, Libseccomp will not enable TSync support and will
+// proceed as normal.
+// This function is unavailable before v2.2 of libseccomp and will return an
+// error.
+func (f *ScmpFilter) SetTsync(enable bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if enable {
+ toSet = 0x1
+ }
+
+ return f.setFilterAttr(filterAttrTsync, toSet)
+}
+
+// SetSyscallPriority sets a syscall's priority.
+// This provides a hint to the filter generator in libseccomp about the
+// importance of this syscall. High-priority syscalls are placed
+// first in the filter code, and incur less overhead (at the expense of
+// lower-priority syscalls).
+func (f *ScmpFilter) SetSyscallPriority(call ScmpSyscall, priority uint8) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_syscall_priority(f.filterCtx, C.int(call),
+ C.uint8_t(priority)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// AddRule adds a single rule for an unconditional action on a syscall.
// Accepts the number of the syscall and the action to be taken on the call
// being made.
// Returns an error if an issue was encountered adding the rule.
func (f *ScmpFilter) AddRule(call ScmpSyscall, action ScmpAction) error {
	// exact=false: libseccomp may adapt the rule per-architecture; no conditions.
	return f.addRuleGeneric(call, action, false, nil)
}

// AddRuleExact adds a single rule for an unconditional action on a syscall.
// Accepts the number of the syscall and the action to be taken on the call
// being made.
// No modifications will be made to the rule, and it will fail to add if it
// cannot be applied to the current architecture without modification.
// The rule will function exactly as described, but it may not function identically
// (or be able to be applied to) all architectures.
// Returns an error if an issue was encountered adding the rule.
func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error {
	// exact=true: the rule is added verbatim or not at all; no conditions.
	return f.addRuleGeneric(call, action, true, nil)
}

// AddRuleConditional adds a single rule for a conditional action on a syscall.
// Returns an error if an issue was encountered adding the rule.
// All conditions must match for the rule to match.
// There is a bug in library versions below v2.2.1 which can, in some cases,
// cause conditions to be lost when more than one are used. Consequently,
// AddRuleConditional is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
	// exact=false with caller-supplied conditions (ANDed together on match).
	return f.addRuleGeneric(call, action, false, conds)
}

// AddRuleConditionalExact adds a single rule for a conditional action on a
// syscall.
// No modifications will be made to the rule, and it will fail to add if it
// cannot be applied to the current architecture without modification.
// The rule will function exactly as described, but it may not function identically
// (or be able to be applied to) all architectures.
// Returns an error if an issue was encountered adding the rule.
// There is a bug in library versions below v2.2.1 which can, in some cases,
// cause conditions to be lost when more than one are used. Consequently,
// AddRuleConditionalExact is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
	// exact=true with caller-supplied conditions (ANDed together on match).
	return f.addRuleGeneric(call, action, true, conds)
}
+
+// ExportPFC output PFC-formatted, human-readable dump of a filter context's
+// rules to a file.
+// Accepts file to write to (must be open for writing).
+// Returns an error if writing to the file fails.
+func (f *ScmpFilter) ExportPFC(file *os.File) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ fd := file.Fd()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_export_pfc(f.filterCtx, C.int(fd)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// ExportBPF outputs Berkeley Packet Filter-formatted, kernel-readable dump of a
+// filter context's rules to a file.
+// Accepts file to write to (must be open for writing).
+// Returns an error if writing to the file fails.
+func (f *ScmpFilter) ExportBPF(file *os.File) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ fd := file.Fd()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_export_bpf(f.filterCtx, C.int(fd)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
new file mode 100644
index 000000000..b0caac91b
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -0,0 +1,514 @@
+// +build linux
+
+// Internal functions for libseccomp Go bindings
+// No exported functions
+
+package seccomp
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// Unexported C wrapping code - provides the C-Golang interface
+// Get the seccomp header in scope
+// Need stdlib.h for free() on cstrings
+
+// #cgo pkg-config: libseccomp
+/*
+#include <stdlib.h>
+#include <seccomp.h>
+
+#if SCMP_VER_MAJOR < 2
+#error Minimum supported version of Libseccomp is v2.1.0
+#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 1
+#error Minimum supported version of Libseccomp is v2.1.0
+#endif
+
+#define ARCH_BAD ~0
+
+const uint32_t C_ARCH_BAD = ARCH_BAD;
+
+#ifndef SCMP_ARCH_AARCH64
+#define SCMP_ARCH_AARCH64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS
+#define SCMP_ARCH_MIPS ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS64
+#define SCMP_ARCH_MIPS64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS64N32
+#define SCMP_ARCH_MIPS64N32 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL
+#define SCMP_ARCH_MIPSEL ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL64
+#define SCMP_ARCH_MIPSEL64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL64N32
+#define SCMP_ARCH_MIPSEL64N32 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PPC
+#define SCMP_ARCH_PPC ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PPC64
+#define SCMP_ARCH_PPC64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PPC64LE
+#define SCMP_ARCH_PPC64LE ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_S390
+#define SCMP_ARCH_S390 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_S390X
+#define SCMP_ARCH_S390X ARCH_BAD
+#endif
+
+const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE;
+const uint32_t C_ARCH_X86 = SCMP_ARCH_X86;
+const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64;
+const uint32_t C_ARCH_X32 = SCMP_ARCH_X32;
+const uint32_t C_ARCH_ARM = SCMP_ARCH_ARM;
+const uint32_t C_ARCH_AARCH64 = SCMP_ARCH_AARCH64;
+const uint32_t C_ARCH_MIPS = SCMP_ARCH_MIPS;
+const uint32_t C_ARCH_MIPS64 = SCMP_ARCH_MIPS64;
+const uint32_t C_ARCH_MIPS64N32 = SCMP_ARCH_MIPS64N32;
+const uint32_t C_ARCH_MIPSEL = SCMP_ARCH_MIPSEL;
+const uint32_t C_ARCH_MIPSEL64 = SCMP_ARCH_MIPSEL64;
+const uint32_t C_ARCH_MIPSEL64N32 = SCMP_ARCH_MIPSEL64N32;
+const uint32_t C_ARCH_PPC = SCMP_ARCH_PPC;
+const uint32_t C_ARCH_PPC64 = SCMP_ARCH_PPC64;
+const uint32_t C_ARCH_PPC64LE = SCMP_ARCH_PPC64LE;
+const uint32_t C_ARCH_S390 = SCMP_ARCH_S390;
+const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
+
+const uint32_t C_ACT_KILL = SCMP_ACT_KILL;
+const uint32_t C_ACT_TRAP = SCMP_ACT_TRAP;
+const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
+const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
+const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;
+
+// If TSync is not supported, make sure it doesn't map to a supported filter attribute
+// Don't worry about major version < 2, the minimum version checks should catch that case
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
+#define SCMP_FLTATR_CTL_TSYNC _SCMP_CMP_MIN
+#endif
+
+const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
+const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
+const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
+const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
+
+const int C_CMP_NE = (int)SCMP_CMP_NE;
+const int C_CMP_LT = (int)SCMP_CMP_LT;
+const int C_CMP_LE = (int)SCMP_CMP_LE;
+const int C_CMP_EQ = (int)SCMP_CMP_EQ;
+const int C_CMP_GE = (int)SCMP_CMP_GE;
+const int C_CMP_GT = (int)SCMP_CMP_GT;
+const int C_CMP_MASKED_EQ = (int)SCMP_CMP_MASKED_EQ;
+
+const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
+const int C_VERSION_MINOR = SCMP_VER_MINOR;
+const int C_VERSION_MICRO = SCMP_VER_MICRO;
+
+typedef struct scmp_arg_cmp* scmp_cast_t;
+
+// Wrapper to create an scmp_arg_cmp struct
+void*
+make_struct_arg_cmp(
+ unsigned int arg,
+ int compare,
+ uint64_t a,
+ uint64_t b
+ )
+{
+ struct scmp_arg_cmp *s = malloc(sizeof(struct scmp_arg_cmp));
+
+ s->arg = arg;
+ s->op = compare;
+ s->datum_a = a;
+ s->datum_b = b;
+
+ return s;
+}
+*/
+import "C"
+
+// Nonexported types
+type scmpFilterAttr uint32
+
+// Nonexported constants
+
+const (
+ filterAttrActDefault scmpFilterAttr = iota
+ filterAttrActBadArch scmpFilterAttr = iota
+ filterAttrNNP scmpFilterAttr = iota
+ filterAttrTsync scmpFilterAttr = iota
+)
+
+const (
+ // An error return from certain libseccomp functions
+ scmpError C.int = -1
+ // Comparison boundaries to check for architecture validity
+ archStart ScmpArch = ArchNative
+ archEnd ScmpArch = ArchS390X
+ // Comparison boundaries to check for action validity
+ actionStart ScmpAction = ActKill
+ actionEnd ScmpAction = ActAllow
+ // Comparison boundaries to check for comparison operator validity
+ compareOpStart ScmpCompareOp = CompareNotEqual
+ compareOpEnd ScmpCompareOp = CompareMaskedEqual
+)
+
+var (
+ // Error thrown on bad filter context
+ errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
+ // Constants representing library major, minor, and micro versions
+ verMajor = int(C.C_VERSION_MAJOR)
+ verMinor = int(C.C_VERSION_MINOR)
+ verMicro = int(C.C_VERSION_MICRO)
+)
+
+// Nonexported functions
+
+// Check if library version is greater than or equal to the given one
+func checkVersionAbove(major, minor, micro int) bool {
+ return (verMajor > major) ||
+ (verMajor == major && verMinor > minor) ||
+ (verMajor == major && verMinor == minor && verMicro >= micro)
+}
+
+// Ensure that the library is supported, i.e. >= 2.1.0.
+func ensureSupportedVersion() error {
+ if !checkVersionAbove(2, 1, 0) {
+ return VersionError{}
+ }
+ return nil
+}
+
+// Filter helpers
+
+// Filter finalizer - ensure that kernel context for filters is freed
+func filterFinalizer(f *ScmpFilter) {
+ f.Release()
+}
+
+// Get a raw filter attribute
+func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return 0x0, errBadFilter
+ }
+
+ if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
+ return 0x0, VersionError{
+ message: "thread synchronization attribute is not supported",
+ minimum: "2.2.0",
+ }
+ }
+
+ var attribute C.uint32_t
+
+ retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
+ if retCode != 0 {
+ return 0x0, syscall.Errno(-1 * retCode)
+ }
+
+ return attribute, nil
+}
+
+// Set a raw filter attribute
+func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
+ return VersionError{
+ message: "thread synchronization attribute is not supported",
+ minimum: "2.2.0",
+ }
+ }
+
+ retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
+ if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// DOES NOT LOCK OR CHECK VALIDITY
+// Assumes caller has already done this
+// Wrapper for seccomp_rule_add_... functions
+func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, cond C.scmp_cast_t) error {
+ var length C.uint
+ if cond != nil {
+ length = 1
+ } else {
+ length = 0
+ }
+
+ var retCode C.int
+ if exact {
+ retCode = C.seccomp_rule_add_exact_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
+ } else {
+ retCode = C.seccomp_rule_add_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
+ }
+
+ if syscall.Errno(-1*retCode) == syscall.EFAULT {
+ return fmt.Errorf("unrecognized syscall")
+ } else if syscall.Errno(-1*retCode) == syscall.EPERM {
+ return fmt.Errorf("requested action matches default action of filter")
+ } else if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Generic add function for filter rules
+func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact bool, conds []ScmpCondition) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if len(conds) == 0 {
+ if err := f.addRuleWrapper(call, action, exact, nil); err != nil {
+ return err
+ }
+ } else {
+ // We don't support conditional filtering in library version v2.1
+ if !checkVersionAbove(2, 2, 1) {
+ return VersionError{
+ message: "conditional filtering is not supported",
+ minimum: "2.2.1",
+ }
+ }
+
+ for _, cond := range conds {
+ cmpStruct := C.make_struct_arg_cmp(C.uint(cond.Argument), cond.Op.toNative(), C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
+ defer C.free(cmpStruct)
+
+ if err := f.addRuleWrapper(call, action, exact, C.scmp_cast_t(cmpStruct)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// Generic Helpers
+
+// Helper - Sanitize Arch token input
+func sanitizeArch(in ScmpArch) error {
+ if in < archStart || in > archEnd {
+ return fmt.Errorf("unrecognized architecture")
+ }
+
+ if in.toNative() == C.C_ARCH_BAD {
+ return fmt.Errorf("architecture is not supported on this version of the library")
+ }
+
+ return nil
+}
+
+func sanitizeAction(in ScmpAction) error {
+ inTmp := in & 0x0000FFFF
+ if inTmp < actionStart || inTmp > actionEnd {
+ return fmt.Errorf("unrecognized action")
+ }
+
+ if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
+ return fmt.Errorf("highest 16 bits must be zeroed except for Trace and Errno")
+ }
+
+ return nil
+}
+
+func sanitizeCompareOp(in ScmpCompareOp) error {
+ if in < compareOpStart || in > compareOpEnd {
+ return fmt.Errorf("unrecognized comparison operator")
+ }
+
+ return nil
+}
+
+func archFromNative(a C.uint32_t) (ScmpArch, error) {
+ switch a {
+ case C.C_ARCH_X86:
+ return ArchX86, nil
+ case C.C_ARCH_X86_64:
+ return ArchAMD64, nil
+ case C.C_ARCH_X32:
+ return ArchX32, nil
+ case C.C_ARCH_ARM:
+ return ArchARM, nil
+ case C.C_ARCH_NATIVE:
+ return ArchNative, nil
+ case C.C_ARCH_AARCH64:
+ return ArchARM64, nil
+ case C.C_ARCH_MIPS:
+ return ArchMIPS, nil
+ case C.C_ARCH_MIPS64:
+ return ArchMIPS64, nil
+ case C.C_ARCH_MIPS64N32:
+ return ArchMIPS64N32, nil
+ case C.C_ARCH_MIPSEL:
+ return ArchMIPSEL, nil
+ case C.C_ARCH_MIPSEL64:
+ return ArchMIPSEL64, nil
+ case C.C_ARCH_MIPSEL64N32:
+ return ArchMIPSEL64N32, nil
+ case C.C_ARCH_PPC:
+ return ArchPPC, nil
+ case C.C_ARCH_PPC64:
+ return ArchPPC64, nil
+ case C.C_ARCH_PPC64LE:
+ return ArchPPC64LE, nil
+ case C.C_ARCH_S390:
+ return ArchS390, nil
+ case C.C_ARCH_S390X:
+ return ArchS390X, nil
+ default:
+ return 0x0, fmt.Errorf("unrecognized architecture")
+ }
+}
+
+// Only use with sanitized arches, no error handling
+func (a ScmpArch) toNative() C.uint32_t {
+ switch a {
+ case ArchX86:
+ return C.C_ARCH_X86
+ case ArchAMD64:
+ return C.C_ARCH_X86_64
+ case ArchX32:
+ return C.C_ARCH_X32
+ case ArchARM:
+ return C.C_ARCH_ARM
+ case ArchARM64:
+ return C.C_ARCH_AARCH64
+ case ArchMIPS:
+ return C.C_ARCH_MIPS
+ case ArchMIPS64:
+ return C.C_ARCH_MIPS64
+ case ArchMIPS64N32:
+ return C.C_ARCH_MIPS64N32
+ case ArchMIPSEL:
+ return C.C_ARCH_MIPSEL
+ case ArchMIPSEL64:
+ return C.C_ARCH_MIPSEL64
+ case ArchMIPSEL64N32:
+ return C.C_ARCH_MIPSEL64N32
+ case ArchPPC:
+ return C.C_ARCH_PPC
+ case ArchPPC64:
+ return C.C_ARCH_PPC64
+ case ArchPPC64LE:
+ return C.C_ARCH_PPC64LE
+ case ArchS390:
+ return C.C_ARCH_S390
+ case ArchS390X:
+ return C.C_ARCH_S390X
+ case ArchNative:
+ return C.C_ARCH_NATIVE
+ default:
+ return 0x0
+ }
+}
+
+// Only use with sanitized ops, no error handling
+func (a ScmpCompareOp) toNative() C.int {
+ switch a {
+ case CompareNotEqual:
+ return C.C_CMP_NE
+ case CompareLess:
+ return C.C_CMP_LT
+ case CompareLessOrEqual:
+ return C.C_CMP_LE
+ case CompareEqual:
+ return C.C_CMP_EQ
+ case CompareGreaterEqual:
+ return C.C_CMP_GE
+ case CompareGreater:
+ return C.C_CMP_GT
+ case CompareMaskedEqual:
+ return C.C_CMP_MASKED_EQ
+ default:
+ return 0x0
+ }
+}
+
+func actionFromNative(a C.uint32_t) (ScmpAction, error) {
+ aTmp := a & 0xFFFF
+ switch a & 0xFFFF0000 {
+ case C.C_ACT_KILL:
+ return ActKill, nil
+ case C.C_ACT_TRAP:
+ return ActTrap, nil
+ case C.C_ACT_ERRNO:
+ return ActErrno.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_TRACE:
+ return ActTrace.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_ALLOW:
+ return ActAllow, nil
+ default:
+ return 0x0, fmt.Errorf("unrecognized action")
+ }
+}
+
+// Only use with sanitized actions, no error handling
+func (a ScmpAction) toNative() C.uint32_t {
+ switch a & 0xFFFF {
+ case ActKill:
+ return C.C_ACT_KILL
+ case ActTrap:
+ return C.C_ACT_TRAP
+ case ActErrno:
+ return C.C_ACT_ERRNO | (C.uint32_t(a) >> 16)
+ case ActTrace:
+ return C.C_ACT_TRACE | (C.uint32_t(a) >> 16)
+ case ActAllow:
+ return C.C_ACT_ALLOW
+ default:
+ return 0x0
+ }
+}
+
+// Internal only, assumes safe attribute
+func (a scmpFilterAttr) toNative() uint32 {
+ switch a {
+ case filterAttrActDefault:
+ return uint32(C.C_ATTRIBUTE_DEFAULT)
+ case filterAttrActBadArch:
+ return uint32(C.C_ATTRIBUTE_BADARCH)
+ case filterAttrNNP:
+ return uint32(C.C_ATTRIBUTE_NNP)
+ case filterAttrTsync:
+ return uint32(C.C_ATTRIBUTE_TSYNC)
+ default:
+ return 0x0
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
index b4ffca275..96c2ce15f 100644
--- a/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -1,14 +1,14 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
- GOPATH: c:\gopath
-branches:
- only:
- - master
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
-build_script:
- - go get -t
- - go test
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
index 8fc1261cb..0637db726 100644
--- a/vendor/github.com/spf13/afero/.travis.yml
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -1,21 +1,21 @@
-sudo: false
-language: go
-
-go:
- - 1.9
- - "1.10"
- - tip
-
-os:
- - linux
- - osx
-
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-
-script:
- - go build
- - go test -race -v ./...
-
+sudo: false
+language: go
+
+go:
+ - 1.9
+ - "1.10"
+ - tip
+
+os:
+ - linux
+ - osx
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+script:
+ - go build
+ - go test -race -v ./...
+
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index 943bad0b7..09498e70f 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -269,7 +269,7 @@ func (m *MemMapFs) RemoveAll(path string) error {
m.mu.RLock()
defer m.mu.RUnlock()
- for p := range m.getData() {
+ for p, _ := range m.getData() {
if strings.HasPrefix(p, path) {
m.mu.RUnlock()
m.mu.Lock()
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 000000000..1b8c7c261
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,36 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+
+cobra.test
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 000000000..94ec53068
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia <steve.francia@gmail.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
new file mode 100644
index 000000000..5afcb2096
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+
+matrix:
+ include:
+ - go: 1.9.4
+ - go: 1.10.0
+ - go: tip
+ allow_failures:
+ - go: tip
+
+before_install:
+ - mkdir -p bin
+ - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck
+ - chmod +x bin/shellcheck
+script:
+ - PATH=$PATH:$PWD/bin go test -v ./...
+ - go build
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - if [ -z $NOVET ]; then
+ diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
+ fi
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..851fcc087
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,736 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra including:
+
+* [Kubernetes](http://kubernetes.io/)
+* [Hugo](http://gohugo.io)
+* [rkt](https://github.com/coreos/rkt)
+* [etcd](https://github.com/coreos/etcd)
+* [Moby (former Docker)](https://github.com/moby/moby)
+* [Docker (distribution)](https://github.com/docker/distribution)
+* [OpenShift](https://www.openshift.com/)
+* [Delve](https://github.com/derekparker/delve)
+* [GopherJS](http://www.gopherjs.org/)
+* [CockroachDB](http://www.cockroachlabs.com/)
+* [Bleve](http://www.blevesearch.com/)
+* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [rclone](http://rclone.org/)
+* [nehm](https://github.com/bogem/nehm)
+* [Pouch](https://github.com/alibaba/pouch)
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+ * [Commands](#commands)
+ * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+ * [Using the Cobra Generator](#using-the-cobra-generator)
+ * [Using the Cobra Library](#using-the-cobra-library)
+ * [Working with Flags](#working-with-flags)
+ * [Positional and Custom Arguments](#positional-and-custom-arguments)
+ * [Example](#example)
+ * [Help Command](#help-command)
+ * [Usage Message](#usage-message)
+ * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+ * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+ * [Generating documentation for your command](#generating-documentation-for-your-command)
+ * [Generating bash completions](#generating-bash-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra is also an application that will generate your application scaffolding to rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications will read like sentences when used. Users will know how
+to use the application because they will natively understand how to use it.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE.`
+ or
+`APPNAME COMMAND ARG --FLAG`
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command we are telling Git to clone the URL bare.
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have children commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to children commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+ go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow the following organizational structure:
+
+```
+ ▾ appName/
+ ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at http://hugo.spf13.com`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+import (
+ "fmt"
+ "os"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+func init() {
+ cobra.OnInitialize(initConfig)
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+ rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+ viper.SetDefault("license", "apache")
+}
+
+func initConfig() {
+ // Don't forget to read config either from cfgFile or from home directory!
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ if err := viper.ReadInConfig(); err != nil {
+ fmt.Println("Can't read config:", err)
+ os.Exit(1)
+ }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since the flags are defined and used in different locations, we need to
+define a variable outside with the correct scope to assign the flag to
+work with.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assign a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent' meaning that this flag will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally which will only apply to that specific command.
+
+```go
+rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default Cobra only parses local flags on the target command, any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example the persistent flag `author` is bound with `viper`.
+**Note** that the variable `author` will not be set to the value from config,
+when the `--author` flag is not provided by user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
+An example of setting the custom validator:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires at least one arg")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [# times] [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions are needed.
+
+ $ cobra help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any children commands.
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands and for some which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
+
+- [Markdown](doc/md_docs.md)
+- [ReStructured Text](doc/rest_docs.md)
+- [Man Page](doc/man_docs.md)
+
+## Generating bash completions
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
+
+# Contributing
+
+1. Fork it
+2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Make changes and add them (`git add .`)
+5. Commit your changes (`git commit -m 'Add some feature'`)
+6. Push to the branch (`git push origin my-new-feature`)
+7. Create new pull request
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..a5d8a9273
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,89 @@
+package cobra
+
+import (
+ "fmt"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ for _, v := range args {
+ if !stringInSlice(v, cmd.ValidArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 000000000..8fa8f486f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,584 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf *bytes.Buffer, name string) {
+ buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+ return 0;
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions=("${must_have_one_noun[@]}")
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name))
+}
+
+func writePostscript(buf *bytes.Buffer, name string) {
+ name = strings.Replace(name, ":", "__", -1)
+ buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
+ buf.WriteString(fmt.Sprintf(`{
+ local cur prev words cword
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local last_command
+ local nouns=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ buf.WriteString("\n")
+}
+
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ buf.WriteString(" flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+}
+
+func writeFlags(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ if localNonPersistentFlags.Lookup(flag.Name) != nil {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ buf.WriteString("\n")
+}
+
+func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ for key := range flag.Annotations {
+ switch key {
+ case BashCompOneRequiredFlag:
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+ }
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_noun=()\n")
+ sort.Sort(sort.StringSlice(cmd.ValidArgs))
+ for _, value := range cmd.ValidArgs {
+ buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+}
+
+func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Sort(sort.StringSlice(cmd.Aliases))
+
+ buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ buf.WriteString(` fi`)
+ buf.WriteString("\n")
+}
+func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" noun_aliases=()\n")
+ sort.Sort(sort.StringSlice(cmd.ArgAliases))
+ for _, value := range cmd.ArgAliases {
+ buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf *bytes.Buffer, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+
+ if cmd.Root() == cmd {
+ buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
+ buf.WriteString("\n")
+ buf.WriteString(" command_aliases=()\n")
+ buf.WriteString("\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ buf.WriteString("}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 000000000..e79d4769d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,221 @@
+# Generating Bash Completions For Your Own cobra.Command
+
+Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
+
+```go
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ kubectl.GenBashCompletionFile("out.sh")
+}
+```
+
+`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
+
+## Creating your own custom functions
+
+Some more actual code that works in kubernetes:
+
+```go
+const (
+ bash_completion_func = `__kubectl_parse_get()
+{
+ local kubectl_output out
+ if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+ out=($(echo "${kubectl_output}" | awk '{print $1}'))
+ COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+ fi
+}
+
+__kubectl_get_resource()
+{
+ if [[ ${#nouns[@]} -eq 0 ]]; then
+ return 1
+ fi
+ __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+ if [[ $? -eq 0 ]]; then
+ return 0
+ fi
+}
+
+__custom_func() {
+ case ${last_command} in
+ kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+ __kubectl_get_resource
+ return
+ ;;
+ *)
+ ;;
+ esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+ Use: "kubectl",
+ Short: "kubectl controls the Kubernetes cluster manager",
+ Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+ Run: runHelp,
+ BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{ "pod", "node", "service", "replicationcontroller" }
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := RunGet(f, out, cmd, args)
+ util.CheckErr(err)
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{ "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+ ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab][tab]
+-c --container= -p --pod=
+```
+
+# Specify valid filename extensions for flags that take a filename
+
+In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions.
+
+```go
+ annotations := []string{"json", "yaml", "yml"}
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompFilenameExt] = annotations
+
+ flag := &pflag.Flag{
+ Name: "filename",
+ Shorthand: "f",
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f
+test/ example/ rpmbuild/
+hello.yml test.json
+```
+
+So while there are many other files in the CWD it only shows me subdirs and those with valid extensions.
+
+# Specify custom flag completion
+
+Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
+a custom flag completion function with cobra.BashCompCustom:
+
+```go
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+ flag := &pflag.Flag{
+ Name: "namespace",
+ Usage: usage,
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+ local template
+ template="{{ range .items }}{{ .metadata.name }} {{ end }}"
+ local kubectl_out
+ if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+ COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) )
+ fi
+}
+```
+# Using bash aliases for commands
+
+You can also configure the `bash aliases` for the commands and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$) aliasname <tab><tab>
+completion firstcommand secondcommand
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 000000000..7010fd15b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,200 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText string = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ template := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ }
+ for i := range d {
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 000000000..34d1bf367
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1517 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored.
+// It is a thin alias of pflag.ParseErrorsWhitelist so users of cobra
+// need not import pflag directly to configure it.
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
+ ValidArgs []string
+
+ // Args validates the expected (positional) arguments.
+ Args PositionalArgs
+
+ // ArgAliases is List of aliases for ValidArgs.
+ // These are not suggested to the user in the bash completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction is custom functions used by the bash autocompletion generator.
+ BashCompletionFunction string
+
+ // Deprecated defines, if this command is deprecated and should print this string when used.
+ Deprecated string
+
+ // Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
+ // will be printed by generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // FParseErrWhitelist lists flag parse errors to be ignored.
+ FParseErrWhitelist FParseErrWhitelist
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+ // commandsAreSorted defines, if command slice are sorted or not.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string // set by findNext when the command is matched by name or alias
+ called bool // NOTE(review): appears to be set during Execute — confirm outside this chunk
+ }
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // output is an output writer defined by user.
+ output io.Writer
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+}
+
+// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
+// particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used (see getOut, which falls back when unset).
+func (c *Command) SetOutput(output io.Writer) {
+ c.output = output
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ // Propagate the normalization function recursively to every child command.
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns the user-set output writer, or os.Stdout if none is set.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns the user-set output writer, or os.Stderr if none is set.
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// getOut resolves the effective output writer: this command's output,
+// else the nearest ancestor's, else the supplied default def.
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.output != nil {
+ return c.output
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ // Default: render UsageTemplate to stderr; a template error is both
+ // printed and returned.
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ // Default: render HelpTemplate to stdout; a template error is printed
+ // but not propagated (the signature has no error return).
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString return usage string.
+// It temporarily redirects the command's output into a buffer, renders the
+// usage, then restores the previous writer (directly, bypassing SetOutput).
+func (c *Command) UsageString() string {
+ tmpOutput := c.output
+ bb := new(bytes.Buffer)
+ c.SetOutput(bb)
+ c.Usage()
+ c.output = tmpOutput
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ // Default: pass the parse error through unchanged.
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+// minUsagePadding is the floor for UsagePadding when no sibling is wider.
+var minUsagePadding = 25
+
+// UsagePadding returns the column width used to align usage strings,
+// derived from the widest Use line among the parent's subcommands.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+// minCommandPathPadding is the floor for CommandPathPadding.
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns the column width used to align command paths,
+// derived from the widest command path among the parent's subcommands.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+// minNamePadding is the floor for NamePadding.
+var minNamePadding = 11
+
+// NamePadding returns the column width used to align command names,
+// derived from the widest name among the parent's subcommands.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+// Resolution order: this command's template, then the nearest ancestor's,
+// then the built-in default below. The helpers rpad and
+// trimTrailingWhitespaces are presumably registered via templateFuncs
+// (applied in tmpl) — confirm against cobra.go.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate return help template for the command.
+// Resolution order: this command's template, the nearest ancestor's, then
+// the default (Long or Short description, followed by the usage string).
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate return version template for the command.
+// Resolution order mirrors HelpTemplate; the default prints
+// "<name> version <Version>".
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+// hasNoOptDefVal reports whether the long flag `name` in fs has a NoOptDefVal,
+// i.e. it may appear without an attached value (like a boolean flag).
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+// shortHasNoOptDefVal reports the same for a shorthand flag; only the first
+// byte of name is consulted, since shorthands are single characters.
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+// stripFlags returns only the positional (non-flag) tokens of args, using
+// c's flag definitions to decide which flag tokens consume a following value.
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ // Plain token: keep as a positional/command word.
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
+// A fresh slice is returned; args itself is never mutated.
+func argsMinusFirstX(args []string, x string) []string {
+ for i, y := range args {
+ if x == y {
+ ret := []string{}
+ ret = append(ret, args[:i]...)
+ ret = append(ret, args[i+1:]...)
+ return ret
+ }
+ }
+ return args
+}
+
+// isFlagArg reports whether arg looks like a flag token: either a long flag
+// ("--x..." — second byte '-') or a shorthand ("-x" but not "--").
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[1] == '-') ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+// Returns the deepest matching command, the args still to be consumed for it,
+// and a legacy-args validation error when the command defines no Args validator.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ // Use only non-flag tokens to pick the next subcommand.
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ // Descend, removing only the matched subcommand token.
+ return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+// findSuggestions formats a "Did you mean this?" block for an unknown arg,
+// or "" when suggestions are disabled or none match. A non-positive
+// SuggestionsMinimumDistance is normalized to 2 as a side effect.
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ suggestionsString := ""
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ suggestionsString += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ suggestionsString += fmt.Sprintf("\t%v\n", s)
+ }
+ }
+ return suggestionsString
+}
+
+// findNext returns the child command matching next by name or alias,
+// recording next as the called-as name. With EnablePrefixMatching, a unique
+// prefix match is also accepted; ambiguous prefixes return nil.
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if cmd.Name() == next || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ // Only an unambiguous prefix match is usable.
+ if len(matches) == 1 {
+ return matches[0]
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+// Unlike Find, flags encountered before a subcommand are parsed by that
+// level of the tree before descending (used with TraverseChildren).
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ // First non-flag token: either a subcommand or a positional arg.
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ // Parse this level's flags before descending into the subcommand.
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+// A subcommand is suggested when it is available and its name is within
+// SuggestionsMinimumDistance (case-insensitive Levenshtein), is prefixed by
+// typedName, or explicitly lists typedName in SuggestFor.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent,
+// nearest ancestor first; the receiver itself is not visited.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command, i.e. the topmost ancestor without a parent.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+// execute runs this command with the given raw args: it parses flags,
+// handles the built-in --help/--version flags, validates positional args,
+// then fires the *Run hooks in their documented order around Run/RunE.
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("Called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ // Render the version template and stop; no Run hooks fire.
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ // A command without a Run/RunE signals the caller to print help.
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ // Flags were never parsed, so the raw args are the positional args.
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ // Run the first Persistent(Pre)Run found walking up the parent chain;
+ // on each command the E variant takes precedence over the plain one.
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.validateRequiredFlags(); err != nil {
+ return err
+ }
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ // Mirror of the PersistentPreRun walk, for the Post hooks.
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and run through the command tree finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help as the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpCmd()
+
+ var args []string
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ } else {
+ args = c.args
+ }
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If found parse to a subcommand and then failed, talk about the subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if err == flag.ErrHelp {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilentErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ }
+
+ // If root command has SilentUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return nil
+ }
+ return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// InitDefaultHelpFlag adds default help flag to c.
+// It is called automatically by executing the c or by calling help and usage.
+// If c already has help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ }
+}
+
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically by executing the c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().Bool("version", false, usage)
+ }
+}
+
+// InitDefaultHelpCmd adds default help command to c.
+// It is called automatically by executing the c or by calling help and usage.
+// If c already has help command or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ c.Root().Usage()
+ } else {
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.Help()
+ }
+ },
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands delete parent, subcommand and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if it already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + c.Use
+ } else {
+ useline = c.Use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of aliases start
+// with prefix
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a list of the command name and all aliases
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parents commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ //do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a commands parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it makes new.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 000000000..6159c1cc1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 000000000..edec728e4
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,20 @@
+// +build windows
+
+package cobra
+
+import (
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ time.Sleep(5 * time.Second)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 000000000..889c22e27
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,126 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// GenZshCompletionFile generates zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion file and writes to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+
+ writeHeader(buf, c)
+ maxDepth := maxDepth(c)
+ writeLevelMapping(buf, maxDepth)
+ writeLevelCases(buf, maxDepth, c)
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func writeHeader(w io.Writer, cmd *Command) {
+ fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name())
+}
+
+func maxDepth(c *Command) int {
+ if len(c.Commands()) == 0 {
+ return 0
+ }
+ maxDepthSub := 0
+ for _, s := range c.Commands() {
+ subDepth := maxDepth(s)
+ if subDepth > maxDepthSub {
+ maxDepthSub = subDepth
+ }
+ }
+ return 1 + maxDepthSub
+}
+
+func writeLevelMapping(w io.Writer, numLevels int) {
+ fmt.Fprintln(w, `_arguments \`)
+ for i := 1; i <= numLevels; i++ {
+ fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i)
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files")
+ fmt.Fprintln(w)
+}
+
+func writeLevelCases(w io.Writer, maxDepth int, root *Command) {
+ fmt.Fprintln(w, "case $state in")
+ defer fmt.Fprintln(w, "esac")
+
+ for i := 1; i <= maxDepth; i++ {
+ fmt.Fprintf(w, " level%d)\n", i)
+ writeLevel(w, root, i)
+ fmt.Fprintln(w, " ;;")
+ }
+ fmt.Fprintln(w, " *)")
+ fmt.Fprintln(w, " _arguments '*: :_files'")
+ fmt.Fprintln(w, " ;;")
+}
+
+func writeLevel(w io.Writer, root *Command, i int) {
+ fmt.Fprintf(w, " case $words[%d] in\n", i)
+ defer fmt.Fprintln(w, " esac")
+
+ commands := filterByLevel(root, i)
+ byParent := groupByParent(commands)
+
+ for p, c := range byParent {
+ names := names(c)
+ fmt.Fprintf(w, " %s)\n", p)
+ fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " "))
+ fmt.Fprintln(w, " ;;")
+ }
+ fmt.Fprintln(w, " *)")
+ fmt.Fprintln(w, " _arguments '*: :_files'")
+ fmt.Fprintln(w, " ;;")
+
+}
+
+func filterByLevel(c *Command, l int) []*Command {
+ cs := make([]*Command, 0)
+ if l == 0 {
+ cs = append(cs, c)
+ return cs
+ }
+ for _, s := range c.Commands() {
+ cs = append(cs, filterByLevel(s, l-1)...)
+ }
+ return cs
+}
+
+func groupByParent(commands []*Command) map[string][]*Command {
+ m := make(map[string][]*Command)
+ for _, c := range commands {
+ parent := c.Parent()
+ if parent == nil {
+ continue
+ }
+ m[parent.Name()] = append(m[parent.Name()], c)
+ }
+ return m
+}
+
+func names(commands []*Command) []string {
+ ns := make([]string, len(commands))
+ for i, c := range commands {
+ ns[i] = c.Name()
+ }
+ return ns
+}
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go
index 7622d674a..b61a8180e 100644
--- a/vendor/github.com/ugorji/go/codec/0doc.go
+++ b/vendor/github.com/ugorji/go/codec/0doc.go
@@ -245,7 +245,7 @@ package codec
// - critical shared objects that are read many times
// TypeInfos
// - pooled objects:
-// decNaked, decNakedContainers, codecFner, typeInfoLoadArray,
+// decNaked, decNakedContainers, codecFner, typeInfoLoadArray,
// - small objects allocated independently, that we read/use much across threads:
// codecFn, typeInfo
// - Objects allocated independently and used a lot
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
index 5050f07de..87f2562f6 100644
--- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -2281,7 +2281,7 @@ func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2337,7 +2337,7 @@ func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2393,7 +2393,7 @@ func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2449,7 +2449,7 @@ func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2505,7 +2505,7 @@ func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2561,7 +2561,7 @@ func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2617,7 +2617,7 @@ func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2673,7 +2673,7 @@ func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2729,7 +2729,7 @@ func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2785,7 +2785,7 @@ func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2841,7 +2841,7 @@ func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2897,7 +2897,7 @@ func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -2953,7 +2953,7 @@ func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -3009,7 +3009,7 @@ func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -3065,7 +3065,7 @@ func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -3121,7 +3121,7 @@ func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) {
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI
- for k2 := range v {
+ for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
@@ -3174,7 +3174,7 @@ func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3223,7 +3223,7 @@ func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3272,7 +3272,7 @@ func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3321,7 +3321,7 @@ func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3370,7 +3370,7 @@ func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3419,7 +3419,7 @@ func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3468,7 +3468,7 @@ func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3517,7 +3517,7 @@ func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3566,7 +3566,7 @@ func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3615,7 +3615,7 @@ func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3664,7 +3664,7 @@ func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3713,7 +3713,7 @@ func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3762,7 +3762,7 @@ func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3811,7 +3811,7 @@ func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3860,7 +3860,7 @@ func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3909,7 +3909,7 @@ func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]string, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = string(k)
i++
}
@@ -3958,7 +3958,7 @@ func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4007,7 +4007,7 @@ func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4056,7 +4056,7 @@ func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4105,7 +4105,7 @@ func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4154,7 +4154,7 @@ func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4203,7 +4203,7 @@ func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4252,7 +4252,7 @@ func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4301,7 +4301,7 @@ func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4350,7 +4350,7 @@ func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4399,7 +4399,7 @@ func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4448,7 +4448,7 @@ func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4497,7 +4497,7 @@ func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4546,7 +4546,7 @@ func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4595,7 +4595,7 @@ func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4644,7 +4644,7 @@ func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4693,7 +4693,7 @@ func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4742,7 +4742,7 @@ func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4791,7 +4791,7 @@ func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4840,7 +4840,7 @@ func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4889,7 +4889,7 @@ func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4938,7 +4938,7 @@ func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -4987,7 +4987,7 @@ func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5036,7 +5036,7 @@ func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5085,7 +5085,7 @@ func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5134,7 +5134,7 @@ func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5183,7 +5183,7 @@ func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5232,7 +5232,7 @@ func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5281,7 +5281,7 @@ func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5330,7 +5330,7 @@ func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5379,7 +5379,7 @@ func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5428,7 +5428,7 @@ func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5477,7 +5477,7 @@ func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]float64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = float64(k)
i++
}
@@ -5526,7 +5526,7 @@ func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5575,7 +5575,7 @@ func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5624,7 +5624,7 @@ func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5673,7 +5673,7 @@ func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5722,7 +5722,7 @@ func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5771,7 +5771,7 @@ func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5820,7 +5820,7 @@ func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5869,7 +5869,7 @@ func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5918,7 +5918,7 @@ func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -5967,7 +5967,7 @@ func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6016,7 +6016,7 @@ func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6065,7 +6065,7 @@ func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6114,7 +6114,7 @@ func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6163,7 +6163,7 @@ func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6212,7 +6212,7 @@ func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6261,7 +6261,7 @@ func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6310,7 +6310,7 @@ func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6359,7 +6359,7 @@ func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6408,7 +6408,7 @@ func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6457,7 +6457,7 @@ func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6506,7 +6506,7 @@ func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6555,7 +6555,7 @@ func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6604,7 +6604,7 @@ func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6653,7 +6653,7 @@ func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6702,7 +6702,7 @@ func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6751,7 +6751,7 @@ func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6800,7 +6800,7 @@ func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6849,7 +6849,7 @@ func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6898,7 +6898,7 @@ func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6947,7 +6947,7 @@ func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -6996,7 +6996,7 @@ func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7045,7 +7045,7 @@ func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7094,7 +7094,7 @@ func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7143,7 +7143,7 @@ func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7192,7 +7192,7 @@ func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7241,7 +7241,7 @@ func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7290,7 +7290,7 @@ func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7339,7 +7339,7 @@ func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7388,7 +7388,7 @@ func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7437,7 +7437,7 @@ func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7486,7 +7486,7 @@ func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7535,7 +7535,7 @@ func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7584,7 +7584,7 @@ func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7633,7 +7633,7 @@ func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7682,7 +7682,7 @@ func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7731,7 +7731,7 @@ func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7780,7 +7780,7 @@ func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7829,7 +7829,7 @@ func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7878,7 +7878,7 @@ func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7927,7 +7927,7 @@ func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -7976,7 +7976,7 @@ func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8025,7 +8025,7 @@ func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8074,7 +8074,7 @@ func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8123,7 +8123,7 @@ func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8172,7 +8172,7 @@ func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8221,7 +8221,7 @@ func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8270,7 +8270,7 @@ func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8319,7 +8319,7 @@ func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8368,7 +8368,7 @@ func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8417,7 +8417,7 @@ func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8466,7 +8466,7 @@ func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8515,7 +8515,7 @@ func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8564,7 +8564,7 @@ func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8613,7 +8613,7 @@ func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8662,7 +8662,7 @@ func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8711,7 +8711,7 @@ func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8760,7 +8760,7 @@ func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8809,7 +8809,7 @@ func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8858,7 +8858,7 @@ func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8907,7 +8907,7 @@ func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -8956,7 +8956,7 @@ func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9005,7 +9005,7 @@ func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9054,7 +9054,7 @@ func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9103,7 +9103,7 @@ func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9152,7 +9152,7 @@ func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9201,7 +9201,7 @@ func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9250,7 +9250,7 @@ func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9299,7 +9299,7 @@ func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9348,7 +9348,7 @@ func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9397,7 +9397,7 @@ func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9446,7 +9446,7 @@ func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9495,7 +9495,7 @@ func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9544,7 +9544,7 @@ func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9593,7 +9593,7 @@ func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9642,7 +9642,7 @@ func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9691,7 +9691,7 @@ func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9740,7 +9740,7 @@ func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9789,7 +9789,7 @@ func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9838,7 +9838,7 @@ func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9887,7 +9887,7 @@ func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9936,7 +9936,7 @@ func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -9985,7 +9985,7 @@ func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -10034,7 +10034,7 @@ func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -10083,7 +10083,7 @@ func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -10132,7 +10132,7 @@ func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -10181,7 +10181,7 @@ func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]uint64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = uint64(k)
i++
}
@@ -10230,7 +10230,7 @@ func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10279,7 +10279,7 @@ func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10328,7 +10328,7 @@ func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10377,7 +10377,7 @@ func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10426,7 +10426,7 @@ func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10475,7 +10475,7 @@ func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10524,7 +10524,7 @@ func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10573,7 +10573,7 @@ func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10622,7 +10622,7 @@ func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10671,7 +10671,7 @@ func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10720,7 +10720,7 @@ func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10769,7 +10769,7 @@ func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10818,7 +10818,7 @@ func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10867,7 +10867,7 @@ func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10916,7 +10916,7 @@ func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -10965,7 +10965,7 @@ func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11014,7 +11014,7 @@ func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11063,7 +11063,7 @@ func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11112,7 +11112,7 @@ func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11161,7 +11161,7 @@ func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11210,7 +11210,7 @@ func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11259,7 +11259,7 @@ func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11308,7 +11308,7 @@ func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11357,7 +11357,7 @@ func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11406,7 +11406,7 @@ func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11455,7 +11455,7 @@ func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11504,7 +11504,7 @@ func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11553,7 +11553,7 @@ func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11602,7 +11602,7 @@ func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11651,7 +11651,7 @@ func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11700,7 +11700,7 @@ func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11749,7 +11749,7 @@ func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11798,7 +11798,7 @@ func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11847,7 +11847,7 @@ func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11896,7 +11896,7 @@ func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11945,7 +11945,7 @@ func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -11994,7 +11994,7 @@ func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12043,7 +12043,7 @@ func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12092,7 +12092,7 @@ func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12141,7 +12141,7 @@ func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12190,7 +12190,7 @@ func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12239,7 +12239,7 @@ func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12288,7 +12288,7 @@ func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12337,7 +12337,7 @@ func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12386,7 +12386,7 @@ func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12435,7 +12435,7 @@ func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12484,7 +12484,7 @@ func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12533,7 +12533,7 @@ func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12582,7 +12582,7 @@ func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12631,7 +12631,7 @@ func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12680,7 +12680,7 @@ func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12729,7 +12729,7 @@ func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12778,7 +12778,7 @@ func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12827,7 +12827,7 @@ func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12876,7 +12876,7 @@ func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12925,7 +12925,7 @@ func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -12974,7 +12974,7 @@ func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13023,7 +13023,7 @@ func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13072,7 +13072,7 @@ func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13121,7 +13121,7 @@ func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13170,7 +13170,7 @@ func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13219,7 +13219,7 @@ func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13268,7 +13268,7 @@ func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13317,7 +13317,7 @@ func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13366,7 +13366,7 @@ func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13415,7 +13415,7 @@ func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13464,7 +13464,7 @@ func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13513,7 +13513,7 @@ func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13562,7 +13562,7 @@ func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13611,7 +13611,7 @@ func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13660,7 +13660,7 @@ func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13709,7 +13709,7 @@ func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13758,7 +13758,7 @@ func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13807,7 +13807,7 @@ func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13856,7 +13856,7 @@ func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13905,7 +13905,7 @@ func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -13954,7 +13954,7 @@ func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -14003,7 +14003,7 @@ func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -14052,7 +14052,7 @@ func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -14101,7 +14101,7 @@ func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]int64, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = int64(k)
i++
}
@@ -14150,7 +14150,7 @@ func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14199,7 +14199,7 @@ func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14248,7 +14248,7 @@ func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14297,7 +14297,7 @@ func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14346,7 +14346,7 @@ func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14395,7 +14395,7 @@ func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14444,7 +14444,7 @@ func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14493,7 +14493,7 @@ func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14542,7 +14542,7 @@ func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14591,7 +14591,7 @@ func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14640,7 +14640,7 @@ func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14689,7 +14689,7 @@ func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14738,7 +14738,7 @@ func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14787,7 +14787,7 @@ func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14836,7 +14836,7 @@ func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
@@ -14885,7 +14885,7 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) {
if e.h.Canonical {
v2 := make([]bool, len(v))
var i int
- for k := range v {
+ for k, _ := range v {
v2[i] = bool(k)
i++
}
diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go
index bfcb4e380..579cadfb1 100644
--- a/vendor/golang.org/x/text/encoding/unicode/unicode.go
+++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go
@@ -158,12 +158,12 @@ func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
// and recommendations. Some of the "configurations" are merely recommendations,
// so multiple configurations could match.
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
- BigEndian: {
+ BigEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16BE,
UseBOM: identifier.UTF16, // BigEnding default is preferred by RFC 2781.
// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
},
- LittleEndian: {
+ LittleEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16LE,
UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
diff --git a/vendor/golang.org/x/text/language/gen.go b/vendor/golang.org/x/text/language/gen.go
index fea288d46..302f1940a 100644
--- a/vendor/golang.org/x/text/language/gen.go
+++ b/vendor/golang.org/x/text/language/gen.go
@@ -1050,7 +1050,7 @@ func (b *builder) writeRegion() {
m49Index := [9]int16{}
fromM49 := []uint16{}
m49 := []int{}
- for k := range fromM49map {
+ for k, _ := range fromM49map {
m49 = append(m49, int(k))
}
sort.Ints(m49)
diff --git a/vendor/golang.org/x/text/language/lookup.go b/vendor/golang.org/x/text/language/lookup.go
index 3ee0e5dd3..1d80ac370 100644
--- a/vendor/golang.org/x/text/language/lookup.go
+++ b/vendor/golang.org/x/text/language/lookup.go
@@ -344,39 +344,39 @@ var (
// grandfatheredMap holds a mapping from legacy and grandfathered tags to
// their base language or index to more elaborate tag.
grandfatheredMap = map[[maxLen]byte]int16{
- {'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
- {'i', '-', 'a', 'm', 'i'}: _ami, // i-ami
- {'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn
- {'i', '-', 'h', 'a', 'k'}: _hak, // i-hak
- {'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon
- {'i', '-', 'l', 'u', 'x'}: _lb, // i-lux
- {'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo
- {'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn
- {'i', '-', 't', 'a', 'o'}: _tao, // i-tao
- {'i', '-', 't', 'a', 'y'}: _tay, // i-tay
- {'i', '-', 't', 's', 'u'}: _tsu, // i-tsu
- {'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok
- {'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn
- {'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR
- {'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL
- {'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE
- {'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu
- {'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka
- {'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
- {'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang
+ [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
+ [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami
+ [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn
+ [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak
+ [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon
+ [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux
+ [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo
+ [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn
+ [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao
+ [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay
+ [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu
+ [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok
+ [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn
+ [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR
+ [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL
+ [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE
+ [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu
+ [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka
+ [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
+ [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang
// Grandfathered tags with no modern replacement will be converted as
// follows:
- {'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
- {'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed
- {'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default
- {'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian
- {'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo
- {'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min
+ [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
+ [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed
+ [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default
+ [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian
+ [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo
+ [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min
// CLDR-specific tag.
- {'r', 'o', 'o', 't'}: 0, // root
- {'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX"
+ [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root
+ [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX"
}
altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}
diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go
index a28524e1d..b738d457b 100644
--- a/vendor/golang.org/x/text/language/tables.go
+++ b/vendor/golang.org/x/text/language/tables.go
@@ -3348,9 +3348,9 @@ var regionToGroups = [358]uint8{
// Size: 18 bytes, 3 elements
var paradigmLocales = [3][3]uint16{
- 0: {0x139, 0x0, 0x7b},
- 1: {0x13e, 0x0, 0x1f},
- 2: {0x3c0, 0x41, 0xee},
+ 0: [3]uint16{0x139, 0x0, 0x7b},
+ 1: [3]uint16{0x13e, 0x0, 0x1f},
+ 2: [3]uint16{0x3c0, 0x41, 0xee},
}
type mutualIntelligibility struct {
diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go
index 19b8cefd7..2197f8ac2 100644
--- a/vendor/golang.org/x/text/unicode/cldr/cldr.go
+++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go
@@ -110,7 +110,7 @@ func (cldr *CLDR) Supplemental() *SupplementalData {
func (cldr *CLDR) Locales() []string {
loc := []string{"root"}
hasRoot := false
- for l := range cldr.locale {
+ for l, _ := range cldr.locale {
if l == "root" {
hasRoot = true
continue
diff --git a/vendor/golang.org/x/text/unicode/cldr/resolve.go b/vendor/golang.org/x/text/unicode/cldr/resolve.go
index c6919216b..691b5903f 100644
--- a/vendor/golang.org/x/text/unicode/cldr/resolve.go
+++ b/vendor/golang.org/x/text/unicode/cldr/resolve.go
@@ -289,7 +289,7 @@ var distinguishing = map[string][]string{
"mzone": nil,
"from": nil,
"to": nil,
- "type": {
+ "type": []string{
"abbreviationFallback",
"default",
"mapping",
@@ -527,7 +527,7 @@ func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value,
}
}
keys := make([]string, 0, len(index))
- for k := range index {
+ for k, _ := range index {
keys = append(keys, k)
}
sort.Strings(keys)
diff --git a/vendor/golang.org/x/text/unicode/cldr/slice.go b/vendor/golang.org/x/text/unicode/cldr/slice.go
index ea5f31a39..388c983ff 100644
--- a/vendor/golang.org/x/text/unicode/cldr/slice.go
+++ b/vendor/golang.org/x/text/unicode/cldr/slice.go
@@ -83,7 +83,7 @@ func (s Slice) Group(fn func(e Elem) string) []Slice {
m[key] = append(m[key], vi)
}
keys := []string{}
- for k := range m {
+ for k, _ := range m {
keys = append(keys, k)
}
sort.Strings(keys)
diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go
index f66778d45..338c395ee 100644
--- a/vendor/golang.org/x/text/unicode/norm/maketables.go
+++ b/vendor/golang.org/x/text/unicode/norm/maketables.go
@@ -241,7 +241,7 @@ func compactCCC() {
m[c.ccc] = 0
}
cccs := []int{}
- for v := range m {
+ for v, _ := range m {
cccs = append(cccs, int(v))
}
sort.Ints(cccs)
diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/tools/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/tools/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/tools/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/tools/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/tools/cmd/getgo/LICENSE b/vendor/golang.org/x/tools/cmd/getgo/LICENSE
new file mode 100644
index 000000000..32017f8fa
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/getgo/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
new file mode 100644
index 000000000..6b7052b89
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -0,0 +1,627 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+// This file defines utilities for working with source positions.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "sort"
+)
+
+// PathEnclosingInterval returns the node that encloses the source
+// interval [start, end), and all its ancestors up to the AST root.
+//
+// The definition of "enclosing" used by this function considers
+// additional whitespace abutting a node to be enclosed by it.
+// In this example:
+//
+// z := x + y // add them
+// <-A->
+// <----B----->
+//
+// the ast.BinaryExpr(+) node is considered to enclose interval B
+// even though its [Pos()..End()) is actually only interval A.
+// This behaviour makes user interfaces more tolerant of imperfect
+// input.
+//
+// This function treats tokens as nodes, though they are not included
+// in the result. e.g. PathEnclosingInterval("+") returns the
+// enclosing ast.BinaryExpr("x + y").
+//
+// If start==end, the 1-char interval following start is used instead.
+//
+// The 'exact' result is true if the interval contains only path[0]
+// and perhaps some adjacent whitespace. It is false if the interval
+// overlaps multiple children of path[0], or if it contains only
+// interior whitespace of path[0].
+// In this example:
+//
+// z := x + y // add them
+// <--C--> <---E-->
+// ^
+// D
+//
+// intervals C, D and E are inexact. C is contained by the
+// z-assignment statement, because it spans three of its children (:=,
+// x, +). So too is the 1-char interval D, because it contains only
+// interior whitespace of the assignment. E is considered interior
+// whitespace of the BlockStmt containing the assignment.
+//
+// Precondition: [start, end) both lie within the same file as root.
+// TODO(adonovan): return (nil, false) in this case and remove precond.
+// Requires FileSet; see loader.tokenFileContainsPos.
+//
+// Postcondition: path is never nil; it always contains at least 'root'.
+//
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
	// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging

	// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
	var visit func(node ast.Node) bool
	visit = func(node ast.Node) bool {
		// path is built outermost-first here and reversed before returning.
		path = append(path, node)

		nodePos := node.Pos()
		nodeEnd := node.End()

		// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging

		// Intersect [start, end) with interval of node.
		if start < nodePos {
			start = nodePos
		}
		if end > nodeEnd {
			end = nodeEnd
		}

		// Find sole child that contains [start, end).
		children := childrenOf(node)
		l := len(children)
		for i, child := range children {
			// [childPos, childEnd) is unaugmented interval of child.
			childPos := child.Pos()
			childEnd := child.End()

			// [augPos, augEnd) is whitespace-augmented interval of child.
			augPos := childPos
			augEnd := childEnd
			if i > 0 {
				augPos = children[i-1].End() // start of preceding whitespace
			}
			if i < l-1 {
				nextChildPos := children[i+1].Pos()
				// Does [start, end) lie between child and next child?
				if start >= augEnd && end <= nextChildPos {
					return false // inexact match
				}
				augEnd = nextChildPos // end of following whitespace
			}

			// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
			// 	i, augPos, augEnd, start, end) // debugging

			// Does augmented child strictly contain [start, end)?
			if augPos <= start && end <= augEnd {
				// Fake token nodes are never added to the path;
				// their parent is the innermost real node.
				_, isToken := child.(tokenNode)
				return isToken || visit(child)
			}

			// Does [start, end) overlap multiple children?
			// i.e. left-augmented child contains start
			// but LR-augmented child does not contain end.
			if start < childEnd && end > augEnd {
				break
			}
		}

		// No single child contained [start, end),
		// so node is the result. Is it exact?

		// (It's tempting to put this condition before the
		// child loop, but it gives the wrong result in the
		// case where a node (e.g. ExprStmt) and its sole
		// child have equal intervals.)
		if start == nodePos && end == nodeEnd {
			return true // exact match
		}

		return false // inexact: overlaps multiple children
	}

	// Normalize the interval so that start <= end.
	if start > end {
		start, end = end, start
	}

	if start < root.End() && end > root.Pos() {
		if start == end {
			end = start + 1 // empty interval => interval of size 1
		}
		exact = visit(root)

		// Reverse the path: callers expect innermost-first order.
		for i, l := 0, len(path); i < l/2; i++ {
			path[i], path[l-1-i] = path[l-1-i], path[i]
		}
	} else {
		// Selection lies within whitespace preceding the
		// first (or following the last) declaration in the file.
		// The result nonetheless always includes the ast.File.
		path = append(path, root)
	}

	return
}
+
+// tokenNode is a dummy implementation of ast.Node for a single token.
+// They are used transiently by PathEnclosingInterval but never escape
+// this package.
+//
+type tokenNode struct {
+ pos token.Pos
+ end token.Pos
+}
+
+func (n tokenNode) Pos() token.Pos {
+ return n.pos
+}
+
+func (n tokenNode) End() token.Pos {
+ return n.end
+}
+
+func tok(pos token.Pos, len int) ast.Node {
+ return tokenNode{pos, pos + token.Pos(len)}
+}
+
+// childrenOf returns the direct non-nil children of ast.Node n.
+// It may include fake ast.Node implementations for bare tokens.
+// it is not safe to call (e.g.) ast.Walk on such nodes.
+//
+func childrenOf(n ast.Node) []ast.Node {
+ var children []ast.Node
+
+ // First add nodes for all true subtrees.
+ ast.Inspect(n, func(node ast.Node) bool {
+ if node == n { // push n
+ return true // recur
+ }
+ if node != nil { // push child
+ children = append(children, node)
+ }
+ return false // no recursion
+ })
+
+ // Then add fake Nodes for bare tokens.
+ switch n := n.(type) {
+ case *ast.ArrayType:
+ children = append(children,
+ tok(n.Lbrack, len("[")),
+ tok(n.Elt.End(), len("]")))
+
+ case *ast.AssignStmt:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.BasicLit:
+ children = append(children,
+ tok(n.ValuePos, len(n.Value)))
+
+ case *ast.BinaryExpr:
+ children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+ case *ast.BlockStmt:
+ children = append(children,
+ tok(n.Lbrace, len("{")),
+ tok(n.Rbrace, len("}")))
+
+ case *ast.BranchStmt:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.CallExpr:
+ children = append(children,
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+ if n.Ellipsis != 0 {
+ children = append(children, tok(n.Ellipsis, len("...")))
+ }
+
+ case *ast.CaseClause:
+ if n.List == nil {
+ children = append(children,
+ tok(n.Case, len("default")))
+ } else {
+ children = append(children,
+ tok(n.Case, len("case")))
+ }
+ children = append(children, tok(n.Colon, len(":")))
+
+ case *ast.ChanType:
+ switch n.Dir {
+ case ast.RECV:
+ children = append(children, tok(n.Begin, len("<-chan")))
+ case ast.SEND:
+ children = append(children, tok(n.Begin, len("chan<-")))
+ case ast.RECV | ast.SEND:
+ children = append(children, tok(n.Begin, len("chan")))
+ }
+
+ case *ast.CommClause:
+ if n.Comm == nil {
+ children = append(children,
+ tok(n.Case, len("default")))
+ } else {
+ children = append(children,
+ tok(n.Case, len("case")))
+ }
+ children = append(children, tok(n.Colon, len(":")))
+
+ case *ast.Comment:
+ // nop
+
+ case *ast.CommentGroup:
+ // nop
+
+ case *ast.CompositeLit:
+ children = append(children,
+ tok(n.Lbrace, len("{")),
+ tok(n.Rbrace, len("{")))
+
+ case *ast.DeclStmt:
+ // nop
+
+ case *ast.DeferStmt:
+ children = append(children,
+ tok(n.Defer, len("defer")))
+
+ case *ast.Ellipsis:
+ children = append(children,
+ tok(n.Ellipsis, len("...")))
+
+ case *ast.EmptyStmt:
+ // nop
+
+ case *ast.ExprStmt:
+ // nop
+
+ case *ast.Field:
+ // TODO(adonovan): Field.{Doc,Comment,Tag}?
+
+ case *ast.FieldList:
+ children = append(children,
+ tok(n.Opening, len("(")),
+ tok(n.Closing, len(")")))
+
+ case *ast.File:
+ // TODO test: Doc
+ children = append(children,
+ tok(n.Package, len("package")))
+
+ case *ast.ForStmt:
+ children = append(children,
+ tok(n.For, len("for")))
+
+ case *ast.FuncDecl:
+ // TODO(adonovan): FuncDecl.Comment?
+
+ // Uniquely, FuncDecl breaks the invariant that
+ // preorder traversal yields tokens in lexical order:
+ // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
+ //
+ // As a workaround, we inline the case for FuncType
+ // here and order things correctly.
+ //
+ children = nil // discard ast.Walk(FuncDecl) info subtrees
+ children = append(children, tok(n.Type.Func, len("func")))
+ if n.Recv != nil {
+ children = append(children, n.Recv)
+ }
+ children = append(children, n.Name)
+ if n.Type.Params != nil {
+ children = append(children, n.Type.Params)
+ }
+ if n.Type.Results != nil {
+ children = append(children, n.Type.Results)
+ }
+ if n.Body != nil {
+ children = append(children, n.Body)
+ }
+
+ case *ast.FuncLit:
+ // nop
+
+ case *ast.FuncType:
+ if n.Func != 0 {
+ children = append(children,
+ tok(n.Func, len("func")))
+ }
+
+ case *ast.GenDecl:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+ if n.Lparen != 0 {
+ children = append(children,
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+ }
+
+ case *ast.GoStmt:
+ children = append(children,
+ tok(n.Go, len("go")))
+
+ case *ast.Ident:
+ children = append(children,
+ tok(n.NamePos, len(n.Name)))
+
+ case *ast.IfStmt:
+ children = append(children,
+ tok(n.If, len("if")))
+
+ case *ast.ImportSpec:
+ // TODO(adonovan): ImportSpec.{Doc,EndPos}?
+
+ case *ast.IncDecStmt:
+ children = append(children,
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.IndexExpr:
+ children = append(children,
+ tok(n.Lbrack, len("{")),
+ tok(n.Rbrack, len("}")))
+
+ case *ast.InterfaceType:
+ children = append(children,
+ tok(n.Interface, len("interface")))
+
+ case *ast.KeyValueExpr:
+ children = append(children,
+ tok(n.Colon, len(":")))
+
+ case *ast.LabeledStmt:
+ children = append(children,
+ tok(n.Colon, len(":")))
+
+ case *ast.MapType:
+ children = append(children,
+ tok(n.Map, len("map")))
+
+ case *ast.ParenExpr:
+ children = append(children,
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+
+ case *ast.RangeStmt:
+ children = append(children,
+ tok(n.For, len("for")),
+ tok(n.TokPos, len(n.Tok.String())))
+
+ case *ast.ReturnStmt:
+ children = append(children,
+ tok(n.Return, len("return")))
+
+ case *ast.SelectStmt:
+ children = append(children,
+ tok(n.Select, len("select")))
+
+ case *ast.SelectorExpr:
+ // nop
+
+ case *ast.SendStmt:
+ children = append(children,
+ tok(n.Arrow, len("<-")))
+
+ case *ast.SliceExpr:
+ children = append(children,
+ tok(n.Lbrack, len("[")),
+ tok(n.Rbrack, len("]")))
+
+ case *ast.StarExpr:
+ children = append(children, tok(n.Star, len("*")))
+
+ case *ast.StructType:
+ children = append(children, tok(n.Struct, len("struct")))
+
+ case *ast.SwitchStmt:
+ children = append(children, tok(n.Switch, len("switch")))
+
+ case *ast.TypeAssertExpr:
+ children = append(children,
+ tok(n.Lparen-1, len(".")),
+ tok(n.Lparen, len("(")),
+ tok(n.Rparen, len(")")))
+
+ case *ast.TypeSpec:
+ // TODO(adonovan): TypeSpec.{Doc,Comment}?
+
+ case *ast.TypeSwitchStmt:
+ children = append(children, tok(n.Switch, len("switch")))
+
+ case *ast.UnaryExpr:
+ children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+ case *ast.ValueSpec:
+ // TODO(adonovan): ValueSpec.{Doc,Comment}?
+
+ case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
+ // nop
+ }
+
+ // TODO(adonovan): opt: merge the logic of ast.Inspect() into
+ // the switch above so we can make interleaved callbacks for
+ // both Nodes and Tokens in the right order and avoid the need
+ // to sort.
+ sort.Sort(byPos(children))
+
+ return children
+}
+
+type byPos []ast.Node
+
+func (sl byPos) Len() int {
+ return len(sl)
+}
+func (sl byPos) Less(i, j int) bool {
+ return sl[i].Pos() < sl[j].Pos()
+}
+func (sl byPos) Swap(i, j int) {
+ sl[i], sl[j] = sl[j], sl[i]
+}
+
// NodeDescription returns a description of the concrete type of n suitable
// for a user interface.
//
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
//
func NodeDescription(n ast.Node) string {
	switch n := n.(type) {
	case *ast.ArrayType:
		return "array type"
	case *ast.AssignStmt:
		return "assignment"
	case *ast.BadDecl:
		return "bad declaration"
	case *ast.BadExpr:
		return "bad expression"
	case *ast.BadStmt:
		return "bad statement"
	case *ast.BasicLit:
		return "basic literal"
	case *ast.BinaryExpr:
		return fmt.Sprintf("binary %s operation", n.Op)
	case *ast.BlockStmt:
		return "block"
	case *ast.BranchStmt:
		switch n.Tok {
		case token.BREAK:
			return "break statement"
		case token.CONTINUE:
			return "continue statement"
		case token.GOTO:
			return "goto statement"
		case token.FALLTHROUGH:
			return "fall-through statement"
		}
		// Any other token falls through to the panic below.
	case *ast.CallExpr:
		if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
			return "function call (or conversion)"
		}
		return "function call"
	case *ast.CaseClause:
		return "case clause"
	case *ast.ChanType:
		return "channel type"
	case *ast.CommClause:
		return "communication clause"
	case *ast.Comment:
		return "comment"
	case *ast.CommentGroup:
		return "comment group"
	case *ast.CompositeLit:
		return "composite literal"
	case *ast.DeclStmt:
		return NodeDescription(n.Decl) + " statement"
	case *ast.DeferStmt:
		return "defer statement"
	case *ast.Ellipsis:
		return "ellipsis"
	case *ast.EmptyStmt:
		return "empty statement"
	case *ast.ExprStmt:
		return "expression statement"
	case *ast.Field:
		// Can be any of these:
		// struct {x, y int}  -- struct field(s)
		// struct {T}         -- anon struct field
		// interface {I}      -- interface embedding
		// interface {f()}    -- interface method
		// func (A) func(B) C -- receiver, param(s), result(s)
		return "field/method/parameter"
	case *ast.FieldList:
		return "field/method/parameter list"
	case *ast.File:
		return "source file"
	case *ast.ForStmt:
		return "for loop"
	case *ast.FuncDecl:
		return "function declaration"
	case *ast.FuncLit:
		return "function literal"
	case *ast.FuncType:
		return "function type"
	case *ast.GenDecl:
		switch n.Tok {
		case token.IMPORT:
			return "import declaration"
		case token.CONST:
			return "constant declaration"
		case token.TYPE:
			return "type declaration"
		case token.VAR:
			return "variable declaration"
		}
		// Any other token falls through to the panic below.
	case *ast.GoStmt:
		return "go statement"
	case *ast.Ident:
		return "identifier"
	case *ast.IfStmt:
		return "if statement"
	case *ast.ImportSpec:
		return "import specification"
	case *ast.IncDecStmt:
		if n.Tok == token.INC {
			return "increment statement"
		}
		return "decrement statement"
	case *ast.IndexExpr:
		return "index expression"
	case *ast.InterfaceType:
		return "interface type"
	case *ast.KeyValueExpr:
		return "key/value association"
	case *ast.LabeledStmt:
		return "statement label"
	case *ast.MapType:
		return "map type"
	case *ast.Package:
		return "package"
	case *ast.ParenExpr:
		return "parenthesized " + NodeDescription(n.X)
	case *ast.RangeStmt:
		return "range loop"
	case *ast.ReturnStmt:
		return "return statement"
	case *ast.SelectStmt:
		return "select statement"
	case *ast.SelectorExpr:
		return "selector"
	case *ast.SendStmt:
		return "channel send"
	case *ast.SliceExpr:
		return "slice expression"
	case *ast.StarExpr:
		return "*-operation" // load/store expr or pointer type
	case *ast.StructType:
		return "struct type"
	case *ast.SwitchStmt:
		return "switch statement"
	case *ast.TypeAssertExpr:
		return "type assertion"
	case *ast.TypeSpec:
		return "type specification"
	case *ast.TypeSwitchStmt:
		return "type switch"
	case *ast.UnaryExpr:
		return fmt.Sprintf("unary %s operation", n.Op)
	case *ast.ValueSpec:
		return "value specification"

	}
	// Every concrete ast.Node type is handled above; reaching here
	// indicates a caller bug or a new Node type in a future Go release.
	panic(fmt.Sprintf("unexpected node type: %T", n))
}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
new file mode 100644
index 000000000..3e4b19536
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -0,0 +1,481 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package astutil contains common utilities for working with the Go AST.
+package astutil // import "golang.org/x/tools/go/ast/astutil"
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strconv"
+ "strings"
+)
+
// AddImport adds the import path to the file f, if absent.
// The import is added without a local name; it reports whether the
// file was modified (false means the import was already present).
func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
	return AddNamedImport(fset, f, "", path)
}
+
// AddNamedImport adds the import with the given name and path to the file f, if absent.
// If name is not empty, it is used to rename the import.
// It reports whether the file was modified.
//
// For example, calling
//	AddNamedImport(fset, f, "pathpkg", "path")
// adds
//	import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
	if imports(f, name, path) {
		return false
	}

	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(path),
		},
	}
	if name != "" {
		newImport.Name = &ast.Ident{Name: name}
	}

	// Find an import decl to add to.
	// The goal is to find an existing import
	// whose import path has the longest shared
	// prefix with path.
	var (
		bestMatch  = -1 // length of longest shared prefix
		lastImport = -1 // index in f.Decls of the file's final import decl
		impDecl    *ast.GenDecl // import decl containing the best match
		impIndex   = -1 // spec index in impDecl containing the best match

		isThirdPartyPath = isThirdParty(path)
	)
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			lastImport = i
			// Do not add to import "C", to avoid disrupting the
			// association with its doc comment, breaking cgo.
			if declImports(gen, "C") {
				continue
			}

			// Match an empty import decl if that's all that is available.
			if len(gen.Specs) == 0 && bestMatch == -1 {
				impDecl = gen
			}

			// Compute longest shared prefix with imports in this group and find best
			// matched import spec.
			// 1. Always prefer import spec with longest shared prefix.
			// 2. While match length is 0,
			// - for stdlib package: prefer first import spec.
			// - for third party package: prefer first third party import spec.
			// We cannot use last import spec as best match for third party package
			// because grouped imports are usually placed last by goimports -local
			// flag.
			// See issue #19190.
			seenAnyThirdParty := false
			for j, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				p := importPath(impspec)
				n := matchLen(p, path)
				if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
					bestMatch = n
					impDecl = gen
					impIndex = j
				}
				seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
			}
		}
	}

	// If no import decl found, add one after the last import.
	if impDecl == nil {
		impDecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		if lastImport >= 0 {
			impDecl.TokPos = f.Decls[lastImport].End()
		} else {
			// There are no existing imports.
			// Our new import, preceded by a blank line, goes after the package declaration
			// and after the comment, if any, that starts on the same line as the
			// package declaration.
			impDecl.TokPos = f.Package

			file := fset.File(f.Package)
			pkgLine := file.Line(f.Package)
			for _, c := range f.Comments {
				if file.Line(c.Pos()) > pkgLine {
					break
				}
				// +2 for a blank line
				impDecl.TokPos = c.End() + 2
			}
		}
		// Insert impDecl into f.Decls at position lastImport+1
		// (position 0 when the file had no imports at all).
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
		f.Decls[lastImport+1] = impDecl
	}

	// Insert new import at insertAt.
	insertAt := 0
	if impIndex >= 0 {
		// insert after the found import
		insertAt = impIndex + 1
	}
	impDecl.Specs = append(impDecl.Specs, nil)
	copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
	impDecl.Specs[insertAt] = newImport
	pos := impDecl.Pos()
	if insertAt > 0 {
		// If there is a comment after an existing import, preserve the comment
		// position by adding the new import after the comment.
		if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
			pos = spec.Comment.End()
		} else {
			// Assign same position as the previous import,
			// so that the sorter sees it as being in the same block.
			pos = impDecl.Specs[insertAt-1].Pos()
		}
	}
	if newImport.Name != nil {
		newImport.Name.NamePos = pos
	}
	newImport.Path.ValuePos = pos
	newImport.EndPos = pos

	// Clean up parens. impDecl contains at least one spec.
	if len(impDecl.Specs) == 1 {
		// Remove unneeded parens.
		impDecl.Lparen = token.NoPos
	} else if !impDecl.Lparen.IsValid() {
		// impDecl needs parens added.
		impDecl.Lparen = impDecl.Specs[0].Pos()
	}

	f.Imports = append(f.Imports, newImport)

	// Nothing to merge if the file has at most one declaration.
	if len(f.Decls) <= 1 {
		return true
	}

	// Merge all the import declarations into the first one.
	var first *ast.GenDecl
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
			continue
		}
		if first == nil {
			first = gen
			continue // Don't touch the first one.
		}
		// We now know there is more than one package in this import
		// declaration. Ensure that it ends up parenthesized.
		first.Lparen = first.Pos()
		// Move the imports of the other import declaration to the first one.
		for _, spec := range gen.Specs {
			spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
			first.Specs = append(first.Specs, spec)
		}
		// Delete the now-empty declaration and re-examine this index.
		f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		i--
	}

	return true
}
+
// isThirdParty reports whether importPath looks like a third-party
// (non-standard-library) package path. Hosted paths almost always
// contain a dot (".com", ".org", ...), standard library paths never do.
// This logic is taken from golang.org/x/tools/imports package.
func isThirdParty(importPath string) bool {
	return strings.ContainsRune(importPath, '.')
}
+
// DeleteImport deletes the import path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
// It reports whether the file was modified. Only unnamed imports match.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
	return DeleteNamedImport(fset, f, "", path)
}
+
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
// It reports whether the file was modified, and also removes comments
// attached to the deleted specs and closes up the blank lines they leave.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec
	var delcomments []*ast.CommentGroup

	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if importName(impspec) != name || importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				// Exactly one spec remains: its comments and the
				// deleted spec's comments must be cleaned up.
				if impspec.Doc != nil {
					delcomments = append(delcomments, impspec.Doc)
				}
				if impspec.Comment != nil {
					delcomments = append(delcomments, impspec.Comment)
				}
				for _, cg := range f.Comments {
					// Found comment on the same line as the import spec.
					if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
						delcomments = append(delcomments, cg)
						break
					}
				}

				spec := gen.Specs[0].(*ast.ImportSpec)

				// Move the documentation right after the import decl.
				if spec.Doc != nil {
					for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
						fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
					}
				}
				for _, cg := range f.Comments {
					if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
						for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
							fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
						}
						break
					}
				}
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else if line != fset.File(gen.Rparen).LineCount() {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			// Re-examine index j: specs shifted left after the deletion.
			j--
		}
	}

	// Delete imports from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	// Delete comments from f.Comments.
	for i := 0; i < len(f.Comments); i++ {
		cg := f.Comments[i]
		for j, del := range delcomments {
			if cg == del {
				copy(f.Comments[i:], f.Comments[i+1:])
				f.Comments = f.Comments[:len(f.Comments)-1]
				copy(delcomments[j:], delcomments[j+1:])
				delcomments = delcomments[:len(delcomments)-1]
				i--
				break
			}
		}
	}

	// Sanity check: every spec removed from Decls must also have been
	// present in (and removed from) f.Imports.
	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
+
+// RewriteImport rewrites any import of path oldPath to path newPath.
+func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
+ for _, imp := range f.Imports {
+ if importPath(imp) == oldPath {
+ rewrote = true
+ // record old End, because the default is to compute
+ // it using the length of imp.Path.Value.
+ imp.EndPos = imp.End()
+ imp.Path.Value = strconv.Quote(newPath)
+ }
+ }
+ return
+}
+
+// UsesImport reports whether a given import is used.
+func UsesImport(f *ast.File, path string) (used bool) {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return
+ }
+
+ name := spec.Name.String()
+ switch name {
+ case "<nil>":
+ // If the package name is not explicitly specified,
+ // make an educated guess. This is not guaranteed to be correct.
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 {
+ name = path
+ } else {
+ name = path[lastSlash+1:]
+ }
+ case "_", ".":
+ // Not sure if this import is used - err on the side of caution.
+ return true
+ }
+
+ ast.Walk(visitFn(func(n ast.Node) {
+ sel, ok := n.(*ast.SelectorExpr)
+ if ok && isTopName(sel.X, name) {
+ used = true
+ }
+ }), f)
+
+ return
+}
+
+type visitFn func(node ast.Node)
+
+func (fn visitFn) Visit(node ast.Node) ast.Visitor {
+ fn(node)
+ return fn
+}
+
+// imports reports whether f has an import with the specified name and path.
+func imports(f *ast.File, name, path string) bool {
+ for _, s := range f.Imports {
+ if importName(s) == name && importPath(s) == path {
+ return true
+ }
+ }
+ return false
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if importPath(s) == path {
+ return s
+ }
+ }
+ return nil
+}
+
+// importName returns the name of s,
+// or "" if the import is not named.
+func importName(s *ast.ImportSpec) string {
+ if s.Name == nil {
+ return ""
+ }
+ return s.Name.Name
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ return ""
+ }
+ return t
+}
+
+// declImports reports whether gen contains an import of path.
+func declImports(gen *ast.GenDecl, path string) bool {
+ if gen.Tok != token.IMPORT {
+ return false
+ }
+ for _, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if importPath(impspec) == path {
+ return true
+ }
+ }
+ return false
+}
+
// matchLen returns the length of the longest path segment prefix
// shared by x and y, i.e. the number of '/' separators inside the
// longest common byte prefix.
func matchLen(x, y string) int {
	limit := len(x)
	if len(y) < limit {
		limit = len(y)
	}
	segments := 0
	for i := 0; i < limit; i++ {
		if x[i] != y[i] {
			break
		}
		if x[i] == '/' {
			segments++
		}
	}
	return segments
}
+
+// isTopName returns true if n is a top-level unresolved identifier with the given name.
+func isTopName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.Name == name && id.Obj == nil
+}
+
+// Imports returns the file imports grouped by paragraph.
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
+ var groups [][]*ast.ImportSpec
+
+ for _, decl := range f.Decls {
+ genDecl, ok := decl.(*ast.GenDecl)
+ if !ok || genDecl.Tok != token.IMPORT {
+ break
+ }
+
+ group := []*ast.ImportSpec{}
+
+ var lastLine int
+ for _, spec := range genDecl.Specs {
+ importSpec := spec.(*ast.ImportSpec)
+ pos := importSpec.Path.ValuePos
+ line := fset.Position(pos).Line
+ if lastLine > 0 && pos > 0 && line-lastLine > 1 {
+ groups = append(groups, group)
+ group = []*ast.ImportSpec{}
+ }
+ group = append(group, importSpec)
+ lastLine = line
+ }
+ groups = append(groups, group)
+ }
+
+ return groups
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
new file mode 100644
index 000000000..cf72ea990
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -0,0 +1,477 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+import (
+ "fmt"
+ "go/ast"
+ "reflect"
+ "sort"
+)
+
+// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
+// before and/or after the node's children, using a Cursor describing
+// the current node and providing operations on it.
+//
+// The return value of ApplyFunc controls the syntax tree traversal.
+// See Apply for details.
+type ApplyFunc func(*Cursor) bool
+
+// Apply traverses a syntax tree recursively, starting with root,
+// and calling pre and post for each node as described below.
+// Apply returns the syntax tree, possibly modified.
+//
+// If pre is not nil, it is called for each node before the node's
+// children are traversed (pre-order). If pre returns false, no
+// children are traversed, and post is not called for that node.
+//
+// If post is not nil, and a prior call of pre didn't return false,
+// post is called for each node after its children are traversed
+// (post-order). If post returns false, traversal is terminated and
+// Apply returns immediately.
+//
+// Only fields that refer to AST nodes are considered children;
+// i.e., token.Pos, Scopes, Objects, and fields of basic types
+// (strings, etc.) are ignored.
+//
+// Children are traversed in the order in which they appear in the
+// respective node's struct definition. A package's files are
+// traversed in the filenames' alphabetical order.
+//
+func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
+	// The synthetic parent node allows the root itself to be replaced
+	// via Cursor.Replace; the (possibly new) root is read back in the defer.
+	parent := &struct{ ast.Node }{root}
+	defer func() {
+		// apply panics with the abort sentinel when post returns false;
+		// any other panic is a genuine failure and is re-raised.
+		if r := recover(); r != nil && r != abort {
+			panic(r)
+		}
+		result = parent.Node
+	}()
+	a := &application{pre: pre, post: post}
+	a.apply(parent, "Node", nil, root)
+	return
+}
+
+var abort = new(int) // singleton, to signal termination of Apply
+
+// A Cursor describes a node encountered during Apply.
+// Information about the node and its parent is available
+// from the Node, Parent, Name, and Index methods.
+//
+// If p is a variable of type and value of the current parent node
+// c.Parent(), and f is the field identifier with name c.Name(),
+// the following invariants hold:
+//
+//	p.f            == c.Node()  if c.Index() <  0
+//	p.f[c.Index()] == c.Node()  if c.Index() >= 0
+//
+// The methods Replace, Delete, InsertBefore, and InsertAfter
+// can be used to change the AST without disrupting Apply.
+type Cursor struct {
+	parent ast.Node
+	name   string    // name of the parent field holding node
+	iter   *iterator // valid if non-nil (node is a slice element)
+	node   ast.Node
+}
+
+// Node returns the current Node.
+func (c *Cursor) Node() ast.Node { return c.node }
+
+// Parent returns the parent of the current Node.
+func (c *Cursor) Parent() ast.Node { return c.parent }
+
+// Name returns the name of the parent Node field that contains the current Node.
+// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
+// the filename for the current Node.
+func (c *Cursor) Name() string { return c.name }
+
+// Index reports the index >= 0 of the current Node in the slice of Nodes that
+// contains it, or a value < 0 if the current Node is not part of a slice.
+// The index of the current node changes if InsertBefore is called while
+// processing the current node.
+func (c *Cursor) Index() int {
+	if c.iter != nil {
+		return c.iter.index
+	}
+	return -1
+}
+
+// field returns the current node's parent field value,
+// looked up by reflection on the parent struct.
+func (c *Cursor) field() reflect.Value {
+	return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
+}
+
+// Replace replaces the current Node with n.
+// The replacement node is not walked by Apply.
+func (c *Cursor) Replace(n ast.Node) {
+	if _, ok := c.node.(*ast.File); ok {
+		// Special case: a File lives in its Package's Files map
+		// (keyed by filename), not in a struct field.
+		file, ok := n.(*ast.File)
+		if !ok {
+			panic("attempt to replace *ast.File with non-*ast.File")
+		}
+		c.parent.(*ast.Package).Files[c.name] = file
+		return
+	}
+
+	v := c.field()
+	if i := c.Index(); i >= 0 {
+		v = v.Index(i)
+	}
+	v.Set(reflect.ValueOf(n))
+}
+
+// Delete deletes the current Node from its containing slice.
+// If the current Node is not part of a slice, Delete panics.
+// As a special case, if the current node is a package file,
+// Delete removes it from the package's Files map.
+func (c *Cursor) Delete() {
+	if _, ok := c.node.(*ast.File); ok {
+		delete(c.parent.(*ast.Package).Files, c.name)
+		return
+	}
+
+	i := c.Index()
+	if i < 0 {
+		panic("Delete node not contained in slice")
+	}
+	v := c.field()
+	l := v.Len()
+	// Shift the trailing elements left by one, zero the vacated tail slot
+	// (so it does not retain a reference), and shrink the slice.
+	reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
+	v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
+	v.SetLen(l - 1)
+	// Step 0 instead of 1 so iteration revisits index i (now the next element).
+	c.iter.step--
+}
+
+// InsertAfter inserts n after the current Node in its containing slice.
+// If the current Node is not part of a slice, InsertAfter panics.
+// Apply does not walk n.
+func (c *Cursor) InsertAfter(n ast.Node) {
+	i := c.Index()
+	if i < 0 {
+		panic("InsertAfter node not contained in slice")
+	}
+	v := c.field()
+	// Grow the slice by one zero element, then shift everything after
+	// the current node right to open a slot at i+1.
+	v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+	l := v.Len()
+	reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
+	v.Index(i + 1).Set(reflect.ValueOf(n))
+	// Step 2 instead of 1 so iteration skips the inserted node.
+	c.iter.step++
+}
+
+// InsertBefore inserts n before the current Node in its containing slice.
+// If the current Node is not part of a slice, InsertBefore panics.
+// Apply will not walk n.
+func (c *Cursor) InsertBefore(n ast.Node) {
+	i := c.Index()
+	if i < 0 {
+		panic("InsertBefore node not contained in slice")
+	}
+	v := c.field()
+	// Grow the slice by one zero element, then shift the current node
+	// and everything after it right to open a slot at i.
+	v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+	l := v.Len()
+	reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
+	v.Index(i).Set(reflect.ValueOf(n))
+	// Advance the index so the cursor still refers to the same (current) node.
+	c.iter.index++
+}
+
+// application carries all the shared data so we can pass it around cheaply.
+type application struct {
+	pre, post ApplyFunc
+	cursor    Cursor   // reused for every node to avoid per-node allocation
+	iter      iterator // reused for every slice walk to avoid per-list allocation
+}
+
+// apply visits node n (stored in field name of parent, at slice position
+// iter if non-nil), invoking the pre/post callbacks and recursing into
+// every child field that is itself an AST node.
+func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
+	// convert typed nil into untyped nil
+	if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
+		n = nil
+	}
+
+	// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
+	saved := a.cursor
+	a.cursor.parent = parent
+	a.cursor.name = name
+	a.cursor.iter = iter
+	a.cursor.node = n
+
+	// pre returning false prunes this subtree (post is not called either).
+	if a.pre != nil && !a.pre(&a.cursor) {
+		a.cursor = saved
+		return
+	}
+
+	// walk children
+	// (the order of the cases matches the order of the corresponding node types in go/ast)
+	switch n := n.(type) {
+	case nil:
+		// nothing to do
+
+	// Comments and fields
+	case *ast.Comment:
+		// nothing to do
+
+	case *ast.CommentGroup:
+		if n != nil {
+			a.applyList(n, "List")
+		}
+
+	case *ast.Field:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.applyList(n, "Names")
+		a.apply(n, "Type", nil, n.Type)
+		a.apply(n, "Tag", nil, n.Tag)
+		a.apply(n, "Comment", nil, n.Comment)
+
+	case *ast.FieldList:
+		a.applyList(n, "List")
+
+	// Expressions
+	case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+		// nothing to do
+
+	case *ast.Ellipsis:
+		a.apply(n, "Elt", nil, n.Elt)
+
+	case *ast.FuncLit:
+		a.apply(n, "Type", nil, n.Type)
+		a.apply(n, "Body", nil, n.Body)
+
+	case *ast.CompositeLit:
+		a.apply(n, "Type", nil, n.Type)
+		a.applyList(n, "Elts")
+
+	case *ast.ParenExpr:
+		a.apply(n, "X", nil, n.X)
+
+	case *ast.SelectorExpr:
+		a.apply(n, "X", nil, n.X)
+		a.apply(n, "Sel", nil, n.Sel)
+
+	case *ast.IndexExpr:
+		a.apply(n, "X", nil, n.X)
+		a.apply(n, "Index", nil, n.Index)
+
+	case *ast.SliceExpr:
+		a.apply(n, "X", nil, n.X)
+		a.apply(n, "Low", nil, n.Low)
+		a.apply(n, "High", nil, n.High)
+		a.apply(n, "Max", nil, n.Max)
+
+	case *ast.TypeAssertExpr:
+		a.apply(n, "X", nil, n.X)
+		a.apply(n, "Type", nil, n.Type)
+
+	case *ast.CallExpr:
+		a.apply(n, "Fun", nil, n.Fun)
+		a.applyList(n, "Args")
+
+	case *ast.StarExpr:
+		a.apply(n, "X", nil, n.X)
+
+	case *ast.UnaryExpr:
+		a.apply(n, "X", nil, n.X)
+
+	case *ast.BinaryExpr:
+		a.apply(n, "X", nil, n.X)
+		a.apply(n, "Y", nil, n.Y)
+
+	case *ast.KeyValueExpr:
+		a.apply(n, "Key", nil, n.Key)
+		a.apply(n, "Value", nil, n.Value)
+
+	// Types
+	case *ast.ArrayType:
+		a.apply(n, "Len", nil, n.Len)
+		a.apply(n, "Elt", nil, n.Elt)
+
+	case *ast.StructType:
+		a.apply(n, "Fields", nil, n.Fields)
+
+	case *ast.FuncType:
+		a.apply(n, "Params", nil, n.Params)
+		a.apply(n, "Results", nil, n.Results)
+
+	case *ast.InterfaceType:
+		a.apply(n, "Methods", nil, n.Methods)
+
+	case *ast.MapType:
+		a.apply(n, "Key", nil, n.Key)
+		a.apply(n, "Value", nil, n.Value)
+
+	case *ast.ChanType:
+		a.apply(n, "Value", nil, n.Value)
+
+	// Statements
+	case *ast.BadStmt:
+		// nothing to do
+
+	case *ast.DeclStmt:
+		a.apply(n, "Decl", nil, n.Decl)
+
+	case *ast.EmptyStmt:
+		// nothing to do
+
+	case *ast.LabeledStmt:
+		a.apply(n, "Label", nil, n.Label)
+		a.apply(n, "Stmt", nil, n.Stmt)
+
+	case *ast.ExprStmt:
+		a.apply(n, "X", nil, n.X)
+
+	case *ast.SendStmt:
+		a.apply(n, "Chan", nil, n.Chan)
+		a.apply(n, "Value", nil, n.Value)
+
+	case *ast.IncDecStmt:
+		a.apply(n, "X", nil, n.X)
+
+	case *ast.AssignStmt:
+		a.applyList(n, "Lhs")
+		a.applyList(n, "Rhs")
+
+	case *ast.GoStmt:
+		a.apply(n, "Call", nil, n.Call)
+
+	case *ast.DeferStmt:
+		a.apply(n, "Call", nil, n.Call)
+
+	case *ast.ReturnStmt:
+		a.applyList(n, "Results")
+
+	case *ast.BranchStmt:
+		a.apply(n, "Label", nil, n.Label)
+
+	case *ast.BlockStmt:
+		a.applyList(n, "List")
+
+	case *ast.IfStmt:
+		a.apply(n, "Init", nil, n.Init)
+		a.apply(n, "Cond", nil, n.Cond)
+		a.apply(n, "Body", nil, n.Body)
+		a.apply(n, "Else", nil, n.Else)
+
+	case *ast.CaseClause:
+		a.applyList(n, "List")
+		a.applyList(n, "Body")
+
+	case *ast.SwitchStmt:
+		a.apply(n, "Init", nil, n.Init)
+		a.apply(n, "Tag", nil, n.Tag)
+		a.apply(n, "Body", nil, n.Body)
+
+	case *ast.TypeSwitchStmt:
+		a.apply(n, "Init", nil, n.Init)
+		a.apply(n, "Assign", nil, n.Assign)
+		a.apply(n, "Body", nil, n.Body)
+
+	case *ast.CommClause:
+		a.apply(n, "Comm", nil, n.Comm)
+		a.applyList(n, "Body")
+
+	case *ast.SelectStmt:
+		a.apply(n, "Body", nil, n.Body)
+
+	case *ast.ForStmt:
+		a.apply(n, "Init", nil, n.Init)
+		a.apply(n, "Cond", nil, n.Cond)
+		a.apply(n, "Post", nil, n.Post)
+		a.apply(n, "Body", nil, n.Body)
+
+	case *ast.RangeStmt:
+		a.apply(n, "Key", nil, n.Key)
+		a.apply(n, "Value", nil, n.Value)
+		a.apply(n, "X", nil, n.X)
+		a.apply(n, "Body", nil, n.Body)
+
+	// Declarations
+	case *ast.ImportSpec:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.apply(n, "Name", nil, n.Name)
+		a.apply(n, "Path", nil, n.Path)
+		a.apply(n, "Comment", nil, n.Comment)
+
+	case *ast.ValueSpec:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.applyList(n, "Names")
+		a.apply(n, "Type", nil, n.Type)
+		a.applyList(n, "Values")
+		a.apply(n, "Comment", nil, n.Comment)
+
+	case *ast.TypeSpec:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.apply(n, "Name", nil, n.Name)
+		a.apply(n, "Type", nil, n.Type)
+		a.apply(n, "Comment", nil, n.Comment)
+
+	case *ast.BadDecl:
+		// nothing to do
+
+	case *ast.GenDecl:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.applyList(n, "Specs")
+
+	case *ast.FuncDecl:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.apply(n, "Recv", nil, n.Recv)
+		a.apply(n, "Name", nil, n.Name)
+		a.apply(n, "Type", nil, n.Type)
+		a.apply(n, "Body", nil, n.Body)
+
+	// Files and packages
+	case *ast.File:
+		a.apply(n, "Doc", nil, n.Doc)
+		a.apply(n, "Name", nil, n.Name)
+		a.applyList(n, "Decls")
+		// Don't walk n.Comments; they have either been walked already if
+		// they are Doc comments, or they can be easily walked explicitly.
+
+	case *ast.Package:
+		// collect and sort names for reproducible behavior
+		var names []string
+		for name := range n.Files {
+			names = append(names, name)
+		}
+		sort.Strings(names)
+		for _, name := range names {
+			a.apply(n, name, nil, n.Files[name])
+		}
+
+	default:
+		panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+	}
+
+	// post returning false terminates the whole traversal; Apply's
+	// deferred recover catches the abort sentinel.
+	if a.post != nil && !a.post(&a.cursor) {
+		panic(abort)
+	}
+
+	a.cursor = saved
+}
+
+// An iterator controls iteration over a slice of nodes.
+type iterator struct {
+	index, step int
+}
+
+// applyList applies a.apply to each element of the slice field of parent
+// named name, tolerating insertions and deletions performed through the
+// Cursor during the walk.
+func (a *application) applyList(parent ast.Node, name string) {
+	// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
+	saved := a.iter
+	a.iter.index = 0
+	for {
+		// must reload parent.name each time, since cursor modifications might change it
+		v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
+		if a.iter.index >= v.Len() {
+			break
+		}
+
+		// element x may be nil in a bad AST - be cautious
+		var x ast.Node
+		if e := v.Index(a.iter.index); e.IsValid() {
+			x = e.Interface().(ast.Node)
+		}
+
+		// step stays 1 unless the callback deleted the node (0) or
+		// inserted one after it (2) via the Cursor.
+		a.iter.step = 1
+		a.apply(parent, name, &a.iter, x)
+		a.iter.index += a.iter.step
+	}
+	a.iter = saved
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go
new file mode 100644
index 000000000..763062982
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go
@@ -0,0 +1,14 @@
+package astutil
+
+import "go/ast"
+
+// Unparen returns e with any enclosing parentheses stripped.
+// It repeatedly unwraps *ast.ParenExpr until a non-paren expression remains.
+func Unparen(e ast.Expr) ast.Expr {
+	for {
+		p, ok := e.(*ast.ParenExpr)
+		if !ok {
+			return e
+		}
+		e = p.X
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
new file mode 100644
index 000000000..98b3987b9
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -0,0 +1,109 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gcexportdata provides functions for locating, reading, and
+// writing export data files containing type information produced by the
+// gc compiler. This package supports go1.7 export data format and all
+// later versions.
+//
+// Although it might seem convenient for this package to live alongside
+// go/types in the standard library, this would cause version skew
+// problems for developer tools that use it, since they must be able to
+// consume the outputs of the gc compiler both before and after a Go
+// update such as from Go 1.7 to Go 1.8. Because this package lives in
+// golang.org/x/tools, sites can update their version of this repo some
+// time before the Go 1.8 release and rebuild and redeploy their
+// developer tools, which will then be able to consume both Go 1.7 and
+// Go 1.8 export data files, so they will work before and after the
+// Go update. (See discussion at https://golang.org/issue/15651.)
+//
+package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "io/ioutil"
+
+ "golang.org/x/tools/go/internal/gcimporter"
+)
+
+// Find returns the name of an object (.o) or archive (.a) file
+// containing type information for the specified import path,
+// using the workspace layout conventions of go/build.
+// If no file was found, an empty filename is returned.
+//
+// A relative srcDir is interpreted relative to the current working directory.
+//
+// Find also returns the package's resolved (canonical) import path,
+// reflecting the effects of srcDir and vendoring on importPath.
+func Find(importPath, srcDir string) (filename, path string) {
+	// Delegates to the vendored gcimporter implementation.
+	return gcimporter.FindPkg(importPath, srcDir)
+}
+
+// NewReader returns a reader for the export data section of an object
+// (.o) or archive (.a) file read from r. The new reader may provide
+// additional trailing data beyond the end of the export data.
+func NewReader(r io.Reader) (io.Reader, error) {
+	buf := bufio.NewReader(r)
+	// FindExportData advances buf past the file header to the start of
+	// the export data section.
+	_, err := gcimporter.FindExportData(buf)
+	// If we ever switch to a zip-like archive format with the ToC
+	// at the end, we can return the correct portion of export data,
+	// but for now we must return the entire rest of the file.
+	return buf, err
+}
+
+// Read reads export data from in, decodes it, and returns type
+// information for the package.
+// The package name is specified by path.
+// File position information is added to fset.
+//
+// Read may inspect and add to the imports map to ensure that references
+// within the export data to other packages are consistent. The caller
+// must ensure that imports[path] does not exist, or exists but is
+// incomplete (see types.Package.Complete), and Read inserts the
+// resulting package into this map entry.
+//
+// On return, the state of the reader is undefined.
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
+	data, err := ioutil.ReadAll(in)
+	if err != nil {
+		return nil, fmt.Errorf("reading export data for %q: %v", path, err)
+	}
+
+	// "!<arch>" is the archive magic; archives must be unwrapped first.
+	if bytes.HasPrefix(data, []byte("!<arch>")) {
+		return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
+	}
+
+	// The App Engine Go runtime v1.6 uses the old export data format.
+	// TODO(adonovan): delete once v1.7 has been around for a while.
+	if bytes.HasPrefix(data, []byte("package ")) {
+		return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
+	}
+
+	// The indexed export format starts with an 'i'; the older
+	// binary export format starts with a 'c', 'd', or 'v'
+	// (from "version"). Select appropriate importer.
+	if len(data) > 0 && data[0] == 'i' {
+		// data[1:] skips the 'i' format marker byte.
+		_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+		return pkg, err
+	}
+
+	_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+	return pkg, err
+}
+
+// Write writes encoded type information for the specified package to out.
+// The FileSet provides file position information for named objects.
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+	// Encode fully in memory, then emit with a single Write call.
+	b, err := gcimporter.BExportData(fset, pkg)
+	if err != nil {
+		return err
+	}
+	_, err = out.Write(b)
+	return err
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
new file mode 100644
index 000000000..efe221e7e
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcexportdata
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "os"
+)
+
+// NewImporter returns a new instance of the types.Importer interface
+// that reads type information from export data files written by gc.
+// The Importer also satisfies types.ImporterFrom.
+//
+// Export data files are located using "go build" workspace conventions
+// and the build.Default context.
+//
+// Use this importer instead of go/importer.For("gc", ...) to avoid the
+// version-skew problems described in the documentation of this package,
+// or to control the FileSet or access the imports map populated during
+// package loading.
+//
+func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
+	return importer{fset, imports}
+}
+
+// importer implements types.Importer and types.ImporterFrom on top of
+// gc export data files, caching loaded packages in imports.
+type importer struct {
+	fset    *token.FileSet
+	imports map[string]*types.Package
+}
+
+// Import loads the package with the given import path, resolved
+// relative to the current working directory (empty srcDir).
+func (imp importer) Import(importPath string) (*types.Package, error) {
+	return imp.ImportFrom(importPath, "", 0)
+}
+
+// ImportFrom locates the export data file for importPath relative to
+// srcDir, reads it, and returns the package's type information,
+// satisfying any cache hit from imp.imports first.
+func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
+	filename, path := Find(importPath, srcDir)
+	if filename == "" {
+		if importPath == "unsafe" {
+			// Even for unsafe, call Find first in case
+			// the package was vendored.
+			return types.Unsafe, nil
+		}
+		return nil, fmt.Errorf("can't find import: %s", importPath)
+	}
+
+	if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
+		return pkg, nil // cache hit
+	}
+
+	// open file
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		f.Close()
+		if err != nil {
+			// add file name to error
+			err = fmt.Errorf("reading export data: %s: %v", filename, err)
+		}
+	}()
+
+	r, err := NewReader(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return Read(r, imp.fset, imp.imports, path)
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go
new file mode 100644
index 000000000..2713dce64
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/main.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// The gcexportdata command is a diagnostic tool that displays the
+// contents of gc export data files.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+var packageFlag = flag.String("package", "", "alternative package to print")
+
+// main reads the export data file named on the command line, decodes it,
+// and prints every package-level declaration (and, for types, each method)
+// with source positions. With -package, an indirectly mentioned package
+// is printed instead of the primary one.
+func main() {
+	log.SetPrefix("gcexportdata: ")
+	log.SetFlags(0)
+	flag.Usage = func() {
+		fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
+	}
+	flag.Parse()
+	if flag.NArg() != 1 {
+		flag.Usage()
+		os.Exit(2)
+	}
+	filename := flag.Args()[0]
+
+	f, err := os.Open(filename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	r, err := gcexportdata.NewReader(f)
+	if err != nil {
+		log.Fatalf("%s: %s", filename, err)
+	}
+
+	// Decode the package.
+	const primary = "<primary>"
+	imports := make(map[string]*types.Package)
+	fset := token.NewFileSet()
+	pkg, err := gcexportdata.Read(r, fset, imports, primary)
+	if err != nil {
+		log.Fatalf("%s: %s", filename, err)
+	}
+
+	// Optionally select an indirectly mentioned package.
+	if *packageFlag != "" {
+		pkg = imports[*packageFlag]
+		if pkg == nil {
+			// List the packages the file does mention, to help the user.
+			fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
+				filename, *packageFlag)
+			for p := range imports {
+				if p != primary {
+					fmt.Fprintf(os.Stderr, "\t%s\n", p)
+				}
+			}
+			os.Exit(1)
+		}
+	}
+
+	// Print all package-level declarations, including non-exported ones.
+	fmt.Printf("package %s\n", pkg.Name())
+	for _, imp := range pkg.Imports() {
+		fmt.Printf("import %q\n", imp.Path())
+	}
+	// qual suppresses the qualifier for names in the package being printed.
+	qual := func(p *types.Package) string {
+		if pkg == p {
+			return ""
+		}
+		return p.Name()
+	}
+	scope := pkg.Scope()
+	for _, name := range scope.Names() {
+		obj := scope.Lookup(name)
+		fmt.Printf("%s: %s\n",
+			fset.Position(obj.Pos()),
+			types.ObjectString(obj, qual))
+
+		// For types, print each method.
+		if _, ok := obj.(*types.TypeName); ok {
+			for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
+				fmt.Printf("%s: %s\n",
+					fset.Position(method.Obj().Pos()),
+					types.SelectionString(method, qual))
+			}
+		}
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
new file mode 100644
index 000000000..a807d0aaa
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
@@ -0,0 +1,852 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "math"
+ "math/big"
+ "sort"
+ "strings"
+)
+
+// If debugFormat is set, each integer and string value is preceded by a marker
+// and position information in the encoding. This mechanism permits an importer
+// to recognize immediately when it is out of sync. The importer recognizes this
+// mode automatically (i.e., it can import export data produced with debugging
+// support even if debugFormat is not set at the time of import). This mode will
+// lead to massively larger export data (by a factor of 2 to 3) and should only
+// be enabled during development and debugging.
+//
+// NOTE: This flag is the first flag to enable if importing dies because of
+// (suspected) format errors, and whenever a change is made to the format.
+const debugFormat = false // default: false
+
+// If trace is set, debugging output is printed to stdout.
+const trace = false // default: false
+
+// Current export format version. Increase with each format change.
+// Note: The latest binary (non-indexed) export format is at version 6.
+// This exporter is still at level 4, but it doesn't matter since
+// the binary importer can handle older versions just fine.
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
+// 4: type name objects support type aliases, uses aliasTag
+// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 4
+
+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+// An exporter encodes a package's type information in the binary export
+// data format, accumulating the encoded bytes in out.
+type exporter struct {
+	fset *token.FileSet
+	out  bytes.Buffer
+
+	// object -> index maps, indexed in order of serialization
+	strIndex map[string]int
+	pkgIndex map[*types.Package]int
+	typIndex map[types.Type]int
+
+	// position encoding (deltas against the previously emitted position)
+	posInfoFormat bool
+	prevFile      string
+	prevLine      int
+
+	// debugging support
+	written int // bytes written
+	indent  int // for trace
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+// internalErrorf constructs an internalError in the manner of fmt.Errorf.
+func internalErrorf(format string, args ...interface{}) error {
+	return internalError(fmt.Sprintf(format, args...))
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+	defer func() {
+		// internalError panics become ordinary returned errors; any
+		// other panic indicates a bug and is re-raised.
+		if e := recover(); e != nil {
+			if ierr, ok := e.(internalError); ok {
+				err = ierr
+				return
+			}
+			// Not an internal error; panic again.
+			panic(e)
+		}
+	}()
+
+	p := exporter{
+		fset:          fset,
+		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
+		pkgIndex:      make(map[*types.Package]int),
+		typIndex:      make(map[types.Type]int),
+		posInfoFormat: true, // TODO(gri) might become a flag, eventually
+	}
+
+	// write version info
+	// The version string must start with "version %d" where %d is the version
+	// number. Additional debugging information may follow after a blank; that
+	// text is ignored by the importer.
+	p.rawStringln(fmt.Sprintf("version %d", exportVersion))
+	var debug string
+	if debugFormat {
+		debug = "debug"
+	}
+	p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
+	p.bool(trackAllTypes)
+	p.bool(p.posInfoFormat)
+
+	// --- generic export data ---
+
+	// populate type map with predeclared "known" types
+	for index, typ := range predeclared() {
+		p.typIndex[typ] = index
+	}
+	if len(p.typIndex) != len(predeclared()) {
+		return nil, internalError("duplicate entries in type map?")
+	}
+
+	// write package data
+	p.pkg(pkg, true)
+	if trace {
+		p.tracef("\n")
+	}
+
+	// write objects (only exported names are part of export data)
+	objcount := 0
+	scope := pkg.Scope()
+	for _, name := range scope.Names() {
+		if !ast.IsExported(name) {
+			continue
+		}
+		if trace {
+			p.tracef("\n")
+		}
+		p.obj(scope.Lookup(name))
+		objcount++
+	}
+
+	// indicate end of list
+	if trace {
+		p.tracef("\n")
+	}
+	p.tag(endTag)
+
+	// for self-verification only (redundant)
+	p.int(objcount)
+
+	if trace {
+		p.tracef("\n")
+	}
+
+	// --- end of export data ---
+
+	return p.out.Bytes(), nil
+}
+
+// pkg writes a reference to pkg: a back-reference index if it was
+// emitted before, otherwise a package tag followed by its name and
+// (unless emptypath) its path.
+func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
+	if pkg == nil {
+		panic(internalError("unexpected nil pkg"))
+	}
+
+	// if we saw the package before, write its index (>= 0)
+	if i, ok := p.pkgIndex[pkg]; ok {
+		p.index('P', i)
+		return
+	}
+
+	// otherwise, remember the package, write the package tag (< 0) and package data
+	if trace {
+		p.tracef("P%d = { ", len(p.pkgIndex))
+		defer p.tracef("} ")
+	}
+	p.pkgIndex[pkg] = len(p.pkgIndex)
+
+	p.tag(packageTag)
+	p.string(pkg.Name())
+	if emptypath {
+		p.string("")
+	} else {
+		p.string(pkg.Path())
+	}
+}
+
+// obj writes the export data for a single package-level object:
+// a constant, type name (or alias), variable, or function.
+func (p *exporter) obj(obj types.Object) {
+	switch obj := obj.(type) {
+	case *types.Const:
+		p.tag(constTag)
+		p.pos(obj)
+		p.qualifiedName(obj)
+		p.typ(obj.Type())
+		p.value(obj.Val())
+
+	case *types.TypeName:
+		if obj.IsAlias() {
+			// Aliases carry position and name; proper type names are
+			// fully described by their (named) type below.
+			p.tag(aliasTag)
+			p.pos(obj)
+			p.qualifiedName(obj)
+		} else {
+			p.tag(typeTag)
+		}
+		p.typ(obj.Type())
+
+	case *types.Var:
+		p.tag(varTag)
+		p.pos(obj)
+		p.qualifiedName(obj)
+		p.typ(obj.Type())
+
+	case *types.Func:
+		p.tag(funcTag)
+		p.pos(obj)
+		p.qualifiedName(obj)
+		sig := obj.Type().(*types.Signature)
+		p.paramList(sig.Params(), sig.Variadic())
+		p.paramList(sig.Results(), false)
+
+	default:
+		panic(internalErrorf("unexpected object %v (%T)", obj, obj))
+	}
+}
+
+// pos writes the source position of obj as a delta-encoded
+// file/line pair against the previously emitted position.
+func (p *exporter) pos(obj types.Object) {
+	if !p.posInfoFormat {
+		return
+	}
+
+	file, line := p.fileLine(obj)
+	if file == p.prevFile {
+		// common case: write line delta
+		// delta == 0 means different file or no line change
+		delta := line - p.prevLine
+		p.int(delta)
+		if delta == 0 {
+			p.int(-1) // -1 means no file change
+		}
+	} else {
+		// different file
+		p.int(0)
+		// Encode filename as length of common prefix with previous
+		// filename, followed by (possibly empty) suffix. Filenames
+		// frequently share path prefixes, so this can save a lot
+		// of space and make export data size less dependent on file
+		// path length. The suffix is unlikely to be empty because
+		// file names tend to end in ".go".
+		n := commonPrefixLen(p.prevFile, file)
+		p.int(n)           // n >= 0
+		p.string(file[n:]) // write suffix only
+		p.prevFile = file
+		p.int(line)
+	}
+	p.prevLine = line
+}
+
+// fileLine returns the file name and line number of obj's position,
+// or zero values if the exporter has no file set.
+func (p *exporter) fileLine(obj types.Object) (file string, line int) {
+	if p.fset != nil {
+		pos := p.fset.Position(obj.Pos())
+		file = pos.Filename
+		line = pos.Line
+	}
+	return
+}
+
+// commonPrefixLen returns the length of the common byte prefix of a and b.
+func commonPrefixLen(a, b string) int {
+	// Swap so that a is the shorter string.
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	// len(a) <= len(b)
+	i := 0
+	for i < len(a) && a[i] == b[i] {
+		i++
+	}
+	return i
+}
+
+// qualifiedName writes obj's name followed by its package reference.
+func (p *exporter) qualifiedName(obj types.Object) {
+	p.string(obj.Name())
+	p.pkg(obj.Pkg(), false)
+}
+
+// typ writes type t: a back-reference index if t was emitted before,
+// otherwise a type tag followed by the type's structure.
+func (p *exporter) typ(t types.Type) {
+	if t == nil {
+		panic(internalError("nil type"))
+	}
+
+	// Possible optimization: Anonymous pointer types *T where
+	// T is a named type are common. We could canonicalize all
+	// such types *T to a single type PT = *T. This would lead
+	// to at most one *T entry in typIndex, and all future *T's
+	// would be encoded as the respective index directly. Would
+	// save 1 byte (pointerTag) per *T and reduce the typIndex
+	// size (at the cost of a canonicalization map). We can do
+	// this later, without encoding format change.
+
+	// if we saw the type before, write its index (>= 0)
+	if i, ok := p.typIndex[t]; ok {
+		p.index('T', i)
+		return
+	}
+
+	// otherwise, remember the type, write the type tag (< 0) and type data
+	if trackAllTypes {
+		if trace {
+			p.tracef("T%d = {>\n", len(p.typIndex))
+			defer p.tracef("<\n} ")
+		}
+		p.typIndex[t] = len(p.typIndex)
+	}
+
+	switch t := t.(type) {
+	case *types.Named:
+		if !trackAllTypes {
+			// if we don't track all types, track named types now
+			p.typIndex[t] = len(p.typIndex)
+		}
+
+		p.tag(namedTag)
+		p.pos(t.Obj())
+		p.qualifiedName(t.Obj())
+		p.typ(t.Underlying())
+		// Interfaces carry their methods in the type itself;
+		// only non-interface named types have associated methods.
+		if !types.IsInterface(t) {
+			p.assocMethods(t)
+		}
+
+	case *types.Array:
+		p.tag(arrayTag)
+		p.int64(t.Len())
+		p.typ(t.Elem())
+
+	case *types.Slice:
+		p.tag(sliceTag)
+		p.typ(t.Elem())
+
+	case *dddSlice:
+		p.tag(dddTag)
+		p.typ(t.elem)
+
+	case *types.Struct:
+		p.tag(structTag)
+		p.fieldList(t)
+
+	case *types.Pointer:
+		p.tag(pointerTag)
+		p.typ(t.Elem())
+
+	case *types.Signature:
+		p.tag(signatureTag)
+		p.paramList(t.Params(), t.Variadic())
+		p.paramList(t.Results(), false)
+
+	case *types.Interface:
+		p.tag(interfaceTag)
+		p.iface(t)
+
+	case *types.Map:
+		p.tag(mapTag)
+		p.typ(t.Key())
+		p.typ(t.Elem())
+
+	case *types.Chan:
+		p.tag(chanTag)
+		p.int(int(3 - t.Dir())) // hack
+		p.typ(t.Elem())
+
+	default:
+		panic(internalErrorf("unexpected type %T: %s", t, t))
+	}
+}
+
+func (p *exporter) assocMethods(named *types.Named) {
+ // Sort methods (for determinism).
+ var methods []*types.Func
+ for i := 0; i < named.NumMethods(); i++ {
+ methods = append(methods, named.Method(i))
+ }
+ sort.Sort(methodsByName(methods))
+
+ p.int(len(methods))
+
+ if trace && methods != nil {
+ p.tracef("associated methods {>\n")
+ }
+
+ for i, m := range methods {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+
+ p.pos(m)
+ name := m.Name()
+ p.string(name)
+ if !exported(name) {
+ p.pkg(m.Pkg(), false)
+ }
+
+ sig := m.Type().(*types.Signature)
+ p.paramList(types.NewTuple(sig.Recv()), false)
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+ p.int(0) // dummy value for go:nointerface pragma - ignored by importer
+ }
+
+ if trace && methods != nil {
+ p.tracef("<\n} ")
+ }
+}
+
+type methodsByName []*types.Func
+
+func (x methodsByName) Len() int { return len(x) }
+func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
+
+func (p *exporter) fieldList(t *types.Struct) {
+ if trace && t.NumFields() > 0 {
+ p.tracef("fields {>\n")
+ defer p.tracef("<\n} ")
+ }
+
+ p.int(t.NumFields())
+ for i := 0; i < t.NumFields(); i++ {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+ p.field(t.Field(i))
+ p.string(t.Tag(i))
+ }
+}
+
+func (p *exporter) field(f *types.Var) {
+ if !f.IsField() {
+ panic(internalError("field expected"))
+ }
+
+ p.pos(f)
+ p.fieldName(f)
+ p.typ(f.Type())
+}
+
+func (p *exporter) iface(t *types.Interface) {
+ // TODO(gri): enable importer to load embedded interfaces,
+ // then emit Embeddeds and ExplicitMethods separately here.
+ p.int(0)
+
+ n := t.NumMethods()
+ if trace && n > 0 {
+ p.tracef("methods {>\n")
+ defer p.tracef("<\n} ")
+ }
+ p.int(n)
+ for i := 0; i < n; i++ {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+ p.method(t.Method(i))
+ }
+}
+
+func (p *exporter) method(m *types.Func) {
+ sig := m.Type().(*types.Signature)
+ if sig.Recv() == nil {
+ panic(internalError("method expected"))
+ }
+
+ p.pos(m)
+ p.string(m.Name())
+ if m.Name() != "_" && !ast.IsExported(m.Name()) {
+ p.pkg(m.Pkg(), false)
+ }
+
+ // interface method; no need to encode receiver.
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+}
+
+func (p *exporter) fieldName(f *types.Var) {
+ name := f.Name()
+
+ if f.Anonymous() {
+ // anonymous field - we distinguish between 3 cases:
+ // 1) field name matches base type name and is exported
+ // 2) field name matches base type name and is not exported
+ // 3) field name doesn't match base type name (alias name)
+ bname := basetypeName(f.Type())
+ if name == bname {
+ if ast.IsExported(name) {
+ name = "" // 1) we don't need to know the field name or package
+ } else {
+ name = "?" // 2) use unexported name "?" to force package export
+ }
+ } else {
+ // 3) indicate alias and export name as is
+ // (this requires an extra "@" but this is a rare case)
+ p.string("@")
+ }
+ }
+
+ p.string(name)
+ if name != "" && !ast.IsExported(name) {
+ p.pkg(f.Pkg(), false)
+ }
+}
+
+func basetypeName(typ types.Type) string {
+ switch typ := deref(typ).(type) {
+ case *types.Basic:
+ return typ.Name()
+ case *types.Named:
+ return typ.Obj().Name()
+ default:
+ return "" // unnamed type
+ }
+}
+
+func (p *exporter) paramList(params *types.Tuple, variadic bool) {
+ // use negative length to indicate unnamed parameters
+ // (look at the first parameter only since either all
+ // names are present or all are absent)
+ n := params.Len()
+ if n > 0 && params.At(0).Name() == "" {
+ n = -n
+ }
+ p.int(n)
+ for i := 0; i < params.Len(); i++ {
+ q := params.At(i)
+ t := q.Type()
+ if variadic && i == params.Len()-1 {
+ t = &dddSlice{t.(*types.Slice).Elem()}
+ }
+ p.typ(t)
+ if n > 0 {
+ name := q.Name()
+ p.string(name)
+ if name != "_" {
+ p.pkg(q.Pkg(), false)
+ }
+ }
+ p.string("") // no compiler-specific info
+ }
+}
+
+func (p *exporter) value(x constant.Value) {
+ if trace {
+ p.tracef("= ")
+ }
+
+ switch x.Kind() {
+ case constant.Bool:
+ tag := falseTag
+ if constant.BoolVal(x) {
+ tag = trueTag
+ }
+ p.tag(tag)
+
+ case constant.Int:
+ if v, exact := constant.Int64Val(x); exact {
+ // common case: x fits into an int64 - use compact encoding
+ p.tag(int64Tag)
+ p.int64(v)
+ return
+ }
+ // uncommon case: large x - use float encoding
+ // (powers of 2 will be encoded efficiently with exponent)
+ p.tag(floatTag)
+ p.float(constant.ToFloat(x))
+
+ case constant.Float:
+ p.tag(floatTag)
+ p.float(x)
+
+ case constant.Complex:
+ p.tag(complexTag)
+ p.float(constant.Real(x))
+ p.float(constant.Imag(x))
+
+ case constant.String:
+ p.tag(stringTag)
+ p.string(constant.StringVal(x))
+
+ case constant.Unknown:
+ // package contains type errors
+ p.tag(unknownTag)
+
+ default:
+ panic(internalErrorf("unexpected value %v (%T)", x, x))
+ }
+}
+
+func (p *exporter) float(x constant.Value) {
+ if x.Kind() != constant.Float {
+ panic(internalErrorf("unexpected constant %v, want float", x))
+ }
+ // extract sign (there is no -0)
+ sign := constant.Sign(x)
+ if sign == 0 {
+ // x == 0
+ p.int(0)
+ return
+ }
+ // x != 0
+
+ var f big.Float
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ r := valueToRat(num)
+ f.SetRat(r.Quo(r, valueToRat(denom)))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ f.SetFloat64(math.MaxFloat64) // FIXME
+ }
+
+ // extract exponent such that 0.5 <= m < 1.0
+ var m big.Float
+ exp := f.MantExp(&m)
+
+ // extract mantissa as *big.Int
+ // - set exponent large enough so mant satisfies mant.IsInt()
+ // - get *big.Int from mant
+ m.SetMantExp(&m, int(m.MinPrec()))
+ mant, acc := m.Int(nil)
+ if acc != big.Exact {
+ panic(internalError("internal error"))
+ }
+
+ p.int(sign)
+ p.int(exp)
+ p.string(string(mant.Bytes()))
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+func (p *exporter) bool(b bool) bool {
+ if trace {
+ p.tracef("[")
+ defer p.tracef("= %v] ", b)
+ }
+
+ x := 0
+ if b {
+ x = 1
+ }
+ p.int(x)
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Low-level encoders
+
+func (p *exporter) index(marker byte, index int) {
+ if index < 0 {
+ panic(internalError("invalid index < 0"))
+ }
+ if debugFormat {
+ p.marker('t')
+ }
+ if trace {
+ p.tracef("%c%d ", marker, index)
+ }
+ p.rawInt64(int64(index))
+}
+
+func (p *exporter) tag(tag int) {
+ if tag >= 0 {
+ panic(internalError("invalid tag >= 0"))
+ }
+ if debugFormat {
+ p.marker('t')
+ }
+ if trace {
+ p.tracef("%s ", tagString[-tag])
+ }
+ p.rawInt64(int64(tag))
+}
+
+func (p *exporter) int(x int) {
+ p.int64(int64(x))
+}
+
+func (p *exporter) int64(x int64) {
+ if debugFormat {
+ p.marker('i')
+ }
+ if trace {
+ p.tracef("%d ", x)
+ }
+ p.rawInt64(x)
+}
+
+func (p *exporter) string(s string) {
+ if debugFormat {
+ p.marker('s')
+ }
+ if trace {
+ p.tracef("%q ", s)
+ }
+ // if we saw the string before, write its index (>= 0)
+ // (the empty string is mapped to 0)
+ if i, ok := p.strIndex[s]; ok {
+ p.rawInt64(int64(i))
+ return
+ }
+ // otherwise, remember string and write its negative length and bytes
+ p.strIndex[s] = len(p.strIndex)
+ p.rawInt64(-int64(len(s)))
+ for i := 0; i < len(s); i++ {
+ p.rawByte(s[i])
+ }
+}
+
+// marker emits a marker byte and position information which makes
+// it easy for a reader to detect if it is "out of sync". Used for
+// debugFormat format only.
+func (p *exporter) marker(m byte) {
+ p.rawByte(m)
+ // Enable this for help tracking down the location
+ // of an incorrect marker when running in debugFormat.
+ if false && trace {
+ p.tracef("#%d ", p.written)
+ }
+ p.rawInt64(int64(p.written))
+}
+
+// rawInt64 should only be used by low-level encoders.
+func (p *exporter) rawInt64(x int64) {
+ var tmp [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(tmp[:], x)
+ for i := 0; i < n; i++ {
+ p.rawByte(tmp[i])
+ }
+}
+
+// rawStringln should only be used to emit the initial version string.
+func (p *exporter) rawStringln(s string) {
+ for i := 0; i < len(s); i++ {
+ p.rawByte(s[i])
+ }
+ p.rawByte('\n')
+}
+
+// rawByte is the bottleneck interface to write to p.out.
+// rawByte escapes b as follows (any encoding does that
+// hides '$'):
+//
+// '$' => '|' 'S'
+// '|' => '|' '|'
+//
+// Necessary so other tools can find the end of the
+// export data by searching for "$$".
+// rawByte should only be used by low-level encoders.
+func (p *exporter) rawByte(b byte) {
+ switch b {
+ case '$':
+ // write '$' as '|' 'S'
+ b = 'S'
+ fallthrough
+ case '|':
+ // write '|' as '|' '|'
+ p.out.WriteByte('|')
+ p.written++
+ }
+ p.out.WriteByte(b)
+ p.written++
+}
+
+// tracef is like fmt.Printf but it rewrites the format string
+// to take care of indentation.
+func (p *exporter) tracef(format string, args ...interface{}) {
+ if strings.ContainsAny(format, "<>\n") {
+ var buf bytes.Buffer
+ for i := 0; i < len(format); i++ {
+ // no need to deal with runes
+ ch := format[i]
+ switch ch {
+ case '>':
+ p.indent++
+ continue
+ case '<':
+ p.indent--
+ continue
+ }
+ buf.WriteByte(ch)
+ if ch == '\n' {
+ for j := p.indent; j > 0; j-- {
+ buf.WriteString(". ")
+ }
+ }
+ }
+ format = buf.String()
+ }
+ fmt.Printf(format, args...)
+}
+
+// Debugging support.
+// (tagString is only used when tracing is enabled)
+var tagString = [...]string{
+ // Packages
+ -packageTag: "package",
+
+ // Types
+ -namedTag: "named type",
+ -arrayTag: "array",
+ -sliceTag: "slice",
+ -dddTag: "ddd",
+ -structTag: "struct",
+ -pointerTag: "pointer",
+ -signatureTag: "signature",
+ -interfaceTag: "interface",
+ -mapTag: "map",
+ -chanTag: "chan",
+
+ // Values
+ -falseTag: "false",
+ -trueTag: "true",
+ -int64Tag: "int64",
+ -floatTag: "float",
+ -fractionTag: "fraction",
+ -complexTag: "complex",
+ -stringTag: "string",
+ -unknownTag: "unknown",
+
+ // Type aliases
+ -aliasTag: "alias",
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
new file mode 100644
index 000000000..e3c310782
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
@@ -0,0 +1,1036 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+type importer struct {
+ imports map[string]*types.Package
+ data []byte
+ importpath string
+ buf []byte // for reading strings
+ version int // export format version
+
+ // object lists
+ strList []string // in order of appearance
+ pathList []string // in order of appearance
+ pkgList []*types.Package // in order of appearance
+ typList []types.Type // in order of appearance
+ interfaceList []*types.Interface // for delayed completion only
+ trackAllTypes bool
+
+ // position encoding
+ posInfoFormat bool
+ prevFile string
+ prevLine int
+ fake fakeFileSet
+
+ // debugging support
+ debugFormat bool
+ read int // bytes read
+}
+
+// BImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ // catch panics and return them as errors
+ const currentVersion = 6
+ version := -1 // unknown version
+ defer func() {
+ if e := recover(); e != nil {
+ // Return a (possibly nil or incomplete) package unchanged (see #16088).
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+
+ p := importer{
+ imports: imports,
+ data: data,
+ importpath: path,
+ version: version,
+ strList: []string{""}, // empty string is mapped to 0
+ pathList: []string{""}, // empty string is mapped to 0
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*token.File),
+ },
+ }
+
+ // read version info
+ var versionstr string
+ if b := p.rawByte(); b == 'c' || b == 'd' {
+ // Go1.7 encoding; first byte encodes low-level
+ // encoding format (compact vs debug).
+ // For backward-compatibility only (avoid problems with
+ // old installed packages). Newly compiled packages use
+ // the extensible format string.
+ // TODO(gri) Remove this support eventually; after Go1.8.
+ if b == 'd' {
+ p.debugFormat = true
+ }
+ p.trackAllTypes = p.rawByte() == 'a'
+ p.posInfoFormat = p.int() != 0
+ versionstr = p.string()
+ if versionstr == "v1" {
+ version = 0
+ }
+ } else {
+ // Go1.8 extensible encoding
+ // read version string and extract version number (ignore anything after the version number)
+ versionstr = p.rawStringln(b)
+ if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
+ if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
+ version = v
+ }
+ }
+ }
+ p.version = version
+
+ // read version specific flags - extend as necessary
+ switch p.version {
+ // case currentVersion:
+ // ...
+ // fallthrough
+ case currentVersion, 5, 4, 3, 2, 1:
+ p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
+ p.trackAllTypes = p.int() != 0
+ p.posInfoFormat = p.int() != 0
+ case 0:
+ // Go1.7 encoding format - nothing to do here
+ default:
+ errorf("unknown bexport format version %d (%q)", p.version, versionstr)
+ }
+
+ // --- generic export data ---
+
+ // populate typList with predeclared "known" types
+ p.typList = append(p.typList, predeclared()...)
+
+ // read package data
+ pkg = p.pkg()
+
+ // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
+ objcount := 0
+ for {
+ tag := p.tagOrIndex()
+ if tag == endTag {
+ break
+ }
+ p.obj(tag)
+ objcount++
+ }
+
+ // self-verification
+ if count := p.int(); count != objcount {
+ errorf("got %d objects; want %d", objcount, count)
+ }
+
+ // ignore compiler-specific import data
+
+ // complete interfaces
+ // TODO(gri) re-investigate if we still need to do this in a delayed fashion
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), p.pkgList[1:]...)
+ sort.Sort(byPath(list))
+ pkg.SetImports(list)
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+
+ return p.read, pkg, nil
+}
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+func (p *importer) pkg() *types.Package {
+ // if the package was seen before, i is its index (>= 0)
+ i := p.tagOrIndex()
+ if i >= 0 {
+ return p.pkgList[i]
+ }
+
+ // otherwise, i is the package tag (< 0)
+ if i != packageTag {
+ errorf("unexpected package tag %d version %d", i, p.version)
+ }
+
+ // read package data
+ name := p.string()
+ var path string
+ if p.version >= 5 {
+ path = p.path()
+ } else {
+ path = p.string()
+ }
+ if p.version >= 6 {
+ p.int() // package height; unused by go/types
+ }
+
+ // we should never see an empty package name
+ if name == "" {
+ errorf("empty package name in import")
+ }
+
+ // an empty path denotes the package we are currently importing;
+ // it must be the first package we see
+ if (path == "") != (len(p.pkgList) == 0) {
+ errorf("package path %q for pkg index %d", path, len(p.pkgList))
+ }
+
+ // if the package was imported before, use that one; otherwise create a new one
+ if path == "" {
+ path = p.importpath
+ }
+ pkg := p.imports[path]
+ if pkg == nil {
+ pkg = types.NewPackage(path, name)
+ p.imports[path] = pkg
+ } else if pkg.Name() != name {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
+ }
+ p.pkgList = append(p.pkgList, pkg)
+
+ return pkg
+}
+
+// objTag returns the tag value for each object kind.
+func objTag(obj types.Object) int {
+ switch obj.(type) {
+ case *types.Const:
+ return constTag
+ case *types.TypeName:
+ return typeTag
+ case *types.Var:
+ return varTag
+ case *types.Func:
+ return funcTag
+ default:
+ errorf("unexpected object: %v (%T)", obj, obj) // panics
+ panic("unreachable")
+ }
+}
+
+func sameObj(a, b types.Object) bool {
+ // Because unnamed types are not canonicalized, we cannot simply compare types for
+ // (pointer) identity.
+ // Ideally we'd check equality of constant values as well, but this is good enough.
+ return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
+}
+
+func (p *importer) declare(obj types.Object) {
+ pkg := obj.Pkg()
+ if alt := pkg.Scope().Insert(obj); alt != nil {
+ // This can only trigger if we import a (non-type) object a second time.
+ // Excluding type aliases, this cannot happen because 1) we only import a package
+ // once; and 2) we ignore compiler-specific export data which may contain
+ // functions whose inlined function bodies refer to other functions that
+ // were already imported.
+ // However, type aliases require reexporting the original type, so we need
+ // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
+ // method importer.obj, switch case importing functions).
+ // TODO(gri) review/update this comment once the gc compiler handles type aliases.
+ if !sameObj(obj, alt) {
+ errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
+ }
+ }
+}
+
+func (p *importer) obj(tag int) {
+ switch tag {
+ case constTag:
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ typ := p.typ(nil, nil)
+ val := p.value()
+ p.declare(types.NewConst(pos, pkg, name, typ, val))
+
+ case aliasTag:
+ // TODO(gri) verify type alias hookup is correct
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ typ := p.typ(nil, nil)
+ p.declare(types.NewTypeName(pos, pkg, name, typ))
+
+ case typeTag:
+ p.typ(nil, nil)
+
+ case varTag:
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ typ := p.typ(nil, nil)
+ p.declare(types.NewVar(pos, pkg, name, typ))
+
+ case funcTag:
+ pos := p.pos()
+ pkg, name := p.qualifiedName()
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ sig := types.NewSignature(nil, params, result, isddd)
+ p.declare(types.NewFunc(pos, pkg, name, sig))
+
+ default:
+ errorf("unexpected object tag %d", tag)
+ }
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+func (p *importer) pos() token.Pos {
+ if !p.posInfoFormat {
+ return token.NoPos
+ }
+
+ file := p.prevFile
+ line := p.prevLine
+ delta := p.int()
+ line += delta
+ if p.version >= 5 {
+ if delta == deltaNewFile {
+ if n := p.int(); n >= 0 {
+ // file changed
+ file = p.path()
+ line = n
+ }
+ }
+ } else {
+ if delta == 0 {
+ if n := p.int(); n >= 0 {
+ // file changed
+ file = p.prevFile[:n] + p.string()
+ line = p.int()
+ }
+ }
+ }
+ p.prevFile = file
+ p.prevLine = line
+
+ return p.fake.pos(file, line)
+}
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line int) token.Pos {
+ // Since we don't know the set of needed file positions, we
+ // reserve maxlines positions per file.
+ const maxlines = 64 * 1024
+ f := s.files[file]
+ if f == nil {
+ f = s.fset.AddFile(file, -1, maxlines)
+ s.files[file] = f
+ // Allocate the fake linebreak indices on first use.
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme?
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ f.SetLines(fakeLines)
+ }
+
+ if line > maxlines {
+ line = 1
+ }
+
+ // Treat the file as if it contained only newlines
+ // and column=1: use the line number as the offset.
+ return f.Pos(line - 1)
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func (p *importer) qualifiedName() (pkg *types.Package, name string) {
+ name = p.string()
+ pkg = p.pkg()
+ return
+}
+
+func (p *importer) record(t types.Type) {
+ p.typList = append(p.typList, t)
+}
+
+// A dddSlice is a types.Type representing ...T parameters.
+// It only appears for parameter types and does not escape
+// the importer.
+type dddSlice struct {
+ elem types.Type
+}
+
+func (t *dddSlice) Underlying() types.Type { return t }
+func (t *dddSlice) String() string { return "..." + t.elem.String() }
+
+// parent is the package which declared the type; parent == nil means
+// the package currently imported. The parent package is needed for
+// exported struct fields and interface methods which don't contain
+// explicit package information in the export data.
+//
+// A non-nil tname is used as the "owner" of the result type; i.e.,
+// the result type is the underlying type of tname. tname is used
+// to give interface methods a named receiver type where possible.
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
+ // if the type was seen before, i is its index (>= 0)
+ i := p.tagOrIndex()
+ if i >= 0 {
+ return p.typList[i]
+ }
+
+ // otherwise, i is the type tag (< 0)
+ switch i {
+ case namedTag:
+ // read type object
+ pos := p.pos()
+ parent, name := p.qualifiedName()
+ scope := parent.Scope()
+ obj := scope.Lookup(name)
+
+ // if the object doesn't exist yet, create and insert it
+ if obj == nil {
+ obj = types.NewTypeName(pos, parent, name, nil)
+ scope.Insert(obj)
+ }
+
+ if _, ok := obj.(*types.TypeName); !ok {
+ errorf("pkg = %s, name = %s => %s", parent, name, obj)
+ }
+
+ // associate new named type with obj if it doesn't exist yet
+ t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
+
+ // but record the existing type, if any
+ tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
+ p.record(tname)
+
+ // read underlying type
+ t0.SetUnderlying(p.typ(parent, t0))
+
+ // interfaces don't have associated methods
+ if types.IsInterface(t0) {
+ return tname
+ }
+
+ // read associated methods
+ for i := p.int(); i > 0; i-- {
+ // TODO(gri) replace this with something closer to fieldName
+ pos := p.pos()
+ name := p.string()
+ if !exported(name) {
+ p.pkg()
+ }
+
+ recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ p.int() // go:nointerface pragma - discarded
+
+ sig := types.NewSignature(recv.At(0), params, result, isddd)
+ t0.AddMethod(types.NewFunc(pos, parent, name, sig))
+ }
+
+ return tname
+
+ case arrayTag:
+ t := new(types.Array)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ n := p.int64()
+ *t = *types.NewArray(p.typ(parent, nil), n)
+ return t
+
+ case sliceTag:
+ t := new(types.Slice)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ *t = *types.NewSlice(p.typ(parent, nil))
+ return t
+
+ case dddTag:
+ t := new(dddSlice)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ t.elem = p.typ(parent, nil)
+ return t
+
+ case structTag:
+ t := new(types.Struct)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ *t = *types.NewStruct(p.fieldList(parent))
+ return t
+
+ case pointerTag:
+ t := new(types.Pointer)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ *t = *types.NewPointer(p.typ(parent, nil))
+ return t
+
+ case signatureTag:
+ t := new(types.Signature)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ *t = *types.NewSignature(nil, params, result, isddd)
+ return t
+
+ case interfaceTag:
+ // Create a dummy entry in the type list. This is safe because we
+ // cannot expect the interface type to appear in a cycle, as any
+ // such cycle must contain a named type which would have been
+ // first defined earlier.
+ // TODO(gri) Is this still true now that we have type aliases?
+ // See issue #23225.
+ n := len(p.typList)
+ if p.trackAllTypes {
+ p.record(nil)
+ }
+
+ var embeddeds []types.Type
+ for n := p.int(); n > 0; n-- {
+ p.pos()
+ embeddeds = append(embeddeds, p.typ(parent, nil))
+ }
+
+ t := newInterface(p.methodList(parent, tname), embeddeds)
+ p.interfaceList = append(p.interfaceList, t)
+ if p.trackAllTypes {
+ p.typList[n] = t
+ }
+ return t
+
+ case mapTag:
+ t := new(types.Map)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ key := p.typ(parent, nil)
+ val := p.typ(parent, nil)
+ *t = *types.NewMap(key, val)
+ return t
+
+ case chanTag:
+ t := new(types.Chan)
+ if p.trackAllTypes {
+ p.record(t)
+ }
+
+ dir := chanDir(p.int())
+ val := p.typ(parent, nil)
+ *t = *types.NewChan(dir, val)
+ return t
+
+ default:
+ errorf("unexpected type tag %d", i) // panics
+ panic("unreachable")
+ }
+}
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
+func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
+ if n := p.int(); n > 0 {
+ fields = make([]*types.Var, n)
+ tags = make([]string, n)
+ for i := range fields {
+ fields[i], tags[i] = p.field(parent)
+ }
+ }
+ return
+}
+
+func (p *importer) field(parent *types.Package) (*types.Var, string) {
+ pos := p.pos()
+ pkg, name, alias := p.fieldName(parent)
+ typ := p.typ(parent, nil)
+ tag := p.string()
+
+ anonymous := false
+ if name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ switch typ := deref(typ).(type) {
+ case *types.Basic: // basic types are named types
+ pkg = nil // // objects defined in Universe scope have no package
+ name = typ.Name()
+ case *types.Named:
+ name = typ.Obj().Name()
+ default:
+ errorf("named base type expected")
+ }
+ anonymous = true
+ } else if alias {
+ // anonymous field: we have an explicit name because it's an alias
+ anonymous = true
+ }
+
+ return types.NewField(pos, pkg, name, typ, anonymous), tag
+}
+
+func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
+ if n := p.int(); n > 0 {
+ methods = make([]*types.Func, n)
+ for i := range methods {
+ methods[i] = p.method(parent, baseType)
+ }
+ }
+ return
+}
+
+func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
+ pos := p.pos()
+ pkg, name, _ := p.fieldName(parent)
+ // If we don't have a baseType, use a nil receiver.
+ // A receiver using the actual interface type (which
+ // we don't know yet) will be filled in when we call
+ // types.Interface.Complete.
+ var recv *types.Var
+ if baseType != nil {
+ recv = types.NewVar(token.NoPos, parent, "", baseType)
+ }
+ params, isddd := p.paramList()
+ result, _ := p.paramList()
+ sig := types.NewSignature(recv, params, result, isddd)
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
+ name = p.string()
+ pkg = parent
+ if pkg == nil {
+ // use the imported package instead
+ pkg = p.pkgList[0]
+ }
+ if p.version == 0 && name == "_" {
+ // version 0 didn't export a package for _ fields
+ return
+ }
+ switch name {
+ case "":
+ // 1) field name matches base type name and is exported: nothing to do
+ case "?":
+ // 2) field name matches base type name and is not exported: need package
+ name = ""
+ pkg = p.pkg()
+ case "@":
+ // 3) field name doesn't match type name (alias)
+ name = p.string()
+ alias = true
+ fallthrough
+ default:
+ if !exported(name) {
+ pkg = p.pkg()
+ }
+ }
+ return
+}
+
+func (p *importer) paramList() (*types.Tuple, bool) {
+ n := p.int()
+ if n == 0 {
+ return nil, false
+ }
+ // negative length indicates unnamed parameters
+ named := true
+ if n < 0 {
+ n = -n
+ named = false
+ }
+ // n > 0
+ params := make([]*types.Var, n)
+ isddd := false
+ for i := range params {
+ params[i], isddd = p.param(named)
+ }
+ return types.NewTuple(params...), isddd
+}
+
+func (p *importer) param(named bool) (*types.Var, bool) {
+ t := p.typ(nil, nil)
+ td, isddd := t.(*dddSlice)
+ if isddd {
+ t = types.NewSlice(td.elem)
+ }
+
+ var pkg *types.Package
+ var name string
+ if named {
+ name = p.string()
+ if name == "" {
+ errorf("expected named parameter")
+ }
+ if name != "_" {
+ pkg = p.pkg()
+ }
+ if i := strings.Index(name, "·"); i > 0 {
+ name = name[:i] // cut off gc-specific parameter numbering
+ }
+ }
+
+ // read and discard compiler-specific info
+ p.string()
+
+ return types.NewVar(token.NoPos, pkg, name, t), isddd
+}
+
+func exported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+func (p *importer) value() constant.Value {
+ switch tag := p.tagOrIndex(); tag {
+ case falseTag:
+ return constant.MakeBool(false)
+ case trueTag:
+ return constant.MakeBool(true)
+ case int64Tag:
+ return constant.MakeInt64(p.int64())
+ case floatTag:
+ return p.float()
+ case complexTag:
+ re := p.float()
+ im := p.float()
+ return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+ case stringTag:
+ return constant.MakeString(p.string())
+ case unknownTag:
+ return constant.MakeUnknown()
+ default:
+ errorf("unexpected value tag %d", tag) // panics
+ panic("unreachable")
+ }
+}
+
+func (p *importer) float() constant.Value {
+ sign := p.int()
+ if sign == 0 {
+ return constant.MakeInt64(0)
+ }
+
+ exp := p.int()
+ mant := []byte(p.string()) // big endian
+
+ // remove leading 0's if any
+ for len(mant) > 0 && mant[0] == 0 {
+ mant = mant[1:]
+ }
+
+ // convert to little endian
+ // TODO(gri) go/constant should have a more direct conversion function
+ // (e.g., once it supports a big.Float based implementation)
+ for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
+ mant[i], mant[j] = mant[j], mant[i]
+ }
+
+ // adjust exponent (constant.MakeFromBytes creates an integer value,
+ // but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
+ exp -= len(mant) << 3
+ if len(mant) > 0 {
+ for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
+ exp++
+ }
+ }
+
+ x := constant.MakeFromBytes(mant)
+ switch {
+ case exp < 0:
+ d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+ x = constant.BinaryOp(x, token.QUO, d)
+ case exp > 0:
+ x = constant.Shift(x, token.SHL, uint(exp))
+ }
+
+ if sign < 0 {
+ x = constant.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
+
+// ----------------------------------------------------------------------------
+// Low-level decoders
+
+func (p *importer) tagOrIndex() int {
+ if p.debugFormat {
+ p.marker('t')
+ }
+
+ return int(p.rawInt64())
+}
+
+func (p *importer) int() int {
+ x := p.int64()
+ if int64(int(x)) != x {
+ errorf("exported integer too large")
+ }
+ return int(x)
+}
+
+func (p *importer) int64() int64 {
+ if p.debugFormat {
+ p.marker('i')
+ }
+
+ return p.rawInt64()
+}
+
+func (p *importer) path() string {
+ if p.debugFormat {
+ p.marker('p')
+ }
+ // if the path was seen before, i is its index (>= 0)
+ // (the empty string is at index 0)
+ i := p.rawInt64()
+ if i >= 0 {
+ return p.pathList[i]
+ }
+ // otherwise, i is the negative path length (< 0)
+ a := make([]string, -i)
+ for n := range a {
+ a[n] = p.string()
+ }
+ s := strings.Join(a, "/")
+ p.pathList = append(p.pathList, s)
+ return s
+}
+
+func (p *importer) string() string {
+ if p.debugFormat {
+ p.marker('s')
+ }
+ // if the string was seen before, i is its index (>= 0)
+ // (the empty string is at index 0)
+ i := p.rawInt64()
+ if i >= 0 {
+ return p.strList[i]
+ }
+ // otherwise, i is the negative string length (< 0)
+ if n := int(-i); n <= cap(p.buf) {
+ p.buf = p.buf[:n]
+ } else {
+ p.buf = make([]byte, n)
+ }
+ for i := range p.buf {
+ p.buf[i] = p.rawByte()
+ }
+ s := string(p.buf)
+ p.strList = append(p.strList, s)
+ return s
+}
+
+func (p *importer) marker(want byte) {
+ if got := p.rawByte(); got != want {
+ errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
+ }
+
+ pos := p.read
+ if n := int(p.rawInt64()); n != pos {
+ errorf("incorrect position: got %d; want %d", n, pos)
+ }
+}
+
+// rawInt64 should only be used by low-level decoders.
+func (p *importer) rawInt64() int64 {
+ i, err := binary.ReadVarint(p)
+ if err != nil {
+ errorf("read error: %v", err)
+ }
+ return i
+}
+
+// rawStringln should only be used to read the initial version string.
+func (p *importer) rawStringln(b byte) string {
+ p.buf = p.buf[:0]
+ for b != '\n' {
+ p.buf = append(p.buf, b)
+ b = p.rawByte()
+ }
+ return string(p.buf)
+}
+
+// needed for binary.ReadVarint in rawInt64
+func (p *importer) ReadByte() (byte, error) {
+ return p.rawByte(), nil
+}
+
+// byte is the bottleneck interface for reading p.data.
+// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
+// rawByte should only be used by low-level decoders.
+func (p *importer) rawByte() byte {
+ b := p.data[0]
+ r := 1
+ if b == '|' {
+ b = p.data[1]
+ r = 2
+ switch b {
+ case 'S':
+ b = '$'
+ case '|':
+ // nothing to do
+ default:
+ errorf("unexpected escape sequence in export data")
+ }
+ }
+ p.data = p.data[r:]
+ p.read += r
+ return b
+
+}
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+
+ // Types
+ namedTag
+ arrayTag
+ sliceTag
+ dddTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag // not used by gc
+ complexTag
+ stringTag
+ nilTag // only used by gc (appears in exported inlined function bodies)
+ unknownTag // not used by gc (only appears in packages with errors)
+
+ // Type aliases
+ aliasTag
+)
+
+var predecl []types.Type // initialized lazily
+
+func predeclared() []types.Type {
+ if predecl == nil {
+ // initialize lazily to be sure that all
+ // elements have been initialized before
+ predecl = []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+ }
+ }
+ return predecl
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
new file mode 100644
index 000000000..f33dc5613
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
@@ -0,0 +1,93 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
+
+// This file implements FindExportData.
+
+package gcimporter
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ size, err = strconv.Atoi(s)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = fmt.Errorf("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the string before the export data, either "$$" or "$$B".
+//
+func FindExportData(r *bufio.Reader) (hdr string, err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ if string(line) == "!<arch>\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, _, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = fmt.Errorf("not a Go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line starting with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+ hdr = string(line)
+
+ return
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
new file mode 100644
index 000000000..9cf186605
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
@@ -0,0 +1,1078 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
+// but it also contains the original source-based importer code for Go1.6.
+// Once we stop supporting 1.6, we can remove that code.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+package gcimporter // import "golang.org/x/tools/go/internal/gcimporter"
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "text/scanner"
+)
+
+// debugging/development support
+const debug = false
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+ if path == "" {
+ return
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ id = path // make sure we have an id to print in error message
+ return
+ }
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// ImportData imports a package by reading the gc-generated export data,
+// adds the corresponding package object to the packages map indexed by id,
+// and returns the object.
+//
+// The packages map must contain all packages already imported. The data
+// reader position must be the beginning of the export data section. The
+// filename is only used in error messages.
+//
+// If packages[id] contains the completely imported package, that package
+// can be used directly, and there is no need to call this function (but
+// there is also no harm but for extra time used).
+//
+func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
+ // support for parser error handling
+ defer func() {
+ switch r := recover().(type) {
+ case nil:
+ // nothing to do
+ case importError:
+ err = r
+ default:
+ panic(r) // internal error
+ }
+ }()
+
+ var p parser
+ p.init(filename, id, data, packages)
+ pkg = p.parseExport()
+
+ return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+//
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var filename, id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ var hdr string
+ buf := bufio.NewReader(rc)
+ if hdr, err = FindExportData(buf); err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$\n":
+ // Work-around if we don't have a filename; happens only if lookup != nil.
+ // Either way, the filename is only needed for importer error messages, so
+ // this is fine.
+ if filename == "" {
+ filename = path
+ }
+ return ImportData(packages, filename, id, buf)
+
+ case "$$B\n":
+ var data []byte
+ data, err = ioutil.ReadAll(buf)
+ if err != nil {
+ break
+ }
+
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 && data[0] == 'i' {
+ _, pkg, err = IImportData(fset, packages, data[1:], id)
+ } else {
+ _, pkg, err = BImportData(fset, packages, data, id)
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Parser
+
+// TODO(gri) Imported objects don't have position information.
+// Ideally use the debug table line info; alternatively
+// create some fake position (or the position of the
+// import). That way error messages referring to imported
+// objects can print meaningful information.
+
+// parser parses the exports inside a gc compiler-produced
+// object/archive file and populates its scope with the results.
+type parser struct {
+ scanner scanner.Scanner
+ tok rune // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ id string // package id of imported package
+ sharedPkgs map[string]*types.Package // package id -> package object (across importer)
+ localPkgs map[string]*types.Package // package id -> package object (just this package)
+}
+
+func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
+ p.scanner.Init(src)
+ p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+ p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ p.scanner.Whitespace = 1<<'\t' | 1<<' '
+ p.scanner.Filename = filename // for good error messages
+ p.next()
+ p.id = id
+ p.sharedPkgs = packages
+ if debug {
+ // check consistency of packages map
+ for _, pkg := range packages {
+ if pkg.Name() == "" {
+ fmt.Printf("no package name for %s\n", pkg.Path())
+ }
+ }
+ }
+}
+
+func (p *parser) next() {
+ p.tok = p.scanner.Scan()
+ switch p.tok {
+ case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
+ p.lit = p.scanner.TokenText()
+ default:
+ p.lit = ""
+ }
+ if debug {
+ fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
+ }
+}
+
+func declTypeName(pkg *types.Package, name string) *types.TypeName {
+ scope := pkg.Scope()
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*types.TypeName)
+ }
+ obj := types.NewTypeName(token.NoPos, pkg, name, nil)
+ // a named type may be referred to before the underlying type
+ // is known - set it up
+ types.NewNamed(obj, nil, nil)
+ scope.Insert(obj)
+ return obj
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// Internal errors are boxed as importErrors.
+type importError struct {
+ pos scanner.Position
+ err error
+}
+
+func (e importError) Error() string {
+ return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+func (p *parser) error(err interface{}) {
+ if s, ok := err.(string); ok {
+ err = errors.New(s)
+ }
+ // panic with a runtime.Error if err is not an error
+ panic(importError{p.scanner.Pos(), err.(error)})
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+ p.error(fmt.Sprintf(format, args...))
+}
+
+func (p *parser) expect(tok rune) string {
+ lit := p.lit
+ if p.tok != tok {
+ p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+ }
+ p.next()
+ return lit
+}
+
+func (p *parser) expectSpecial(tok string) {
+ sep := 'x' // not white space
+ i := 0
+ for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ i++
+ }
+ if i < len(tok) {
+ p.errorf("expected %q, got %q", tok, tok[0:i])
+ }
+}
+
+func (p *parser) expectKeyword(keyword string) {
+ lit := p.expect(scanner.Ident)
+ if lit != keyword {
+ p.errorf("expected keyword %s, got %q", keyword, lit)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Qualified and unqualified names
+
+// PackageId = string_lit .
+//
+func (p *parser) parsePackageId() string {
+ id, err := strconv.Unquote(p.expect(scanner.String))
+ if err != nil {
+ p.error(err)
+ }
+ // id == "" stands for the imported package id
+ // (only known at time of package installation)
+ if id == "" {
+ id = p.id
+ }
+ return id
+}
+
+// PackageName = ident .
+//
+func (p *parser) parsePackageName() string {
+ return p.expect(scanner.Ident)
+}
+
+// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
+func (p *parser) parseDotIdent() string {
+ ident := ""
+ if p.tok != scanner.Int {
+ sep := 'x' // not white space
+ for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
+ ident += p.lit
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ }
+ }
+ if ident == "" {
+ p.expect(scanner.Ident) // use expect() for error handling
+ }
+ return ident
+}
+
+// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
+//
+func (p *parser) parseQualifiedName() (id, name string) {
+ p.expect('@')
+ id = p.parsePackageId()
+ p.expect('.')
+ // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
+ if p.tok == '?' {
+ p.next()
+ } else {
+ name = p.parseDotIdent()
+ }
+ return
+}
+
+// getPkg returns the package for a given id. If the package is
+// not found, create the package and add it to the p.localPkgs
+// and p.sharedPkgs maps. name is the (expected) name of the
+// package. If name == "", the package name is expected to be
+// set later via an import clause in the export data.
+//
+// id identifies a package, usually by a canonical package path like
+// "encoding/json" but possibly by a non-canonical import path like
+// "./json".
+//
+func (p *parser) getPkg(id, name string) *types.Package {
+ // package unsafe is not in the packages maps - handle explicitly
+ if id == "unsafe" {
+ return types.Unsafe
+ }
+
+ pkg := p.localPkgs[id]
+ if pkg == nil {
+ // first import of id from this package
+ pkg = p.sharedPkgs[id]
+ if pkg == nil {
+ // first import of id by this importer;
+ // add (possibly unnamed) pkg to shared packages
+ pkg = types.NewPackage(id, name)
+ p.sharedPkgs[id] = pkg
+ }
+ // add (possibly unnamed) pkg to local packages
+ if p.localPkgs == nil {
+ p.localPkgs = make(map[string]*types.Package)
+ }
+ p.localPkgs[id] = pkg
+ } else if name != "" {
+ // package exists already and we have an expected package name;
+ // make sure names match or set package name if necessary
+ if pname := pkg.Name(); pname == "" {
+ pkg.SetName(name)
+ } else if pname != name {
+ p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
+ }
+ }
+ return pkg
+}
+
+// parseExportedName is like parseQualifiedName, but
+// the package id is resolved to an imported *types.Package.
+//
+func (p *parser) parseExportedName() (pkg *types.Package, name string) {
+ id, name := p.parseQualifiedName()
+ pkg = p.getPkg(id, "")
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// BasicType = identifier .
+//
+func (p *parser) parseBasicType() types.Type {
+ id := p.expect(scanner.Ident)
+ obj := types.Universe.Lookup(id)
+ if obj, ok := obj.(*types.TypeName); ok {
+ return obj.Type()
+ }
+ p.errorf("not a basic type: %s", id)
+ return nil
+}
+
+// ArrayType = "[" int_lit "]" Type .
+//
+func (p *parser) parseArrayType(parent *types.Package) types.Type {
+ // "[" already consumed and lookahead known not to be "]"
+ lit := p.expect(scanner.Int)
+ p.expect(']')
+ elem := p.parseType(parent)
+ n, err := strconv.ParseInt(lit, 10, 64)
+ if err != nil {
+ p.error(err)
+ }
+ return types.NewArray(elem, n)
+}
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *parser) parseMapType(parent *types.Package) types.Type {
+ p.expectKeyword("map")
+ p.expect('[')
+ key := p.parseType(parent)
+ p.expect(']')
+ elem := p.parseType(parent)
+ return types.NewMap(key, elem)
+}
+
+// Name = identifier | "?" | QualifiedName .
+//
+// For unqualified and anonymous names, the returned package is the parent
+// package unless parent == nil, in which case the returned package is the
+// package being imported. (The parent package is not nil if the name
+// is an unqualified struct field or interface method name belonging to a
+// type declared in another package.)
+//
+// For qualified names, the returned package is nil (and not created if
+// it doesn't exist yet) unless materializePkg is set (which creates an
+// unnamed package with valid package path). In the latter case, a
+// subsequent import clause is expected to provide a name for the package.
+//
+func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
+ pkg = parent
+ if pkg == nil {
+ pkg = p.sharedPkgs[p.id]
+ }
+ switch p.tok {
+ case scanner.Ident:
+ name = p.lit
+ p.next()
+ case '?':
+ // anonymous
+ p.next()
+ case '@':
+ // exported name prefixed with package path
+ pkg = nil
+ var id string
+ id, name = p.parseQualifiedName()
+ if materializePkg {
+ pkg = p.getPkg(id, "")
+ }
+ default:
+ p.error("name expected")
+ }
+ return
+}
+
+func deref(typ types.Type) types.Type {
+ if p, _ := typ.(*types.Pointer); p != nil {
+ return p.Elem()
+ }
+ return typ
+}
+
+// Field = Name Type [ string_lit ] .
+//
+func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
+ pkg, name := p.parseName(parent, true)
+
+ if name == "_" {
+ // Blank fields should be package-qualified because they
+ // are unexported identifiers, but gc does not qualify them.
+ // Assuming that the ident belongs to the current package
+ // causes types to change during re-exporting, leading
+ // to spurious "can't assign A to B" errors from go/types.
+ // As a workaround, pretend all blank fields belong
+ // to the same unique dummy package.
+ const blankpkg = "<_>"
+ pkg = p.getPkg(blankpkg, blankpkg)
+ }
+
+ typ := p.parseType(parent)
+ anonymous := false
+ if name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ switch typ := deref(typ).(type) {
+ case *types.Basic: // basic types are named types
+ pkg = nil // objects defined in Universe scope have no package
+ name = typ.Name()
+ case *types.Named:
+ name = typ.Obj().Name()
+ default:
+ p.errorf("anonymous field expected")
+ }
+ anonymous = true
+ }
+ tag := ""
+ if p.tok == scanner.String {
+ s := p.expect(scanner.String)
+ var err error
+ tag, err = strconv.Unquote(s)
+ if err != nil {
+ p.errorf("invalid struct tag %s: %s", s, err)
+ }
+ }
+ return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
+}
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList = Field { ";" Field } .
+//
+func (p *parser) parseStructType(parent *types.Package) types.Type {
+ var fields []*types.Var
+ var tags []string
+
+ p.expectKeyword("struct")
+ p.expect('{')
+ for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+ if i > 0 {
+ p.expect(';')
+ }
+ fld, tag := p.parseField(parent)
+ if tag != "" && tags == nil {
+ tags = make([]string, i)
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+ fields = append(fields, fld)
+ }
+ p.expect('}')
+
+ return types.NewStruct(fields, tags)
+}
+
+// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
+//
+func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
+ _, name := p.parseName(nil, false)
+ // remove gc-specific parameter numbering
+ if i := strings.Index(name, "·"); i >= 0 {
+ name = name[:i]
+ }
+ if p.tok == '.' {
+ p.expectSpecial("...")
+ isVariadic = true
+ }
+ typ := p.parseType(nil)
+ if isVariadic {
+ typ = types.NewSlice(typ)
+ }
+ // ignore argument tag (e.g. "noescape")
+ if p.tok == scanner.String {
+ p.next()
+ }
+ // TODO(gri) should we provide a package?
+ par = types.NewVar(token.NoPos, nil, name, typ)
+ return
+}
+
+// Parameters = "(" [ ParameterList ] ")" .
+// ParameterList = { Parameter "," } Parameter .
+//
+func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
+ p.expect('(')
+ for p.tok != ')' && p.tok != scanner.EOF {
+ if len(list) > 0 {
+ p.expect(',')
+ }
+ par, variadic := p.parseParameter()
+ list = append(list, par)
+ if variadic {
+ if isVariadic {
+ p.error("... not on final argument")
+ }
+ isVariadic = true
+ }
+ }
+ p.expect(')')
+
+ return
+}
+
+// Signature = Parameters [ Result ] .
+// Result = Type | Parameters .
+//
+func (p *parser) parseSignature(recv *types.Var) *types.Signature {
+ params, isVariadic := p.parseParameters()
+
+ // optional result type
+ var results []*types.Var
+ if p.tok == '(' {
+ var variadic bool
+ results, variadic = p.parseParameters()
+ if variadic {
+ p.error("... not permitted on result type")
+ }
+ }
+
+ return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
+}
+
+// InterfaceType = "interface" "{" [ MethodList ] "}" .
+// MethodList = Method { ";" Method } .
+// Method = Name Signature .
+//
+// The methods of embedded interfaces are always "inlined"
+// by the compiler and thus embedded interfaces are never
+// visible in the export data.
+//
+func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
+ var methods []*types.Func
+
+ p.expectKeyword("interface")
+ p.expect('{')
+ for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+ if i > 0 {
+ p.expect(';')
+ }
+ pkg, name := p.parseName(parent, true)
+ sig := p.parseSignature(nil)
+ methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
+ }
+ p.expect('}')
+
+ // Complete requires the type's embedded interfaces to be fully defined,
+ // but we do not define any
+ return types.NewInterface(methods, nil).Complete()
+}
+
+// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
+//
+func (p *parser) parseChanType(parent *types.Package) types.Type {
+ dir := types.SendRecv
+ if p.tok == scanner.Ident {
+ p.expectKeyword("chan")
+ if p.tok == '<' {
+ p.expectSpecial("<-")
+ dir = types.SendOnly
+ }
+ } else {
+ p.expectSpecial("<-")
+ p.expectKeyword("chan")
+ dir = types.RecvOnly
+ }
+ elem := p.parseType(parent)
+ return types.NewChan(dir, elem)
+}
+
+// Type =
+// BasicType | TypeName | ArrayType | SliceType | StructType |
+// PointerType | FuncType | InterfaceType | MapType | ChanType |
+// "(" Type ")" .
+//
+// BasicType = ident .
+// TypeName = ExportedName .
+// SliceType = "[" "]" Type .
+// PointerType = "*" Type .
+// FuncType = "func" Signature .
+//
+func (p *parser) parseType(parent *types.Package) types.Type {
+ switch p.tok {
+ case scanner.Ident:
+ switch p.lit {
+ default:
+ return p.parseBasicType()
+ case "struct":
+ return p.parseStructType(parent)
+ case "func":
+ // FuncType
+ p.next()
+ return p.parseSignature(nil)
+ case "interface":
+ return p.parseInterfaceType(parent)
+ case "map":
+ return p.parseMapType(parent)
+ case "chan":
+ return p.parseChanType(parent)
+ }
+ case '@':
+ // TypeName
+ pkg, name := p.parseExportedName()
+ return declTypeName(pkg, name).Type()
+ case '[':
+ p.next() // look ahead
+ if p.tok == ']' {
+ // SliceType
+ p.next()
+ return types.NewSlice(p.parseType(parent))
+ }
+ return p.parseArrayType(parent)
+ case '*':
+ // PointerType
+ p.next()
+ return types.NewPointer(p.parseType(parent))
+ case '<':
+ return p.parseChanType(parent)
+ case '(':
+ // "(" Type ")"
+ p.next()
+ typ := p.parseType(parent)
+ p.expect(')')
+ return typ
+ }
+ p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// ImportDecl = "import" PackageName PackageId .
+//
+func (p *parser) parseImportDecl() {
+ p.expectKeyword("import")
+ name := p.parsePackageName()
+ p.getPkg(p.parsePackageId(), name)
+}
+
+// int_lit = [ "+" | "-" ] { "0" ... "9" } .
+//
+func (p *parser) parseInt() string {
+ s := ""
+ switch p.tok {
+ case '-':
+ s = "-"
+ p.next()
+ case '+':
+ p.next()
+ }
+ return s + p.expect(scanner.Int)
+}
+
+// number = int_lit [ "p" int_lit ] .
+//
+func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
+ // mantissa
+ mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
+ if mant == nil {
+ panic("invalid mantissa")
+ }
+
+ if p.lit == "p" {
+ // exponent (base 2)
+ p.next()
+ exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
+ if err != nil {
+ p.error(err)
+ }
+ if exp < 0 {
+ denom := constant.MakeInt64(1)
+ denom = constant.Shift(denom, token.SHL, uint(-exp))
+ typ = types.Typ[types.UntypedFloat]
+ val = constant.BinaryOp(mant, token.QUO, denom)
+ return
+ }
+ if exp > 0 {
+ mant = constant.Shift(mant, token.SHL, uint(exp))
+ }
+ typ = types.Typ[types.UntypedFloat]
+ val = mant
+ return
+ }
+
+ typ = types.Typ[types.UntypedInt]
+ val = mant
+ return
+}
+
+// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
+// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
+// bool_lit = "true" | "false" .
+// complex_lit = "(" float_lit "+" float_lit "i" ")" .
+// rune_lit = "(" int_lit "+" int_lit ")" .
+// string_lit = `"` { unicode_char } `"` .
+//
+func (p *parser) parseConstDecl() {
+ p.expectKeyword("const")
+ pkg, name := p.parseExportedName()
+
+ var typ0 types.Type
+ if p.tok != '=' {
+ // constant types are never structured - no need for parent type
+ typ0 = p.parseType(nil)
+ }
+
+ p.expect('=')
+ var typ types.Type
+ var val constant.Value
+ switch p.tok {
+ case scanner.Ident:
+ // bool_lit
+ if p.lit != "true" && p.lit != "false" {
+ p.error("expected true or false")
+ }
+ typ = types.Typ[types.UntypedBool]
+ val = constant.MakeBool(p.lit == "true")
+ p.next()
+
+ case '-', scanner.Int:
+ // int_lit
+ typ, val = p.parseNumber()
+
+ case '(':
+ // complex_lit or rune_lit
+ p.next()
+ if p.tok == scanner.Char {
+ p.next()
+ p.expect('+')
+ typ = types.Typ[types.UntypedRune]
+ _, val = p.parseNumber()
+ p.expect(')')
+ break
+ }
+ _, re := p.parseNumber()
+ p.expect('+')
+ _, im := p.parseNumber()
+ p.expectKeyword("i")
+ p.expect(')')
+ typ = types.Typ[types.UntypedComplex]
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ case scanner.Char:
+ // rune_lit
+ typ = types.Typ[types.UntypedRune]
+ val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
+ p.next()
+
+ case scanner.String:
+ // string_lit
+ typ = types.Typ[types.UntypedString]
+ val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
+ p.next()
+
+ default:
+ p.errorf("expected literal got %s", scanner.TokenString(p.tok))
+ }
+
+ if typ0 == nil {
+ typ0 = typ
+ }
+
+ pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
+}
+
+// TypeDecl = "type" ExportedName Type .
+//
+func (p *parser) parseTypeDecl() {
+ p.expectKeyword("type")
+ pkg, name := p.parseExportedName()
+ obj := declTypeName(pkg, name)
+
+ // The type object may have been imported before and thus already
+ // have a type associated with it. We still need to parse the type
+ // structure, but throw it away if the object already has a type.
+ // This ensures that all imports refer to the same type object for
+ // a given type declaration.
+ typ := p.parseType(pkg)
+
+ if name := obj.Type().(*types.Named); name.Underlying() == nil {
+ name.SetUnderlying(typ)
+ }
+}
+
+// VarDecl = "var" ExportedName Type .
+//
+func (p *parser) parseVarDecl() {
+ p.expectKeyword("var")
+ pkg, name := p.parseExportedName()
+ typ := p.parseType(pkg)
+ pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
+}
+
+// Func = Signature [ Body ] .
+// Body = "{" ... "}" .
+//
+func (p *parser) parseFunc(recv *types.Var) *types.Signature {
+ sig := p.parseSignature(recv)
+ if p.tok == '{' {
+ p.next()
+ for i := 1; i > 0; p.next() {
+ switch p.tok {
+ case '{':
+ i++
+ case '}':
+ i--
+ }
+ }
+ }
+ return sig
+}
+
+// MethodDecl = "func" Receiver Name Func .
+// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
+//
+func (p *parser) parseMethodDecl() {
+ // "func" already consumed
+ p.expect('(')
+ recv, _ := p.parseParameter() // receiver
+ p.expect(')')
+
+ // determine receiver base type object
+ base := deref(recv.Type()).(*types.Named)
+
+ // parse method name, signature, and possibly inlined body
+ _, name := p.parseName(nil, false)
+ sig := p.parseFunc(recv)
+
+ // methods always belong to the same package as the base type object
+ pkg := base.Obj().Pkg()
+
+ // add method to type unless type was imported before
+ // and method exists already
+ // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
+ base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
+}
+
+// FuncDecl = "func" ExportedName Func .
+//
+func (p *parser) parseFuncDecl() {
+ // "func" already consumed
+ pkg, name := p.parseExportedName()
+ typ := p.parseFunc(nil)
+ pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
+}
+
+// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+//
+func (p *parser) parseDecl() {
+ if p.tok == scanner.Ident {
+ switch p.lit {
+ case "import":
+ p.parseImportDecl()
+ case "const":
+ p.parseConstDecl()
+ case "type":
+ p.parseTypeDecl()
+ case "var":
+ p.parseVarDecl()
+ case "func":
+ p.next() // look ahead
+ if p.tok == '(' {
+ p.parseMethodDecl()
+ } else {
+ p.parseFuncDecl()
+ }
+ }
+ }
+ p.expect('\n')
+}
+
+// ----------------------------------------------------------------------------
+// Export
+
+// Export = "PackageClause { Decl } "$$" .
+// PackageClause = "package" PackageName [ "safe" ] "\n" .
+//
+func (p *parser) parseExport() *types.Package {
+ p.expectKeyword("package")
+ name := p.parsePackageName()
+ if p.tok == scanner.Ident && p.lit == "safe" {
+ // package was compiled with -u option - ignore
+ p.next()
+ }
+ p.expect('\n')
+
+ pkg := p.getPkg(p.id, name)
+
+ for p.tok != '$' && p.tok != scanner.EOF {
+ p.parseDecl()
+ }
+
+ if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
+ // don't call next()/expect() since reading past the
+ // export data may cause scanner errors (e.g. NUL chars)
+ p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
+ }
+
+ if n := p.scanner.ErrorCount; n != 0 {
+ p.errorf("expected no scanner errors, got %d", n)
+ }
+
+ // Record all locally referenced packages as imports.
+ var imports []*types.Package
+ for id, pkg2 := range p.localPkgs {
+ if pkg2.Name() == "" {
+ p.errorf("%s package has no name", id)
+ }
+ if id == p.id {
+ continue // avoid self-edge
+ }
+ imports = append(imports, pkg2)
+ }
+ sort.Sort(byPath(imports))
+ pkg.SetImports(imports)
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+
+ return pkg
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
new file mode 100644
index 000000000..be671c79b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
@@ -0,0 +1,723 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+// +build go1.11
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "sort"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 0: Go1.11 encoding
+const iexportVersion = 0
+
+// IExportData returns the binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+
+ p := iexporter{
+ out: bytes.NewBuffer(nil),
+ fset: fset,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ typIndex: map[types.Type]uint64{},
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if ast.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex, pkg)
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ hdr.WriteByte('i')
+ hdr.uint64(iexportVersion)
+ hdr.uint64(uint64(p.strings.Len()))
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(p.out, &hdr)
+ io.Copy(p.out, &p.strings)
+ io.Copy(p.out, &p.data0)
+
+ return p.out.Bytes(), nil
+}
+
+// writeIndex writes out an object index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]types.Object{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ pkgObjs[localpkg] = nil
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].Name() < objs[j].Name()
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].Path() < pkgs[j].Path()
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(pkg.Path())
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.Name())
+ w.uint64(index[obj])
+ }
+ }
+}
+
+type iexporter struct {
+ fset *token.FileSet
+ out *bytes.Buffer
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ typIndex map[types.Type]uint64
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ assert(obj.Pkg() != types.Unsafe)
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark n present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ w := p.newWriter()
+ w.setPkg(obj.Pkg(), false)
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag('V')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ w.tag('F')
+ w.pos(obj.Pos())
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag('C')
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ if obj.IsAlias() {
+ w.tag('A')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ w.tag('T')
+ w.pos(obj.Pos())
+
+ underlying := obj.Type().Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ t := obj.Type()
+ if types.IsInterface(t) {
+ break
+ }
+
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+
+ // When file is the same as the last position (common case),
+ // we can save a few bytes by delta encoding just the line
+ // number.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile and
+ // prevLine as fields of exportWriter.
+
+ if file == w.prevFile {
+ delta := line - w.prevLine
+ w.int64(delta)
+ if delta == deltaNewFile {
+ w.int64(-1)
+ }
+ } else {
+ w.int64(deltaNewFile)
+ w.int64(line) // line >= 0
+ w.string(file)
+ w.prevFile = file
+ }
+ w.prevLine = line
+}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(pkg.Path())
+}
+
+func (w *exportWriter) qualifiedIdent(obj types.Object) {
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+
+ w.string(obj.Name())
+ w.pkg(obj.Pkg())
+}
+
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+ switch t := t.(type) {
+ case *types.Named:
+ w.startType(definedType)
+ w.qualifiedIdent(t.Obj())
+
+ case *types.Pointer:
+ w.startType(pointerType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Slice:
+ w.startType(sliceType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Array:
+ w.startType(arrayType)
+ w.uint64(uint64(t.Len()))
+ w.typ(t.Elem(), pkg)
+
+ case *types.Chan:
+ w.startType(chanType)
+ // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+ var dir uint64
+ switch t.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ w.uint64(dir)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Map:
+ w.startType(mapType)
+ w.typ(t.Key(), pkg)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Signature:
+ w.startType(signatureType)
+ w.setPkg(pkg, true)
+ w.signature(t)
+
+ case *types.Struct:
+ w.startType(structType)
+ w.setPkg(pkg, true)
+
+ n := t.NumFields()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ w.pos(f.Pos())
+ w.string(f.Name())
+ w.typ(f.Type(), pkg)
+ w.bool(f.Embedded())
+ w.string(t.Tag(i)) // note (or tag)
+ }
+
+ case *types.Interface:
+ w.startType(interfaceType)
+ w.setPkg(pkg, true)
+
+ n := t.NumEmbeddeds()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ f := t.Embedded(i)
+ w.pos(f.Obj().Pos())
+ w.typ(f.Obj().Type(), f.Obj().Pkg())
+ }
+
+ n = t.NumExplicitMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+ }
+}
+
+func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
+ if write {
+ w.pkg(pkg)
+ }
+
+ w.currPkg = pkg
+}
+
+func (w *exportWriter) signature(sig *types.Signature) {
+ w.paramList(sig.Params())
+ w.paramList(sig.Results())
+ if sig.Params().Len() > 0 {
+ w.bool(sig.Variadic())
+ }
+}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
+func (w *exportWriter) value(typ types.Type, v constant.Value) {
+ w.typ(typ, nil)
+
+ switch v.Kind() {
+ case constant.Bool:
+ w.bool(constant.BoolVal(v))
+ case constant.Int:
+ var i big.Int
+ if i64, exact := constant.Int64Val(v); exact {
+ i.SetInt64(i64)
+ } else if ui64, exact := constant.Uint64Val(v); exact {
+ i.SetUint64(ui64)
+ } else {
+ i.SetString(v.ExactString(), 10)
+ }
+ w.mpint(&i, typ)
+ case constant.Float:
+ f := constantToFloat(v)
+ w.mpfloat(f, typ)
+ case constant.Complex:
+ w.mpfloat(constantToFloat(constant.Real(v)), typ)
+ w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+ case constant.String:
+ w.string(constant.StringVal(v))
+ case constant.Unknown:
+ // package contains type errors
+ default:
+ panic(internalErrorf("unexpected value %v (%T)", v, v))
+ }
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+ assert(x.Kind() == constant.Float)
+ // Use the same floating-point precision (512) as cmd/compile
+ // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+ const mpprec = 512
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ assert(ok)
+ }
+ return &f
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
+// 2-, and 3-byte big-endian string follow.
+//
+// Encoding for signed types use the same general approach as for
+// unsigned types, except small values use zig-zag encoding and the
+// bottom bit of length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+ basic, ok := typ.Underlying().(*types.Basic)
+ if !ok {
+ panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+ }
+
+ signed, maxBytes := intSize(basic)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ panic(internalErrorf("leading zeros"))
+ }
+ if uint(len(b)) > maxBytes {
+ panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+ if f.IsInf() {
+ panic("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("internal error: assertion failed")
+ }
+}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty returns true if q contains no Nodes.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends n to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
+
+// popHead pops a node from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
new file mode 100644
index 000000000..3cb7ae5b9
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
@@ -0,0 +1,606 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "sort"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+)
+
+// IImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ const currentVersion = 0
+ version := -1
+ defer func() {
+ if e := recover(); e != nil {
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ version = int(r.uint64())
+ switch version {
+ case currentVersion:
+ default:
+ errorf("unknown iexport format version %d", version)
+ }
+
+ sLen := int64(r.uint64())
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ r.Seek(sLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ ipath: path,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*token.File),
+ },
+ }
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ pkgList := make([]*types.Package, r.uint64())
+ for i := range pkgList {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ pkg := imports[pkgPath]
+ if pkg == nil {
+ pkg = types.NewPackage(pkgPath, pkgName)
+ imports[pkgPath] = pkg
+ } else if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+
+ p.pkgCache[pkgPathOff] = pkg
+
+ nameIndex := make(map[string]uint64)
+ for nSyms := r.uint64(); nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ p.pkgIndex[pkg] = nameIndex
+ pkgList[i] = pkg
+ }
+ var localpkg *types.Package
+ for _, pkg := range pkgList {
+ if pkg.Path() == path {
+ localpkg = pkg
+ }
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[localpkg]))
+ for name := range p.pkgIndex[localpkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(localpkg, name)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ localpkg.SetImports(list)
+
+ // package was imported completely and without errors
+ localpkg.MarkComplete()
+
+ consumed, _ := r.Seek(0, io.SeekCurrent)
+ return int(consumed), localpkg, nil
+}
+
+type iimporter struct {
+ ipath string
+
+ stringData []byte
+ stringCache map[uint64]string
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if base == nil || !isInterface(t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
+
+ case 'C':
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case 'F':
+ sig := r.signature(nil)
+
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case 'T':
+ // Types can be recursive. We need to setup a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+ r.declare(obj)
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+ msig := r.signature(recv)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case 'V':
+ typ := r.typ()
+
+ r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ val = r.mpint(b)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(b *types.Basic) constant.Value {
+ signed, maxBytes := intSize(b)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ return constant.MakeInt64(v)
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+
+ buf := make([]byte, v)
+ io.ReadFull(&r.declReader, buf)
+
+ // convert to little endian
+ // TODO(gri) go/constant should have a more direct conversion function
+ // (e.g., once it supports a big.Float based implementation)
+ for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
+ buf[i], buf[j] = buf[j], buf[i]
+ }
+
+ x := constant.MakeFromBytes(buf)
+ if signed && n&1 != 0 {
+ x = constant.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
+
+func (r *importReader) mpfloat(b *types.Basic) constant.Value {
+ x := r.mpint(b)
+ if constant.Sign(x) == 0 {
+ return x
+ }
+
+ exp := r.int64()
+ switch {
+ case exp > 0:
+ x = constant.Shift(x, token.SHL, uint(exp))
+ case exp < 0:
+ d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+ x = constant.BinaryOp(x, token.QUO, d)
+ }
+ return x
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 {
+ return token.NoPos
+ }
+
+ return r.p.fake.pos(r.prevFile, int(r.prevLine))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := t.(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) types.Type {
+ switch k := r.kind(); k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+
+ msig := r.signature(recv)
+ methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+
+ typ := newInterface(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Var) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types.NewSignature(recv, params, results, variadic)
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
new file mode 100644
index 000000000..463f25227
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ named := make([]*types.Named, len(embeddeds))
+ for i, e := range embeddeds {
+ var ok bool
+ named[i], ok = e.(*types.Named)
+ if !ok {
+ panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
+ }
+ }
+ return types.NewInterface(methods, named)
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
new file mode 100644
index 000000000..ab28b95cb
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ return types.NewInterfaceType(methods, embeddeds)
+}
diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
new file mode 100644
index 000000000..fdc7da056
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagesdriver fetches type sizes for go/packages and go/analysis.
+package packagesdriver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/types"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+)
+
+var debug = false
+
+// GetSizes returns the sizes used by the underlying driver with the given parameters.
+func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
+ // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ // We did not find the driver, so use "go list".
+ tool = "off"
+ }
+ }
+
+ if tool == "off" {
+ return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
+ }
+
+ req, err := json.Marshal(struct {
+ Command string `json:"command"`
+ Env []string `json:"env"`
+ BuildFlags []string `json:"build_flags"`
+ }{
+ Command: "sizes",
+ Env: env,
+ BuildFlags: buildFlags,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ cmd := exec.CommandContext(ctx, tool)
+ cmd.Dir = dir
+ cmd.Env = env
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = new(bytes.Buffer)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ var response struct {
+ // Sizes, if not nil, is the types.Sizes to use when type checking.
+ Sizes *types.StdSizes
+ }
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return response.Sizes, nil
+}
+
+func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
+ args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
+ args = append(args, buildFlags...)
+ args = append(args, "--", "unsafe")
+ stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
+ if err != nil {
+ return nil, err
+ }
+ fields := strings.Fields(stdout.String())
+ if len(fields) < 2 {
+ return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
+ }
+ goarch := fields[0]
+ compiler := fields[1]
+ return types.SizesFor(compiler, goarch), nil
+}
+
+// InvokeGo returns the stdout of a go command invocation.
+func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
+ if debug {
+ defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
+ }
+ stdout := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(ctx, "go", args...)
+ // On darwin the cwd gets resolved to the real path, which breaks anything that
+ // expects the working directory to keep the original path, including the
+ // go command when dealing with modules.
+ // The Go stdlib has a special feature where if the cwd and the PWD are the
+ // same node then it trusts the PWD, so by setting it in the env for the child
+ // process we fix up all the paths returned by the go command.
+ cmd.Env = append(append([]string{}, env...), "PWD="+dir)
+ cmd.Dir = dir
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ if err := cmd.Run(); err != nil {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - executable not found
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ if !usesExportData {
+ return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
+ }
+ }
+
+ // As of writing, go list -export prints some non-fatal compilation
+ // errors to stderr, even with -e set. We would prefer that it put
+ // them in the Package.Error JSON (see https://golang.org/issue/26319).
+ // In the meantime, there's nowhere good to put them, but they can
+ // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
+ // is set.
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
+ }
+
+ // debugging
+ if false {
+ fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
+ }
+
+ return stdout, nil
+}
+
+func cmdDebugStr(envlist []string, args ...string) string {
+ env := make(map[string]string)
+ for _, kv := range envlist {
+ split := strings.Split(kv, "=")
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
+}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 000000000..3799f8ed8
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,222 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The Load function takes as input a list of patterns and return a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool,
+but all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypeInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load contains only the packages matched
+by the patterns. Their dependencies can be found by walking the import
+graph using the Imports fields.
+
+The Load function can be configured by passing a pointer to a Config as
+the first argument. A nil Config is equivalent to the zero Config, which
+causes Load to run in LoadFiles mode, collecting minimal information.
+See the documentation for type Config for details.
+
+As noted earlier, the Config.Mode controls the amount of detail
+reported about the loaded packages, with each mode returning all the data of the
+previous mode with some extra added. See the documentation for type LoadMode
+for details.
+
+Most tools should pass their command-line arguments (after any flags)
+uninterpreted to the loader, so that the loader can interpret them
+according to the conventions of the underlying build system.
+See the Example function for typical usage.
+
+*/
+package packages // import "golang.org/x/tools/go/packages"
+
+/*
+
+Motivation and design considerations
+
+The new package's design solves problems addressed by two existing
+packages: go/build, which locates and describes packages, and
+golang.org/x/tools/go/loader, which loads, parses and type-checks them.
+The go/build.Package structure encodes too much of the 'go build' way
+of organizing projects, leaving us in need of a data type that describes a
+package of Go source code independent of the underlying build system.
+We wanted something that works equally well with go build and vgo, and
+also other build systems such as Bazel and Blaze, making it possible to
+construct analysis tools that work in all these environments.
+Tools such as errcheck and staticcheck were essentially unavailable to
+the Go community at Google, and some of Google's internal tools for Go
+are unavailable externally.
+This new package provides a uniform way to obtain package metadata by
+querying each of these build systems, optionally supporting their
+preferred command-line notations for packages, so that tools integrate
+neatly with users' build environments. The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+ in several times in sequence as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, applications changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: The Overlay field in the Config allows providing alternate contents
+for Go source files, by providing a mapping from file path to contents.
+go/packages will pull in new imports added in overlay files when go/packages
+is run in LoadImports mode or greater.
+Overlay support for the go list driver isn't complete yet: if the file doesn't
+exist on disk, it will only be recognized in an overlay if it is a non-test file
+and the package would be reported even without the overlay.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 000000000..22ff769ef
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "strings"
+)
+
+// Driver
+type driverRequest struct {
+ Command string `json:"command"`
+ Mode LoadMode `json:"mode"`
+ Env []string `json:"env"`
+ BuildFlags []string `json:"build_flags"`
+ Tests bool `json:"tests"`
+ Overlay map[string][]byte `json:"overlay"`
+}
+
+// findExternalDriver returns the file path of a tool that supplies
+// the build system package structure, or "" if not found."
+// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, words ...string) (*driverResponse, error) {
+ req, err := json.Marshal(driverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, words...)
+ cmd.Dir = cfg.Dir
+ cmd.Env = cfg.Env
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = new(bytes.Buffer)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ var response driverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 000000000..3a0d4b012
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,832 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/go/internal/packagesdriver"
+ "golang.org/x/tools/internal/gopathwalk"
+ "golang.org/x/tools/internal/semver"
+)
+
+// debug controls verbose logging.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+ error
+}
+
+// responseDeduper wraps a driverResponse, deduplicating its contents.
+type responseDeduper struct {
+ seenRoots map[string]bool
+ seenPackages map[string]*Package
+ dr *driverResponse
+}
+
+// init fills in r with a driverResponse.
+func (r *responseDeduper) init(dr *driverResponse) {
+ r.dr = dr
+ r.seenRoots = map[string]bool{}
+ r.seenPackages = map[string]*Package{}
+ for _, pkg := range dr.Packages {
+ r.seenPackages[pkg.ID] = pkg
+ }
+ for _, root := range dr.Roots {
+ r.seenRoots[root] = true
+ }
+}
+
+func (r *responseDeduper) addPackage(p *Package) {
+ if r.seenPackages[p.ID] != nil {
+ return
+ }
+ r.seenPackages[p.ID] = p
+ r.dr.Packages = append(r.dr.Packages, p)
+}
+
+func (r *responseDeduper) addRoot(id string) {
+ if r.seenRoots[id] {
+ return
+ }
+ r.seenRoots[id] = true
+ r.dr.Roots = append(r.dr.Roots, id)
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ var sizes types.Sizes
+ var sizeserr error
+ var sizeswg sync.WaitGroup
+ if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
+ sizeswg.Add(1)
+ go func() {
+ sizes, sizeserr = getSizes(cfg)
+ sizeswg.Done()
+ }()
+ }
+
+ // Determine files requested in contains patterns
+ var containFiles []string
+ var packagesNamed []string
+ restPatterns := make([]string, 0, len(patterns))
+ // Extract file= and other [querytype]= patterns. Report an error if querytype
+ // doesn't exist.
+extractQueries:
+ for _, pattern := range patterns {
+ eqidx := strings.Index(pattern, "=")
+ if eqidx < 0 {
+ restPatterns = append(restPatterns, pattern)
+ } else {
+ query, value := pattern[:eqidx], pattern[eqidx+len("="):]
+ switch query {
+ case "file":
+ containFiles = append(containFiles, value)
+ case "pattern":
+ restPatterns = append(restPatterns, value)
+ case "iamashamedtousethedisabledqueryname":
+ packagesNamed = append(packagesNamed, value)
+ case "": // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ default:
+ for _, rune := range query {
+ if rune < 'a' || rune > 'z' { // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ continue extractQueries
+ }
+ }
+ // Reject all other patterns containing "="
+ return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
+ }
+ }
+ }
+
+ response := &responseDeduper{}
+ var err error
+
+ // See if we have any patterns to pass through to go list. Zero initial
+ // patterns also requires a go list call, since it's the equivalent of
+ // ".".
+ if len(restPatterns) > 0 || len(patterns) == 0 {
+ dr, err := golistDriver(cfg, restPatterns...)
+ if err != nil {
+ return nil, err
+ }
+ response.init(dr)
+ } else {
+ response.init(&driverResponse{})
+ }
+
+ sizeswg.Wait()
+ if sizeserr != nil {
+ return nil, sizeserr
+ }
+ // types.SizesFor always returns nil or a *types.StdSizes
+ response.dr.Sizes, _ = sizes.(*types.StdSizes)
+
+ var containsCandidates []string
+
+ if len(containFiles) != 0 {
+ if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(packagesNamed) != 0 {
+ if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil {
+ return nil, err
+ }
+ }
+
+ modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr)
+ if err != nil {
+ return nil, err
+ }
+ if len(containFiles) > 0 {
+ containsCandidates = append(containsCandidates, modifiedPkgs...)
+ containsCandidates = append(containsCandidates, needPkgs...)
+ }
+ if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil {
+ return nil, err
+ }
+ // Check candidate packages for containFiles.
+ if len(containFiles) > 0 {
+ for _, id := range containsCandidates {
+ pkg := response.seenPackages[id]
+ for _, f := range containFiles {
+ for _, g := range pkg.GoFiles {
+ if sameFile(f, g) {
+ response.addRoot(id)
+ }
+ }
+ }
+ }
+ }
+
+ return response.dr, nil
+}
+
+func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
+ if len(pkgs) == 0 {
+ return nil
+ }
+ dr, err := driver(cfg, pkgs...)
+ if err != nil {
+ return err
+ }
+ for _, pkg := range dr.Packages {
+ response.addPackage(pkg)
+ }
+ _, needPkgs, err := processGolistOverlay(cfg, response.dr)
+ if err != nil {
+ return err
+ }
+ addNeededOverlayPackages(cfg, driver, response, needPkgs)
+ return nil
+}
+
+func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
+ for _, query := range queries {
+ // TODO(matloob): Do only one query per directory.
+ fdir := filepath.Dir(query)
+ // Pass absolute path of directory to go list so that it knows to treat it as a directory,
+ // not a package path.
+ pattern, err := filepath.Abs(fdir)
+ if err != nil {
+ return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
+ }
+ dirResponse, err := driver(cfg, pattern)
+ if err != nil {
+ return err
+ }
+ isRoot := make(map[string]bool, len(dirResponse.Roots))
+ for _, root := range dirResponse.Roots {
+ isRoot[root] = true
+ }
+ for _, pkg := range dirResponse.Packages {
+ // Add any new packages to the main set
+ // We don't bother to filter packages that will be dropped by the changes of roots,
+ // that will happen anyway during graph construction outside this function.
+ // Over-reporting packages is not a problem.
+ response.addPackage(pkg)
+ // if the package was not a root one, it cannot have the file
+ if !isRoot[pkg.ID] {
+ continue
+ }
+ for _, pkgFile := range pkg.GoFiles {
+ if filepath.Base(query) == filepath.Base(pkgFile) {
+ response.addRoot(pkg.ID)
+ break
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// modCacheRegexp splits a path in a module cache into module, module version, and package.
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
+
+func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
+ // calling `go env` isn't free; bail out if there's nothing to do.
+ if len(queries) == 0 {
+ return nil
+ }
+ // Determine which directories are relevant to scan.
+ roots, modRoot, err := roots(cfg)
+ if err != nil {
+ return err
+ }
+
+ // Scan the selected directories. Simple matches, from GOPATH/GOROOT
+ // or the local module, can simply be "go list"ed. Matches from the
+ // module cache need special treatment.
+ var matchesMu sync.Mutex
+ var simpleMatches, modCacheMatches []string
+ add := func(root gopathwalk.Root, dir string) {
+ // Walk calls this concurrently; protect the result slices.
+ matchesMu.Lock()
+ defer matchesMu.Unlock()
+
+ path := dir
+ if dir != root.Path {
+ path = dir[len(root.Path)+1:]
+ }
+ if pathMatchesQueries(path, queries) {
+ switch root.Type {
+ case gopathwalk.RootModuleCache:
+ modCacheMatches = append(modCacheMatches, path)
+ case gopathwalk.RootCurrentModule:
+ // We'd need to read go.mod to find the full
+ // import path. Relative's easier.
+ rel, err := filepath.Rel(cfg.Dir, dir)
+ if err != nil {
+ // This ought to be impossible, since
+ // we found dir in the current module.
+ panic(err)
+ }
+ simpleMatches = append(simpleMatches, "./"+rel)
+ case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
+ simpleMatches = append(simpleMatches, path)
+ }
+ }
+ }
+
+ startWalk := time.Now()
+ gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
+ if debug {
+ log.Printf("%v for walk", time.Since(startWalk))
+ }
+
+ // Weird special case: the top-level package in a module will be in
+ // whatever directory the user checked the repository out into. It's
+ // more reasonable for that to not match the package name. So, if there
+ // are any Go files in the mod root, query it just to be safe.
+ if modRoot != "" {
+ rel, err := filepath.Rel(cfg.Dir, modRoot)
+ if err != nil {
+ panic(err) // See above.
+ }
+
+ files, err := ioutil.ReadDir(modRoot)
+ for _, f := range files {
+ if strings.HasSuffix(f.Name(), ".go") {
+ simpleMatches = append(simpleMatches, rel)
+ break
+ }
+ }
+ }
+
+ addResponse := func(r *driverResponse) {
+ for _, pkg := range r.Packages {
+ response.addPackage(pkg)
+ for _, name := range queries {
+ if pkg.Name == name {
+ response.addRoot(pkg.ID)
+ break
+ }
+ }
+ }
+ }
+
+ if len(simpleMatches) != 0 {
+ resp, err := driver(cfg, simpleMatches...)
+ if err != nil {
+ return err
+ }
+ addResponse(resp)
+ }
+
+ // Module cache matches are tricky. We want to avoid downloading new
+ // versions of things, so we need to use the ones present in the cache.
+ // go list doesn't accept version specifiers, so we have to write out a
+ // temporary module, and do the list in that module.
+ if len(modCacheMatches) != 0 {
+ // Collect all the matches, deduplicating by major version
+ // and preferring the newest.
+ type modInfo struct {
+ mod string
+ major string
+ }
+ mods := make(map[modInfo]string)
+ var imports []string
+ for _, modPath := range modCacheMatches {
+ matches := modCacheRegexp.FindStringSubmatch(modPath)
+ mod, ver := filepath.ToSlash(matches[1]), matches[2]
+ importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
+
+ major := semver.Major(ver)
+ if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
+ mods[modInfo{mod, major}] = ver
+ }
+
+ imports = append(imports, importPath)
+ }
+
+ // Build the temporary module.
+ var gomod bytes.Buffer
+ gomod.WriteString("module modquery\nrequire (\n")
+ for mod, version := range mods {
+ gomod.WriteString("\t" + mod.mod + " " + version + "\n")
+ }
+ gomod.WriteString(")\n")
+
+ tmpCfg := *cfg
+
+ // We're only trying to look at stuff in the module cache, so
+ // disable the network. This should speed things up, and has
+ // prevented errors in at least one case, #28518.
+ tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...))
+
+ var err error
+ tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpCfg.Dir)
+
+ if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
+ return fmt.Errorf("writing go.mod for module cache query: %v", err)
+ }
+
+ // Run the query, using the import paths calculated from the matches above.
+ resp, err := driver(&tmpCfg, imports...)
+ if err != nil {
+ return fmt.Errorf("querying module cache matches: %v", err)
+ }
+ addResponse(resp)
+ }
+
+ return nil
+}
+
// getSizes returns the types.Sizes to use when type-checking, as reported
// by the underlying build tool via the packagesdriver shim.
func getSizes(cfg *Config) (types.Sizes, error) {
	return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
}
+
+// roots selects the appropriate paths to walk based on the passed-in configuration,
+// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
+func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
+ stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
+ if err != nil {
+ return nil, "", err
+ }
+
+ fields := strings.Split(stdout.String(), "\n")
+ if len(fields) != 4 || len(fields[3]) != 0 {
+ return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
+ }
+ goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
+ var modDir string
+ if gomod != "" {
+ modDir = filepath.Dir(gomod)
+ }
+
+ var roots []gopathwalk.Root
+ // Always add GOROOT.
+ roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
+ // If modules are enabled, scan the module dir.
+ if modDir != "" {
+ roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
+ }
+ // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
+ for _, p := range gopath {
+ if modDir != "" {
+ roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
+ } else {
+ roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
+ }
+ }
+
+ return roots, modDir, nil
+}
+
+// These functions were copied from goimports. See further documentation there.
+
+// pathMatchesQueries is adapted from pkgIsCandidate.
+// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
+func pathMatchesQueries(path string, queries []string) bool {
+ lastTwo := lastTwoComponents(path)
+ for _, query := range queries {
+ if strings.Contains(lastTwo, query) {
+ return true
+ }
+ if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
+ lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
+ if strings.Contains(lastTwo, query) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
// lastTwoComponents returns at most the last two path components
// of v, accepting either / or \ as the path separator.
func lastTwoComponents(v string) string {
	slashes := 0
	for idx := len(v); idx > 0; idx-- {
		c := v[idx-1]
		if c != '/' && c != '\\' {
			continue
		}
		slashes++
		if slashes == 2 {
			// Keep the separator itself, mirroring the original slice bound.
			return v[idx-1:]
		}
	}
	return v
}
+
// hasHyphenOrUpperASCII reports whether s contains an ASCII hyphen or an
// upper-case ASCII letter (non-ASCII bytes are ignored).
func hasHyphenOrUpperASCII(s string) bool {
	for _, b := range []byte(s) {
		if b == '-' {
			return true
		}
		if 'A' <= b && b <= 'Z' {
			return true
		}
	}
	return false
}
+
// lowerASCIIAndRemoveHyphen returns s with upper-case ASCII letters lowered
// and all hyphens removed; every other byte passes through unchanged.
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
	var out strings.Builder
	out.Grow(len(s))
	for i := 0; i < len(s); i++ {
		switch b := s[i]; {
		case b == '-':
			// drop hyphens entirely
		case 'A' <= b && b <= 'Z':
			out.WriteByte(b + ('a' - 'A'))
		default:
			out.WriteByte(b)
		}
	}
	return out.String()
}
+
// jsonPackage mirrors one JSON object in the "go list -json" output stream.
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
type jsonPackage struct {
	ImportPath      string
	Dir             string
	Name            string
	Export          string
	GoFiles         []string
	CompiledGoFiles []string
	CFiles          []string
	CgoFiles        []string
	CXXFiles        []string
	MFiles          []string
	HFiles          []string
	FFiles          []string
	SFiles          []string
	SwigFiles       []string
	SwigCXXFiles    []string
	SysoFiles       []string
	Imports         []string
	ImportMap       map[string]string
	Deps            []string
	TestGoFiles     []string
	TestImports     []string
	XTestGoFiles    []string
	XTestImports    []string
	ForTest         string // q in a "p [q.test]" package, else ""
	DepOnly         bool

	Error *jsonPackageError // non-nil when go list reported a problem for this package
}
+
// jsonPackageError mirrors the Error object emitted by go list for
// erroneous packages.
type jsonPackageError struct {
	ImportStack []string
	Pos         string // position of the error, if known
	Err         string
}
+
// otherFiles gathers all of p's non-Go source file lists (C, C++, Objective-C,
// headers, Fortran, assembly, SWIG, and syso), in a fixed order.
func otherFiles(p *jsonPackage) [][]string {
	return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
+
// golistDriver uses the "go list" command to expand the pattern
// words and return metadata for the specified packages. dir may be
// "" and env may be nil, as per os/exec.Command.
func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
	// go list uses the following identifiers in ImportPath and Imports:
	//
	// "p" -- importable package or main (command)
	// "q.test" -- q's test executable
	// "p [q.test]" -- variant of p as built for q's test executable
	// "q_test [q.test]" -- q's external test package
	//
	// The packages p that are built differently for a test q.test
	// are q itself, plus any helpers used by the external test q_test,
	// typically including "testing" and all its dependencies.

	// Run "go list" for complete
	// information on the specified packages.
	buf, err := invokeGo(cfg, golistargs(cfg, words)...)
	if err != nil {
		return nil, err
	}
	// seen tracks every package already decoded, keyed by ImportPath, so
	// that exact duplicates are skipped and conflicting ones reported.
	seen := make(map[string]*jsonPackage)
	// Decode the JSON and convert it to Package form.
	var response driverResponse
	for dec := json.NewDecoder(buf); dec.More(); {
		p := new(jsonPackage)
		if err := dec.Decode(p); err != nil {
			return nil, fmt.Errorf("JSON decoding failed: %v", err)
		}

		if p.ImportPath == "" {
			// The documentation for go list says that “[e]rroneous packages will have
			// a non-empty ImportPath”. If for some reason it comes back empty, we
			// prefer to error out rather than silently discarding data or handing
			// back a package without any way to refer to it.
			if p.Error != nil {
				return nil, Error{
					Pos: p.Error.Pos,
					Msg: p.Error.Err,
				}
			}
			return nil, fmt.Errorf("package missing import path: %+v", p)
		}

		if old, found := seen[p.ImportPath]; found {
			if !reflect.DeepEqual(p, old) {
				return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
			}
			// skip the duplicate
			continue
		}
		seen[p.ImportPath] = p

		pkg := &Package{
			Name:            p.Name,
			ID:              p.ImportPath,
			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
		}

		// Work around https://golang.org/issue/28749:
		// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
		// Filter out any elements of CompiledGoFiles that are also in OtherFiles.
		// We have to keep this workaround in place until go1.12 is a distant memory.
		if len(pkg.OtherFiles) > 0 {
			other := make(map[string]bool, len(pkg.OtherFiles))
			for _, f := range pkg.OtherFiles {
				other[f] = true
			}

			// Filter in place, reusing the backing array.
			out := pkg.CompiledGoFiles[:0]
			for _, f := range pkg.CompiledGoFiles {
				if other[f] {
					continue
				}
				out = append(out, f)
			}
			pkg.CompiledGoFiles = out
		}

		// Extract the PkgPath from the package's ID.
		// IDs like "p [q.test]" carry the path before the first space.
		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
			pkg.PkgPath = pkg.ID[:i]
		} else {
			pkg.PkgPath = pkg.ID
		}

		if pkg.PkgPath == "unsafe" {
			pkg.GoFiles = nil // ignore fake unsafe.go file
		}

		// Assume go list emits only absolute paths for Dir.
		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
		}

		if p.Export != "" && !filepath.IsAbs(p.Export) {
			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
		} else {
			pkg.ExportFile = p.Export
		}

		// imports
		//
		// Imports contains the IDs of all imported packages.
		// ImportsMap records (path, ID) only where they differ.
		ids := make(map[string]bool)
		for _, id := range p.Imports {
			ids[id] = true
		}
		pkg.Imports = make(map[string]*Package)
		for path, id := range p.ImportMap {
			pkg.Imports[path] = &Package{ID: id} // non-identity import
			delete(ids, id)
		}
		for id := range ids {
			if id == "C" {
				// cgo pseudo-import; has no package of its own.
				continue
			}

			pkg.Imports[id] = &Package{ID: id} // identity import
		}
		if !p.DepOnly {
			response.Roots = append(response.Roots, pkg.ID)
		}

		// Work around for pre-go.1.11 versions of go list.
		// TODO(matloob): they should be handled by the fallback.
		// Can we delete this?
		if len(pkg.CompiledGoFiles) == 0 {
			pkg.CompiledGoFiles = pkg.GoFiles
		}

		if p.Error != nil {
			pkg.Errors = append(pkg.Errors, Error{
				Pos: p.Error.Pos,
				Msg: p.Error.Err,
			})
		}

		response.Packages = append(response.Packages, pkg)
	}

	return &response, nil
}
+
// absJoin absolutizes and flattens the lists of files: relative paths are
// joined onto dir, absolute paths are passed through unchanged.
func absJoin(dir string, fileses ...[]string) (res []string) {
	for _, group := range fileses {
		for _, f := range group {
			if filepath.IsAbs(f) {
				res = append(res, f)
			} else {
				res = append(res, filepath.Join(dir, f))
			}
		}
	}
	return res
}
+
+func golistargs(cfg *Config, words []string) []string {
+ const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+ fullargs := []string{
+ "list", "-e", "-json",
+ fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0),
+ // go list doesn't let you pass -test and -find together,
+ // probably because you'd just get the TestMain.
+ fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
+ }
+ fullargs = append(fullargs, cfg.BuildFlags...)
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ return fullargs
+}
+
// invokeGo returns the stdout of a go command invocation.
// On failure it attempts several recoveries for known go-list quirks before
// giving up; see the inline comments for each case.
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
	stdout := new(bytes.Buffer)
	stderr := new(bytes.Buffer)
	cmd := exec.CommandContext(cfg.Context, "go", args...)
	// On darwin the cwd gets resolved to the real path, which breaks anything that
	// expects the working directory to keep the original path, including the
	// go command when dealing with modules.
	// The Go stdlib has a special feature where if the cwd and the PWD are the
	// same node then it trusts the PWD, so by setting it in the env for the child
	// process we fix up all the paths returned by the go command.
	cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
	cmd.Dir = cfg.Dir
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	if debug {
		// Log duration and stderr once the command completes.
		defer func(start time.Time) {
			log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
		}(time.Now())
	}

	if err := cmd.Run(); err != nil {
		// Check for 'go' executable not being found.
		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
			return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
		}

		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			// Catastrophic error:
			// - context cancellation
			return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
		}

		// Old go version?
		if strings.Contains(stderr.String(), "flag provided but not defined") {
			return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
		}

		// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
		// the error in the Err section of stdout in case -e option is provided.
		// This fix is provided for backwards compatibility.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
			// Synthesize the JSON error record go list should have produced.
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Export mode entails a build.
		// If that build fails, errors appear on stderr
		// (despite the -e flag) and the Export field is blank.
		// Do not fail in that case.
		// The same is true if an ad-hoc package given to go list doesn't exist.
		// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
		// packages don't exist or a build fails.
		if !usesExportData(cfg) && !containsGoFile(args) {
			return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
		}
	}

	// As of writing, go list -export prints some non-fatal compilation
	// errors to stderr, even with -e set. We would prefer that it put
	// them in the Package.Error JSON (see https://golang.org/issue/26319).
	// In the meantime, there's nowhere good to put them, but they can
	// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
	// is set.
	if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
		fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
	}

	// debugging
	if false {
		fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout)
	}

	return stdout, nil
}
+
// containsGoFile reports whether any element of s names a .go file.
func containsGoFile(s []string) bool {
	for i := range s {
		if strings.HasSuffix(s[i], ".go") {
			return true
		}
	}
	return false
}
+
+func cmdDebugStr(cmd *exec.Cmd, args ...string) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.Split(kv, "=")
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+ var quotedArgs []string
+ for _, arg := range args {
+ quotedArgs = append(quotedArgs, strconv.Quote(arg))
+ }
+
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " "))
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
new file mode 100644
index 000000000..33a0a28f2
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -0,0 +1,138 @@
+package packages
+
+import (
+ "go/parser"
+ "go/token"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
// processGolistOverlay provides rudimentary support for adding
// files that don't exist on disk to an overlay. The results can be
// sometimes incorrect.
// It returns the IDs of packages it modified and the import paths of
// packages it discovered are needed but not yet loaded.
// TODO(matloob): Handle unsupported cases, including the following:
// - test files
// - adding test and non-test files to test variants of packages
// - determining the correct package to add given a new import path
// - creating packages that don't exist
func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) {
	havePkgs := make(map[string]string) // importPath -> non-test package ID
	needPkgsSet := make(map[string]bool)
	modifiedPkgsSet := make(map[string]bool)

	for _, pkg := range response.Packages {
		// This is an approximation of import path to id. This can be
		// wrong for tests, vendored packages, and a number of other cases.
		havePkgs[pkg.PkgPath] = pkg.ID
	}

outer:
	for path, contents := range cfg.Overlay {
		base := filepath.Base(path)
		if strings.HasSuffix(path, "_test.go") {
			// Overlays don't support adding new test files yet.
			// TODO(matloob): support adding new test files.
			continue
		}
		dir := filepath.Dir(path)
		for _, pkg := range response.Packages {
			// Check whether pkg's directory holds the overlay file and
			// whether the file name is already listed among its GoFiles.
			var dirContains, fileExists bool
			for _, f := range pkg.GoFiles {
				if sameFile(filepath.Dir(f), dir) {
					dirContains = true
				}
				if filepath.Base(f) == base {
					fileExists = true
				}
			}
			// The overlay could have included an entirely new package.
			isNewPackage := extractPackage(pkg, path, contents)
			if dirContains || isNewPackage {
				if !fileExists {
					pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles?
					pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path)
					modifiedPkgsSet[pkg.ID] = true
				}
				imports, err := extractImports(path, contents)
				if err != nil {
					// Let the parser or type checker report errors later.
					continue outer
				}
				for _, imp := range imports {
					_, found := pkg.Imports[imp]
					if !found {
						needPkgsSet[imp] = true
						// TODO(matloob): Handle cases when the following block isn't correct.
						// These include imports of test variants, imports of vendored packages, etc.
						id, ok := havePkgs[imp]
						if !ok {
							id = imp
						}
						pkg.Imports[imp] = &Package{ID: id}
					}
				}
				continue outer
			}
		}
	}

	// Flatten the accumulator sets into the returned slices
	// (iteration order, and hence slice order, is unspecified).
	needPkgs = make([]string, 0, len(needPkgsSet))
	for pkg := range needPkgsSet {
		needPkgs = append(needPkgs, pkg)
	}
	modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
	for pkg := range modifiedPkgsSet {
		modifiedPkgs = append(modifiedPkgs, pkg)
	}
	return modifiedPkgs, needPkgs, err
}
+
// extractImports parses contents (attributed to filename) just far enough to
// collect the unquoted import paths it declares.
func extractImports(filename string, contents []byte) ([]string, error) {
	f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
	if err != nil {
		return nil, err
	}
	var paths []string
	for _, spec := range f.Imports {
		p, err := strconv.Unquote(spec.Path.Value)
		if err != nil {
			return nil, err
		}
		paths = append(paths, p)
	}
	return paths, nil
}
+
+// extractPackage attempts to extract a package defined in an overlay.
+//
+// If the package has errors and has no Name, GoFiles, or Imports,
+// then it's possible that it doesn't yet exist on disk.
+func extractPackage(pkg *Package, filename string, contents []byte) bool {
+ // TODO(rstambler): Check the message of the actual error?
+ // It differs between $GOPATH and module mode.
+ if len(pkg.Errors) != 1 {
+ return false
+ }
+ if pkg.Name != "" || pkg.ExportFile != "" {
+ return false
+ }
+ if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
+ return false
+ }
+ if len(pkg.Imports) > 0 {
+ return false
+ }
+ f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
+ if err != nil {
+ return false
+ }
+ // TODO(rstambler): This doesn't work for main packages.
+ if filepath.Base(pkg.PkgPath) != f.Name.Name {
+ return false
+ }
+ pkg.Name = f.Name.Name
+ pkg.Errors = nil
+ return true
+}
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
new file mode 100644
index 000000000..eedd43bb6
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -0,0 +1,1084 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "golang.org/x/tools/go/gcexportdata"
+)
+
// A LoadMode specifies the amount of detail to return when loading.
// Higher-numbered modes cause Load to return more information,
// but may be slower. Load may return more information than requested.
//
// The Need* values below are single-bit flags; combine them with |.
type LoadMode int

const (
	// The following constants are used to specify which fields of the Package
	// should be filled when loading is done. As a special case to provide
	// backwards compatibility, a LoadMode of 0 is equivalent to LoadFiles.
	// For all other LoadModes, the bits below specify which fields will be filled
	// in the result packages.
	// WARNING: This part of the go/packages API is EXPERIMENTAL. It might
	// be changed or removed up until April 15 2019. After that date it will
	// be frozen.
	// TODO(matloob): Remove this comment on April 15.

	// ID and Errors (if present) will always be filled.

	// NeedName adds Name and PkgPath.
	NeedName LoadMode = 1 << iota

	// NeedFiles adds GoFiles and OtherFiles.
	NeedFiles

	// NeedCompiledGoFiles adds CompiledGoFiles.
	NeedCompiledGoFiles

	// NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
	// "placeholder" Packages with only the ID set.
	NeedImports

	// NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports
	// is not set NeedDeps has no effect.
	NeedDeps

	// NeedExportsFile adds ExportsFile.
	NeedExportsFile

	// NeedTypes adds Types, Fset, and IllTyped.
	NeedTypes

	// NeedSyntax adds Syntax.
	NeedSyntax

	// NeedTypesInfo adds TypesInfo.
	NeedTypesInfo

	// NeedTypesSizes adds TypesSizes.
	NeedTypesSizes
)
+
const (
	// LoadFiles finds the packages and computes their source file lists.
	// Package fields: ID, Name, Errors, GoFiles, CompiledGoFiles, and OtherFiles.
	LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles

	// LoadImports adds import information for each package
	// and its dependencies.
	// Package fields added: Imports.
	LoadImports = LoadFiles | NeedImports | NeedDeps

	// LoadTypes adds type information for package-level
	// declarations in the packages matching the patterns.
	// Package fields added: Types, TypesSizes, Fset, and IllTyped.
	// This mode uses type information provided by the build system when
	// possible, and may fill in the ExportFile field.
	LoadTypes = LoadImports | NeedTypes | NeedTypesSizes

	// LoadSyntax adds typed syntax trees for the packages matching the patterns.
	// Package fields added: Syntax, and TypesInfo, for direct pattern matches only.
	LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo

	// LoadAllSyntax adds typed syntax trees for the packages matching the patterns
	// and all dependencies.
	// Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo,
	// for all packages in the import graph.
	//
	// NOTE(review): currently identical in value to LoadSyntax; the
	// distinct name is kept for documentation and future divergence.
	LoadAllSyntax = LoadSyntax
)
+
// A Config specifies details about how packages should be loaded.
// The zero value is a valid configuration.
// Calls to Load do not modify this struct.
type Config struct {
	// Mode controls the level of information returned for each package.
	Mode LoadMode

	// Context specifies the context for the load operation.
	// If the context is cancelled, the loader may stop early
	// and return an ErrCancelled error.
	// If Context is nil, the load cannot be cancelled.
	Context context.Context

	// Dir is the directory in which to run the build system's query tool
	// that provides information about the packages.
	// If Dir is empty, the tool is run in the current directory.
	Dir string

	// Env is the environment to use when invoking the build system's query tool.
	// If Env is nil, the current environment is used.
	// As in os/exec's Cmd, only the last value in the slice for
	// each environment key is used. To specify the setting of only
	// a few variables, append to the current environment, as in:
	//
	//	opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
	//
	Env []string

	// BuildFlags is a list of command-line flags to be passed through to
	// the build system's query tool.
	BuildFlags []string

	// Fset provides source position information for syntax trees and types.
	// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
	Fset *token.FileSet

	// ParseFile is called to read and parse each file
	// when preparing a package's type-checked syntax tree.
	// It must be safe to call ParseFile simultaneously from multiple goroutines.
	// If ParseFile is nil, the loader will use parser.ParseFile.
	//
	// ParseFile should parse the source from src and use filename only for
	// recording position information.
	//
	// An application may supply a custom implementation of ParseFile
	// to change the effective file contents or the behavior of the parser,
	// or to modify the syntax tree. For example, selectively eliminating
	// unwanted function bodies can significantly accelerate type checking.
	ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)

	// If Tests is set, the loader includes not just the packages
	// matching a particular pattern but also any related test packages,
	// including test-only variants of the package and the test executable.
	//
	// For example, when using the go command, loading "fmt" with Tests=true
	// returns four packages, with IDs "fmt" (the standard package),
	// "fmt [fmt.test]" (the package as compiled for the test),
	// "fmt_test" (the test functions from source files in package fmt_test),
	// and "fmt.test" (the test binary).
	//
	// In build systems with explicit names for tests,
	// setting Tests may have no effect.
	Tests bool

	// Overlay provides a mapping of absolute file paths to file contents.
	// If the file with the given path already exists, the parser will use the
	// alternative file contents provided by the map.
	//
	// Overlays provide incomplete support for when a given file doesn't
	// already exist on disk. See the package doc above for more details.
	Overlay map[string][]byte
}
+
// driver is the type for functions that query the build system for the
// packages named by the patterns. Implementations include the built-in
// go list driver and any external driver located by findExternalDriver.
type driver func(cfg *Config, patterns ...string) (*driverResponse, error)

// driverResponse contains the results for a driver query.
type driverResponse struct {
	// Sizes, if not nil, is the types.Sizes to use when type checking.
	Sizes *types.StdSizes

	// Roots is the set of package IDs that make up the root packages.
	// We have to encode this separately because when we encode a single package
	// we cannot know if it is one of the roots as that requires knowledge of the
	// graph it is part of.
	Roots []string `json:",omitempty"`

	// Packages is the full set of packages in the graph.
	// The packages are not connected into a graph.
	// The Imports if populated will be stubs that only have their ID set.
	// Imports will be connected and then type and syntax information added in a
	// later pass (see refine).
	Packages []*Package
}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// Config specifies loading options;
+// nil behaves the same as an empty Config.
+//
+// Load returns an error if any of the patterns was invalid
+// as defined by the underlying build system.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The PrintErrors function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+ l := newLoader(cfg)
+ response, err := defaultDriver(&l.Config, patterns...)
+ if err != nil {
+ return nil, err
+ }
+ l.sizes = response.Sizes
+ return l.refine(response.Roots, response.Packages...)
+}
+
+// defaultDriver is a driver that looks for an external driver binary, and if
+// it does not find it falls back to the built in go list driver.
+func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ driver := findExternalDriver(cfg)
+ if driver == nil {
+ driver = goListDriver
+ }
+ return driver(cfg, patterns...)
+}
+
// A Package describes a loaded Go package.
type Package struct {
	// ID is a unique identifier for a package,
	// in a syntax provided by the underlying build system.
	//
	// Because the syntax varies based on the build system,
	// clients should treat IDs as opaque and not attempt to
	// interpret them.
	ID string

	// Name is the package name as it appears in the package source code.
	Name string

	// PkgPath is the package path as used by the go/types package.
	PkgPath string

	// Errors contains any errors encountered querying the metadata
	// of the package, or while parsing or type-checking its files.
	// Each Error's Kind records which of those stages produced it.
	Errors []Error

	// GoFiles lists the absolute file paths of the package's Go source files.
	GoFiles []string

	// CompiledGoFiles lists the absolute file paths of the package's source
	// files that were presented to the compiler.
	// This may differ from GoFiles if files are processed before compilation.
	CompiledGoFiles []string

	// OtherFiles lists the absolute file paths of the package's non-Go source files,
	// including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
	OtherFiles []string

	// ExportFile is the absolute path to a file containing type
	// information for the package as provided by the build system.
	ExportFile string

	// Imports maps import paths appearing in the package's Go source files
	// to corresponding loaded Packages.
	Imports map[string]*Package

	// Types provides type information for the package.
	// Modes LoadTypes and above set this field for packages matching the
	// patterns; type information for dependencies may be missing or incomplete.
	// Mode LoadAllSyntax sets this field for all packages, including dependencies.
	Types *types.Package

	// Fset provides position information for Types, TypesInfo, and Syntax.
	// It is set only when Types is set.
	Fset *token.FileSet

	// IllTyped indicates whether the package or any dependency contains errors.
	// It is set only when Types is set.
	IllTyped bool

	// Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
	//
	// Mode LoadSyntax sets this field for packages matching the patterns.
	// Mode LoadAllSyntax sets this field for all packages, including dependencies.
	Syntax []*ast.File

	// TypesInfo provides type information about the package's syntax trees.
	// It is set only when Syntax is set.
	TypesInfo *types.Info

	// TypesSizes provides the effective size function for types in TypesInfo.
	TypesSizes types.Sizes
}
+
// An Error describes a problem with a package's metadata, syntax, or types.
type Error struct {
	Pos  string // "file:line:col" or "file:line" or "" or "-"
	Msg  string // description of the problem
	Kind ErrorKind // which stage produced the error (driver, parser, or type-checker)
}
+
// ErrorKind describes the source of the error, allowing the user to
// differentiate between errors generated by the driver, the parser, or the
// type-checker.
type ErrorKind int

const (
	// UnknownError is the zero value: the error's origin was not recorded.
	UnknownError ErrorKind = iota
	// ListError marks errors reported by the underlying driver (e.g. go list).
	ListError
	// ParseError marks errors produced while parsing source files.
	ParseError
	// TypeError marks errors produced by the type-checker.
	TypeError
)
+
+func (err Error) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
// flatPackage is the JSON form of Package
// It drops all the type and syntax fields, and transforms the Imports
// into a map from import path to package ID.
//
// TODO(adonovan): identify this struct with Package, effectively
// publishing the JSON protocol.
type flatPackage struct {
	ID              string
	Name            string            `json:",omitempty"`
	PkgPath         string            `json:",omitempty"`
	Errors          []Error           `json:",omitempty"`
	GoFiles         []string          `json:",omitempty"`
	CompiledGoFiles []string          `json:",omitempty"`
	OtherFiles      []string          `json:",omitempty"`
	ExportFile      string            `json:",omitempty"`
	Imports         map[string]string `json:",omitempty"`
}
+
+// MarshalJSON returns the Package in its JSON form.
+// For the most part, the structure fields are written out unmodified, and
+// the type and syntax fields are skipped.
+// The imports are written out as just a map of path to package id.
+// The errors are written using a custom type that tries to preserve the
+// structure of error types we know about.
+//
+// This method exists to enable support for additional build systems. It is
+// not intended for use by clients of the API and we may change the format.
+func (p *Package) MarshalJSON() ([]byte, error) {
+ flat := &flatPackage{
+ ID: p.ID,
+ Name: p.Name,
+ PkgPath: p.PkgPath,
+ Errors: p.Errors,
+ GoFiles: p.GoFiles,
+ CompiledGoFiles: p.CompiledGoFiles,
+ OtherFiles: p.OtherFiles,
+ ExportFile: p.ExportFile,
+ }
+ if len(p.Imports) > 0 {
+ flat.Imports = make(map[string]string, len(p.Imports))
+ for path, ipkg := range p.Imports {
+ flat.Imports[path] = ipkg.ID
+ }
+ }
+ return json.Marshal(flat)
+}
+
+// UnmarshalJSON reads in a Package from its JSON format.
+// See MarshalJSON for details about the format accepted.
+func (p *Package) UnmarshalJSON(b []byte) error {
+ flat := &flatPackage{}
+ if err := json.Unmarshal(b, &flat); err != nil {
+ return err
+ }
+ *p = Package{
+ ID: flat.ID,
+ Name: flat.Name,
+ PkgPath: flat.PkgPath,
+ Errors: flat.Errors,
+ GoFiles: flat.GoFiles,
+ CompiledGoFiles: flat.CompiledGoFiles,
+ OtherFiles: flat.OtherFiles,
+ ExportFile: flat.ExportFile,
+ }
+ if len(flat.Imports) > 0 {
+ p.Imports = make(map[string]*Package, len(flat.Imports))
+ for path, id := range flat.Imports {
+ p.Imports[path] = &Package{ID: id}
+ }
+ }
+ return nil
+}
+
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase
+type loaderPackage struct {
+ *Package
+ importErrors map[string]error // maps each bad import to its error
+ loadOnce sync.Once
+ color uint8 // for cycle detection
+ needsrc bool // load from source (Mode >= LoadTypes)
+ needtypes bool // type information is either requested or depended on
+ initial bool // package was matched by a pattern
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+ pkgs map[string]*loaderPackage
+ Config
+ sizes types.Sizes
+ parseCache map[string]*parseValue
+ parseCacheMu sync.Mutex
+ exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
+
+ // TODO(matloob): Add an implied mode here and use that instead of mode.
+ // Implied mode would contain all the fields we need the data for so we can
+ // get the actually requested fields. We'll zero them out before returning
+ // packages to the user. This will make it easier for us to get the conditions
+ // where we need certain modes right.
+}
+
+type parseValue struct {
+ f *ast.File
+ err error
+ ready chan struct{}
+}
+
+func newLoader(cfg *Config) *loader {
+ ld := &loader{
+ parseCache: map[string]*parseValue{},
+ }
+ if cfg != nil {
+ ld.Config = *cfg
+ }
+ if ld.Config.Mode == 0 {
+ ld.Config.Mode = LoadFiles // Preserve zero behavior of Mode for backwards compatibility.
+ }
+ if ld.Config.Env == nil {
+ ld.Config.Env = os.Environ()
+ }
+ if ld.Context == nil {
+ ld.Context = context.Background()
+ }
+ if ld.Dir == "" {
+ if dir, err := os.Getwd(); err == nil {
+ ld.Dir = dir
+ }
+ }
+
+ if ld.Mode&NeedTypes != 0 {
+ if ld.Fset == nil {
+ ld.Fset = token.NewFileSet()
+ }
+
+ // ParseFile is required even in LoadTypes mode
+ // because we load source if export data is missing.
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, src, mode)
+ }
+ }
+ }
+ return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type and
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+ rootMap := make(map[string]int, len(roots))
+ for i, root := range roots {
+ rootMap[root] = i
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // first pass, fixup and build the map and roots
+ var initial = make([]*loaderPackage, len(roots))
+ for _, pkg := range list {
+ rootIndex := -1
+ if i, found := rootMap[pkg.ID]; found {
+ rootIndex = i
+ }
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0,
+ needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 ||
+ len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+ pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if rootIndex >= 0 {
+ initial[rootIndex] = lpkg
+ lpkg.initial = true
+ }
+ }
+ for i, root := range roots {
+ if initial[i] == nil {
+ return nil, fmt.Errorf("root package %v is missing", root)
+ }
+ }
+
+ // Materialize the import graph.
+
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+ // and materializes the graph as Packages.Imports.
+ //
+ // Valid imports are saved in the Packages.Import map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does. These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(lpkg *loaderPackage) bool
+ var srcPkgs []*loaderPackage
+ visit = func(lpkg *loaderPackage) bool {
+ switch lpkg.color {
+ case black:
+ return lpkg.needsrc
+ case grey:
+ panic("internal error: grey node")
+ }
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+ stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+ lpkg.Imports = make(map[string]*Package, len(stubs))
+ for importPath, ipkg := range stubs {
+ var importErr error
+ imp := ld.pkgs[ipkg.ID]
+ if imp == nil {
+ // (includes package "C" when DisableCgo)
+ importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+ } else if imp.color == grey {
+ importErr = fmt.Errorf("import cycle: %s", stack)
+ }
+ if importErr != nil {
+ if lpkg.importErrors == nil {
+ lpkg.importErrors = make(map[string]error)
+ }
+ lpkg.importErrors[importPath] = importErr
+ continue
+ }
+
+ if visit(imp) {
+ lpkg.needsrc = true
+ }
+ lpkg.Imports[importPath] = imp.Package
+ }
+ if lpkg.needsrc {
+ srcPkgs = append(srcPkgs, lpkg)
+ }
+ if ld.Mode&NeedTypesSizes != 0 {
+ lpkg.TypesSizes = ld.sizes
+ }
+ stack = stack[:len(stack)-1] // pop
+ lpkg.color = black
+
+ return lpkg.needsrc
+ }
+
+ if ld.Mode&(NeedImports|NeedDeps) == 0 {
+ // We do this to drop the stub import packages that we are not even going to try to resolve.
+ for _, lpkg := range initial {
+ lpkg.Imports = nil
+ }
+ } else {
+ // For each initial package, create its import DAG.
+ for _, lpkg := range initial {
+ visit(lpkg)
+ }
+ }
+ if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right?
+ for _, lpkg := range srcPkgs {
+ // Complete type information is required for the
+ // immediate dependencies of each source package.
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ imp.needtypes = true
+ }
+ }
+ }
+ // Load type data if needed, starting at
+ // the initial packages (roots of the import DAG).
+ if ld.Mode&NeedTypes != 0 {
+ var wg sync.WaitGroup
+ for _, lpkg := range initial {
+ wg.Add(1)
+ go func(lpkg *loaderPackage) {
+ ld.loadRecursive(lpkg)
+ wg.Done()
+ }(lpkg)
+ }
+ wg.Wait()
+ }
+
+ result := make([]*Package, len(initial))
+ importPlaceholders := make(map[string]*Package)
+ for i, lpkg := range initial {
+ result[i] = lpkg.Package
+ }
+ for i := range ld.pkgs {
+ // Clear all unrequested fields, for extra de-Hyrum-ization.
+ if ld.Mode&NeedName == 0 {
+ ld.pkgs[i].Name = ""
+ ld.pkgs[i].PkgPath = ""
+ }
+ if ld.Mode&NeedFiles == 0 {
+ ld.pkgs[i].GoFiles = nil
+ ld.pkgs[i].OtherFiles = nil
+ }
+ if ld.Mode&NeedCompiledGoFiles == 0 {
+ ld.pkgs[i].CompiledGoFiles = nil
+ }
+ if ld.Mode&NeedImports == 0 {
+ ld.pkgs[i].Imports = nil
+ }
+ if ld.Mode&NeedExportsFile == 0 {
+ ld.pkgs[i].ExportFile = ""
+ }
+ if ld.Mode&NeedTypes == 0 {
+ ld.pkgs[i].Types = nil
+ ld.pkgs[i].Fset = nil
+ ld.pkgs[i].IllTyped = false
+ }
+ if ld.Mode&NeedSyntax == 0 {
+ ld.pkgs[i].Syntax = nil
+ }
+ if ld.Mode&NeedTypesInfo == 0 {
+ ld.pkgs[i].TypesInfo = nil
+ }
+ if ld.Mode&NeedTypesSizes == 0 {
+ ld.pkgs[i].TypesSizes = nil
+ }
+ if ld.Mode&NeedDeps == 0 {
+ for j, pkg := range ld.pkgs[i].Imports {
+ ph, ok := importPlaceholders[pkg.ID]
+ if !ok {
+ ph = &Package{ID: pkg.ID}
+ importPlaceholders[pkg.ID] = ph
+ }
+ ld.pkgs[i].Imports[j] = ph
+ }
+ }
+ }
+ return result, nil
+}
+
+// loadRecursive loads the specified package and its dependencies,
+// recursively, in parallel, in topological order.
+// It is atomic and idempotent.
+// Precondition: ld.Mode&NeedTypes.
+func (ld *loader) loadRecursive(lpkg *loaderPackage) {
+ lpkg.loadOnce.Do(func() {
+ // Load the direct dependencies, in parallel.
+ var wg sync.WaitGroup
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ wg.Add(1)
+ go func(imp *loaderPackage) {
+ ld.loadRecursive(imp)
+ wg.Done()
+ }(imp)
+ }
+ wg.Wait()
+
+ ld.loadPackage(lpkg)
+ })
+}
+
+// loadPackage loads the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode >= LoadTypes.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+ if lpkg.PkgPath == "unsafe" {
+ // Fill in the blanks to avoid surprises.
+ lpkg.Types = types.Unsafe
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = []*ast.File{}
+ lpkg.TypesInfo = new(types.Info)
+ lpkg.TypesSizes = ld.sizes
+ return
+ }
+
+ // Call NewPackage directly with explicit name.
+ // This avoids skew between golist and go/types when the files'
+ // package declarations are inconsistent.
+ lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
+ lpkg.Fset = ld.Fset
+
+ // Subtle: we populate all Types fields with an empty Package
+ // before loading export data so that export data processing
+ // never has to create a types.Package for an indirect dependency,
+ // which would then require that such created packages be explicitly
+ // inserted back into the Import graph as a final step after export data loading.
+ // The Diamond test exercises this case.
+ if !lpkg.needtypes {
+ return
+ }
+ if !lpkg.needsrc {
+ ld.loadFromExportData(lpkg)
+ return // not a source package, don't get syntax trees
+ }
+
+ appendError := func(err error) {
+ // Convert various error types into the one true Error.
+ var errs []Error
+ switch err := err.(type) {
+ case Error:
+ // from driver
+ errs = append(errs, err)
+
+ case *os.PathError:
+ // from parser
+ errs = append(errs, Error{
+ Pos: err.Path + ":1",
+ Msg: err.Err.Error(),
+ Kind: ParseError,
+ })
+
+ case scanner.ErrorList:
+ // from parser
+ for _, err := range err {
+ errs = append(errs, Error{
+ Pos: err.Pos.String(),
+ Msg: err.Msg,
+ Kind: ParseError,
+ })
+ }
+
+ case types.Error:
+ // from type checker
+ errs = append(errs, Error{
+ Pos: err.Fset.Position(err.Pos).String(),
+ Msg: err.Msg,
+ Kind: TypeError,
+ })
+
+ default:
+ // unexpected impoverished error from parser?
+ errs = append(errs, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError,
+ })
+
+ // If you see this error message, please file a bug.
+ log.Printf("internal error: error %q (%T) without position", err, err)
+ }
+
+ lpkg.Errors = append(lpkg.Errors, errs...)
+ }
+
+ files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
+ for _, err := range errs {
+ appendError(err)
+ }
+
+ lpkg.Syntax = files
+
+ lpkg.TypesInfo = &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ lpkg.TypesSizes = ld.sizes
+
+ importer := importerFunc(func(path string) (*types.Package, error) {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // The imports map is keyed by import path.
+ ipkg := lpkg.Imports[path]
+ if ipkg == nil {
+ if err := lpkg.importErrors[path]; err != nil {
+ return nil, err
+ }
+ // There was skew between the metadata and the
+ // import declarations, likely due to an edit
+ // race, or because the ParseFile feature was
+ // used to supply alternative file contents.
+ return nil, fmt.Errorf("no metadata for %s", path)
+ }
+
+ if ipkg.Types != nil && ipkg.Types.Complete() {
+ return ipkg.Types, nil
+ }
+ log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
+ panic("unreachable")
+ })
+
+ // type-check
+ tc := &types.Config{
+ Importer: importer,
+
+ // Type-check bodies of functions only in non-initial packages.
+ // Example: for import graph A->B->C and initial packages {A,C},
+ // we can ignore function bodies in B.
+ IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial,
+
+ Error: appendError,
+ Sizes: ld.sizes,
+ }
+ types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+
+ lpkg.importErrors = nil // no longer needed
+
+ // If !Cgo, the type-checker uses FakeImportC mode, so
+ // it doesn't invoke the importer for import "C",
+ // nor report an error for the import,
+ // or for any undefined C.f reference.
+ // We must detect this explicitly and correctly
+ // mark the package as IllTyped (by reporting an error).
+ // TODO(adonovan): if these errors are annoying,
+ // we could just set IllTyped quietly.
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+ appendError(err)
+ break outer
+ }
+ }
+ }
+ }
+
+ // Record accumulated errors.
+ illTyped := len(lpkg.Errors) > 0
+ if !illTyped {
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ illTyped = true
+ break
+ }
+ }
+ }
+ lpkg.IllTyped = illTyped
+}
+
+// An importFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+ ld.parseCacheMu.Lock()
+ v, ok := ld.parseCache[filename]
+ if ok {
+ // cache hit
+ ld.parseCacheMu.Unlock()
+ <-v.ready
+ } else {
+ // cache miss
+ v = &parseValue{ready: make(chan struct{})}
+ ld.parseCache[filename] = v
+ ld.parseCacheMu.Unlock()
+
+ var src []byte
+ for f, contents := range ld.Config.Overlay {
+ if sameFile(f, filename) {
+ src = contents
+ }
+ }
+ var err error
+ if src == nil {
+ ioLimit <- true // wait
+ src, err = ioutil.ReadFile(filename)
+ <-ioLimit // signal
+ }
+ if err != nil {
+ v.err = err
+ } else {
+ v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+ }
+
+ close(v.ready)
+ }
+ return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+//
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var wg sync.WaitGroup
+ n := len(filenames)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range filenames {
+ if ld.Config.Context.Err() != nil {
+ parsed[i] = nil
+ errors[i] = ld.Config.Context.Err()
+ continue
+ }
+ wg.Add(1)
+ go func(i int, filename string) {
+ parsed[i], errors[i] = ld.parseFile(filename)
+ wg.Done()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+//
+func sameFile(x, y string) bool {
+ if x == y {
+ // It could be the case that y doesn't exist.
+ // For instance, it may be an overlay file that
+ // hasn't been written to disk. To handle that case
+ // let x == y through. (We added the exact absolute path
+ // string to the CompiledGoFiles list, so the unwritten
+ // overlay case implies x==y.)
+ return true
+ }
+ if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
+
+// loadFromExportData returns type information for the specified
+// package, loading it from an export data file on the first request.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
+ if lpkg.PkgPath == "" {
+ log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+ }
+
+ // Because gcexportdata.Read has the potential to create or
+ // modify the types.Package for each node in the transitive
+ // closure of dependencies of lpkg, all exportdata operations
+ // must be sequential. (Finer-grained locking would require
+ // changes to the gcexportdata API.)
+ //
+ // The exportMu lock guards the Package.Pkg field and the
+ // types.Package it points to, for each Package in the graph.
+ //
+ // Not all accesses to Package.Pkg need to be protected by exportMu:
+ // graph ordering ensures that direct dependencies of source
+ // packages are fully loaded before the importer reads their Pkg field.
+ ld.exportMu.Lock()
+ defer ld.exportMu.Unlock()
+
+ if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
+ return tpkg, nil // cache hit
+ }
+
+ lpkg.IllTyped = true // fail safe
+
+ if lpkg.ExportFile == "" {
+ // Errors while building export data will have been printed to stderr.
+ return nil, fmt.Errorf("no export data file")
+ }
+ f, err := os.Open(lpkg.ExportFile)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ // Read gc export data.
+ //
+ // We don't currently support gccgo export data because all
+ // underlying workspaces use the gc toolchain. (Even build
+ // systems that support gccgo don't use it for workspace
+ // queries.)
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+
+ // Build the view.
+ //
+ // The gcexportdata machinery has no concept of package ID.
+ // It identifies packages by their PkgPath, which although not
+ // globally unique is unique within the scope of one invocation
+ // of the linker, type-checker, or gcexportdata.
+ //
+ // So, we must build a PkgPath-keyed view of the global
+ // (conceptually ID-keyed) cache of packages and pass it to
+ // gcexportdata. The view must contain every existing
+ // package that might possibly be mentioned by the
+ // current package---its transitive closure.
+ //
+ // In loadPackage, we unconditionally create a types.Package for
+ // each dependency so that export data loading does not
+ // create new ones.
+ //
+ // TODO(adonovan): it would be simpler and more efficient
+ // if the export data machinery invoked a callback to
+ // get-or-create a package instead of a map.
+ //
+ view := make(map[string]*types.Package) // view seen by gcexportdata
+ seen := make(map[*loaderPackage]bool) // all visited packages
+ var visit func(pkgs map[string]*Package)
+ visit = func(pkgs map[string]*Package) {
+ for _, p := range pkgs {
+ lpkg := ld.pkgs[p.ID]
+ if !seen[lpkg] {
+ seen[lpkg] = true
+ view[lpkg.PkgPath] = lpkg.Types
+ visit(lpkg.Imports)
+ }
+ }
+ }
+ visit(lpkg.Imports)
+
+ viewLen := len(view) + 1 // adding the self package
+ // Parse the export data.
+ // (May modify incomplete packages in view but not create new ones.)
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+ if viewLen != len(view) {
+ log.Fatalf("Unexpected package creation during export data loading")
+ }
+
+ lpkg.Types = tpkg
+ lpkg.IllTyped = false
+
+ return tpkg, nil
+}
+
+func usesExportData(cfg *Config) bool {
+ return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0
+}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 000000000..b13cb081f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,55 @@
+package packages
+
+import (
+ "fmt"
+ "os"
+ "sort"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package)
+ visit = func(pkg *Package) {
+ if !seen[pkg] {
+ seen[pkg] = true
+
+ if pre == nil || pre(pkg) {
+ paths := make([]string, 0, len(pkg.Imports))
+ for path := range pkg.Imports {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths) // Imports is a map, this makes visit stable
+ for _, path := range paths {
+ visit(pkg.Imports[path])
+ }
+ }
+
+ if post != nil {
+ post(pkg)
+ }
+ }
+ }
+ for _, pkg := range pkgs {
+ visit(pkg)
+ }
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+ var n int
+ Visit(pkgs, nil, func(pkg *Package) {
+ for _, err := range pkg.Errors {
+ fmt.Fprintln(os.Stderr, err)
+ n++
+ }
+ })
+ return n
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
new file mode 100644
index 000000000..38f596daf
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ var obj types.Object
+ switch fun := astutil.Unparen(call.Fun).(type) {
+ case *ast.Ident:
+ obj = info.Uses[fun] // type, var, builtin, or declared func
+ case *ast.SelectorExpr:
+ if sel, ok := info.Selections[fun]; ok {
+ obj = sel.Obj() // method or field
+ } else {
+ obj = info.Uses[fun.Sel] // qualified identifier?
+ }
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return nil // T(x) is a conversion, not a call
+ }
+ return obj
+}
+
+// StaticCallee returns the target (function or method) of a static
+// function call, if any. It returns nil for calls to builtins.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+ if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
+ return f
+ }
+ return nil
+}
+
+func interfaceMethod(f *types.Func) bool {
+ recv := f.Type().(*types.Signature).Recv()
+ return recv != nil && types.IsInterface(recv.Type())
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
new file mode 100644
index 000000000..9c441dba9
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+//
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
new file mode 100644
index 000000000..c7f754500
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -0,0 +1,313 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as Map,
+// a mapping from types.Type to interface{} values.
+package typeutil // import "golang.org/x/tools/go/types/typeutil"
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "reflect"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary interface{} values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Not thread-safe.
+//
+type Map struct {
+ hasher Hasher // shared by many Maps
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value interface{}
+}
+
+// SetHasher sets the hasher used by Map.
+//
+// All Hashers are functionally equivalent but contain internal state
+// used to cache the results of hashing previously seen types.
+//
+// A single Hasher created by MakeHasher() may be shared among many
+// Maps. This is recommended if the instances have many keys in
+// common, as it will amortize the cost of hash computation.
+//
+// A Hasher may grow without bound as new types are seen. Even when a
+// type is deleted from the map, the Hasher never shrinks, since other
+// types in the map may reference the deleted type indirectly.
+//
+// Hashers are not thread-safe, and read-only operations such as
+// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
+// read-lock) is require around all Map operations if a shared
+// hasher is accessed from multiple threads.
+//
+// If SetHasher is not called, the Map will create a private hasher at
+// the first call to Insert.
+//
+func (m *Map) SetHasher(hasher Hasher) {
+ m.hasher = hasher
+}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+//
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && types.Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+//
+func (m *Map) At(key types.Type) interface{} {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[m.hasher.Hash(key)] {
+ if e.key != nil && types.Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
+ if m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if types.Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ if m.hasher.memo == nil {
+ m.hasher = MakeHasher()
+ }
+ hash := m.hasher.Hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+//
+func (m *Map) Iterate(f func(key types.Type, value interface{})) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ interface{}) {
+ keys = append(keys, key)
+ })
+ return keys
+}
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value interface{}) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+//
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+//
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// A Hasher maps each type to its hash value.
+// For efficiency, a hasher uses memoization; thus its memory
+// footprint grows monotonically over time.
+// Hashers are not thread-safe.
+// Hashers have reference semantics.
+// Call MakeHasher to create a Hasher.
+type Hasher struct {
+ memo map[types.Type]uint32
+}
+
+// MakeHasher returns a new Hasher instance.
+func MakeHasher() Hasher {
+ return Hasher{make(map[types.Type]uint32)}
+}
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ hash, ok := h.memo[t]
+ if !ok {
+ hash = h.hashFor(t)
+ h.memo[t] = hash
+ }
+ return hash
+}
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hashFor computes the hash of t.
+func (h Hasher) hashFor(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.Hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.Hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.Hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Interface:
+ var hash uint32 = 9103
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // See go/types.identicalMethods for rationale.
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+ }
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+
+ case *types.Named:
+ // Not safe with a copying GC; objects may move.
+ return uint32(reflect.ValueOf(t.Obj()).Pointer())
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+ panic(t)
+}
+
+func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ var hash uint32 = 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 3 * h.Hash(tuple.At(i).Type())
+ }
+ return hash
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 000000000..32084610f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+ "go/types"
+ "sync"
+)
+
// A MethodSetCache records the method set of each type T for which
// MethodSet(T) is called so that repeat queries are fast.
// The zero value is a ready-to-use cache instance.
type MethodSetCache struct {
	// mu guards both maps below; they are allocated lazily on first use.
	mu     sync.Mutex
	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
	others map[types.Type]*types.MethodSet                            // all other types
}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+//
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+ if cache == nil {
+ return types.NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := T.(type) {
+ case *types.Named:
+ return cache.lookupNamed(T).value
+
+ case *types.Pointer:
+ if N, ok := T.Elem().(*types.Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = types.NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[types.Type]*types.MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = types.NewMethodSet(named)
+ msets.pointer = types.NewMethodSet(types.NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
new file mode 100644
index 000000000..9849c24ce
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -0,0 +1,52 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import "go/types"
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+//
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+ isPointerToConcrete := func(T types.Type) bool {
+ ptr, ok := T.(*types.Pointer)
+ return ok && !types.IsInterface(ptr.Elem())
+ }
+
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if types.IsInterface(T) || isPointerToConcrete(T) {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ // T is some other concrete type.
+ // Report methods of T and *T, preferring those of T.
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+
+ }
+ return result
+}
diff --git a/vendor/golang.org/x/tools/imports/fix.go b/vendor/golang.org/x/tools/imports/fix.go
new file mode 100644
index 000000000..777d28ccd
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/fix.go
@@ -0,0 +1,1259 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/internal/gopathwalk"
+)
+
// Debug controls verbose logging.
var Debug = false

// LocalPrefix is a comma-separated string of import path prefixes, which, if
// set, instructs Process to sort the import paths with the given prefixes
// into another group after 3rd-party packages.
var LocalPrefix string

// localPrefixes splits LocalPrefix into its comma-separated components,
// or returns nil when it is unset.
func localPrefixes() []string {
	if LocalPrefix == "" {
		return nil
	}
	return strings.Split(LocalPrefix, ",")
}

// importToGroup is a list of functions which map from an import path to
// a group number. Classifiers are tried in order; the first match wins.
var importToGroup = []func(importPath string) (num int, ok bool){
	// Group 3: paths under one of the configured local prefixes.
	func(importPath string) (int, bool) {
		for _, p := range localPrefixes() {
			if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath {
				return 3, true
			}
		}
		return 0, false
	},
	// Group 2: appengine packages.
	func(importPath string) (int, bool) {
		if strings.HasPrefix(importPath, "appengine") {
			return 2, true
		}
		return 0, false
	},
	// Group 1: paths containing a dot (assumed to be a domain, i.e. 3rd party).
	func(importPath string) (int, bool) {
		if strings.Contains(importPath, ".") {
			return 1, true
		}
		return 0, false
	},
}

// importGroup returns the sort group for importPath; 0 (standard library)
// when no classifier claims it.
func importGroup(importPath string) int {
	for _, fn := range importToGroup {
		if n, ok := fn(importPath); ok {
			return n
		}
	}
	return 0
}
+
// An importInfo represents a single import statement.
type importInfo struct {
	importPath string // import path, e.g. "crypto/rand".
	name       string // import name, e.g. "crand", or "" if none.
}

// A packageInfo represents what's known about a package.
type packageInfo struct {
	name    string          // real package name, if known.
	exports map[string]bool // known exports, keyed by exported identifier.
}
+
// parseOtherFiles parses all the Go files in srcDir except filename, including
// test files if filename looks like a test. Unreadable directories and
// unparseable files are silently skipped.
func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File {
	// This could use go/packages but it doesn't buy much, and it fails
	// with https://golang.org/issue/26296 in LoadFiles mode in some cases.
	considerTests := strings.HasSuffix(filename, "_test.go")

	entries, err := ioutil.ReadDir(srcDir)
	if err != nil {
		return nil
	}

	base := filepath.Base(filename)
	var parsed []*ast.File
	for _, entry := range entries {
		name := entry.Name()
		if name == base || !strings.HasSuffix(name, ".go") {
			continue
		}
		if strings.HasSuffix(name, "_test.go") && !considerTests {
			continue
		}
		if f, err := parser.ParseFile(fset, filepath.Join(srcDir, name), nil, 0); err == nil {
			parsed = append(parsed, f)
		}
	}
	return parsed
}
+
// addGlobals puts the names of package-level vars and consts declared in f
// into the provided map.
func addGlobals(f *ast.File, globals map[string]bool) {
	for _, decl := range f.Decls {
		genDecl, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}

		for _, spec := range genDecl.Specs {
			valueSpec, ok := spec.(*ast.ValueSpec)
			if !ok {
				continue
			}
			// Record every declared name, not just the first: the
			// original `globals[valueSpec.Names[0].Name] = true` missed
			// b in `var a, b = 1, 2`, so references to b looked like
			// missing package references.
			for _, name := range valueSpec.Names {
				globals[name.Name] = true
			}
		}
	}
}
+
+// collectReferences builds a map of selector expressions, from
+// left hand side (X) to a set of right hand sides (Sel).
+func collectReferences(f *ast.File) references {
+ refs := references{}
+
+ var visitor visitFn
+ visitor = func(node ast.Node) ast.Visitor {
+ if node == nil {
+ return visitor
+ }
+ switch v := node.(type) {
+ case *ast.SelectorExpr:
+ xident, ok := v.X.(*ast.Ident)
+ if !ok {
+ break
+ }
+ if xident.Obj != nil {
+ // If the parser can resolve it, it's not a package ref.
+ break
+ }
+ if !ast.IsExported(v.Sel.Name) {
+ // Whatever this is, it's not exported from a package.
+ break
+ }
+ pkgName := xident.Name
+ r := refs[pkgName]
+ if r == nil {
+ r = make(map[string]bool)
+ refs[pkgName] = r
+ }
+ r[v.Sel.Name] = true
+ }
+ return visitor
+ }
+ ast.Walk(visitor, f)
+ return refs
+}
+
+// collectImports returns all the imports in f, keyed by their package name as
+// determined by pathToName. Unnamed imports (., _) and "C" are ignored.
+func collectImports(f *ast.File) []*importInfo {
+ var imports []*importInfo
+ for _, imp := range f.Imports {
+ var name string
+ if imp.Name != nil {
+ name = imp.Name.Name
+ }
+ if imp.Path.Value == `"C"` || name == "_" || name == "." {
+ continue
+ }
+ path := strings.Trim(imp.Path.Value, `"`)
+ imports = append(imports, &importInfo{
+ name: name,
+ importPath: path,
+ })
+ }
+ return imports
+}
+
+// findMissingImport searches pass's candidates for an import that provides
+// pkg, containing all of syms.
+func (p *pass) findMissingImport(pkg string, syms map[string]bool) *importInfo {
+ for _, candidate := range p.candidates {
+ pkgInfo, ok := p.knownPackages[candidate.importPath]
+ if !ok {
+ continue
+ }
+ if p.importIdentifier(candidate) != pkg {
+ continue
+ }
+
+ allFound := true
+ for right := range syms {
+ if !pkgInfo.exports[right] {
+ allFound = false
+ break
+ }
+ }
+
+ if allFound {
+ return candidate
+ }
+ }
+ return nil
+}
+
// references is set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
type references map[string]map[string]bool

// A pass contains all the inputs and state necessary to fix a file's imports.
// It can be modified in some ways during use; see comments below.
type pass struct {
	// Inputs. These must be set before a call to load, and not modified after.
	fset                 *token.FileSet // fset used to parse f and its siblings.
	f                    *ast.File      // the file being fixed.
	srcDir               string         // the directory containing f.
	fixEnv               *fixEnv        // the environment to use for go commands, etc.
	loadRealPackageNames bool           // if true, load package names from disk rather than guessing them.
	otherFiles           []*ast.File    // sibling files.

	// Intermediate state, generated by load.
	existingImports map[string]*importInfo // f's imports, keyed by the identifier they introduce.
	allRefs         references             // selector references in f with an unresolved LHS.
	missingRefs     references             // subset of allRefs not satisfied by imports or globals.

	// Inputs to fix. These can be augmented between successive fix calls.
	lastTry       bool                    // indicates that this is the last call and fix should clean up as best it can.
	candidates    []*importInfo           // candidate imports in priority order.
	knownPackages map[string]*packageInfo // information about all known packages.
}
+
+// loadPackageNames saves the package names for everything referenced by imports.
+func (p *pass) loadPackageNames(imports []*importInfo) error {
+ var unknown []string
+ for _, imp := range imports {
+ if _, ok := p.knownPackages[imp.importPath]; ok {
+ continue
+ }
+ unknown = append(unknown, imp.importPath)
+ }
+
+ names, err := p.fixEnv.getResolver().loadPackageNames(unknown, p.srcDir)
+ if err != nil {
+ return err
+ }
+
+ for path, name := range names {
+ p.knownPackages[path] = &packageInfo{
+ name: name,
+ exports: map[string]bool{},
+ }
+ }
+ return nil
+}
+
+// importIdentifier returns the identifier that imp will introduce. It will
+// guess if the package name has not been loaded, e.g. because the source
+// is not available.
+func (p *pass) importIdentifier(imp *importInfo) string {
+ if imp.name != "" {
+ return imp.name
+ }
+ known := p.knownPackages[imp.importPath]
+ if known != nil && known.name != "" {
+ return known.name
+ }
+ return importPathToAssumedName(imp.importPath)
+}
+
// load reads in everything necessary to run a pass, and reports whether the
// file already has all the imports it needs. It fills in p.missingRefs with the
// file's missing symbols, if any, or removes unused imports if not.
func (p *pass) load() bool {
	p.knownPackages = map[string]*packageInfo{}
	p.missingRefs = references{}
	p.existingImports = map[string]*importInfo{}

	// Load basic information about the file in question.
	p.allRefs = collectReferences(p.f)

	// Load stuff from other files in the same package:
	// global variables so we know they don't need resolving, and imports
	// that we might want to mimic.
	globals := map[string]bool{}
	for _, otherFile := range p.otherFiles {
		// Don't load globals from files that are in the same directory
		// but a different package. Using them to suggest imports is OK.
		if p.f.Name.Name == otherFile.Name.Name {
			addGlobals(otherFile, globals)
		}
		p.candidates = append(p.candidates, collectImports(otherFile)...)
	}

	// Resolve all the import paths we've seen to package names, and store
	// f's imports by the identifier they introduce.
	imports := collectImports(p.f)
	if p.loadRealPackageNames {
		err := p.loadPackageNames(append(imports, p.candidates...))
		if err != nil {
			if Debug {
				log.Printf("loading package names: %v", err)
			}
			// Treat a naming failure as "file not complete"; a later,
			// better-informed pass may still succeed.
			return false
		}
	}
	for _, imp := range imports {
		p.existingImports[p.importIdentifier(imp)] = imp
	}

	// Find missing references: selector LHSs that are neither known
	// globals nor identifiers introduced by an existing import.
	for left, rights := range p.allRefs {
		if globals[left] {
			continue
		}
		_, ok := p.existingImports[left]
		if !ok {
			p.missingRefs[left] = rights
			continue
		}
	}
	if len(p.missingRefs) != 0 {
		return false
	}

	// Nothing is missing; let fix drop any now-unused imports.
	return p.fix()
}
+
// fix attempts to satisfy missing imports using p.candidates. If it finds
// everything, or if p.lastTry is true, it adds the imports it found,
// removes anything unused, and returns true.
func (p *pass) fix() bool {
	// Find missing imports.
	var selected []*importInfo
	for left, rights := range p.missingRefs {
		if imp := p.findMissingImport(left, rights); imp != nil {
			selected = append(selected, imp)
		}
	}

	// Unless this is the final attempt, bail out if anything is still
	// unresolved so a later, better-informed pass can try again.
	if !p.lastTry && len(selected) != len(p.missingRefs) {
		return false
	}

	// Found everything, or giving up. Add the new imports and remove any unused.
	for _, imp := range p.existingImports {
		// We deliberately ignore globals here, because we can't be sure
		// they're in the same package. People do things like put multiple
		// main packages in the same directory, and we don't want to
		// remove imports if they happen to have the same name as a var in
		// a different package.
		if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok {
			astutil.DeleteNamedImport(p.fset, p.f, imp.name, imp.importPath)
		}
	}

	for _, imp := range selected {
		astutil.AddNamedImport(p.fset, p.f, imp.name, imp.importPath)
	}

	if p.loadRealPackageNames {
		// Add an explicit local name to any import whose real package
		// name differs from the naive path-derived guess.
		for _, imp := range p.f.Imports {
			if imp.Name != nil {
				continue
			}
			// NOTE(review): the cutset `""` is equivalent to `"` (a cutset
			// is a set of runes, duplicates are redundant) — harmless, but
			// inconsistent with the `"` used elsewhere in this file.
			path := strings.Trim(imp.Path.Value, `""`)
			ident := p.importIdentifier(&importInfo{importPath: path})
			if ident != importPathToAssumedName(path) {
				imp.Name = &ast.Ident{Name: ident, NamePos: imp.Pos()}
			}
		}
	}

	return true
}
+
+// assumeSiblingImportsValid assumes that siblings' use of packages is valid,
+// adding the exports they use.
+func (p *pass) assumeSiblingImportsValid() {
+ for _, f := range p.otherFiles {
+ refs := collectReferences(f)
+ imports := collectImports(f)
+ importsByName := map[string]*importInfo{}
+ for _, imp := range imports {
+ importsByName[p.importIdentifier(imp)] = imp
+ }
+ for left, rights := range refs {
+ if imp, ok := importsByName[left]; ok {
+ if _, ok := stdlib[imp.importPath]; ok {
+ // We have the stdlib in memory; no need to guess.
+ rights = stdlib[imp.importPath]
+ }
+ p.addCandidate(imp, &packageInfo{
+ // no name; we already know it.
+ exports: rights,
+ })
+ }
+ }
+ }
+}
+
+// addCandidate adds a candidate import to p, and merges in the information
+// in pkg.
+func (p *pass) addCandidate(imp *importInfo, pkg *packageInfo) {
+ p.candidates = append(p.candidates, imp)
+ if existing, ok := p.knownPackages[imp.importPath]; ok {
+ if existing.name == "" {
+ existing.name = pkg.name
+ }
+ for export := range pkg.exports {
+ existing.exports[export] = true
+ }
+ } else {
+ p.knownPackages[imp.importPath] = pkg
+ }
+}
+
// fixImports adds and removes imports from f so that all its references are
// satisfied and there are no unused imports.
//
// This is declared as a variable rather than a function so goimports can
// easily be extended by adding a file with an init function.
var fixImports = fixImportsDefault

// fixImportsDefault runs up to four progressively more expensive passes over
// f, stopping as soon as one of them resolves every reference.
func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *fixEnv) error {
	abs, err := filepath.Abs(filename)
	if err != nil {
		return err
	}
	srcDir := filepath.Dir(abs)
	if Debug {
		log.Printf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
	}

	// First pass: looking only at f, and using the naive algorithm to
	// derive package names from import paths, see if the file is already
	// complete. We can't add any imports yet, because we don't know
	// if missing references are actually package vars.
	p := &pass{fset: fset, f: f, srcDir: srcDir}
	if p.load() {
		return nil
	}

	otherFiles := parseOtherFiles(fset, srcDir, filename)

	// Second pass: add information from other files in the same package,
	// like their package vars and imports.
	p.otherFiles = otherFiles
	if p.load() {
		return nil
	}

	// Now we can try adding imports from the stdlib.
	p.assumeSiblingImportsValid()
	addStdlibCandidates(p, p.missingRefs)
	if p.fix() {
		return nil
	}

	// Third pass: get real package names where we had previously used
	// the naive algorithm. This is the first step that will use the
	// environment, so we provide it here for the first time.
	p = &pass{fset: fset, f: f, srcDir: srcDir, fixEnv: env}
	p.loadRealPackageNames = true
	p.otherFiles = otherFiles
	if p.load() {
		return nil
	}

	addStdlibCandidates(p, p.missingRefs)
	p.assumeSiblingImportsValid()
	if p.fix() {
		return nil
	}

	// Go look for candidates in $GOPATH, etc. We don't necessarily load
	// the real exports of sibling imports, so keep assuming their contents.
	if err := addExternalCandidates(p, p.missingRefs, filename); err != nil {
		return err
	}

	// Final attempt: apply whatever was found, even if incomplete.
	p.lastTry = true
	p.fix()
	return nil
}
+
// fixEnv contains environment variables and settings that affect the use of
// the go command, the go/build package, etc.
type fixEnv struct {
	// If non-empty, these will be used instead of the
	// process-wide values.
	GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS string
	WorkingDir                                    string

	// If true, use go/packages regardless of the environment.
	ForceGoPackages bool

	// resolver caches the resolver chosen by getResolver; when pre-set,
	// it overrides all environment-based selection.
	resolver resolver
}
+
+func (e *fixEnv) env() []string {
+ env := os.Environ()
+ add := func(k, v string) {
+ if v != "" {
+ env = append(env, k+"="+v)
+ }
+ }
+ add("GOPATH", e.GOPATH)
+ add("GOROOT", e.GOROOT)
+ add("GO111MODULE", e.GO111MODULE)
+ add("GOPROXY", e.GOPROXY)
+ add("GOFLAGS", e.GOFLAGS)
+ if e.WorkingDir != "" {
+ add("PWD", e.WorkingDir)
+ }
+ return env
+}
+
// getResolver picks the resolver implementation for this environment:
// an explicitly injected one, go/packages when forced, the module resolver
// when `go env GOMOD` reports a go.mod in effect, and the classic GOPATH
// resolver otherwise (including when the go command itself fails).
func (e *fixEnv) getResolver() resolver {
	if e.resolver != nil {
		return e.resolver
	}
	if e.ForceGoPackages {
		return &goPackagesResolver{env: e}
	}

	out, err := e.invokeGo("env", "GOMOD")
	if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
		return &gopathResolver{env: e}
	}
	return &moduleResolver{env: e}
}
+
+func (e *fixEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config {
+ return &packages.Config{
+ Mode: mode,
+ Dir: e.WorkingDir,
+ Env: e.env(),
+ }
+}
+
+func (e *fixEnv) buildContext() *build.Context {
+ ctx := build.Default
+ ctx.GOROOT = e.GOROOT
+ ctx.GOPATH = e.GOPATH
+ return &ctx
+}
+
// invokeGo runs `go args...` with this environment and working directory and
// returns its stdout. On failure the captured stderr is folded into the error.
func (e *fixEnv) invokeGo(args ...string) (*bytes.Buffer, error) {
	cmd := exec.Command("go", args...)
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	cmd.Env = e.env()
	cmd.Dir = e.WorkingDir

	// Log command duration when verbose logging is on.
	if Debug {
		defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
	}
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr)
	}
	return stdout, nil
}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.Split(kv, "=")
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
+}
+
+func addStdlibCandidates(pass *pass, refs references) {
+ add := func(pkg string) {
+ pass.addCandidate(
+ &importInfo{importPath: pkg},
+ &packageInfo{name: path.Base(pkg), exports: stdlib[pkg]})
+ }
+ for left := range refs {
+ if left == "rand" {
+ // Make sure we try crypto/rand before math/rand.
+ add("crypto/rand")
+ add("math/rand")
+ continue
+ }
+ for importPath := range stdlib {
+ if path.Base(importPath) == left {
+ add(importPath)
+ }
+ }
+ }
+}
+
// A resolver does the build-system-specific parts of goimports.
// Implementations in this package: goPackagesResolver, gopathResolver, and
// moduleResolver.
type resolver interface {
	// loadPackageNames loads the package names in importPaths.
	loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
	// scan finds (at least) the packages satisfying refs. The returned slice is unordered.
	scan(refs references) ([]*pkg, error)
}
+
// goPackagesResolver implements resolver for GOPATH and module workspaces
// using go/packages. (The comment previously misnamed it "gopathResolver".)
type goPackagesResolver struct {
	env *fixEnv
}
+
+func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+ cfg := r.env.newPackagesConfig(packages.LoadFiles)
+ pkgs, err := packages.Load(cfg, importPaths...)
+ if err != nil {
+ return nil, err
+ }
+ names := map[string]string{}
+ for _, pkg := range pkgs {
+ names[VendorlessPath(pkg.PkgPath)] = pkg.Name
+ }
+ // We may not have found all the packages. Guess the rest.
+ for _, path := range importPaths {
+ if _, ok := names[path]; ok {
+ continue
+ }
+ names[path] = importPathToAssumedName(path)
+ }
+ return names, nil
+
+}
+
+func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
+ var loadQueries []string
+ for pkgName := range refs {
+ loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName)
+ }
+ sort.Strings(loadQueries)
+ cfg := r.env.newPackagesConfig(packages.LoadFiles)
+ goPackages, err := packages.Load(cfg, loadQueries...)
+ if err != nil {
+ return nil, err
+ }
+
+ var scan []*pkg
+ for _, goPackage := range goPackages {
+ scan = append(scan, &pkg{
+ dir: filepath.Dir(goPackage.CompiledGoFiles[0]),
+ importPathShort: VendorlessPath(goPackage.PkgPath),
+ goPackage: goPackage,
+ })
+ }
+ return scan, nil
+}
+
// addExternalCandidates scans the workspace ($GOPATH, modules) for packages
// that could satisfy refs and merges each hit into pass's candidate set.
// Export lookups run concurrently, one goroutine per unresolved package
// name; the first error cancels the remaining lookups.
func addExternalCandidates(pass *pass, refs references, filename string) error {
	dirScan, err := pass.fixEnv.getResolver().scan(refs)
	if err != nil {
		return err
	}

	// Search for imports matching potential package references.
	type result struct {
		imp *importInfo
		pkg *packageInfo
	}
	// Buffered to len(refs) so no sender can ever block.
	results := make(chan result, len(refs))

	ctx, cancel := context.WithCancel(context.TODO())
	var wg sync.WaitGroup
	// Ensure no goroutine outlives this function, even on early return.
	defer func() {
		cancel()
		wg.Wait()
	}()
	var (
		firstErr     error
		firstErrOnce sync.Once
	)
	for pkgName, symbols := range refs {
		wg.Add(1)
		go func(pkgName string, symbols map[string]bool) {
			defer wg.Done()

			found, err := findImport(ctx, pass.fixEnv, dirScan, pkgName, symbols, filename)

			if err != nil {
				// Keep only the first failure; cancel the rest.
				firstErrOnce.Do(func() {
					firstErr = err
					cancel()
				})
				return
			}

			if found == nil {
				return // No matching package.
			}

			imp := &importInfo{
				importPath: found.importPathShort,
			}

			pkg := &packageInfo{
				name:    pkgName,
				exports: symbols,
			}
			results <- result{imp, pkg}
		}(pkgName, symbols)
	}
	go func() {
		wg.Wait()
		close(results)
	}()

	for result := range results {
		pass.addCandidate(result.imp, result.pkg)
	}
	// Reading firstErr here is safe: results is closed only after wg.Wait,
	// i.e. after every writer (guarded by firstErrOnce) has finished.
	return firstErr
}
+
// notIdentifier reports whether ch is an invalid identifier character.
func notIdentifier(ch rune) bool {
	isIdent := 'a' <= ch && ch <= 'z' ||
		'A' <= ch && ch <= 'Z' ||
		'0' <= ch && ch <= '9' ||
		ch == '_' ||
		(ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch)))
	return !isIdent
}
+
+// importPathToAssumedName returns the assumed package name of an import path.
+// It does this using only string parsing of the import path.
+// It picks the last element of the path that does not look like a major
+// version, and then picks the valid identifier off the start of that element.
+// It is used to determine if a local rename should be added to an import for
+// clarity.
+// This function could be moved to a standard package and exported if we want
+// for use in other tools.
+func importPathToAssumedName(importPath string) string {
+ base := path.Base(importPath)
+ if strings.HasPrefix(base, "v") {
+ if _, err := strconv.Atoi(base[1:]); err == nil {
+ dir := path.Dir(importPath)
+ if dir != "." {
+ base = path.Base(dir)
+ }
+ }
+ }
+ base = strings.TrimPrefix(base, "go-")
+ if i := strings.IndexFunc(base, notIdentifier); i >= 0 {
+ base = base[:i]
+ }
+ return base
+}
+
+// gopathResolver implements resolver for GOPATH workspaces.
+type gopathResolver struct {
+ env *fixEnv
+}
+
+func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+ names := map[string]string{}
+ for _, path := range importPaths {
+ names[path] = importPathToName(r.env, path, srcDir)
+ }
+ return names, nil
+}
+
+// importPathToNameGoPath finds out the actual package name, as declared in its .go files.
+// If there's a problem, it returns "".
+func importPathToName(env *fixEnv, importPath, srcDir string) (packageName string) {
+ // Fast path for standard library without going to disk.
+ if _, ok := stdlib[importPath]; ok {
+ return path.Base(importPath) // stdlib packages always match their paths.
+ }
+
+ buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly)
+ if err != nil {
+ return ""
+ }
+ pkgName, err := packageDirToName(buildPkg.Dir)
+ if err != nil {
+ return ""
+ }
+ return pkgName
+}
+
+// packageDirToName is a faster version of build.Import if
+// the only thing desired is the package name. It uses build.FindOnly
+// to find the directory and then only parses one file in the package,
+// trusting that the files in the directory are consistent.
+func packageDirToName(dir string) (packageName string, err error) {
+ d, err := os.Open(dir)
+ if err != nil {
+ return "", err
+ }
+ names, err := d.Readdirnames(-1)
+ d.Close()
+ if err != nil {
+ return "", err
+ }
+ sort.Strings(names) // to have predictable behavior
+ var lastErr error
+ var nfile int
+ for _, name := range names {
+ if !strings.HasSuffix(name, ".go") {
+ continue
+ }
+ if strings.HasSuffix(name, "_test.go") {
+ continue
+ }
+ nfile++
+ fullFile := filepath.Join(dir, name)
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ pkgName := f.Name.Name
+ if pkgName == "documentation" {
+ // Special case from go/build.ImportDir, not
+ // handled by ctx.MatchFile.
+ continue
+ }
+ if pkgName == "main" {
+ // Also skip package main, assuming it's a +build ignore generator or example.
+ // Since you can't import a package main anyway, there's no harm here.
+ continue
+ }
+ return pkgName, nil
+ }
+ if lastErr != nil {
+ return "", lastErr
+ }
+ return "", fmt.Errorf("no importable package found in %d Go files", nfile)
+}
+
// A pkg is a candidate package discovered on disk.
type pkg struct {
	goPackage       *packages.Package // set only when found via go/packages; nil for GOPATH scans.
	dir             string            // absolute file path to pkg directory ("/usr/lib/go/src/net/http")
	importPathShort string            // vendorless import path ("net/http", "a/b")
}

// A pkgDistance pairs a candidate package with its directory distance from
// the file being fixed (-1 when unreachable; see distance).
type pkgDistance struct {
	pkg      *pkg
	distance int // relative distance to target
}
+
+// byDistanceOrImportPathShortLength sorts by relative distance breaking ties
+// on the short import path length and then the import string itself.
+type byDistanceOrImportPathShortLength []pkgDistance
+
+func (s byDistanceOrImportPathShortLength) Len() int { return len(s) }
+func (s byDistanceOrImportPathShortLength) Less(i, j int) bool {
+ di, dj := s[i].distance, s[j].distance
+ if di == -1 {
+ return false
+ }
+ if dj == -1 {
+ return true
+ }
+ if di != dj {
+ return di < dj
+ }
+
+ vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort
+ if len(vi) != len(vj) {
+ return len(vi) < len(vj)
+ }
+ return vi < vj
+}
+func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
// distance returns the number of path elements separating targetpath from
// basepath (0 for the same directory), or -1 if no relative path exists.
func distance(basepath, targetpath string) int {
	rel, err := filepath.Rel(basepath, targetpath)
	if err != nil {
		return -1
	}
	if rel == "." {
		return 0
	}
	return 1 + strings.Count(rel, string(filepath.Separator))
}
+
// scan walks every source root of the build context and returns a pkg for
// each package directory found. The refs argument is unused: a GOPATH walk
// cannot be narrowed by package name.
func (r *gopathResolver) scan(_ references) ([]*pkg, error) {
	dupCheck := make(map[string]bool)
	var result []*pkg

	// mu guards dupCheck and result; the lock suggests gopathwalk may call
	// add from multiple goroutines — TODO confirm against gopathwalk docs.
	var mu sync.Mutex

	add := func(root gopathwalk.Root, dir string) {
		mu.Lock()
		defer mu.Unlock()

		if _, dup := dupCheck[dir]; dup {
			return
		}
		dupCheck[dir] = true
		// Import path = slash-separated path below the source root.
		importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):])
		result = append(result, &pkg{
			importPathShort: VendorlessPath(importpath),
			dir:             dir,
		})
	}
	gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: Debug, ModulesEnabled: false})
	return result, nil
}
+
// VendorlessPath returns the devendorized version of the import path ipath.
// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b".
func VendorlessPath(ipath string) string {
	// Devendorize for use in import statement.
	const vendor = "/vendor/"
	if i := strings.LastIndex(ipath, vendor); i >= 0 {
		return ipath[i+len(vendor):]
	}
	return strings.TrimPrefix(ipath, "vendor/")
}
+
// loadExports returns the set of exported symbols in the package at dir.
// It returns nil on error or if the package name in dir does not match expectPackage.
func loadExports(ctx context.Context, env *fixEnv, expectPackage string, pkg *pkg) (map[string]bool, error) {
	if Debug {
		log.Printf("loading exports in dir %s (seeking package %s)", pkg.dir, expectPackage)
	}
	// Fast path: go/packages already told us exactly which files compile;
	// parse those without consulting the build context.
	if pkg.goPackage != nil {
		exports := map[string]bool{}
		fset := token.NewFileSet()
		for _, fname := range pkg.goPackage.CompiledGoFiles {
			f, err := parser.ParseFile(fset, fname, nil, 0)
			if err != nil {
				return nil, fmt.Errorf("parsing %s: %v", fname, err)
			}
			for name := range f.Scope.Objects {
				if ast.IsExported(name) {
					exports[name] = true
				}
			}
		}
		return exports, nil
	}

	exports := make(map[string]bool)

	// Look for non-test, buildable .go files which could provide exports.
	all, err := ioutil.ReadDir(pkg.dir)
	if err != nil {
		return nil, err
	}
	var files []os.FileInfo
	for _, fi := range all {
		name := fi.Name()
		if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
			continue
		}
		// MatchFile applies the context's build constraints (GOOS/GOARCH,
		// build tags) so we only read files that would actually build.
		match, err := env.buildContext().MatchFile(pkg.dir, fi.Name())
		if err != nil || !match {
			continue
		}
		files = append(files, fi)
	}

	if len(files) == 0 {
		return nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", pkg.dir)
	}

	fset := token.NewFileSet()
	for _, fi := range files {
		// Bail out promptly if the caller has given up (e.g. another
		// candidate already satisfied the reference).
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}

		fullFile := filepath.Join(pkg.dir, fi.Name())
		f, err := parser.ParseFile(fset, fullFile, nil, 0)
		if err != nil {
			return nil, fmt.Errorf("parsing %s: %v", fullFile, err)
		}
		pkgName := f.Name.Name
		if pkgName == "documentation" {
			// Special case from go/build.ImportDir, not
			// handled by MatchFile above.
			continue
		}
		if pkgName != expectPackage {
			return nil, fmt.Errorf("scan of dir %v is not expected package %v (actually %v)", pkg.dir, expectPackage, pkgName)
		}
		for name := range f.Scope.Objects {
			if ast.IsExported(name) {
				exports[name] = true
			}
		}
	}

	if Debug {
		exportList := make([]string, 0, len(exports))
		for k := range exports {
			exportList = append(exportList, k)
		}
		sort.Strings(exportList)
		log.Printf("loaded exports in dir %v (package %v): %v", pkg.dir, expectPackage, strings.Join(exportList, ", "))
	}
	return exports, nil
}
+
+// findImport searches for a package with the given symbols.
+// If no package is found, findImport returns (nil, nil).
+func findImport(ctx context.Context, env *fixEnv, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
+ pkgDir, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ pkgDir = filepath.Dir(pkgDir)
+
+ // Find candidate packages, looking only at their directory names first.
+ var candidates []pkgDistance
+ for _, pkg := range dirScan {
+ if pkg.dir != pkgDir && pkgIsCandidate(filename, pkgName, pkg) {
+ candidates = append(candidates, pkgDistance{
+ pkg: pkg,
+ distance: distance(pkgDir, pkg.dir),
+ })
+ }
+ }
+
+ // Sort the candidates by their import package length,
+ // assuming that shorter package names are better than long
+ // ones. Note that this sorts by the de-vendored name, so
+ // there's no "penalty" for vendoring.
+ sort.Sort(byDistanceOrImportPathShortLength(candidates))
+ if Debug {
+ for i, c := range candidates {
+ log.Printf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
+ }
+ }
+
+ // Collect exports for packages with matching names.
+
+ // One result channel per candidate so results can be read back in
+ // candidate (best-first) order regardless of completion order.
+ rescv := make([]chan *pkg, len(candidates))
+ for i := range candidates {
+ rescv[i] = make(chan *pkg, 1)
+ }
+ // Bound how many packages are parsed concurrently.
+ const maxConcurrentPackageImport = 4
+ loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
+
+ ctx, cancel := context.WithCancel(ctx)
+ var wg sync.WaitGroup
+ defer func() {
+ cancel()
+ wg.Wait()
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i, c := range candidates {
+ select {
+ case loadExportsSem <- struct{}{}:
+ case <-ctx.Done():
+ return
+ }
+
+ wg.Add(1)
+ go func(c pkgDistance, resc chan<- *pkg) {
+ defer func() {
+ <-loadExportsSem
+ wg.Done()
+ }()
+
+ exports, err := loadExports(ctx, env, pkgName, c.pkg)
+ if err != nil {
+ if Debug {
+ log.Printf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
+ }
+ resc <- nil
+ return
+ }
+
+ // If it doesn't have the right
+ // symbols, send nil to mean no match.
+ for symbol := range symbols {
+ if !exports[symbol] {
+ resc <- nil
+ return
+ }
+ }
+ resc <- c.pkg
+ }(c, rescv[i])
+ }
+ }()
+
+ // Accept the first (best-ranked) candidate that satisfied all symbols.
+ for _, resc := range rescv {
+ pkg := <-resc
+ if pkg == nil {
+ continue
+ }
+ return pkg, nil
+ }
+ return nil, nil
+}
+
+// pkgIsCandidate reports whether pkg is a candidate for satisfying the
+// finding which package pkgIdent in the file named by filename is trying
+// to refer to.
+//
+// This check is purely lexical and is meant to be as fast as possible
+// because it's run over all $GOPATH directories to filter out poor
+// candidates in order to limit the CPU and I/O later parsing the
+// exports in candidate packages.
+//
+// filename is the file being formatted.
+// pkgIdent is the package being searched for, like "client" (if
+// searching for "client.New")
+func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
+ // Check "internal" and "vendor" visibility:
+ if !canUse(filename, pkg.dir) {
+ return false
+ }
+
+ // Speed optimization to minimize disk I/O:
+ // the last two components on disk must contain the
+ // package name somewhere.
+ //
+ // This permits mismatch naming like directory
+ // "go-foo" being package "foo", or "pkg.v3" being "pkg",
+ // or directory "google.golang.org/api/cloudbilling/v1"
+ // being package "cloudbilling", but doesn't
+ // permit a directory "foo" to be package
+ // "bar", which is strongly discouraged
+ // anyway. There's no reason goimports needs
+ // to be slow just to accommodate that.
+ lastTwo := lastTwoComponents(pkg.importPathShort)
+ if strings.Contains(lastTwo, pkgIdent) {
+ return true
+ }
+ // Retry with a lowercased, hyphen-free form of the path so that
+ // e.g. "go-Foo" can still match the identifier "gofoo".
+ if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
+ lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
+ if strings.Contains(lastTwo, pkgIdent) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// hasHyphenOrUpperASCII reports whether s contains a '-' or an
+// upper-case ASCII letter. It inspects raw bytes, so non-ASCII
+// characters are ignored.
+func hasHyphenOrUpperASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if b == '-' || ('A' <= b && b <= 'Z') {
+ return true
+ }
+ }
+ return false
+}
+
+// lowerASCIIAndRemoveHyphen returns s with every upper-case ASCII letter
+// lowered and every '-' removed. All other bytes (including non-ASCII)
+// are copied through unchanged.
+func lowerASCIIAndRemoveHyphen(s string) (ret string) {
+ buf := make([]byte, 0, len(s))
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case b == '-':
+ continue
+ case 'A' <= b && b <= 'Z':
+ buf = append(buf, b+('a'-'A'))
+ default:
+ buf = append(buf, b)
+ }
+ }
+ return string(buf)
+}
+
+// canUse reports whether the package in dir is usable from filename,
+// respecting the Go "internal" and "vendor" visibility rules.
+func canUse(filename, dir string) bool {
+ // Fast path check, before any allocations. If it doesn't contain vendor
+ // or internal, it's not tricky:
+ // Note that this can false-negative on directories like "notinternal",
+ // but we check it correctly below. This is just a fast path.
+ if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") {
+ return true
+ }
+
+ dirSlash := filepath.ToSlash(dir)
+ if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") {
+ return true
+ }
+ // Vendor or internal directory only visible from children of parent.
+ // That means the path from the current directory to the target directory
+ // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal
+ // or bar/vendor or bar/internal.
+ // After stripping all the leading ../, the only okay place to see vendor or internal
+ // is at the very beginning of the path.
+ absfile, err := filepath.Abs(filename)
+ if err != nil {
+ return false
+ }
+ absdir, err := filepath.Abs(dir)
+ if err != nil {
+ return false
+ }
+ // NOTE(review): Rel is computed with the file itself (not its directory)
+ // as the base path; this matches upstream behavior — confirm before changing.
+ rel, err := filepath.Rel(absfile, absdir)
+ if err != nil {
+ return false
+ }
+ relSlash := filepath.ToSlash(rel)
+ if i := strings.LastIndex(relSlash, "../"); i >= 0 {
+ relSlash = relSlash[i+len("../"):]
+ }
+ return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
+}
+
+// lastTwoComponents returns at most the last two path components
+// of v, using either / or \ as the path separator.
+// When a second separator is found, the result keeps that leading
+// separator (the return is v[i:], where v[i] is the separator).
+func lastTwoComponents(v string) string {
+ nslash := 0
+ for i := len(v) - 1; i >= 0; i-- {
+ if v[i] == '/' || v[i] == '\\' {
+ nslash++
+ if nslash == 2 {
+ return v[i:]
+ }
+ }
+ }
+ return v
+}
+
+// visitFn adapts an ordinary function to the ast.Visitor interface.
+type visitFn func(node ast.Node) ast.Visitor
+
+// Visit implements ast.Visitor by delegating to fn.
+func (fn visitFn) Visit(node ast.Node) ast.Visitor {
+ return fn(node)
+}
diff --git a/vendor/golang.org/x/tools/imports/imports.go b/vendor/golang.org/x/tools/imports/imports.go
new file mode 100644
index 000000000..07101cb80
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/imports.go
@@ -0,0 +1,315 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkstdlib.go
+
+// Package imports implements a Go pretty-printer (like package "go/format")
+// that also adds or removes import statements as necessary.
+package imports // import "golang.org/x/tools/imports"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/format"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// Options specifies options for processing files.
+// The zero value is NOT the default: passing a nil *Options to Process
+// selects the defaults noted on the fields below.
+type Options struct {
+ Fragment bool // Accept fragment of a source file (no package statement)
+ AllErrors bool // Report all errors (not just the first 10 on different lines)
+
+ Comments bool // Print comments (true if nil *Options provided)
+ TabIndent bool // Use tabs for indent (true if nil *Options provided)
+ TabWidth int // Tab width (8 if nil *Options provided)
+
+ FormatOnly bool // Disable the insertion and deletion of imports
+}
+
+// Process formats and adjusts imports for the provided file.
+// If opt is nil the defaults are used.
+//
+// Note that filename's directory influences which imports can be chosen,
+// so it is important that filename be accurate.
+// To process data ``as if'' it were in filename, pass the data as a non-nil src.
+func Process(filename string, src []byte, opt *Options) ([]byte, error) {
+ // Resolve imports against the process-wide default build context
+ // (GOPATH/GOROOT from go/build).
+ env := &fixEnv{GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT}
+ return process(filename, src, opt, env)
+}
+
+// process is the implementation of Process, parameterized by the
+// environment (GOPATH/GOROOT) used to resolve imports.
+func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, error) {
+ if opt == nil {
+ opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
+ }
+ // A nil src means "read the file from disk".
+ if src == nil {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ src = b
+ }
+
+ fileSet := token.NewFileSet()
+ file, adjust, err := parse(fileSet, filename, src, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ if !opt.FormatOnly {
+ if err := fixImports(fileSet, file, filename, env); err != nil {
+ return nil, err
+ }
+ }
+
+ sortImports(fileSet, file)
+ imps := astutil.Imports(fileSet, file)
+ var spacesBefore []string // import paths we need spaces before
+ for _, impSection := range imps {
+ // Within each block of contiguous imports, see if any
+ // import lines are in different group numbers. If so,
+ // we'll need to put a space between them so it's
+ // compatible with gofmt.
+ lastGroup := -1
+ for _, importSpec := range impSection {
+ importPath, _ := strconv.Unquote(importSpec.Path.Value)
+ groupNum := importGroup(importPath)
+ if groupNum != lastGroup && lastGroup != -1 {
+ spacesBefore = append(spacesBefore, importPath)
+ }
+ lastGroup = groupNum
+ }
+
+ }
+
+ printerMode := printer.UseSpaces
+ if opt.TabIndent {
+ printerMode |= printer.TabIndent
+ }
+ printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
+
+ var buf bytes.Buffer
+ err = printConfig.Fprint(&buf, fileSet, file)
+ if err != nil {
+ return nil, err
+ }
+ out := buf.Bytes()
+ // adjust is non-nil only when parse wrapped a fragment; it strips the
+ // synthetic wrapper and restores the original leading/trailing space.
+ if adjust != nil {
+ out = adjust(src, out)
+ }
+ if len(spacesBefore) > 0 {
+ out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Final gofmt pass over the assembled output.
+ out, err = format.Source(out)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// parse parses src, which was read from filename,
+// as a Go source file or statement list.
+// The returned adjust function (nil for a whole source file) undoes the
+// synthetic wrapping applied to fragments after printing.
+func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
+ parserMode := parser.Mode(0)
+ if opt.Comments {
+ parserMode |= parser.ParseComments
+ }
+ if opt.AllErrors {
+ parserMode |= parser.AllErrors
+ }
+
+ // Try as whole source file.
+ file, err := parser.ParseFile(fset, filename, src, parserMode)
+ if err == nil {
+ return file, nil, nil
+ }
+ // If the error is that the source file didn't begin with a
+ // package line and we accept fragmented input, fall through to
+ // try as a source fragment. Stop and return on any other error.
+ if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
+ return nil, nil, err
+ }
+
+ // If this is a declaration list, make it a source file
+ // by inserting a package clause.
+ // Insert using a ;, not a newline, so that parse errors are on
+ // the correct line.
+ const prefix = "package main;"
+ psrc := append([]byte(prefix), src...)
+ file, err = parser.ParseFile(fset, filename, psrc, parserMode)
+ if err == nil {
+ // Gofmt will turn the ; into a \n.
+ // Do that ourselves now and update the file contents,
+ // so that positions and line numbers are correct going forward.
+ psrc[len(prefix)-1] = '\n'
+ fset.File(file.Package).SetLinesForContent(psrc)
+
+ // If a main function exists, we will assume this is a main
+ // package and leave the file.
+ if containsMainFunc(file) {
+ return file, nil, nil
+ }
+
+ adjust := func(orig, src []byte) []byte {
+ // Remove the package clause.
+ src = src[len(prefix):]
+ return matchSpace(orig, src)
+ }
+ return file, adjust, nil
+ }
+ // If the error is that the source file didn't begin with a
+ // declaration, fall through to try as a statement list.
+ // Stop and return on any other error.
+ if !strings.Contains(err.Error(), "expected declaration") {
+ return nil, nil, err
+ }
+
+ // If this is a statement list, make it a source file
+ // by inserting a package clause and turning the list
+ // into a function body. This handles expressions too.
+ // Insert using a ;, not a newline, so that the line numbers
+ // in fsrc match the ones in src.
+ fsrc := append(append([]byte("package p; func _() {"), src...), '}')
+ file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
+ if err == nil {
+ adjust := func(orig, src []byte) []byte {
+ // Remove the wrapping.
+ // Gofmt has turned the ; into a \n\n.
+ src = src[len("package p\n\nfunc _() {"):]
+ src = src[:len(src)-len("}\n")]
+ // Gofmt has also indented the function body one level.
+ // Remove that indent.
+ src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
+ return matchSpace(orig, src)
+ }
+ return file, adjust, nil
+ }
+
+ // Failed, and out of options.
+ return nil, nil, err
+}
+
+// containsMainFunc checks if a file contains a function declaration with the
+// function signature 'func main()'
+func containsMainFunc(file *ast.File) bool {
+ for _, decl := range file.Decls {
+ if f, ok := decl.(*ast.FuncDecl); ok {
+ if f.Name.Name != "main" {
+ continue
+ }
+
+ // main must take no parameters...
+ if len(f.Type.Params.List) != 0 {
+ continue
+ }
+
+ // ...and return no results.
+ if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
+ continue
+ }
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// cutSpace splits b into its leading whitespace, the middle content, and
+// its trailing whitespace (only spaces, tabs, and newlines count).
+// If b is all whitespace, before and middle are nil and after is b.
+func cutSpace(b []byte) (before, middle, after []byte) {
+ i := 0
+ for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
+ i++
+ }
+ j := len(b)
+ for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
+ j--
+ }
+ if i <= j {
+ return b[:i], b[i:j], b[j:]
+ }
+ // All whitespace: the two scans crossed.
+ return nil, nil, b[j:]
+}
+
+// matchSpace reformats src to use the same space context as orig.
+// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
+// 2) matchSpace copies the indentation of the first non-blank line in orig
+// to every non-blank line in src.
+// 3) matchSpace copies the trailing space from orig and uses it in place
+// of src's trailing space.
+func matchSpace(orig []byte, src []byte) []byte {
+ before, _, after := cutSpace(orig)
+ // Split orig's leading space into blank lines and the first line's indent.
+ i := bytes.LastIndex(before, []byte{'\n'})
+ before, indent := before[:i+1], before[i+1:]
+
+ _, src, _ = cutSpace(src)
+
+ var b bytes.Buffer
+ b.Write(before)
+ // Re-emit src line by line, prefixing each non-blank line with indent.
+ for len(src) > 0 {
+ line := src
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, src = line[:i+1], line[i+1:]
+ } else {
+ src = nil
+ }
+ if len(line) > 0 && line[0] != '\n' { // not blank
+ b.Write(indent)
+ }
+ b.Write(line)
+ }
+ b.Write(after)
+ return b.Bytes()
+}
+
+var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
+
+// addImportSpaces copies r to the output, inserting a blank line before
+// each import whose path appears (in order) in breaks. It is used to
+// separate import groups the way gofmt would.
+func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
+ var out bytes.Buffer
+ in := bufio.NewReader(r)
+ inImports := false
+ done := false
+ for {
+ s, err := in.ReadString('\n')
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+
+ // Track whether we are inside the (first) import section.
+ if !inImports && !done && strings.HasPrefix(s, "import") {
+ inImports = true
+ }
+ // Any top-level declaration ends the import section for good.
+ if inImports && (strings.HasPrefix(s, "var") ||
+ strings.HasPrefix(s, "func") ||
+ strings.HasPrefix(s, "const") ||
+ strings.HasPrefix(s, "type")) {
+ done = true
+ inImports = false
+ }
+ if inImports && len(breaks) > 0 {
+ if m := impLine.FindStringSubmatch(s); m != nil {
+ if m[1] == breaks[0] {
+ out.WriteByte('\n')
+ breaks = breaks[1:]
+ }
+ }
+ }
+
+ fmt.Fprint(&out, s)
+ }
+ return out.Bytes(), nil
+}
diff --git a/vendor/golang.org/x/tools/imports/mkindex.go b/vendor/golang.org/x/tools/imports/mkindex.go
new file mode 100644
index 000000000..755e2394f
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/mkindex.go
@@ -0,0 +1,173 @@
+// +build ignore
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Command mkindex creates the file "pkgindex.go" containing an index of the Go
+// standard library. The file is intended to be built as part of the imports
+// package, so that the package may be used in environments where a GOROOT is
+// not available (such as App Engine).
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ pkgIndex = make(map[string][]pkg)
+ exports = make(map[string]map[string]bool)
+)
+
+// main builds pkgindex.go: it scans GOROOT for packages, collects their
+// exported symbols, and writes both tables out as Go source.
+func main() {
+ // Don't use GOPATH.
+ ctx := build.Default
+ ctx.GOPATH = ""
+
+ // Populate pkgIndex global from GOROOT.
+ for _, path := range ctx.SrcDirs() {
+ f, err := os.Open(path)
+ if err != nil {
+ log.Print(err)
+ continue
+ }
+ children, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ log.Print(err)
+ continue
+ }
+ for _, child := range children {
+ if child.IsDir() {
+ loadPkg(path, child.Name())
+ }
+ }
+ }
+ // Populate exports global.
+ for _, ps := range pkgIndex {
+ for _, p := range ps {
+ e := loadExports(p.dir)
+ if e != nil {
+ exports[p.dir] = e
+ }
+ }
+ }
+
+ // Construct source file.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, pkgIndexHead)
+ fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex)
+ fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports)
+ src := buf.Bytes()
+
+ // Replace main.pkg type name with pkg.
+ src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1)
+ // Replace actual GOROOT with "/go".
+ src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1)
+ // Add some line wrapping.
+ src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1)
+ src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1)
+
+ // Gofmt the generated source before writing it.
+ var err error
+ src, err = format.Source(src)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Write out source file.
+ err = ioutil.WriteFile("pkgindex.go", src, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+const pkgIndexHead = `package imports
+
+func init() {
+ pkgIndexOnce.Do(func() {
+ pkgIndex.m = pkgIndexMaster
+ })
+ loadExports = func(dir string) map[string]bool {
+ return exportsMaster[dir]
+ }
+}
+`
+
+type pkg struct {
+ importpath string // full pkg import path, e.g. "net/http"
+ dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
+}
+
+var fset = token.NewFileSet()
+
+// loadPkg records the package under root at importpath into pkgIndex,
+// then recurses into its subdirectories. testdata directories and names
+// beginning with '.' or a digit are skipped.
+func loadPkg(root, importpath string) {
+ shortName := path.Base(importpath)
+ if shortName == "testdata" {
+ return
+ }
+
+ dir := filepath.Join(root, importpath)
+ pkgIndex[shortName] = append(pkgIndex[shortName], pkg{
+ importpath: importpath,
+ dir: dir,
+ })
+
+ pkgDir, err := os.Open(dir)
+ if err != nil {
+ return
+ }
+ children, err := pkgDir.Readdir(-1)
+ pkgDir.Close()
+ if err != nil {
+ return
+ }
+ for _, child := range children {
+ name := child.Name()
+ if name == "" {
+ continue
+ }
+ // Skip hidden directories and ones starting with a digit.
+ if c := name[0]; c == '.' || ('0' <= c && c <= '9') {
+ continue
+ }
+ if child.IsDir() {
+ loadPkg(root, filepath.Join(importpath, name))
+ }
+ }
+}
+
+// loadExports returns the exported identifiers declared in the buildable
+// Go files of dir, or nil if the directory has no buildable Go sources
+// or cannot be imported.
+func loadExports(dir string) map[string]bool {
+ exports := make(map[string]bool)
+ buildPkg, err := build.ImportDir(dir, 0)
+ if err != nil {
+ // "no buildable Go source files" is expected and not worth logging.
+ if strings.Contains(err.Error(), "no buildable Go source files in") {
+ return nil
+ }
+ log.Printf("could not import %q: %v", dir, err)
+ return nil
+ }
+ for _, file := range buildPkg.GoFiles {
+ f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
+ if err != nil {
+ log.Printf("could not parse %q: %v", file, err)
+ continue
+ }
+ for name := range f.Scope.Objects {
+ if ast.IsExported(name) {
+ exports[name] = true
+ }
+ }
+ }
+ return exports
+}
diff --git a/vendor/golang.org/x/tools/imports/mkstdlib.go b/vendor/golang.org/x/tools/imports/mkstdlib.go
new file mode 100644
index 000000000..c8865e555
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/mkstdlib.go
@@ -0,0 +1,132 @@
+// +build ignore
+
+// mkstdlib generates the zstdlib.go file, containing the Go standard
+// library API symbols. It's baked into the binary to avoid scanning
+// GOPATH in the common case.
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+)
+
+// mustOpen opens the named file for reading, exiting the program on error.
+func mustOpen(name string) io.Reader {
+ f, err := os.Open(name)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return f
+}
+
+// api returns the path of the named file in GOROOT's api directory.
+func api(base string) string {
+ return filepath.Join(runtime.GOROOT(), "api", base)
+}
+
+var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
+
+var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true}
+
+// main reads the GOROOT api/go1.*.txt files (plus the computed
+// syscall/js API), extracts the exported symbols per package, and writes
+// the gofmt-ed result to zstdlib.go.
+func main() {
+ var buf bytes.Buffer
+ outf := func(format string, args ...interface{}) {
+ fmt.Fprintf(&buf, format, args...)
+ }
+ outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
+ outf("package imports\n")
+ outf("var stdlib = map[string]map[string]bool{\n")
+ f := io.MultiReader(
+ mustOpen(api("go1.txt")),
+ mustOpen(api("go1.1.txt")),
+ mustOpen(api("go1.2.txt")),
+ mustOpen(api("go1.3.txt")),
+ mustOpen(api("go1.4.txt")),
+ mustOpen(api("go1.5.txt")),
+ mustOpen(api("go1.6.txt")),
+ mustOpen(api("go1.7.txt")),
+ mustOpen(api("go1.8.txt")),
+ mustOpen(api("go1.9.txt")),
+ mustOpen(api("go1.10.txt")),
+ mustOpen(api("go1.11.txt")),
+ mustOpen(api("go1.12.txt")),
+
+ // The API of the syscall/js package needs to be computed explicitly,
+ // because it's not included in the GOROOT/api/go1.*.txt files at this time.
+ syscallJSAPI(),
+ )
+ sc := bufio.NewScanner(f)
+
+ // unsafe is not covered by the api files; seed it by hand.
+ pkgs := map[string]map[string]bool{
+ "unsafe": unsafeSyms,
+ }
+ paths := []string{"unsafe"}
+
+ for sc.Scan() {
+ l := sc.Text()
+ has := func(v string) bool { return strings.Contains(l, v) }
+ // Skip struct fields, interface methods, and methods: only
+ // top-level vars, funcs, types, and consts are indexed.
+ if has("struct, ") || has("interface, ") || has(", method (") {
+ continue
+ }
+ if m := sym.FindStringSubmatch(l); m != nil {
+ path, sym := m[1], m[2]
+
+ if _, ok := pkgs[path]; !ok {
+ pkgs[path] = map[string]bool{}
+ paths = append(paths, path)
+ }
+ pkgs[path][sym] = true
+ }
+ }
+ if err := sc.Err(); err != nil {
+ log.Fatal(err)
+ }
+ // Emit packages and symbols in sorted order for stable output.
+ sort.Strings(paths)
+ for _, path := range paths {
+ outf("\t%q: map[string]bool{\n", path)
+ pkg := pkgs[path]
+ var syms []string
+ for sym := range pkg {
+ syms = append(syms, sym)
+ }
+ sort.Strings(syms)
+ for _, sym := range syms {
+ outf("\t\t%q: true,\n", sym)
+ }
+ outf("},\n")
+ }
+ outf("}\n")
+ fmtbuf, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// syscallJSAPI returns the API of the syscall/js package.
+// It's computed from the contents of $(go env GOROOT)/src/syscall/js.
+func syscallJSAPI() io.Reader {
+ // On Windows, invoke "go.exe" explicitly.
+ var exeSuffix string
+ if runtime.GOOS == "windows" {
+ exeSuffix = ".exe"
+ }
+ cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js")
+ out, err := cmd.Output()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ return bytes.NewReader(out)
+}
diff --git a/vendor/golang.org/x/tools/imports/mod.go b/vendor/golang.org/x/tools/imports/mod.go
new file mode 100644
index 000000000..018c43ce8
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/mod.go
@@ -0,0 +1,355 @@
+package imports
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/gopathwalk"
+ "golang.org/x/tools/internal/module"
+)
+
+// moduleResolver implements resolver for modules using the go command as little
+// as feasible.
+type moduleResolver struct {
+ env *fixEnv
+
+ initialized bool
+ main *moduleJSON
+ modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
+ modsByDir []*moduleJSON // ...or Dir.
+}
+
+type moduleJSON struct {
+ Path string // module path
+ Version string // module version
+ Versions []string // available module versions (with -versions)
+ Replace *moduleJSON // replaced by this module
+ Time *time.Time // time version was created
+ Update *moduleJSON // available update, if any (with -u)
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file for this module, if any
+ Error *moduleErrorJSON // error loading module
+}
+
+type moduleErrorJSON struct {
+ Err string // the error itself
+}
+
+// init lazily populates the resolver's module lists by running
+// `go list -m -json ...` once. Subsequent calls are no-ops.
+func (r *moduleResolver) init() error {
+ if r.initialized {
+ return nil
+ }
+ stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
+ if err != nil {
+ return err
+ }
+ // The output is a stream of JSON objects, one per module.
+ for dec := json.NewDecoder(stdout); dec.More(); {
+ mod := &moduleJSON{}
+ if err := dec.Decode(mod); err != nil {
+ return err
+ }
+ if mod.Dir == "" {
+ if Debug {
+ log.Printf("module %v has not been downloaded and will be ignored", mod.Path)
+ }
+ // Can't do anything with a module that's not downloaded.
+ continue
+ }
+ r.modsByModPath = append(r.modsByModPath, mod)
+ r.modsByDir = append(r.modsByDir, mod)
+ if mod.Main {
+ r.main = mod
+ }
+ }
+
+ // Order both lists so the most specific (deepest) path is tried first.
+ sort.Slice(r.modsByModPath, func(i, j int) bool {
+ count := func(x int) int {
+ return strings.Count(r.modsByModPath[x].Path, "/")
+ }
+ return count(j) < count(i) // descending order
+ })
+ sort.Slice(r.modsByDir, func(i, j int) bool {
+ count := func(x int) int {
+ return strings.Count(r.modsByDir[x].Dir, "/")
+ }
+ return count(j) < count(i) // descending order
+ })
+
+ r.initialized = true
+ return nil
+}
+
+// findPackage returns the module and directory that contains the package at
+// the given import path, or returns nil, "" if no module is in scope.
+func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
+ // modsByModPath is ordered deepest-first, so the longest matching
+ // module path wins.
+ for _, m := range r.modsByModPath {
+ if !strings.HasPrefix(importPath, m.Path) {
+ continue
+ }
+ pathInModule := importPath[len(m.Path):]
+ pkgDir := filepath.Join(m.Dir, pathInModule)
+ if dirIsNestedModule(pkgDir, m) {
+ continue
+ }
+
+ pkgFiles, err := ioutil.ReadDir(pkgDir)
+ if err != nil {
+ continue
+ }
+
+ // A module only contains a package if it has buildable go
+ // files in that directory. If not, it could be provided by an
+ // outer module. See #29736.
+ for _, fi := range pkgFiles {
+ if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
+ return m, pkgDir
+ }
+ }
+ }
+ return nil, ""
+}
+
+// findModuleByDir returns the module that contains dir, or nil if no such
+// module is in scope.
+func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
+ // This is quite tricky and may not be correct. dir could be:
+ // - a package in the main module.
+ // - a replace target underneath the main module's directory.
+ // - a nested module in the above.
+ // - a replace target somewhere totally random.
+ // - a nested module in the above.
+ // - in the mod cache.
+ // - in /vendor/ in -mod=vendor mode.
+ // - nested module? Dunno.
+ // Rumor has it that replace targets cannot contain other replace targets.
+ // modsByDir is ordered deepest-first, so the innermost containing
+ // module directory wins.
+ for _, m := range r.modsByDir {
+ if !strings.HasPrefix(dir, m.Dir) {
+ continue
+ }
+
+ if dirIsNestedModule(dir, m) {
+ continue
+ }
+
+ return m
+ }
+ return nil
+}
+
+// dirIsNestedModule reports if dir is contained in a nested module underneath
+// mod, not actually in mod.
+func dirIsNestedModule(dir string, mod *moduleJSON) bool {
+ if !strings.HasPrefix(dir, mod.Dir) {
+ return false
+ }
+ mf := findModFile(dir)
+ if mf == "" {
+ return false
+ }
+ // If the nearest go.mod above dir is not mod's own, dir belongs
+ // to a nested module.
+ return filepath.Dir(mf) != mod.Dir
+}
+
+// findModFile walks upward from dir and returns the path of the nearest
+// go.mod file, or "" if none exists up to the filesystem root.
+func findModFile(dir string) string {
+ for {
+ f := filepath.Join(dir, "go.mod")
+ info, err := os.Stat(f)
+ if err == nil && !info.IsDir() {
+ return f
+ }
+ d := filepath.Dir(dir)
+ // filepath.Dir of a root returns the root itself, so a
+ // non-shrinking path means we've hit the top.
+ if len(d) >= len(dir) {
+ return "" // reached top of file system, no go.mod
+ }
+ dir = d
+ }
+}
+
+// loadPackageNames maps each resolvable import path to its package name.
+// Paths that cannot be located or whose directory cannot be named are
+// silently omitted from the result.
+func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+ if err := r.init(); err != nil {
+ return nil, err
+ }
+ names := map[string]string{}
+ for _, path := range importPaths {
+ _, packageDir := r.findPackage(path)
+ if packageDir == "" {
+ continue
+ }
+ name, err := packageDirToName(packageDir)
+ if err != nil {
+ continue
+ }
+ names[path] = name
+ }
+ return names, nil
+}
+
+// scan walks GOROOT, the main module, the module cache, and replace
+// targets, returning every package directory found together with its
+// best-guess (de-vendored) import path.
+func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
+ if err := r.init(); err != nil {
+ return nil, err
+ }
+
+ // Walk GOROOT, GOPATH/pkg/mod, and the main module.
+ roots := []gopathwalk.Root{
+ {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
+ }
+ if r.main != nil {
+ roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
+ }
+ for _, p := range filepath.SplitList(r.env.GOPATH) {
+ roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
+ }
+
+ // Walk replace targets, just in case they're not in any of the above.
+ for _, mod := range r.modsByModPath {
+ if mod.Replace != nil {
+ roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
+ }
+ }
+
+ var result []*pkg
+ dupCheck := make(map[string]bool)
+ // mu guards result and dupCheck; the walk callback may run concurrently.
+ var mu sync.Mutex
+
+ gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
+ mu.Lock()
+ defer mu.Unlock()
+
+ if _, dup := dupCheck[dir]; dup {
+ return
+ }
+
+ dupCheck[dir] = true
+
+ subdir := ""
+ if dir != root.Path {
+ subdir = dir[len(root.Path)+len("/"):]
+ }
+ importPath := filepath.ToSlash(subdir)
+ if strings.HasPrefix(importPath, "vendor/") {
+ // Ignore vendor dirs. If -mod=vendor is on, then things
+ // should mostly just work, but when it's not vendor/
+ // is a mess. There's no easy way to tell if it's on.
+ // We can still find things in the mod cache and
+ // map them into /vendor when -mod=vendor is on.
+ return
+ }
+ // Derive the import path according to which root the dir came from.
+ switch root.Type {
+ case gopathwalk.RootCurrentModule:
+ importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
+ case gopathwalk.RootModuleCache:
+ matches := modCacheRegexp.FindStringSubmatch(subdir)
+ modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
+ if err != nil {
+ if Debug {
+ log.Printf("decoding module cache path %q: %v", subdir, err)
+ }
+ return
+ }
+ importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
+ case gopathwalk.RootGOROOT:
+ importPath = subdir
+ }
+
+ // Check if the directory is underneath a module that's in scope.
+ if mod := r.findModuleByDir(dir); mod != nil {
+ // It is. If dir is the target of a replace directive,
+ // our guessed import path is wrong. Use the real one.
+ if mod.Dir == dir {
+ importPath = mod.Path
+ } else {
+ dirInMod := dir[len(mod.Dir)+len("/"):]
+ importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
+ }
+ } else {
+ // The package is in an unknown module. Check that it's
+ // not obviously impossible to import.
+ var modFile string
+ switch root.Type {
+ case gopathwalk.RootModuleCache:
+ matches := modCacheRegexp.FindStringSubmatch(subdir)
+ modFile = filepath.Join(matches[1], "@", matches[2], "go.mod")
+ default:
+ modFile = findModFile(dir)
+ }
+
+ modBytes, err := ioutil.ReadFile(modFile)
+ if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) {
+ // The module's declared path does not match
+ // its expected path. It probably needs a
+ // replace directive we don't have.
+ return
+ }
+ }
+ // We may have discovered a package that has a different version
+ // in scope already. Canonicalize to that one if possible.
+ if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
+ dir = canonicalDir
+ }
+
+ result = append(result, &pkg{
+ importPathShort: VendorlessPath(importPath),
+ dir: dir,
+ })
+ }, gopathwalk.Options{Debug: Debug, ModulesEnabled: true})
+ return result, nil
+}
+
+// modCacheRegexp splits a path in a module cache into module, module version, and package.
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
+
+var (
+ slashSlash = []byte("//")
+ moduleStr = []byte("module")
+)
+
+// modulePath returns the module path from the gomod file text.
+// If it cannot find a module path, it returns an empty string.
+// It is tolerant of unrelated problems in the go.mod file.
+//
+// Copied from cmd/go/internal/modfile.
+func modulePath(mod []byte) string {
+ // Scan line by line for the first "module" directive.
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ // Strip a trailing // comment.
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ // Require whitespace after "module" (rejects e.g. "modulefoo"),
+ // and a non-empty path.
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+ return "" // missing module path
+}
diff --git a/vendor/golang.org/x/tools/imports/sortimports.go b/vendor/golang.org/x/tools/imports/sortimports.go
new file mode 100644
index 000000000..f3dd56c7a
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/sortimports.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hacked up copy of go/ast/import.go
+
+package imports
+
+import (
+ "go/ast"
+ "go/token"
+ "sort"
+ "strconv"
+)
+
+// sortImports sorts runs of consecutive import lines in import blocks in f.
+// It also removes duplicate imports when it is possible to do so without data loss.
+//
+// Only the leading import declarations are considered: iteration stops at
+// the first non-import declaration, since imports always come first in a
+// valid Go file.
+func sortImports(fset *token.FileSet, f *ast.File) {
+	for i, d := range f.Decls {
+		d, ok := d.(*ast.GenDecl)
+		if !ok || d.Tok != token.IMPORT {
+			// Not an import declaration, so we're done.
+			// Imports are always first.
+			break
+		}
+
+		if len(d.Specs) == 0 {
+			// Empty import block, remove it.
+			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
+		}
+
+		if !d.Lparen.IsValid() {
+			// Not a block: sorted by default.
+			continue
+		}
+
+		// Identify and sort runs of specs on successive lines.
+		// A blank line (a gap of more than one source line) ends a run.
+		i := 0
+		specs := d.Specs[:0]
+		for j, s := range d.Specs {
+			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+				// j begins a new run. End this one.
+				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
+				i = j
+			}
+		}
+		// Sort the trailing (or only) run.
+		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
+		d.Specs = specs
+
+		// Deduping can leave a blank line before the rparen; clean that up.
+		if len(d.Specs) > 0 {
+			lastSpec := d.Specs[len(d.Specs)-1]
+			lastLine := fset.Position(lastSpec.Pos()).Line
+			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
+				fset.File(d.Rparen).MergeLine(rParenLine - 1)
+			}
+		}
+	}
+}
+
+// importPath returns the unquoted import path of spec s,
+// or the empty string if the path literal is malformed.
+func importPath(s ast.Spec) string {
+	t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
+	if err == nil {
+		return t
+	}
+	return ""
+}
+
+// importName returns the local name of spec s (the optional
+// identifier before the path), or "" if there is none.
+func importName(s ast.Spec) string {
+	n := s.(*ast.ImportSpec).Name
+	if n == nil {
+		return ""
+	}
+	return n.Name
+}
+
+// importComment returns the text of the line comment attached to
+// spec s, or "" if it has no comment.
+func importComment(s ast.Spec) string {
+	c := s.(*ast.ImportSpec).Comment
+	if c == nil {
+		return ""
+	}
+	return c.Text()
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+// Two specs collapse only when both path and local name match and
+// prev carries no comment that would be lost.
+func collapse(prev, next ast.Spec) bool {
+	if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+		return false
+	}
+	return prev.(*ast.ImportSpec).Comment == nil
+}
+
+// posSpan records the original start/end positions of an import spec
+// so they can be reassigned after sorting.
+type posSpan struct {
+	Start token.Pos
+	End   token.Pos
+}
+
+// sortSpecs sorts a single run of import specs by import path, removes
+// duplicates where no comment would be lost, and rewrites positions of
+// specs and their comments so the printed output stays well-formed.
+// It returns the (possibly shorter) sorted slice.
+func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
+	// Can't short-circuit here even if specs are already sorted,
+	// since they might yet need deduplication.
+	// A lone import, however, may be safely ignored.
+	if len(specs) <= 1 {
+		return specs
+	}
+
+	// Record positions for specs.
+	pos := make([]posSpan, len(specs))
+	for i, s := range specs {
+		pos[i] = posSpan{s.Pos(), s.End()}
+	}
+
+	// Identify comments in this range.
+	// Any comment from pos[0].Start to the final line counts.
+	lastLine := fset.Position(pos[len(pos)-1].End).Line
+	cstart := len(f.Comments)
+	cend := len(f.Comments)
+	for i, g := range f.Comments {
+		if g.Pos() < pos[0].Start {
+			continue
+		}
+		if i < cstart {
+			cstart = i
+		}
+		if fset.Position(g.End()).Line > lastLine {
+			cend = i
+			break
+		}
+	}
+	comments := f.Comments[cstart:cend]
+
+	// Assign each comment to the import spec preceding it.
+	importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
+	specIndex := 0
+	for _, g := range comments {
+		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
+			specIndex++
+		}
+		s := specs[specIndex].(*ast.ImportSpec)
+		importComment[s] = append(importComment[s], g)
+	}
+
+	// Sort the import specs by import path.
+	// Remove duplicates, when possible without data loss.
+	// Reassign the import paths to have the same position sequence.
+	// Reassign each comment to abut the end of its spec.
+	// Sort the comments by new position.
+	sort.Sort(byImportSpec(specs))
+
+	// Dedup. Thanks to our sorting, we can just consider
+	// adjacent pairs of imports.
+	deduped := specs[:0]
+	for i, s := range specs {
+		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
+			deduped = append(deduped, s)
+		} else {
+			// Drop s and merge its source line away.
+			p := s.Pos()
+			fset.File(p).MergeLine(fset.Position(p).Line)
+		}
+	}
+	specs = deduped
+
+	// Fix up comment positions: each surviving spec takes the i-th
+	// recorded span, and its comments are moved to abut its end.
+	for i, s := range specs {
+		s := s.(*ast.ImportSpec)
+		if s.Name != nil {
+			s.Name.NamePos = pos[i].Start
+		}
+		s.Path.ValuePos = pos[i].Start
+		s.EndPos = pos[i].End
+		nextSpecPos := pos[i].End
+
+		for _, g := range importComment[s] {
+			for _, c := range g.List {
+				c.Slash = pos[i].End
+				nextSpecPos = c.End()
+			}
+		}
+		// Push the next spec past this spec's trailing comments.
+		if i < len(specs)-1 {
+			pos[i+1].Start = nextSpecPos
+			pos[i+1].End = nextSpecPos
+		}
+	}
+
+	sort.Sort(byCommentPos(comments))
+
+	// Fixup comments can insert blank lines, because import specs are on different lines.
+	// We remove those blank lines here by merging import spec to the first import spec line.
+	firstSpecLine := fset.Position(specs[0].Pos()).Line
+	for _, s := range specs[1:] {
+		p := s.Pos()
+		line := fset.File(p).Line(p)
+		for previousLine := line - 1; previousLine >= firstSpecLine; {
+			fset.File(p).MergeLine(previousLine)
+			previousLine--
+		}
+	}
+	return specs
+}
+
+// byImportSpec sorts import specs by import group (as decided by
+// importGroup), then path, then local name, then attached comment.
+type byImportSpec []ast.Spec // slice of *ast.ImportSpec
+
+func (x byImportSpec) Len() int      { return len(x) }
+func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportSpec) Less(i, j int) bool {
+	ipath := importPath(x[i])
+	jpath := importPath(x[j])
+
+	// Group takes precedence over everything else.
+	igroup := importGroup(ipath)
+	jgroup := importGroup(jpath)
+	if igroup != jgroup {
+		return igroup < jgroup
+	}
+
+	if ipath != jpath {
+		return ipath < jpath
+	}
+	iname := importName(x[i])
+	jname := importName(x[j])
+
+	if iname != jname {
+		return iname < jname
+	}
+	// Identical path and name: fall back to comment text so the
+	// order is fully deterministic.
+	return importComment(x[i]) < importComment(x[j])
+}
+
+// byCommentPos orders comment groups by their (rewritten) source position.
+type byCommentPos []*ast.CommentGroup
+
+func (x byCommentPos) Len() int           { return len(x) }
+func (x byCommentPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
diff --git a/vendor/golang.org/x/tools/imports/zstdlib.go b/vendor/golang.org/x/tools/imports/zstdlib.go
new file mode 100644
index 000000000..d81b8c530
--- /dev/null
+++ b/vendor/golang.org/x/tools/imports/zstdlib.go
@@ -0,0 +1,10325 @@
+// Code generated by mkstdlib.go. DO NOT EDIT.
+
+package imports
+
+var stdlib = map[string]map[string]bool{
+ "archive/tar": map[string]bool{
+ "ErrFieldTooLong": true,
+ "ErrHeader": true,
+ "ErrWriteAfterClose": true,
+ "ErrWriteTooLong": true,
+ "FileInfoHeader": true,
+ "Format": true,
+ "FormatGNU": true,
+ "FormatPAX": true,
+ "FormatUSTAR": true,
+ "FormatUnknown": true,
+ "Header": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Reader": true,
+ "TypeBlock": true,
+ "TypeChar": true,
+ "TypeCont": true,
+ "TypeDir": true,
+ "TypeFifo": true,
+ "TypeGNULongLink": true,
+ "TypeGNULongName": true,
+ "TypeGNUSparse": true,
+ "TypeLink": true,
+ "TypeReg": true,
+ "TypeRegA": true,
+ "TypeSymlink": true,
+ "TypeXGlobalHeader": true,
+ "TypeXHeader": true,
+ "Writer": true,
+ },
+ "archive/zip": map[string]bool{
+ "Compressor": true,
+ "Decompressor": true,
+ "Deflate": true,
+ "ErrAlgorithm": true,
+ "ErrChecksum": true,
+ "ErrFormat": true,
+ "File": true,
+ "FileHeader": true,
+ "FileInfoHeader": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "OpenReader": true,
+ "ReadCloser": true,
+ "Reader": true,
+ "RegisterCompressor": true,
+ "RegisterDecompressor": true,
+ "Store": true,
+ "Writer": true,
+ },
+ "bufio": map[string]bool{
+ "ErrAdvanceTooFar": true,
+ "ErrBufferFull": true,
+ "ErrFinalToken": true,
+ "ErrInvalidUnreadByte": true,
+ "ErrInvalidUnreadRune": true,
+ "ErrNegativeAdvance": true,
+ "ErrNegativeCount": true,
+ "ErrTooLong": true,
+ "MaxScanTokenSize": true,
+ "NewReadWriter": true,
+ "NewReader": true,
+ "NewReaderSize": true,
+ "NewScanner": true,
+ "NewWriter": true,
+ "NewWriterSize": true,
+ "ReadWriter": true,
+ "Reader": true,
+ "ScanBytes": true,
+ "ScanLines": true,
+ "ScanRunes": true,
+ "ScanWords": true,
+ "Scanner": true,
+ "SplitFunc": true,
+ "Writer": true,
+ },
+ "bytes": map[string]bool{
+ "Buffer": true,
+ "Compare": true,
+ "Contains": true,
+ "ContainsAny": true,
+ "ContainsRune": true,
+ "Count": true,
+ "Equal": true,
+ "EqualFold": true,
+ "ErrTooLarge": true,
+ "Fields": true,
+ "FieldsFunc": true,
+ "HasPrefix": true,
+ "HasSuffix": true,
+ "Index": true,
+ "IndexAny": true,
+ "IndexByte": true,
+ "IndexFunc": true,
+ "IndexRune": true,
+ "Join": true,
+ "LastIndex": true,
+ "LastIndexAny": true,
+ "LastIndexByte": true,
+ "LastIndexFunc": true,
+ "Map": true,
+ "MinRead": true,
+ "NewBuffer": true,
+ "NewBufferString": true,
+ "NewReader": true,
+ "Reader": true,
+ "Repeat": true,
+ "Replace": true,
+ "ReplaceAll": true,
+ "Runes": true,
+ "Split": true,
+ "SplitAfter": true,
+ "SplitAfterN": true,
+ "SplitN": true,
+ "Title": true,
+ "ToLower": true,
+ "ToLowerSpecial": true,
+ "ToTitle": true,
+ "ToTitleSpecial": true,
+ "ToUpper": true,
+ "ToUpperSpecial": true,
+ "Trim": true,
+ "TrimFunc": true,
+ "TrimLeft": true,
+ "TrimLeftFunc": true,
+ "TrimPrefix": true,
+ "TrimRight": true,
+ "TrimRightFunc": true,
+ "TrimSpace": true,
+ "TrimSuffix": true,
+ },
+ "compress/bzip2": map[string]bool{
+ "NewReader": true,
+ "StructuralError": true,
+ },
+ "compress/flate": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "CorruptInputError": true,
+ "DefaultCompression": true,
+ "HuffmanOnly": true,
+ "InternalError": true,
+ "NewReader": true,
+ "NewReaderDict": true,
+ "NewWriter": true,
+ "NewWriterDict": true,
+ "NoCompression": true,
+ "ReadError": true,
+ "Reader": true,
+ "Resetter": true,
+ "WriteError": true,
+ "Writer": true,
+ },
+ "compress/gzip": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "DefaultCompression": true,
+ "ErrChecksum": true,
+ "ErrHeader": true,
+ "Header": true,
+ "HuffmanOnly": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "NewWriterLevel": true,
+ "NoCompression": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "compress/lzw": map[string]bool{
+ "LSB": true,
+ "MSB": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Order": true,
+ },
+ "compress/zlib": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "DefaultCompression": true,
+ "ErrChecksum": true,
+ "ErrDictionary": true,
+ "ErrHeader": true,
+ "HuffmanOnly": true,
+ "NewReader": true,
+ "NewReaderDict": true,
+ "NewWriter": true,
+ "NewWriterLevel": true,
+ "NewWriterLevelDict": true,
+ "NoCompression": true,
+ "Resetter": true,
+ "Writer": true,
+ },
+ "container/heap": map[string]bool{
+ "Fix": true,
+ "Init": true,
+ "Interface": true,
+ "Pop": true,
+ "Push": true,
+ "Remove": true,
+ },
+ "container/list": map[string]bool{
+ "Element": true,
+ "List": true,
+ "New": true,
+ },
+ "container/ring": map[string]bool{
+ "New": true,
+ "Ring": true,
+ },
+ "context": map[string]bool{
+ "Background": true,
+ "CancelFunc": true,
+ "Canceled": true,
+ "Context": true,
+ "DeadlineExceeded": true,
+ "TODO": true,
+ "WithCancel": true,
+ "WithDeadline": true,
+ "WithTimeout": true,
+ "WithValue": true,
+ },
+ "crypto": map[string]bool{
+ "BLAKE2b_256": true,
+ "BLAKE2b_384": true,
+ "BLAKE2b_512": true,
+ "BLAKE2s_256": true,
+ "Decrypter": true,
+ "DecrypterOpts": true,
+ "Hash": true,
+ "MD4": true,
+ "MD5": true,
+ "MD5SHA1": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "RIPEMD160": true,
+ "RegisterHash": true,
+ "SHA1": true,
+ "SHA224": true,
+ "SHA256": true,
+ "SHA384": true,
+ "SHA3_224": true,
+ "SHA3_256": true,
+ "SHA3_384": true,
+ "SHA3_512": true,
+ "SHA512": true,
+ "SHA512_224": true,
+ "SHA512_256": true,
+ "Signer": true,
+ "SignerOpts": true,
+ },
+ "crypto/aes": map[string]bool{
+ "BlockSize": true,
+ "KeySizeError": true,
+ "NewCipher": true,
+ },
+ "crypto/cipher": map[string]bool{
+ "AEAD": true,
+ "Block": true,
+ "BlockMode": true,
+ "NewCBCDecrypter": true,
+ "NewCBCEncrypter": true,
+ "NewCFBDecrypter": true,
+ "NewCFBEncrypter": true,
+ "NewCTR": true,
+ "NewGCM": true,
+ "NewGCMWithNonceSize": true,
+ "NewGCMWithTagSize": true,
+ "NewOFB": true,
+ "Stream": true,
+ "StreamReader": true,
+ "StreamWriter": true,
+ },
+ "crypto/des": map[string]bool{
+ "BlockSize": true,
+ "KeySizeError": true,
+ "NewCipher": true,
+ "NewTripleDESCipher": true,
+ },
+ "crypto/dsa": map[string]bool{
+ "ErrInvalidPublicKey": true,
+ "GenerateKey": true,
+ "GenerateParameters": true,
+ "L1024N160": true,
+ "L2048N224": true,
+ "L2048N256": true,
+ "L3072N256": true,
+ "ParameterSizes": true,
+ "Parameters": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "Sign": true,
+ "Verify": true,
+ },
+ "crypto/ecdsa": map[string]bool{
+ "GenerateKey": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "Sign": true,
+ "Verify": true,
+ },
+ "crypto/elliptic": map[string]bool{
+ "Curve": true,
+ "CurveParams": true,
+ "GenerateKey": true,
+ "Marshal": true,
+ "P224": true,
+ "P256": true,
+ "P384": true,
+ "P521": true,
+ "Unmarshal": true,
+ },
+ "crypto/hmac": map[string]bool{
+ "Equal": true,
+ "New": true,
+ },
+ "crypto/md5": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "Size": true,
+ "Sum": true,
+ },
+ "crypto/rand": map[string]bool{
+ "Int": true,
+ "Prime": true,
+ "Read": true,
+ "Reader": true,
+ },
+ "crypto/rc4": map[string]bool{
+ "Cipher": true,
+ "KeySizeError": true,
+ "NewCipher": true,
+ },
+ "crypto/rsa": map[string]bool{
+ "CRTValue": true,
+ "DecryptOAEP": true,
+ "DecryptPKCS1v15": true,
+ "DecryptPKCS1v15SessionKey": true,
+ "EncryptOAEP": true,
+ "EncryptPKCS1v15": true,
+ "ErrDecryption": true,
+ "ErrMessageTooLong": true,
+ "ErrVerification": true,
+ "GenerateKey": true,
+ "GenerateMultiPrimeKey": true,
+ "OAEPOptions": true,
+ "PKCS1v15DecryptOptions": true,
+ "PSSOptions": true,
+ "PSSSaltLengthAuto": true,
+ "PSSSaltLengthEqualsHash": true,
+ "PrecomputedValues": true,
+ "PrivateKey": true,
+ "PublicKey": true,
+ "SignPKCS1v15": true,
+ "SignPSS": true,
+ "VerifyPKCS1v15": true,
+ "VerifyPSS": true,
+ },
+ "crypto/sha1": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "Size": true,
+ "Sum": true,
+ },
+ "crypto/sha256": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "New224": true,
+ "Size": true,
+ "Size224": true,
+ "Sum224": true,
+ "Sum256": true,
+ },
+ "crypto/sha512": map[string]bool{
+ "BlockSize": true,
+ "New": true,
+ "New384": true,
+ "New512_224": true,
+ "New512_256": true,
+ "Size": true,
+ "Size224": true,
+ "Size256": true,
+ "Size384": true,
+ "Sum384": true,
+ "Sum512": true,
+ "Sum512_224": true,
+ "Sum512_256": true,
+ },
+ "crypto/subtle": map[string]bool{
+ "ConstantTimeByteEq": true,
+ "ConstantTimeCompare": true,
+ "ConstantTimeCopy": true,
+ "ConstantTimeEq": true,
+ "ConstantTimeLessOrEq": true,
+ "ConstantTimeSelect": true,
+ },
+ "crypto/tls": map[string]bool{
+ "Certificate": true,
+ "CertificateRequestInfo": true,
+ "Client": true,
+ "ClientAuthType": true,
+ "ClientHelloInfo": true,
+ "ClientSessionCache": true,
+ "ClientSessionState": true,
+ "Config": true,
+ "Conn": true,
+ "ConnectionState": true,
+ "CurveID": true,
+ "CurveP256": true,
+ "CurveP384": true,
+ "CurveP521": true,
+ "Dial": true,
+ "DialWithDialer": true,
+ "ECDSAWithP256AndSHA256": true,
+ "ECDSAWithP384AndSHA384": true,
+ "ECDSAWithP521AndSHA512": true,
+ "ECDSAWithSHA1": true,
+ "Listen": true,
+ "LoadX509KeyPair": true,
+ "NewLRUClientSessionCache": true,
+ "NewListener": true,
+ "NoClientCert": true,
+ "PKCS1WithSHA1": true,
+ "PKCS1WithSHA256": true,
+ "PKCS1WithSHA384": true,
+ "PKCS1WithSHA512": true,
+ "PSSWithSHA256": true,
+ "PSSWithSHA384": true,
+ "PSSWithSHA512": true,
+ "RecordHeaderError": true,
+ "RenegotiateFreelyAsClient": true,
+ "RenegotiateNever": true,
+ "RenegotiateOnceAsClient": true,
+ "RenegotiationSupport": true,
+ "RequestClientCert": true,
+ "RequireAndVerifyClientCert": true,
+ "RequireAnyClientCert": true,
+ "Server": true,
+ "SignatureScheme": true,
+ "TLS_AES_128_GCM_SHA256": true,
+ "TLS_AES_256_GCM_SHA384": true,
+ "TLS_CHACHA20_POLY1305_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": true,
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": true,
+ "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": true,
+ "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": true,
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": true,
+ "TLS_ECDHE_RSA_WITH_RC4_128_SHA": true,
+ "TLS_FALLBACK_SCSV": true,
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_128_CBC_SHA256": true,
+ "TLS_RSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_256_GCM_SHA384": true,
+ "TLS_RSA_WITH_RC4_128_SHA": true,
+ "VerifyClientCertIfGiven": true,
+ "VersionSSL30": true,
+ "VersionTLS10": true,
+ "VersionTLS11": true,
+ "VersionTLS12": true,
+ "VersionTLS13": true,
+ "X25519": true,
+ "X509KeyPair": true,
+ },
+ "crypto/x509": map[string]bool{
+ "CANotAuthorizedForExtKeyUsage": true,
+ "CANotAuthorizedForThisName": true,
+ "CertPool": true,
+ "Certificate": true,
+ "CertificateInvalidError": true,
+ "CertificateRequest": true,
+ "ConstraintViolationError": true,
+ "CreateCertificate": true,
+ "CreateCertificateRequest": true,
+ "DSA": true,
+ "DSAWithSHA1": true,
+ "DSAWithSHA256": true,
+ "DecryptPEMBlock": true,
+ "ECDSA": true,
+ "ECDSAWithSHA1": true,
+ "ECDSAWithSHA256": true,
+ "ECDSAWithSHA384": true,
+ "ECDSAWithSHA512": true,
+ "EncryptPEMBlock": true,
+ "ErrUnsupportedAlgorithm": true,
+ "Expired": true,
+ "ExtKeyUsage": true,
+ "ExtKeyUsageAny": true,
+ "ExtKeyUsageClientAuth": true,
+ "ExtKeyUsageCodeSigning": true,
+ "ExtKeyUsageEmailProtection": true,
+ "ExtKeyUsageIPSECEndSystem": true,
+ "ExtKeyUsageIPSECTunnel": true,
+ "ExtKeyUsageIPSECUser": true,
+ "ExtKeyUsageMicrosoftCommercialCodeSigning": true,
+ "ExtKeyUsageMicrosoftKernelCodeSigning": true,
+ "ExtKeyUsageMicrosoftServerGatedCrypto": true,
+ "ExtKeyUsageNetscapeServerGatedCrypto": true,
+ "ExtKeyUsageOCSPSigning": true,
+ "ExtKeyUsageServerAuth": true,
+ "ExtKeyUsageTimeStamping": true,
+ "HostnameError": true,
+ "IncompatibleUsage": true,
+ "IncorrectPasswordError": true,
+ "InsecureAlgorithmError": true,
+ "InvalidReason": true,
+ "IsEncryptedPEMBlock": true,
+ "KeyUsage": true,
+ "KeyUsageCRLSign": true,
+ "KeyUsageCertSign": true,
+ "KeyUsageContentCommitment": true,
+ "KeyUsageDataEncipherment": true,
+ "KeyUsageDecipherOnly": true,
+ "KeyUsageDigitalSignature": true,
+ "KeyUsageEncipherOnly": true,
+ "KeyUsageKeyAgreement": true,
+ "KeyUsageKeyEncipherment": true,
+ "MD2WithRSA": true,
+ "MD5WithRSA": true,
+ "MarshalECPrivateKey": true,
+ "MarshalPKCS1PrivateKey": true,
+ "MarshalPKCS1PublicKey": true,
+ "MarshalPKCS8PrivateKey": true,
+ "MarshalPKIXPublicKey": true,
+ "NameConstraintsWithoutSANs": true,
+ "NameMismatch": true,
+ "NewCertPool": true,
+ "NotAuthorizedToSign": true,
+ "PEMCipher": true,
+ "PEMCipher3DES": true,
+ "PEMCipherAES128": true,
+ "PEMCipherAES192": true,
+ "PEMCipherAES256": true,
+ "PEMCipherDES": true,
+ "ParseCRL": true,
+ "ParseCertificate": true,
+ "ParseCertificateRequest": true,
+ "ParseCertificates": true,
+ "ParseDERCRL": true,
+ "ParseECPrivateKey": true,
+ "ParsePKCS1PrivateKey": true,
+ "ParsePKCS1PublicKey": true,
+ "ParsePKCS8PrivateKey": true,
+ "ParsePKIXPublicKey": true,
+ "PublicKeyAlgorithm": true,
+ "RSA": true,
+ "SHA1WithRSA": true,
+ "SHA256WithRSA": true,
+ "SHA256WithRSAPSS": true,
+ "SHA384WithRSA": true,
+ "SHA384WithRSAPSS": true,
+ "SHA512WithRSA": true,
+ "SHA512WithRSAPSS": true,
+ "SignatureAlgorithm": true,
+ "SystemCertPool": true,
+ "SystemRootsError": true,
+ "TooManyConstraints": true,
+ "TooManyIntermediates": true,
+ "UnconstrainedName": true,
+ "UnhandledCriticalExtension": true,
+ "UnknownAuthorityError": true,
+ "UnknownPublicKeyAlgorithm": true,
+ "UnknownSignatureAlgorithm": true,
+ "VerifyOptions": true,
+ },
+ "crypto/x509/pkix": map[string]bool{
+ "AlgorithmIdentifier": true,
+ "AttributeTypeAndValue": true,
+ "AttributeTypeAndValueSET": true,
+ "CertificateList": true,
+ "Extension": true,
+ "Name": true,
+ "RDNSequence": true,
+ "RelativeDistinguishedNameSET": true,
+ "RevokedCertificate": true,
+ "TBSCertificateList": true,
+ },
+ "database/sql": map[string]bool{
+ "ColumnType": true,
+ "Conn": true,
+ "DB": true,
+ "DBStats": true,
+ "Drivers": true,
+ "ErrConnDone": true,
+ "ErrNoRows": true,
+ "ErrTxDone": true,
+ "IsolationLevel": true,
+ "LevelDefault": true,
+ "LevelLinearizable": true,
+ "LevelReadCommitted": true,
+ "LevelReadUncommitted": true,
+ "LevelRepeatableRead": true,
+ "LevelSerializable": true,
+ "LevelSnapshot": true,
+ "LevelWriteCommitted": true,
+ "Named": true,
+ "NamedArg": true,
+ "NullBool": true,
+ "NullFloat64": true,
+ "NullInt64": true,
+ "NullString": true,
+ "Open": true,
+ "OpenDB": true,
+ "Out": true,
+ "RawBytes": true,
+ "Register": true,
+ "Result": true,
+ "Row": true,
+ "Rows": true,
+ "Scanner": true,
+ "Stmt": true,
+ "Tx": true,
+ "TxOptions": true,
+ },
+ "database/sql/driver": map[string]bool{
+ "Bool": true,
+ "ColumnConverter": true,
+ "Conn": true,
+ "ConnBeginTx": true,
+ "ConnPrepareContext": true,
+ "Connector": true,
+ "DefaultParameterConverter": true,
+ "Driver": true,
+ "DriverContext": true,
+ "ErrBadConn": true,
+ "ErrRemoveArgument": true,
+ "ErrSkip": true,
+ "Execer": true,
+ "ExecerContext": true,
+ "Int32": true,
+ "IsScanValue": true,
+ "IsValue": true,
+ "IsolationLevel": true,
+ "NamedValue": true,
+ "NamedValueChecker": true,
+ "NotNull": true,
+ "Null": true,
+ "Pinger": true,
+ "Queryer": true,
+ "QueryerContext": true,
+ "Result": true,
+ "ResultNoRows": true,
+ "Rows": true,
+ "RowsAffected": true,
+ "RowsColumnTypeDatabaseTypeName": true,
+ "RowsColumnTypeLength": true,
+ "RowsColumnTypeNullable": true,
+ "RowsColumnTypePrecisionScale": true,
+ "RowsColumnTypeScanType": true,
+ "RowsNextResultSet": true,
+ "SessionResetter": true,
+ "Stmt": true,
+ "StmtExecContext": true,
+ "StmtQueryContext": true,
+ "String": true,
+ "Tx": true,
+ "TxOptions": true,
+ "Value": true,
+ "ValueConverter": true,
+ "Valuer": true,
+ },
+ "debug/dwarf": map[string]bool{
+ "AddrType": true,
+ "ArrayType": true,
+ "Attr": true,
+ "AttrAbstractOrigin": true,
+ "AttrAccessibility": true,
+ "AttrAddrClass": true,
+ "AttrAllocated": true,
+ "AttrArtificial": true,
+ "AttrAssociated": true,
+ "AttrBaseTypes": true,
+ "AttrBitOffset": true,
+ "AttrBitSize": true,
+ "AttrByteSize": true,
+ "AttrCallColumn": true,
+ "AttrCallFile": true,
+ "AttrCallLine": true,
+ "AttrCalling": true,
+ "AttrCommonRef": true,
+ "AttrCompDir": true,
+ "AttrConstValue": true,
+ "AttrContainingType": true,
+ "AttrCount": true,
+ "AttrDataLocation": true,
+ "AttrDataMemberLoc": true,
+ "AttrDeclColumn": true,
+ "AttrDeclFile": true,
+ "AttrDeclLine": true,
+ "AttrDeclaration": true,
+ "AttrDefaultValue": true,
+ "AttrDescription": true,
+ "AttrDiscr": true,
+ "AttrDiscrList": true,
+ "AttrDiscrValue": true,
+ "AttrEncoding": true,
+ "AttrEntrypc": true,
+ "AttrExtension": true,
+ "AttrExternal": true,
+ "AttrFrameBase": true,
+ "AttrFriend": true,
+ "AttrHighpc": true,
+ "AttrIdentifierCase": true,
+ "AttrImport": true,
+ "AttrInline": true,
+ "AttrIsOptional": true,
+ "AttrLanguage": true,
+ "AttrLocation": true,
+ "AttrLowerBound": true,
+ "AttrLowpc": true,
+ "AttrMacroInfo": true,
+ "AttrName": true,
+ "AttrNamelistItem": true,
+ "AttrOrdering": true,
+ "AttrPriority": true,
+ "AttrProducer": true,
+ "AttrPrototyped": true,
+ "AttrRanges": true,
+ "AttrReturnAddr": true,
+ "AttrSegment": true,
+ "AttrSibling": true,
+ "AttrSpecification": true,
+ "AttrStartScope": true,
+ "AttrStaticLink": true,
+ "AttrStmtList": true,
+ "AttrStride": true,
+ "AttrStrideSize": true,
+ "AttrStringLength": true,
+ "AttrTrampoline": true,
+ "AttrType": true,
+ "AttrUpperBound": true,
+ "AttrUseLocation": true,
+ "AttrUseUTF8": true,
+ "AttrVarParam": true,
+ "AttrVirtuality": true,
+ "AttrVisibility": true,
+ "AttrVtableElemLoc": true,
+ "BasicType": true,
+ "BoolType": true,
+ "CharType": true,
+ "Class": true,
+ "ClassAddress": true,
+ "ClassBlock": true,
+ "ClassConstant": true,
+ "ClassExprLoc": true,
+ "ClassFlag": true,
+ "ClassLinePtr": true,
+ "ClassLocListPtr": true,
+ "ClassMacPtr": true,
+ "ClassRangeListPtr": true,
+ "ClassReference": true,
+ "ClassReferenceAlt": true,
+ "ClassReferenceSig": true,
+ "ClassString": true,
+ "ClassStringAlt": true,
+ "ClassUnknown": true,
+ "CommonType": true,
+ "ComplexType": true,
+ "Data": true,
+ "DecodeError": true,
+ "DotDotDotType": true,
+ "Entry": true,
+ "EnumType": true,
+ "EnumValue": true,
+ "ErrUnknownPC": true,
+ "Field": true,
+ "FloatType": true,
+ "FuncType": true,
+ "IntType": true,
+ "LineEntry": true,
+ "LineFile": true,
+ "LineReader": true,
+ "LineReaderPos": true,
+ "New": true,
+ "Offset": true,
+ "PtrType": true,
+ "QualType": true,
+ "Reader": true,
+ "StructField": true,
+ "StructType": true,
+ "Tag": true,
+ "TagAccessDeclaration": true,
+ "TagArrayType": true,
+ "TagBaseType": true,
+ "TagCatchDwarfBlock": true,
+ "TagClassType": true,
+ "TagCommonDwarfBlock": true,
+ "TagCommonInclusion": true,
+ "TagCompileUnit": true,
+ "TagCondition": true,
+ "TagConstType": true,
+ "TagConstant": true,
+ "TagDwarfProcedure": true,
+ "TagEntryPoint": true,
+ "TagEnumerationType": true,
+ "TagEnumerator": true,
+ "TagFileType": true,
+ "TagFormalParameter": true,
+ "TagFriend": true,
+ "TagImportedDeclaration": true,
+ "TagImportedModule": true,
+ "TagImportedUnit": true,
+ "TagInheritance": true,
+ "TagInlinedSubroutine": true,
+ "TagInterfaceType": true,
+ "TagLabel": true,
+ "TagLexDwarfBlock": true,
+ "TagMember": true,
+ "TagModule": true,
+ "TagMutableType": true,
+ "TagNamelist": true,
+ "TagNamelistItem": true,
+ "TagNamespace": true,
+ "TagPackedType": true,
+ "TagPartialUnit": true,
+ "TagPointerType": true,
+ "TagPtrToMemberType": true,
+ "TagReferenceType": true,
+ "TagRestrictType": true,
+ "TagRvalueReferenceType": true,
+ "TagSetType": true,
+ "TagSharedType": true,
+ "TagStringType": true,
+ "TagStructType": true,
+ "TagSubprogram": true,
+ "TagSubrangeType": true,
+ "TagSubroutineType": true,
+ "TagTemplateAlias": true,
+ "TagTemplateTypeParameter": true,
+ "TagTemplateValueParameter": true,
+ "TagThrownType": true,
+ "TagTryDwarfBlock": true,
+ "TagTypeUnit": true,
+ "TagTypedef": true,
+ "TagUnionType": true,
+ "TagUnspecifiedParameters": true,
+ "TagUnspecifiedType": true,
+ "TagVariable": true,
+ "TagVariant": true,
+ "TagVariantPart": true,
+ "TagVolatileType": true,
+ "TagWithStmt": true,
+ "Type": true,
+ "TypedefType": true,
+ "UcharType": true,
+ "UintType": true,
+ "UnspecifiedType": true,
+ "VoidType": true,
+ },
+ "debug/elf": map[string]bool{
+ "ARM_MAGIC_TRAMP_NUMBER": true,
+ "COMPRESS_HIOS": true,
+ "COMPRESS_HIPROC": true,
+ "COMPRESS_LOOS": true,
+ "COMPRESS_LOPROC": true,
+ "COMPRESS_ZLIB": true,
+ "Chdr32": true,
+ "Chdr64": true,
+ "Class": true,
+ "CompressionType": true,
+ "DF_BIND_NOW": true,
+ "DF_ORIGIN": true,
+ "DF_STATIC_TLS": true,
+ "DF_SYMBOLIC": true,
+ "DF_TEXTREL": true,
+ "DT_BIND_NOW": true,
+ "DT_DEBUG": true,
+ "DT_ENCODING": true,
+ "DT_FINI": true,
+ "DT_FINI_ARRAY": true,
+ "DT_FINI_ARRAYSZ": true,
+ "DT_FLAGS": true,
+ "DT_HASH": true,
+ "DT_HIOS": true,
+ "DT_HIPROC": true,
+ "DT_INIT": true,
+ "DT_INIT_ARRAY": true,
+ "DT_INIT_ARRAYSZ": true,
+ "DT_JMPREL": true,
+ "DT_LOOS": true,
+ "DT_LOPROC": true,
+ "DT_NEEDED": true,
+ "DT_NULL": true,
+ "DT_PLTGOT": true,
+ "DT_PLTREL": true,
+ "DT_PLTRELSZ": true,
+ "DT_PREINIT_ARRAY": true,
+ "DT_PREINIT_ARRAYSZ": true,
+ "DT_REL": true,
+ "DT_RELA": true,
+ "DT_RELAENT": true,
+ "DT_RELASZ": true,
+ "DT_RELENT": true,
+ "DT_RELSZ": true,
+ "DT_RPATH": true,
+ "DT_RUNPATH": true,
+ "DT_SONAME": true,
+ "DT_STRSZ": true,
+ "DT_STRTAB": true,
+ "DT_SYMBOLIC": true,
+ "DT_SYMENT": true,
+ "DT_SYMTAB": true,
+ "DT_TEXTREL": true,
+ "DT_VERNEED": true,
+ "DT_VERNEEDNUM": true,
+ "DT_VERSYM": true,
+ "Data": true,
+ "Dyn32": true,
+ "Dyn64": true,
+ "DynFlag": true,
+ "DynTag": true,
+ "EI_ABIVERSION": true,
+ "EI_CLASS": true,
+ "EI_DATA": true,
+ "EI_NIDENT": true,
+ "EI_OSABI": true,
+ "EI_PAD": true,
+ "EI_VERSION": true,
+ "ELFCLASS32": true,
+ "ELFCLASS64": true,
+ "ELFCLASSNONE": true,
+ "ELFDATA2LSB": true,
+ "ELFDATA2MSB": true,
+ "ELFDATANONE": true,
+ "ELFMAG": true,
+ "ELFOSABI_86OPEN": true,
+ "ELFOSABI_AIX": true,
+ "ELFOSABI_ARM": true,
+ "ELFOSABI_AROS": true,
+ "ELFOSABI_CLOUDABI": true,
+ "ELFOSABI_FENIXOS": true,
+ "ELFOSABI_FREEBSD": true,
+ "ELFOSABI_HPUX": true,
+ "ELFOSABI_HURD": true,
+ "ELFOSABI_IRIX": true,
+ "ELFOSABI_LINUX": true,
+ "ELFOSABI_MODESTO": true,
+ "ELFOSABI_NETBSD": true,
+ "ELFOSABI_NONE": true,
+ "ELFOSABI_NSK": true,
+ "ELFOSABI_OPENBSD": true,
+ "ELFOSABI_OPENVMS": true,
+ "ELFOSABI_SOLARIS": true,
+ "ELFOSABI_STANDALONE": true,
+ "ELFOSABI_TRU64": true,
+ "EM_386": true,
+ "EM_486": true,
+ "EM_56800EX": true,
+ "EM_68HC05": true,
+ "EM_68HC08": true,
+ "EM_68HC11": true,
+ "EM_68HC12": true,
+ "EM_68HC16": true,
+ "EM_68K": true,
+ "EM_78KOR": true,
+ "EM_8051": true,
+ "EM_860": true,
+ "EM_88K": true,
+ "EM_960": true,
+ "EM_AARCH64": true,
+ "EM_ALPHA": true,
+ "EM_ALPHA_STD": true,
+ "EM_ALTERA_NIOS2": true,
+ "EM_AMDGPU": true,
+ "EM_ARC": true,
+ "EM_ARCA": true,
+ "EM_ARC_COMPACT": true,
+ "EM_ARC_COMPACT2": true,
+ "EM_ARM": true,
+ "EM_AVR": true,
+ "EM_AVR32": true,
+ "EM_BA1": true,
+ "EM_BA2": true,
+ "EM_BLACKFIN": true,
+ "EM_BPF": true,
+ "EM_C166": true,
+ "EM_CDP": true,
+ "EM_CE": true,
+ "EM_CLOUDSHIELD": true,
+ "EM_COGE": true,
+ "EM_COLDFIRE": true,
+ "EM_COOL": true,
+ "EM_COREA_1ST": true,
+ "EM_COREA_2ND": true,
+ "EM_CR": true,
+ "EM_CR16": true,
+ "EM_CRAYNV2": true,
+ "EM_CRIS": true,
+ "EM_CRX": true,
+ "EM_CSR_KALIMBA": true,
+ "EM_CUDA": true,
+ "EM_CYPRESS_M8C": true,
+ "EM_D10V": true,
+ "EM_D30V": true,
+ "EM_DSP24": true,
+ "EM_DSPIC30F": true,
+ "EM_DXP": true,
+ "EM_ECOG1": true,
+ "EM_ECOG16": true,
+ "EM_ECOG1X": true,
+ "EM_ECOG2": true,
+ "EM_ETPU": true,
+ "EM_EXCESS": true,
+ "EM_F2MC16": true,
+ "EM_FIREPATH": true,
+ "EM_FR20": true,
+ "EM_FR30": true,
+ "EM_FT32": true,
+ "EM_FX66": true,
+ "EM_H8S": true,
+ "EM_H8_300": true,
+ "EM_H8_300H": true,
+ "EM_H8_500": true,
+ "EM_HUANY": true,
+ "EM_IA_64": true,
+ "EM_INTEL205": true,
+ "EM_INTEL206": true,
+ "EM_INTEL207": true,
+ "EM_INTEL208": true,
+ "EM_INTEL209": true,
+ "EM_IP2K": true,
+ "EM_JAVELIN": true,
+ "EM_K10M": true,
+ "EM_KM32": true,
+ "EM_KMX16": true,
+ "EM_KMX32": true,
+ "EM_KMX8": true,
+ "EM_KVARC": true,
+ "EM_L10M": true,
+ "EM_LANAI": true,
+ "EM_LATTICEMICO32": true,
+ "EM_M16C": true,
+ "EM_M32": true,
+ "EM_M32C": true,
+ "EM_M32R": true,
+ "EM_MANIK": true,
+ "EM_MAX": true,
+ "EM_MAXQ30": true,
+ "EM_MCHP_PIC": true,
+ "EM_MCST_ELBRUS": true,
+ "EM_ME16": true,
+ "EM_METAG": true,
+ "EM_MICROBLAZE": true,
+ "EM_MIPS": true,
+ "EM_MIPS_RS3_LE": true,
+ "EM_MIPS_RS4_BE": true,
+ "EM_MIPS_X": true,
+ "EM_MMA": true,
+ "EM_MMDSP_PLUS": true,
+ "EM_MMIX": true,
+ "EM_MN10200": true,
+ "EM_MN10300": true,
+ "EM_MOXIE": true,
+ "EM_MSP430": true,
+ "EM_NCPU": true,
+ "EM_NDR1": true,
+ "EM_NDS32": true,
+ "EM_NONE": true,
+ "EM_NORC": true,
+ "EM_NS32K": true,
+ "EM_OPEN8": true,
+ "EM_OPENRISC": true,
+ "EM_PARISC": true,
+ "EM_PCP": true,
+ "EM_PDP10": true,
+ "EM_PDP11": true,
+ "EM_PDSP": true,
+ "EM_PJ": true,
+ "EM_PPC": true,
+ "EM_PPC64": true,
+ "EM_PRISM": true,
+ "EM_QDSP6": true,
+ "EM_R32C": true,
+ "EM_RCE": true,
+ "EM_RH32": true,
+ "EM_RISCV": true,
+ "EM_RL78": true,
+ "EM_RS08": true,
+ "EM_RX": true,
+ "EM_S370": true,
+ "EM_S390": true,
+ "EM_SCORE7": true,
+ "EM_SEP": true,
+ "EM_SE_C17": true,
+ "EM_SE_C33": true,
+ "EM_SH": true,
+ "EM_SHARC": true,
+ "EM_SLE9X": true,
+ "EM_SNP1K": true,
+ "EM_SPARC": true,
+ "EM_SPARC32PLUS": true,
+ "EM_SPARCV9": true,
+ "EM_ST100": true,
+ "EM_ST19": true,
+ "EM_ST200": true,
+ "EM_ST7": true,
+ "EM_ST9PLUS": true,
+ "EM_STARCORE": true,
+ "EM_STM8": true,
+ "EM_STXP7X": true,
+ "EM_SVX": true,
+ "EM_TILE64": true,
+ "EM_TILEGX": true,
+ "EM_TILEPRO": true,
+ "EM_TINYJ": true,
+ "EM_TI_ARP32": true,
+ "EM_TI_C2000": true,
+ "EM_TI_C5500": true,
+ "EM_TI_C6000": true,
+ "EM_TI_PRU": true,
+ "EM_TMM_GPP": true,
+ "EM_TPC": true,
+ "EM_TRICORE": true,
+ "EM_TRIMEDIA": true,
+ "EM_TSK3000": true,
+ "EM_UNICORE": true,
+ "EM_V800": true,
+ "EM_V850": true,
+ "EM_VAX": true,
+ "EM_VIDEOCORE": true,
+ "EM_VIDEOCORE3": true,
+ "EM_VIDEOCORE5": true,
+ "EM_VISIUM": true,
+ "EM_VPP500": true,
+ "EM_X86_64": true,
+ "EM_XCORE": true,
+ "EM_XGATE": true,
+ "EM_XIMO16": true,
+ "EM_XTENSA": true,
+ "EM_Z80": true,
+ "EM_ZSP": true,
+ "ET_CORE": true,
+ "ET_DYN": true,
+ "ET_EXEC": true,
+ "ET_HIOS": true,
+ "ET_HIPROC": true,
+ "ET_LOOS": true,
+ "ET_LOPROC": true,
+ "ET_NONE": true,
+ "ET_REL": true,
+ "EV_CURRENT": true,
+ "EV_NONE": true,
+ "ErrNoSymbols": true,
+ "File": true,
+ "FileHeader": true,
+ "FormatError": true,
+ "Header32": true,
+ "Header64": true,
+ "ImportedSymbol": true,
+ "Machine": true,
+ "NT_FPREGSET": true,
+ "NT_PRPSINFO": true,
+ "NT_PRSTATUS": true,
+ "NType": true,
+ "NewFile": true,
+ "OSABI": true,
+ "Open": true,
+ "PF_MASKOS": true,
+ "PF_MASKPROC": true,
+ "PF_R": true,
+ "PF_W": true,
+ "PF_X": true,
+ "PT_DYNAMIC": true,
+ "PT_HIOS": true,
+ "PT_HIPROC": true,
+ "PT_INTERP": true,
+ "PT_LOAD": true,
+ "PT_LOOS": true,
+ "PT_LOPROC": true,
+ "PT_NOTE": true,
+ "PT_NULL": true,
+ "PT_PHDR": true,
+ "PT_SHLIB": true,
+ "PT_TLS": true,
+ "Prog": true,
+ "Prog32": true,
+ "Prog64": true,
+ "ProgFlag": true,
+ "ProgHeader": true,
+ "ProgType": true,
+ "R_386": true,
+ "R_386_16": true,
+ "R_386_32": true,
+ "R_386_32PLT": true,
+ "R_386_8": true,
+ "R_386_COPY": true,
+ "R_386_GLOB_DAT": true,
+ "R_386_GOT32": true,
+ "R_386_GOT32X": true,
+ "R_386_GOTOFF": true,
+ "R_386_GOTPC": true,
+ "R_386_IRELATIVE": true,
+ "R_386_JMP_SLOT": true,
+ "R_386_NONE": true,
+ "R_386_PC16": true,
+ "R_386_PC32": true,
+ "R_386_PC8": true,
+ "R_386_PLT32": true,
+ "R_386_RELATIVE": true,
+ "R_386_SIZE32": true,
+ "R_386_TLS_DESC": true,
+ "R_386_TLS_DESC_CALL": true,
+ "R_386_TLS_DTPMOD32": true,
+ "R_386_TLS_DTPOFF32": true,
+ "R_386_TLS_GD": true,
+ "R_386_TLS_GD_32": true,
+ "R_386_TLS_GD_CALL": true,
+ "R_386_TLS_GD_POP": true,
+ "R_386_TLS_GD_PUSH": true,
+ "R_386_TLS_GOTDESC": true,
+ "R_386_TLS_GOTIE": true,
+ "R_386_TLS_IE": true,
+ "R_386_TLS_IE_32": true,
+ "R_386_TLS_LDM": true,
+ "R_386_TLS_LDM_32": true,
+ "R_386_TLS_LDM_CALL": true,
+ "R_386_TLS_LDM_POP": true,
+ "R_386_TLS_LDM_PUSH": true,
+ "R_386_TLS_LDO_32": true,
+ "R_386_TLS_LE": true,
+ "R_386_TLS_LE_32": true,
+ "R_386_TLS_TPOFF": true,
+ "R_386_TLS_TPOFF32": true,
+ "R_390": true,
+ "R_390_12": true,
+ "R_390_16": true,
+ "R_390_20": true,
+ "R_390_32": true,
+ "R_390_64": true,
+ "R_390_8": true,
+ "R_390_COPY": true,
+ "R_390_GLOB_DAT": true,
+ "R_390_GOT12": true,
+ "R_390_GOT16": true,
+ "R_390_GOT20": true,
+ "R_390_GOT32": true,
+ "R_390_GOT64": true,
+ "R_390_GOTENT": true,
+ "R_390_GOTOFF": true,
+ "R_390_GOTOFF16": true,
+ "R_390_GOTOFF64": true,
+ "R_390_GOTPC": true,
+ "R_390_GOTPCDBL": true,
+ "R_390_GOTPLT12": true,
+ "R_390_GOTPLT16": true,
+ "R_390_GOTPLT20": true,
+ "R_390_GOTPLT32": true,
+ "R_390_GOTPLT64": true,
+ "R_390_GOTPLTENT": true,
+ "R_390_GOTPLTOFF16": true,
+ "R_390_GOTPLTOFF32": true,
+ "R_390_GOTPLTOFF64": true,
+ "R_390_JMP_SLOT": true,
+ "R_390_NONE": true,
+ "R_390_PC16": true,
+ "R_390_PC16DBL": true,
+ "R_390_PC32": true,
+ "R_390_PC32DBL": true,
+ "R_390_PC64": true,
+ "R_390_PLT16DBL": true,
+ "R_390_PLT32": true,
+ "R_390_PLT32DBL": true,
+ "R_390_PLT64": true,
+ "R_390_RELATIVE": true,
+ "R_390_TLS_DTPMOD": true,
+ "R_390_TLS_DTPOFF": true,
+ "R_390_TLS_GD32": true,
+ "R_390_TLS_GD64": true,
+ "R_390_TLS_GDCALL": true,
+ "R_390_TLS_GOTIE12": true,
+ "R_390_TLS_GOTIE20": true,
+ "R_390_TLS_GOTIE32": true,
+ "R_390_TLS_GOTIE64": true,
+ "R_390_TLS_IE32": true,
+ "R_390_TLS_IE64": true,
+ "R_390_TLS_IEENT": true,
+ "R_390_TLS_LDCALL": true,
+ "R_390_TLS_LDM32": true,
+ "R_390_TLS_LDM64": true,
+ "R_390_TLS_LDO32": true,
+ "R_390_TLS_LDO64": true,
+ "R_390_TLS_LE32": true,
+ "R_390_TLS_LE64": true,
+ "R_390_TLS_LOAD": true,
+ "R_390_TLS_TPOFF": true,
+ "R_AARCH64": true,
+ "R_AARCH64_ABS16": true,
+ "R_AARCH64_ABS32": true,
+ "R_AARCH64_ABS64": true,
+ "R_AARCH64_ADD_ABS_LO12_NC": true,
+ "R_AARCH64_ADR_GOT_PAGE": true,
+ "R_AARCH64_ADR_PREL_LO21": true,
+ "R_AARCH64_ADR_PREL_PG_HI21": true,
+ "R_AARCH64_ADR_PREL_PG_HI21_NC": true,
+ "R_AARCH64_CALL26": true,
+ "R_AARCH64_CONDBR19": true,
+ "R_AARCH64_COPY": true,
+ "R_AARCH64_GLOB_DAT": true,
+ "R_AARCH64_GOT_LD_PREL19": true,
+ "R_AARCH64_IRELATIVE": true,
+ "R_AARCH64_JUMP26": true,
+ "R_AARCH64_JUMP_SLOT": true,
+ "R_AARCH64_LD64_GOTOFF_LO15": true,
+ "R_AARCH64_LD64_GOTPAGE_LO15": true,
+ "R_AARCH64_LD64_GOT_LO12_NC": true,
+ "R_AARCH64_LDST128_ABS_LO12_NC": true,
+ "R_AARCH64_LDST16_ABS_LO12_NC": true,
+ "R_AARCH64_LDST32_ABS_LO12_NC": true,
+ "R_AARCH64_LDST64_ABS_LO12_NC": true,
+ "R_AARCH64_LDST8_ABS_LO12_NC": true,
+ "R_AARCH64_LD_PREL_LO19": true,
+ "R_AARCH64_MOVW_SABS_G0": true,
+ "R_AARCH64_MOVW_SABS_G1": true,
+ "R_AARCH64_MOVW_SABS_G2": true,
+ "R_AARCH64_MOVW_UABS_G0": true,
+ "R_AARCH64_MOVW_UABS_G0_NC": true,
+ "R_AARCH64_MOVW_UABS_G1": true,
+ "R_AARCH64_MOVW_UABS_G1_NC": true,
+ "R_AARCH64_MOVW_UABS_G2": true,
+ "R_AARCH64_MOVW_UABS_G2_NC": true,
+ "R_AARCH64_MOVW_UABS_G3": true,
+ "R_AARCH64_NONE": true,
+ "R_AARCH64_NULL": true,
+ "R_AARCH64_P32_ABS16": true,
+ "R_AARCH64_P32_ABS32": true,
+ "R_AARCH64_P32_ADD_ABS_LO12_NC": true,
+ "R_AARCH64_P32_ADR_GOT_PAGE": true,
+ "R_AARCH64_P32_ADR_PREL_LO21": true,
+ "R_AARCH64_P32_ADR_PREL_PG_HI21": true,
+ "R_AARCH64_P32_CALL26": true,
+ "R_AARCH64_P32_CONDBR19": true,
+ "R_AARCH64_P32_COPY": true,
+ "R_AARCH64_P32_GLOB_DAT": true,
+ "R_AARCH64_P32_GOT_LD_PREL19": true,
+ "R_AARCH64_P32_IRELATIVE": true,
+ "R_AARCH64_P32_JUMP26": true,
+ "R_AARCH64_P32_JUMP_SLOT": true,
+ "R_AARCH64_P32_LD32_GOT_LO12_NC": true,
+ "R_AARCH64_P32_LDST128_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST16_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST32_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST64_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LDST8_ABS_LO12_NC": true,
+ "R_AARCH64_P32_LD_PREL_LO19": true,
+ "R_AARCH64_P32_MOVW_SABS_G0": true,
+ "R_AARCH64_P32_MOVW_UABS_G0": true,
+ "R_AARCH64_P32_MOVW_UABS_G0_NC": true,
+ "R_AARCH64_P32_MOVW_UABS_G1": true,
+ "R_AARCH64_P32_PREL16": true,
+ "R_AARCH64_P32_PREL32": true,
+ "R_AARCH64_P32_RELATIVE": true,
+ "R_AARCH64_P32_TLSDESC": true,
+ "R_AARCH64_P32_TLSDESC_ADD_LO12_NC": true,
+ "R_AARCH64_P32_TLSDESC_ADR_PAGE21": true,
+ "R_AARCH64_P32_TLSDESC_ADR_PREL21": true,
+ "R_AARCH64_P32_TLSDESC_CALL": true,
+ "R_AARCH64_P32_TLSDESC_LD32_LO12_NC": true,
+ "R_AARCH64_P32_TLSDESC_LD_PREL19": true,
+ "R_AARCH64_P32_TLSGD_ADD_LO12_NC": true,
+ "R_AARCH64_P32_TLSGD_ADR_PAGE21": true,
+ "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21": true,
+ "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC": true,
+ "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19": true,
+ "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12": true,
+ "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12": true,
+ "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC": true,
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0": true,
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC": true,
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1": true,
+ "R_AARCH64_P32_TLS_DTPMOD": true,
+ "R_AARCH64_P32_TLS_DTPREL": true,
+ "R_AARCH64_P32_TLS_TPREL": true,
+ "R_AARCH64_P32_TSTBR14": true,
+ "R_AARCH64_PREL16": true,
+ "R_AARCH64_PREL32": true,
+ "R_AARCH64_PREL64": true,
+ "R_AARCH64_RELATIVE": true,
+ "R_AARCH64_TLSDESC": true,
+ "R_AARCH64_TLSDESC_ADD": true,
+ "R_AARCH64_TLSDESC_ADD_LO12_NC": true,
+ "R_AARCH64_TLSDESC_ADR_PAGE21": true,
+ "R_AARCH64_TLSDESC_ADR_PREL21": true,
+ "R_AARCH64_TLSDESC_CALL": true,
+ "R_AARCH64_TLSDESC_LD64_LO12_NC": true,
+ "R_AARCH64_TLSDESC_LDR": true,
+ "R_AARCH64_TLSDESC_LD_PREL19": true,
+ "R_AARCH64_TLSDESC_OFF_G0_NC": true,
+ "R_AARCH64_TLSDESC_OFF_G1": true,
+ "R_AARCH64_TLSGD_ADD_LO12_NC": true,
+ "R_AARCH64_TLSGD_ADR_PAGE21": true,
+ "R_AARCH64_TLSGD_ADR_PREL21": true,
+ "R_AARCH64_TLSGD_MOVW_G0_NC": true,
+ "R_AARCH64_TLSGD_MOVW_G1": true,
+ "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21": true,
+ "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC": true,
+ "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19": true,
+ "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC": true,
+ "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1": true,
+ "R_AARCH64_TLSLD_ADR_PAGE21": true,
+ "R_AARCH64_TLSLD_ADR_PREL21": true,
+ "R_AARCH64_TLSLD_LDST128_DTPREL_LO12": true,
+ "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC": true,
+ "R_AARCH64_TLSLE_ADD_TPREL_HI12": true,
+ "R_AARCH64_TLSLE_ADD_TPREL_LO12": true,
+ "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC": true,
+ "R_AARCH64_TLSLE_LDST128_TPREL_LO12": true,
+ "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G0": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G1": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC": true,
+ "R_AARCH64_TLSLE_MOVW_TPREL_G2": true,
+ "R_AARCH64_TLS_DTPMOD64": true,
+ "R_AARCH64_TLS_DTPREL64": true,
+ "R_AARCH64_TLS_TPREL64": true,
+ "R_AARCH64_TSTBR14": true,
+ "R_ALPHA": true,
+ "R_ALPHA_BRADDR": true,
+ "R_ALPHA_COPY": true,
+ "R_ALPHA_GLOB_DAT": true,
+ "R_ALPHA_GPDISP": true,
+ "R_ALPHA_GPREL32": true,
+ "R_ALPHA_GPRELHIGH": true,
+ "R_ALPHA_GPRELLOW": true,
+ "R_ALPHA_GPVALUE": true,
+ "R_ALPHA_HINT": true,
+ "R_ALPHA_IMMED_BR_HI32": true,
+ "R_ALPHA_IMMED_GP_16": true,
+ "R_ALPHA_IMMED_GP_HI32": true,
+ "R_ALPHA_IMMED_LO32": true,
+ "R_ALPHA_IMMED_SCN_HI32": true,
+ "R_ALPHA_JMP_SLOT": true,
+ "R_ALPHA_LITERAL": true,
+ "R_ALPHA_LITUSE": true,
+ "R_ALPHA_NONE": true,
+ "R_ALPHA_OP_PRSHIFT": true,
+ "R_ALPHA_OP_PSUB": true,
+ "R_ALPHA_OP_PUSH": true,
+ "R_ALPHA_OP_STORE": true,
+ "R_ALPHA_REFLONG": true,
+ "R_ALPHA_REFQUAD": true,
+ "R_ALPHA_RELATIVE": true,
+ "R_ALPHA_SREL16": true,
+ "R_ALPHA_SREL32": true,
+ "R_ALPHA_SREL64": true,
+ "R_ARM": true,
+ "R_ARM_ABS12": true,
+ "R_ARM_ABS16": true,
+ "R_ARM_ABS32": true,
+ "R_ARM_ABS32_NOI": true,
+ "R_ARM_ABS8": true,
+ "R_ARM_ALU_PCREL_15_8": true,
+ "R_ARM_ALU_PCREL_23_15": true,
+ "R_ARM_ALU_PCREL_7_0": true,
+ "R_ARM_ALU_PC_G0": true,
+ "R_ARM_ALU_PC_G0_NC": true,
+ "R_ARM_ALU_PC_G1": true,
+ "R_ARM_ALU_PC_G1_NC": true,
+ "R_ARM_ALU_PC_G2": true,
+ "R_ARM_ALU_SBREL_19_12_NC": true,
+ "R_ARM_ALU_SBREL_27_20_CK": true,
+ "R_ARM_ALU_SB_G0": true,
+ "R_ARM_ALU_SB_G0_NC": true,
+ "R_ARM_ALU_SB_G1": true,
+ "R_ARM_ALU_SB_G1_NC": true,
+ "R_ARM_ALU_SB_G2": true,
+ "R_ARM_AMP_VCALL9": true,
+ "R_ARM_BASE_ABS": true,
+ "R_ARM_CALL": true,
+ "R_ARM_COPY": true,
+ "R_ARM_GLOB_DAT": true,
+ "R_ARM_GNU_VTENTRY": true,
+ "R_ARM_GNU_VTINHERIT": true,
+ "R_ARM_GOT32": true,
+ "R_ARM_GOTOFF": true,
+ "R_ARM_GOTOFF12": true,
+ "R_ARM_GOTPC": true,
+ "R_ARM_GOTRELAX": true,
+ "R_ARM_GOT_ABS": true,
+ "R_ARM_GOT_BREL12": true,
+ "R_ARM_GOT_PREL": true,
+ "R_ARM_IRELATIVE": true,
+ "R_ARM_JUMP24": true,
+ "R_ARM_JUMP_SLOT": true,
+ "R_ARM_LDC_PC_G0": true,
+ "R_ARM_LDC_PC_G1": true,
+ "R_ARM_LDC_PC_G2": true,
+ "R_ARM_LDC_SB_G0": true,
+ "R_ARM_LDC_SB_G1": true,
+ "R_ARM_LDC_SB_G2": true,
+ "R_ARM_LDRS_PC_G0": true,
+ "R_ARM_LDRS_PC_G1": true,
+ "R_ARM_LDRS_PC_G2": true,
+ "R_ARM_LDRS_SB_G0": true,
+ "R_ARM_LDRS_SB_G1": true,
+ "R_ARM_LDRS_SB_G2": true,
+ "R_ARM_LDR_PC_G1": true,
+ "R_ARM_LDR_PC_G2": true,
+ "R_ARM_LDR_SBREL_11_10_NC": true,
+ "R_ARM_LDR_SB_G0": true,
+ "R_ARM_LDR_SB_G1": true,
+ "R_ARM_LDR_SB_G2": true,
+ "R_ARM_ME_TOO": true,
+ "R_ARM_MOVT_ABS": true,
+ "R_ARM_MOVT_BREL": true,
+ "R_ARM_MOVT_PREL": true,
+ "R_ARM_MOVW_ABS_NC": true,
+ "R_ARM_MOVW_BREL": true,
+ "R_ARM_MOVW_BREL_NC": true,
+ "R_ARM_MOVW_PREL_NC": true,
+ "R_ARM_NONE": true,
+ "R_ARM_PC13": true,
+ "R_ARM_PC24": true,
+ "R_ARM_PLT32": true,
+ "R_ARM_PLT32_ABS": true,
+ "R_ARM_PREL31": true,
+ "R_ARM_PRIVATE_0": true,
+ "R_ARM_PRIVATE_1": true,
+ "R_ARM_PRIVATE_10": true,
+ "R_ARM_PRIVATE_11": true,
+ "R_ARM_PRIVATE_12": true,
+ "R_ARM_PRIVATE_13": true,
+ "R_ARM_PRIVATE_14": true,
+ "R_ARM_PRIVATE_15": true,
+ "R_ARM_PRIVATE_2": true,
+ "R_ARM_PRIVATE_3": true,
+ "R_ARM_PRIVATE_4": true,
+ "R_ARM_PRIVATE_5": true,
+ "R_ARM_PRIVATE_6": true,
+ "R_ARM_PRIVATE_7": true,
+ "R_ARM_PRIVATE_8": true,
+ "R_ARM_PRIVATE_9": true,
+ "R_ARM_RABS32": true,
+ "R_ARM_RBASE": true,
+ "R_ARM_REL32": true,
+ "R_ARM_REL32_NOI": true,
+ "R_ARM_RELATIVE": true,
+ "R_ARM_RPC24": true,
+ "R_ARM_RREL32": true,
+ "R_ARM_RSBREL32": true,
+ "R_ARM_RXPC25": true,
+ "R_ARM_SBREL31": true,
+ "R_ARM_SBREL32": true,
+ "R_ARM_SWI24": true,
+ "R_ARM_TARGET1": true,
+ "R_ARM_TARGET2": true,
+ "R_ARM_THM_ABS5": true,
+ "R_ARM_THM_ALU_ABS_G0_NC": true,
+ "R_ARM_THM_ALU_ABS_G1_NC": true,
+ "R_ARM_THM_ALU_ABS_G2_NC": true,
+ "R_ARM_THM_ALU_ABS_G3": true,
+ "R_ARM_THM_ALU_PREL_11_0": true,
+ "R_ARM_THM_GOT_BREL12": true,
+ "R_ARM_THM_JUMP11": true,
+ "R_ARM_THM_JUMP19": true,
+ "R_ARM_THM_JUMP24": true,
+ "R_ARM_THM_JUMP6": true,
+ "R_ARM_THM_JUMP8": true,
+ "R_ARM_THM_MOVT_ABS": true,
+ "R_ARM_THM_MOVT_BREL": true,
+ "R_ARM_THM_MOVT_PREL": true,
+ "R_ARM_THM_MOVW_ABS_NC": true,
+ "R_ARM_THM_MOVW_BREL": true,
+ "R_ARM_THM_MOVW_BREL_NC": true,
+ "R_ARM_THM_MOVW_PREL_NC": true,
+ "R_ARM_THM_PC12": true,
+ "R_ARM_THM_PC22": true,
+ "R_ARM_THM_PC8": true,
+ "R_ARM_THM_RPC22": true,
+ "R_ARM_THM_SWI8": true,
+ "R_ARM_THM_TLS_CALL": true,
+ "R_ARM_THM_TLS_DESCSEQ16": true,
+ "R_ARM_THM_TLS_DESCSEQ32": true,
+ "R_ARM_THM_XPC22": true,
+ "R_ARM_TLS_CALL": true,
+ "R_ARM_TLS_DESCSEQ": true,
+ "R_ARM_TLS_DTPMOD32": true,
+ "R_ARM_TLS_DTPOFF32": true,
+ "R_ARM_TLS_GD32": true,
+ "R_ARM_TLS_GOTDESC": true,
+ "R_ARM_TLS_IE12GP": true,
+ "R_ARM_TLS_IE32": true,
+ "R_ARM_TLS_LDM32": true,
+ "R_ARM_TLS_LDO12": true,
+ "R_ARM_TLS_LDO32": true,
+ "R_ARM_TLS_LE12": true,
+ "R_ARM_TLS_LE32": true,
+ "R_ARM_TLS_TPOFF32": true,
+ "R_ARM_V4BX": true,
+ "R_ARM_XPC25": true,
+ "R_INFO": true,
+ "R_INFO32": true,
+ "R_MIPS": true,
+ "R_MIPS_16": true,
+ "R_MIPS_26": true,
+ "R_MIPS_32": true,
+ "R_MIPS_64": true,
+ "R_MIPS_ADD_IMMEDIATE": true,
+ "R_MIPS_CALL16": true,
+ "R_MIPS_CALL_HI16": true,
+ "R_MIPS_CALL_LO16": true,
+ "R_MIPS_DELETE": true,
+ "R_MIPS_GOT16": true,
+ "R_MIPS_GOT_DISP": true,
+ "R_MIPS_GOT_HI16": true,
+ "R_MIPS_GOT_LO16": true,
+ "R_MIPS_GOT_OFST": true,
+ "R_MIPS_GOT_PAGE": true,
+ "R_MIPS_GPREL16": true,
+ "R_MIPS_GPREL32": true,
+ "R_MIPS_HI16": true,
+ "R_MIPS_HIGHER": true,
+ "R_MIPS_HIGHEST": true,
+ "R_MIPS_INSERT_A": true,
+ "R_MIPS_INSERT_B": true,
+ "R_MIPS_JALR": true,
+ "R_MIPS_LITERAL": true,
+ "R_MIPS_LO16": true,
+ "R_MIPS_NONE": true,
+ "R_MIPS_PC16": true,
+ "R_MIPS_PJUMP": true,
+ "R_MIPS_REL16": true,
+ "R_MIPS_REL32": true,
+ "R_MIPS_RELGOT": true,
+ "R_MIPS_SCN_DISP": true,
+ "R_MIPS_SHIFT5": true,
+ "R_MIPS_SHIFT6": true,
+ "R_MIPS_SUB": true,
+ "R_MIPS_TLS_DTPMOD32": true,
+ "R_MIPS_TLS_DTPMOD64": true,
+ "R_MIPS_TLS_DTPREL32": true,
+ "R_MIPS_TLS_DTPREL64": true,
+ "R_MIPS_TLS_DTPREL_HI16": true,
+ "R_MIPS_TLS_DTPREL_LO16": true,
+ "R_MIPS_TLS_GD": true,
+ "R_MIPS_TLS_GOTTPREL": true,
+ "R_MIPS_TLS_LDM": true,
+ "R_MIPS_TLS_TPREL32": true,
+ "R_MIPS_TLS_TPREL64": true,
+ "R_MIPS_TLS_TPREL_HI16": true,
+ "R_MIPS_TLS_TPREL_LO16": true,
+ "R_PPC": true,
+ "R_PPC64": true,
+ "R_PPC64_ADDR14": true,
+ "R_PPC64_ADDR14_BRNTAKEN": true,
+ "R_PPC64_ADDR14_BRTAKEN": true,
+ "R_PPC64_ADDR16": true,
+ "R_PPC64_ADDR16_DS": true,
+ "R_PPC64_ADDR16_HA": true,
+ "R_PPC64_ADDR16_HI": true,
+ "R_PPC64_ADDR16_HIGH": true,
+ "R_PPC64_ADDR16_HIGHA": true,
+ "R_PPC64_ADDR16_HIGHER": true,
+ "R_PPC64_ADDR16_HIGHERA": true,
+ "R_PPC64_ADDR16_HIGHEST": true,
+ "R_PPC64_ADDR16_HIGHESTA": true,
+ "R_PPC64_ADDR16_LO": true,
+ "R_PPC64_ADDR16_LO_DS": true,
+ "R_PPC64_ADDR24": true,
+ "R_PPC64_ADDR32": true,
+ "R_PPC64_ADDR64": true,
+ "R_PPC64_ADDR64_LOCAL": true,
+ "R_PPC64_DTPMOD64": true,
+ "R_PPC64_DTPREL16": true,
+ "R_PPC64_DTPREL16_DS": true,
+ "R_PPC64_DTPREL16_HA": true,
+ "R_PPC64_DTPREL16_HI": true,
+ "R_PPC64_DTPREL16_HIGH": true,
+ "R_PPC64_DTPREL16_HIGHA": true,
+ "R_PPC64_DTPREL16_HIGHER": true,
+ "R_PPC64_DTPREL16_HIGHERA": true,
+ "R_PPC64_DTPREL16_HIGHEST": true,
+ "R_PPC64_DTPREL16_HIGHESTA": true,
+ "R_PPC64_DTPREL16_LO": true,
+ "R_PPC64_DTPREL16_LO_DS": true,
+ "R_PPC64_DTPREL64": true,
+ "R_PPC64_ENTRY": true,
+ "R_PPC64_GOT16": true,
+ "R_PPC64_GOT16_DS": true,
+ "R_PPC64_GOT16_HA": true,
+ "R_PPC64_GOT16_HI": true,
+ "R_PPC64_GOT16_LO": true,
+ "R_PPC64_GOT16_LO_DS": true,
+ "R_PPC64_GOT_DTPREL16_DS": true,
+ "R_PPC64_GOT_DTPREL16_HA": true,
+ "R_PPC64_GOT_DTPREL16_HI": true,
+ "R_PPC64_GOT_DTPREL16_LO_DS": true,
+ "R_PPC64_GOT_TLSGD16": true,
+ "R_PPC64_GOT_TLSGD16_HA": true,
+ "R_PPC64_GOT_TLSGD16_HI": true,
+ "R_PPC64_GOT_TLSGD16_LO": true,
+ "R_PPC64_GOT_TLSLD16": true,
+ "R_PPC64_GOT_TLSLD16_HA": true,
+ "R_PPC64_GOT_TLSLD16_HI": true,
+ "R_PPC64_GOT_TLSLD16_LO": true,
+ "R_PPC64_GOT_TPREL16_DS": true,
+ "R_PPC64_GOT_TPREL16_HA": true,
+ "R_PPC64_GOT_TPREL16_HI": true,
+ "R_PPC64_GOT_TPREL16_LO_DS": true,
+ "R_PPC64_IRELATIVE": true,
+ "R_PPC64_JMP_IREL": true,
+ "R_PPC64_JMP_SLOT": true,
+ "R_PPC64_NONE": true,
+ "R_PPC64_PLT16_LO_DS": true,
+ "R_PPC64_PLTGOT16": true,
+ "R_PPC64_PLTGOT16_DS": true,
+ "R_PPC64_PLTGOT16_HA": true,
+ "R_PPC64_PLTGOT16_HI": true,
+ "R_PPC64_PLTGOT16_LO": true,
+ "R_PPC64_PLTGOT_LO_DS": true,
+ "R_PPC64_REL14": true,
+ "R_PPC64_REL14_BRNTAKEN": true,
+ "R_PPC64_REL14_BRTAKEN": true,
+ "R_PPC64_REL16": true,
+ "R_PPC64_REL16DX_HA": true,
+ "R_PPC64_REL16_HA": true,
+ "R_PPC64_REL16_HI": true,
+ "R_PPC64_REL16_LO": true,
+ "R_PPC64_REL24": true,
+ "R_PPC64_REL24_NOTOC": true,
+ "R_PPC64_REL32": true,
+ "R_PPC64_REL64": true,
+ "R_PPC64_SECTOFF_DS": true,
+ "R_PPC64_SECTOFF_LO_DS": true,
+ "R_PPC64_TLS": true,
+ "R_PPC64_TLSGD": true,
+ "R_PPC64_TLSLD": true,
+ "R_PPC64_TOC": true,
+ "R_PPC64_TOC16": true,
+ "R_PPC64_TOC16_DS": true,
+ "R_PPC64_TOC16_HA": true,
+ "R_PPC64_TOC16_HI": true,
+ "R_PPC64_TOC16_LO": true,
+ "R_PPC64_TOC16_LO_DS": true,
+ "R_PPC64_TOCSAVE": true,
+ "R_PPC64_TPREL16": true,
+ "R_PPC64_TPREL16_DS": true,
+ "R_PPC64_TPREL16_HA": true,
+ "R_PPC64_TPREL16_HI": true,
+ "R_PPC64_TPREL16_HIGH": true,
+ "R_PPC64_TPREL16_HIGHA": true,
+ "R_PPC64_TPREL16_HIGHER": true,
+ "R_PPC64_TPREL16_HIGHERA": true,
+ "R_PPC64_TPREL16_HIGHEST": true,
+ "R_PPC64_TPREL16_HIGHESTA": true,
+ "R_PPC64_TPREL16_LO": true,
+ "R_PPC64_TPREL16_LO_DS": true,
+ "R_PPC64_TPREL64": true,
+ "R_PPC_ADDR14": true,
+ "R_PPC_ADDR14_BRNTAKEN": true,
+ "R_PPC_ADDR14_BRTAKEN": true,
+ "R_PPC_ADDR16": true,
+ "R_PPC_ADDR16_HA": true,
+ "R_PPC_ADDR16_HI": true,
+ "R_PPC_ADDR16_LO": true,
+ "R_PPC_ADDR24": true,
+ "R_PPC_ADDR32": true,
+ "R_PPC_COPY": true,
+ "R_PPC_DTPMOD32": true,
+ "R_PPC_DTPREL16": true,
+ "R_PPC_DTPREL16_HA": true,
+ "R_PPC_DTPREL16_HI": true,
+ "R_PPC_DTPREL16_LO": true,
+ "R_PPC_DTPREL32": true,
+ "R_PPC_EMB_BIT_FLD": true,
+ "R_PPC_EMB_MRKREF": true,
+ "R_PPC_EMB_NADDR16": true,
+ "R_PPC_EMB_NADDR16_HA": true,
+ "R_PPC_EMB_NADDR16_HI": true,
+ "R_PPC_EMB_NADDR16_LO": true,
+ "R_PPC_EMB_NADDR32": true,
+ "R_PPC_EMB_RELSDA": true,
+ "R_PPC_EMB_RELSEC16": true,
+ "R_PPC_EMB_RELST_HA": true,
+ "R_PPC_EMB_RELST_HI": true,
+ "R_PPC_EMB_RELST_LO": true,
+ "R_PPC_EMB_SDA21": true,
+ "R_PPC_EMB_SDA2I16": true,
+ "R_PPC_EMB_SDA2REL": true,
+ "R_PPC_EMB_SDAI16": true,
+ "R_PPC_GLOB_DAT": true,
+ "R_PPC_GOT16": true,
+ "R_PPC_GOT16_HA": true,
+ "R_PPC_GOT16_HI": true,
+ "R_PPC_GOT16_LO": true,
+ "R_PPC_GOT_TLSGD16": true,
+ "R_PPC_GOT_TLSGD16_HA": true,
+ "R_PPC_GOT_TLSGD16_HI": true,
+ "R_PPC_GOT_TLSGD16_LO": true,
+ "R_PPC_GOT_TLSLD16": true,
+ "R_PPC_GOT_TLSLD16_HA": true,
+ "R_PPC_GOT_TLSLD16_HI": true,
+ "R_PPC_GOT_TLSLD16_LO": true,
+ "R_PPC_GOT_TPREL16": true,
+ "R_PPC_GOT_TPREL16_HA": true,
+ "R_PPC_GOT_TPREL16_HI": true,
+ "R_PPC_GOT_TPREL16_LO": true,
+ "R_PPC_JMP_SLOT": true,
+ "R_PPC_LOCAL24PC": true,
+ "R_PPC_NONE": true,
+ "R_PPC_PLT16_HA": true,
+ "R_PPC_PLT16_HI": true,
+ "R_PPC_PLT16_LO": true,
+ "R_PPC_PLT32": true,
+ "R_PPC_PLTREL24": true,
+ "R_PPC_PLTREL32": true,
+ "R_PPC_REL14": true,
+ "R_PPC_REL14_BRNTAKEN": true,
+ "R_PPC_REL14_BRTAKEN": true,
+ "R_PPC_REL24": true,
+ "R_PPC_REL32": true,
+ "R_PPC_RELATIVE": true,
+ "R_PPC_SDAREL16": true,
+ "R_PPC_SECTOFF": true,
+ "R_PPC_SECTOFF_HA": true,
+ "R_PPC_SECTOFF_HI": true,
+ "R_PPC_SECTOFF_LO": true,
+ "R_PPC_TLS": true,
+ "R_PPC_TPREL16": true,
+ "R_PPC_TPREL16_HA": true,
+ "R_PPC_TPREL16_HI": true,
+ "R_PPC_TPREL16_LO": true,
+ "R_PPC_TPREL32": true,
+ "R_PPC_UADDR16": true,
+ "R_PPC_UADDR32": true,
+ "R_RISCV": true,
+ "R_RISCV_32": true,
+ "R_RISCV_32_PCREL": true,
+ "R_RISCV_64": true,
+ "R_RISCV_ADD16": true,
+ "R_RISCV_ADD32": true,
+ "R_RISCV_ADD64": true,
+ "R_RISCV_ADD8": true,
+ "R_RISCV_ALIGN": true,
+ "R_RISCV_BRANCH": true,
+ "R_RISCV_CALL": true,
+ "R_RISCV_CALL_PLT": true,
+ "R_RISCV_COPY": true,
+ "R_RISCV_GNU_VTENTRY": true,
+ "R_RISCV_GNU_VTINHERIT": true,
+ "R_RISCV_GOT_HI20": true,
+ "R_RISCV_GPREL_I": true,
+ "R_RISCV_GPREL_S": true,
+ "R_RISCV_HI20": true,
+ "R_RISCV_JAL": true,
+ "R_RISCV_JUMP_SLOT": true,
+ "R_RISCV_LO12_I": true,
+ "R_RISCV_LO12_S": true,
+ "R_RISCV_NONE": true,
+ "R_RISCV_PCREL_HI20": true,
+ "R_RISCV_PCREL_LO12_I": true,
+ "R_RISCV_PCREL_LO12_S": true,
+ "R_RISCV_RELATIVE": true,
+ "R_RISCV_RELAX": true,
+ "R_RISCV_RVC_BRANCH": true,
+ "R_RISCV_RVC_JUMP": true,
+ "R_RISCV_RVC_LUI": true,
+ "R_RISCV_SET16": true,
+ "R_RISCV_SET32": true,
+ "R_RISCV_SET6": true,
+ "R_RISCV_SET8": true,
+ "R_RISCV_SUB16": true,
+ "R_RISCV_SUB32": true,
+ "R_RISCV_SUB6": true,
+ "R_RISCV_SUB64": true,
+ "R_RISCV_SUB8": true,
+ "R_RISCV_TLS_DTPMOD32": true,
+ "R_RISCV_TLS_DTPMOD64": true,
+ "R_RISCV_TLS_DTPREL32": true,
+ "R_RISCV_TLS_DTPREL64": true,
+ "R_RISCV_TLS_GD_HI20": true,
+ "R_RISCV_TLS_GOT_HI20": true,
+ "R_RISCV_TLS_TPREL32": true,
+ "R_RISCV_TLS_TPREL64": true,
+ "R_RISCV_TPREL_ADD": true,
+ "R_RISCV_TPREL_HI20": true,
+ "R_RISCV_TPREL_I": true,
+ "R_RISCV_TPREL_LO12_I": true,
+ "R_RISCV_TPREL_LO12_S": true,
+ "R_RISCV_TPREL_S": true,
+ "R_SPARC": true,
+ "R_SPARC_10": true,
+ "R_SPARC_11": true,
+ "R_SPARC_13": true,
+ "R_SPARC_16": true,
+ "R_SPARC_22": true,
+ "R_SPARC_32": true,
+ "R_SPARC_5": true,
+ "R_SPARC_6": true,
+ "R_SPARC_64": true,
+ "R_SPARC_7": true,
+ "R_SPARC_8": true,
+ "R_SPARC_COPY": true,
+ "R_SPARC_DISP16": true,
+ "R_SPARC_DISP32": true,
+ "R_SPARC_DISP64": true,
+ "R_SPARC_DISP8": true,
+ "R_SPARC_GLOB_DAT": true,
+ "R_SPARC_GLOB_JMP": true,
+ "R_SPARC_GOT10": true,
+ "R_SPARC_GOT13": true,
+ "R_SPARC_GOT22": true,
+ "R_SPARC_H44": true,
+ "R_SPARC_HH22": true,
+ "R_SPARC_HI22": true,
+ "R_SPARC_HIPLT22": true,
+ "R_SPARC_HIX22": true,
+ "R_SPARC_HM10": true,
+ "R_SPARC_JMP_SLOT": true,
+ "R_SPARC_L44": true,
+ "R_SPARC_LM22": true,
+ "R_SPARC_LO10": true,
+ "R_SPARC_LOPLT10": true,
+ "R_SPARC_LOX10": true,
+ "R_SPARC_M44": true,
+ "R_SPARC_NONE": true,
+ "R_SPARC_OLO10": true,
+ "R_SPARC_PC10": true,
+ "R_SPARC_PC22": true,
+ "R_SPARC_PCPLT10": true,
+ "R_SPARC_PCPLT22": true,
+ "R_SPARC_PCPLT32": true,
+ "R_SPARC_PC_HH22": true,
+ "R_SPARC_PC_HM10": true,
+ "R_SPARC_PC_LM22": true,
+ "R_SPARC_PLT32": true,
+ "R_SPARC_PLT64": true,
+ "R_SPARC_REGISTER": true,
+ "R_SPARC_RELATIVE": true,
+ "R_SPARC_UA16": true,
+ "R_SPARC_UA32": true,
+ "R_SPARC_UA64": true,
+ "R_SPARC_WDISP16": true,
+ "R_SPARC_WDISP19": true,
+ "R_SPARC_WDISP22": true,
+ "R_SPARC_WDISP30": true,
+ "R_SPARC_WPLT30": true,
+ "R_SYM32": true,
+ "R_SYM64": true,
+ "R_TYPE32": true,
+ "R_TYPE64": true,
+ "R_X86_64": true,
+ "R_X86_64_16": true,
+ "R_X86_64_32": true,
+ "R_X86_64_32S": true,
+ "R_X86_64_64": true,
+ "R_X86_64_8": true,
+ "R_X86_64_COPY": true,
+ "R_X86_64_DTPMOD64": true,
+ "R_X86_64_DTPOFF32": true,
+ "R_X86_64_DTPOFF64": true,
+ "R_X86_64_GLOB_DAT": true,
+ "R_X86_64_GOT32": true,
+ "R_X86_64_GOT64": true,
+ "R_X86_64_GOTOFF64": true,
+ "R_X86_64_GOTPC32": true,
+ "R_X86_64_GOTPC32_TLSDESC": true,
+ "R_X86_64_GOTPC64": true,
+ "R_X86_64_GOTPCREL": true,
+ "R_X86_64_GOTPCREL64": true,
+ "R_X86_64_GOTPCRELX": true,
+ "R_X86_64_GOTPLT64": true,
+ "R_X86_64_GOTTPOFF": true,
+ "R_X86_64_IRELATIVE": true,
+ "R_X86_64_JMP_SLOT": true,
+ "R_X86_64_NONE": true,
+ "R_X86_64_PC16": true,
+ "R_X86_64_PC32": true,
+ "R_X86_64_PC32_BND": true,
+ "R_X86_64_PC64": true,
+ "R_X86_64_PC8": true,
+ "R_X86_64_PLT32": true,
+ "R_X86_64_PLT32_BND": true,
+ "R_X86_64_PLTOFF64": true,
+ "R_X86_64_RELATIVE": true,
+ "R_X86_64_RELATIVE64": true,
+ "R_X86_64_REX_GOTPCRELX": true,
+ "R_X86_64_SIZE32": true,
+ "R_X86_64_SIZE64": true,
+ "R_X86_64_TLSDESC": true,
+ "R_X86_64_TLSDESC_CALL": true,
+ "R_X86_64_TLSGD": true,
+ "R_X86_64_TLSLD": true,
+ "R_X86_64_TPOFF32": true,
+ "R_X86_64_TPOFF64": true,
+ "Rel32": true,
+ "Rel64": true,
+ "Rela32": true,
+ "Rela64": true,
+ "SHF_ALLOC": true,
+ "SHF_COMPRESSED": true,
+ "SHF_EXECINSTR": true,
+ "SHF_GROUP": true,
+ "SHF_INFO_LINK": true,
+ "SHF_LINK_ORDER": true,
+ "SHF_MASKOS": true,
+ "SHF_MASKPROC": true,
+ "SHF_MERGE": true,
+ "SHF_OS_NONCONFORMING": true,
+ "SHF_STRINGS": true,
+ "SHF_TLS": true,
+ "SHF_WRITE": true,
+ "SHN_ABS": true,
+ "SHN_COMMON": true,
+ "SHN_HIOS": true,
+ "SHN_HIPROC": true,
+ "SHN_HIRESERVE": true,
+ "SHN_LOOS": true,
+ "SHN_LOPROC": true,
+ "SHN_LORESERVE": true,
+ "SHN_UNDEF": true,
+ "SHN_XINDEX": true,
+ "SHT_DYNAMIC": true,
+ "SHT_DYNSYM": true,
+ "SHT_FINI_ARRAY": true,
+ "SHT_GNU_ATTRIBUTES": true,
+ "SHT_GNU_HASH": true,
+ "SHT_GNU_LIBLIST": true,
+ "SHT_GNU_VERDEF": true,
+ "SHT_GNU_VERNEED": true,
+ "SHT_GNU_VERSYM": true,
+ "SHT_GROUP": true,
+ "SHT_HASH": true,
+ "SHT_HIOS": true,
+ "SHT_HIPROC": true,
+ "SHT_HIUSER": true,
+ "SHT_INIT_ARRAY": true,
+ "SHT_LOOS": true,
+ "SHT_LOPROC": true,
+ "SHT_LOUSER": true,
+ "SHT_NOBITS": true,
+ "SHT_NOTE": true,
+ "SHT_NULL": true,
+ "SHT_PREINIT_ARRAY": true,
+ "SHT_PROGBITS": true,
+ "SHT_REL": true,
+ "SHT_RELA": true,
+ "SHT_SHLIB": true,
+ "SHT_STRTAB": true,
+ "SHT_SYMTAB": true,
+ "SHT_SYMTAB_SHNDX": true,
+ "STB_GLOBAL": true,
+ "STB_HIOS": true,
+ "STB_HIPROC": true,
+ "STB_LOCAL": true,
+ "STB_LOOS": true,
+ "STB_LOPROC": true,
+ "STB_WEAK": true,
+ "STT_COMMON": true,
+ "STT_FILE": true,
+ "STT_FUNC": true,
+ "STT_HIOS": true,
+ "STT_HIPROC": true,
+ "STT_LOOS": true,
+ "STT_LOPROC": true,
+ "STT_NOTYPE": true,
+ "STT_OBJECT": true,
+ "STT_SECTION": true,
+ "STT_TLS": true,
+ "STV_DEFAULT": true,
+ "STV_HIDDEN": true,
+ "STV_INTERNAL": true,
+ "STV_PROTECTED": true,
+ "ST_BIND": true,
+ "ST_INFO": true,
+ "ST_TYPE": true,
+ "ST_VISIBILITY": true,
+ "Section": true,
+ "Section32": true,
+ "Section64": true,
+ "SectionFlag": true,
+ "SectionHeader": true,
+ "SectionIndex": true,
+ "SectionType": true,
+ "Sym32": true,
+ "Sym32Size": true,
+ "Sym64": true,
+ "Sym64Size": true,
+ "SymBind": true,
+ "SymType": true,
+ "SymVis": true,
+ "Symbol": true,
+ "Type": true,
+ "Version": true,
+ },
+ "debug/gosym": map[string]bool{
+ "DecodingError": true,
+ "Func": true,
+ "LineTable": true,
+ "NewLineTable": true,
+ "NewTable": true,
+ "Obj": true,
+ "Sym": true,
+ "Table": true,
+ "UnknownFileError": true,
+ "UnknownLineError": true,
+ },
+ "debug/macho": map[string]bool{
+ "ARM64_RELOC_ADDEND": true,
+ "ARM64_RELOC_BRANCH26": true,
+ "ARM64_RELOC_GOT_LOAD_PAGE21": true,
+ "ARM64_RELOC_GOT_LOAD_PAGEOFF12": true,
+ "ARM64_RELOC_PAGE21": true,
+ "ARM64_RELOC_PAGEOFF12": true,
+ "ARM64_RELOC_POINTER_TO_GOT": true,
+ "ARM64_RELOC_SUBTRACTOR": true,
+ "ARM64_RELOC_TLVP_LOAD_PAGE21": true,
+ "ARM64_RELOC_TLVP_LOAD_PAGEOFF12": true,
+ "ARM64_RELOC_UNSIGNED": true,
+ "ARM_RELOC_BR24": true,
+ "ARM_RELOC_HALF": true,
+ "ARM_RELOC_HALF_SECTDIFF": true,
+ "ARM_RELOC_LOCAL_SECTDIFF": true,
+ "ARM_RELOC_PAIR": true,
+ "ARM_RELOC_PB_LA_PTR": true,
+ "ARM_RELOC_SECTDIFF": true,
+ "ARM_RELOC_VANILLA": true,
+ "ARM_THUMB_32BIT_BRANCH": true,
+ "ARM_THUMB_RELOC_BR22": true,
+ "Cpu": true,
+ "Cpu386": true,
+ "CpuAmd64": true,
+ "CpuArm": true,
+ "CpuArm64": true,
+ "CpuPpc": true,
+ "CpuPpc64": true,
+ "Dylib": true,
+ "DylibCmd": true,
+ "Dysymtab": true,
+ "DysymtabCmd": true,
+ "ErrNotFat": true,
+ "FatArch": true,
+ "FatArchHeader": true,
+ "FatFile": true,
+ "File": true,
+ "FileHeader": true,
+ "FlagAllModsBound": true,
+ "FlagAllowStackExecution": true,
+ "FlagAppExtensionSafe": true,
+ "FlagBindAtLoad": true,
+ "FlagBindsToWeak": true,
+ "FlagCanonical": true,
+ "FlagDeadStrippableDylib": true,
+ "FlagDyldLink": true,
+ "FlagForceFlat": true,
+ "FlagHasTLVDescriptors": true,
+ "FlagIncrLink": true,
+ "FlagLazyInit": true,
+ "FlagNoFixPrebinding": true,
+ "FlagNoHeapExecution": true,
+ "FlagNoMultiDefs": true,
+ "FlagNoReexportedDylibs": true,
+ "FlagNoUndefs": true,
+ "FlagPIE": true,
+ "FlagPrebindable": true,
+ "FlagPrebound": true,
+ "FlagRootSafe": true,
+ "FlagSetuidSafe": true,
+ "FlagSplitSegs": true,
+ "FlagSubsectionsViaSymbols": true,
+ "FlagTwoLevel": true,
+ "FlagWeakDefines": true,
+ "FormatError": true,
+ "GENERIC_RELOC_LOCAL_SECTDIFF": true,
+ "GENERIC_RELOC_PAIR": true,
+ "GENERIC_RELOC_PB_LA_PTR": true,
+ "GENERIC_RELOC_SECTDIFF": true,
+ "GENERIC_RELOC_TLV": true,
+ "GENERIC_RELOC_VANILLA": true,
+ "Load": true,
+ "LoadBytes": true,
+ "LoadCmd": true,
+ "LoadCmdDylib": true,
+ "LoadCmdDylinker": true,
+ "LoadCmdDysymtab": true,
+ "LoadCmdRpath": true,
+ "LoadCmdSegment": true,
+ "LoadCmdSegment64": true,
+ "LoadCmdSymtab": true,
+ "LoadCmdThread": true,
+ "LoadCmdUnixThread": true,
+ "Magic32": true,
+ "Magic64": true,
+ "MagicFat": true,
+ "NewFatFile": true,
+ "NewFile": true,
+ "Nlist32": true,
+ "Nlist64": true,
+ "Open": true,
+ "OpenFat": true,
+ "Regs386": true,
+ "RegsAMD64": true,
+ "Reloc": true,
+ "RelocTypeARM": true,
+ "RelocTypeARM64": true,
+ "RelocTypeGeneric": true,
+ "RelocTypeX86_64": true,
+ "Rpath": true,
+ "RpathCmd": true,
+ "Section": true,
+ "Section32": true,
+ "Section64": true,
+ "SectionHeader": true,
+ "Segment": true,
+ "Segment32": true,
+ "Segment64": true,
+ "SegmentHeader": true,
+ "Symbol": true,
+ "Symtab": true,
+ "SymtabCmd": true,
+ "Thread": true,
+ "Type": true,
+ "TypeBundle": true,
+ "TypeDylib": true,
+ "TypeExec": true,
+ "TypeObj": true,
+ "X86_64_RELOC_BRANCH": true,
+ "X86_64_RELOC_GOT": true,
+ "X86_64_RELOC_GOT_LOAD": true,
+ "X86_64_RELOC_SIGNED": true,
+ "X86_64_RELOC_SIGNED_1": true,
+ "X86_64_RELOC_SIGNED_2": true,
+ "X86_64_RELOC_SIGNED_4": true,
+ "X86_64_RELOC_SUBTRACTOR": true,
+ "X86_64_RELOC_TLV": true,
+ "X86_64_RELOC_UNSIGNED": true,
+ },
+ "debug/pe": map[string]bool{
+ "COFFSymbol": true,
+ "COFFSymbolSize": true,
+ "DataDirectory": true,
+ "File": true,
+ "FileHeader": true,
+ "FormatError": true,
+ "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE": true,
+ "IMAGE_DIRECTORY_ENTRY_BASERELOC": true,
+ "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR": true,
+ "IMAGE_DIRECTORY_ENTRY_DEBUG": true,
+ "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_EXCEPTION": true,
+ "IMAGE_DIRECTORY_ENTRY_EXPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_GLOBALPTR": true,
+ "IMAGE_DIRECTORY_ENTRY_IAT": true,
+ "IMAGE_DIRECTORY_ENTRY_IMPORT": true,
+ "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG": true,
+ "IMAGE_DIRECTORY_ENTRY_RESOURCE": true,
+ "IMAGE_DIRECTORY_ENTRY_SECURITY": true,
+ "IMAGE_DIRECTORY_ENTRY_TLS": true,
+ "IMAGE_FILE_MACHINE_AM33": true,
+ "IMAGE_FILE_MACHINE_AMD64": true,
+ "IMAGE_FILE_MACHINE_ARM": true,
+ "IMAGE_FILE_MACHINE_ARM64": true,
+ "IMAGE_FILE_MACHINE_ARMNT": true,
+ "IMAGE_FILE_MACHINE_EBC": true,
+ "IMAGE_FILE_MACHINE_I386": true,
+ "IMAGE_FILE_MACHINE_IA64": true,
+ "IMAGE_FILE_MACHINE_M32R": true,
+ "IMAGE_FILE_MACHINE_MIPS16": true,
+ "IMAGE_FILE_MACHINE_MIPSFPU": true,
+ "IMAGE_FILE_MACHINE_MIPSFPU16": true,
+ "IMAGE_FILE_MACHINE_POWERPC": true,
+ "IMAGE_FILE_MACHINE_POWERPCFP": true,
+ "IMAGE_FILE_MACHINE_R4000": true,
+ "IMAGE_FILE_MACHINE_SH3": true,
+ "IMAGE_FILE_MACHINE_SH3DSP": true,
+ "IMAGE_FILE_MACHINE_SH4": true,
+ "IMAGE_FILE_MACHINE_SH5": true,
+ "IMAGE_FILE_MACHINE_THUMB": true,
+ "IMAGE_FILE_MACHINE_UNKNOWN": true,
+ "IMAGE_FILE_MACHINE_WCEMIPSV2": true,
+ "ImportDirectory": true,
+ "NewFile": true,
+ "Open": true,
+ "OptionalHeader32": true,
+ "OptionalHeader64": true,
+ "Reloc": true,
+ "Section": true,
+ "SectionHeader": true,
+ "SectionHeader32": true,
+ "StringTable": true,
+ "Symbol": true,
+ },
+ "debug/plan9obj": map[string]bool{
+ "File": true,
+ "FileHeader": true,
+ "Magic386": true,
+ "Magic64": true,
+ "MagicAMD64": true,
+ "MagicARM": true,
+ "NewFile": true,
+ "Open": true,
+ "Section": true,
+ "SectionHeader": true,
+ "Sym": true,
+ },
+ "encoding": map[string]bool{
+ "BinaryMarshaler": true,
+ "BinaryUnmarshaler": true,
+ "TextMarshaler": true,
+ "TextUnmarshaler": true,
+ },
+ "encoding/ascii85": map[string]bool{
+ "CorruptInputError": true,
+ "Decode": true,
+ "Encode": true,
+ "MaxEncodedLen": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ },
+ "encoding/asn1": map[string]bool{
+ "BitString": true,
+ "ClassApplication": true,
+ "ClassContextSpecific": true,
+ "ClassPrivate": true,
+ "ClassUniversal": true,
+ "Enumerated": true,
+ "Flag": true,
+ "Marshal": true,
+ "MarshalWithParams": true,
+ "NullBytes": true,
+ "NullRawValue": true,
+ "ObjectIdentifier": true,
+ "RawContent": true,
+ "RawValue": true,
+ "StructuralError": true,
+ "SyntaxError": true,
+ "TagBitString": true,
+ "TagBoolean": true,
+ "TagEnum": true,
+ "TagGeneralString": true,
+ "TagGeneralizedTime": true,
+ "TagIA5String": true,
+ "TagInteger": true,
+ "TagNull": true,
+ "TagNumericString": true,
+ "TagOID": true,
+ "TagOctetString": true,
+ "TagPrintableString": true,
+ "TagSequence": true,
+ "TagSet": true,
+ "TagT61String": true,
+ "TagUTCTime": true,
+ "TagUTF8String": true,
+ "Unmarshal": true,
+ "UnmarshalWithParams": true,
+ },
+ "encoding/base32": map[string]bool{
+ "CorruptInputError": true,
+ "Encoding": true,
+ "HexEncoding": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "NewEncoding": true,
+ "NoPadding": true,
+ "StdEncoding": true,
+ "StdPadding": true,
+ },
+ "encoding/base64": map[string]bool{
+ "CorruptInputError": true,
+ "Encoding": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "NewEncoding": true,
+ "NoPadding": true,
+ "RawStdEncoding": true,
+ "RawURLEncoding": true,
+ "StdEncoding": true,
+ "StdPadding": true,
+ "URLEncoding": true,
+ },
+ "encoding/binary": map[string]bool{
+ "BigEndian": true,
+ "ByteOrder": true,
+ "LittleEndian": true,
+ "MaxVarintLen16": true,
+ "MaxVarintLen32": true,
+ "MaxVarintLen64": true,
+ "PutUvarint": true,
+ "PutVarint": true,
+ "Read": true,
+ "ReadUvarint": true,
+ "ReadVarint": true,
+ "Size": true,
+ "Uvarint": true,
+ "Varint": true,
+ "Write": true,
+ },
+ "encoding/csv": map[string]bool{
+ "ErrBareQuote": true,
+ "ErrFieldCount": true,
+ "ErrQuote": true,
+ "ErrTrailingComma": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "ParseError": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "encoding/gob": map[string]bool{
+ "CommonType": true,
+ "Decoder": true,
+ "Encoder": true,
+ "GobDecoder": true,
+ "GobEncoder": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "Register": true,
+ "RegisterName": true,
+ },
+ "encoding/hex": map[string]bool{
+ "Decode": true,
+ "DecodeString": true,
+ "DecodedLen": true,
+ "Dump": true,
+ "Dumper": true,
+ "Encode": true,
+ "EncodeToString": true,
+ "EncodedLen": true,
+ "ErrLength": true,
+ "InvalidByteError": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ },
+ "encoding/json": map[string]bool{
+ "Compact": true,
+ "Decoder": true,
+ "Delim": true,
+ "Encoder": true,
+ "HTMLEscape": true,
+ "Indent": true,
+ "InvalidUTF8Error": true,
+ "InvalidUnmarshalError": true,
+ "Marshal": true,
+ "MarshalIndent": true,
+ "Marshaler": true,
+ "MarshalerError": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "Number": true,
+ "RawMessage": true,
+ "SyntaxError": true,
+ "Token": true,
+ "Unmarshal": true,
+ "UnmarshalFieldError": true,
+ "UnmarshalTypeError": true,
+ "Unmarshaler": true,
+ "UnsupportedTypeError": true,
+ "UnsupportedValueError": true,
+ "Valid": true,
+ },
+ "encoding/pem": map[string]bool{
+ "Block": true,
+ "Decode": true,
+ "Encode": true,
+ "EncodeToMemory": true,
+ },
+ "encoding/xml": map[string]bool{
+ "Attr": true,
+ "CharData": true,
+ "Comment": true,
+ "CopyToken": true,
+ "Decoder": true,
+ "Directive": true,
+ "Encoder": true,
+ "EndElement": true,
+ "Escape": true,
+ "EscapeText": true,
+ "HTMLAutoClose": true,
+ "HTMLEntity": true,
+ "Header": true,
+ "Marshal": true,
+ "MarshalIndent": true,
+ "Marshaler": true,
+ "MarshalerAttr": true,
+ "Name": true,
+ "NewDecoder": true,
+ "NewEncoder": true,
+ "NewTokenDecoder": true,
+ "ProcInst": true,
+ "StartElement": true,
+ "SyntaxError": true,
+ "TagPathError": true,
+ "Token": true,
+ "TokenReader": true,
+ "Unmarshal": true,
+ "UnmarshalError": true,
+ "Unmarshaler": true,
+ "UnmarshalerAttr": true,
+ "UnsupportedTypeError": true,
+ },
+ "errors": map[string]bool{
+ "New": true,
+ },
+ "expvar": map[string]bool{
+ "Do": true,
+ "Float": true,
+ "Func": true,
+ "Get": true,
+ "Handler": true,
+ "Int": true,
+ "KeyValue": true,
+ "Map": true,
+ "NewFloat": true,
+ "NewInt": true,
+ "NewMap": true,
+ "NewString": true,
+ "Publish": true,
+ "String": true,
+ "Var": true,
+ },
+ "flag": map[string]bool{
+ "Arg": true,
+ "Args": true,
+ "Bool": true,
+ "BoolVar": true,
+ "CommandLine": true,
+ "ContinueOnError": true,
+ "Duration": true,
+ "DurationVar": true,
+ "ErrHelp": true,
+ "ErrorHandling": true,
+ "ExitOnError": true,
+ "Flag": true,
+ "FlagSet": true,
+ "Float64": true,
+ "Float64Var": true,
+ "Getter": true,
+ "Int": true,
+ "Int64": true,
+ "Int64Var": true,
+ "IntVar": true,
+ "Lookup": true,
+ "NArg": true,
+ "NFlag": true,
+ "NewFlagSet": true,
+ "PanicOnError": true,
+ "Parse": true,
+ "Parsed": true,
+ "PrintDefaults": true,
+ "Set": true,
+ "String": true,
+ "StringVar": true,
+ "Uint": true,
+ "Uint64": true,
+ "Uint64Var": true,
+ "UintVar": true,
+ "UnquoteUsage": true,
+ "Usage": true,
+ "Value": true,
+ "Var": true,
+ "Visit": true,
+ "VisitAll": true,
+ },
+ "fmt": map[string]bool{
+ "Errorf": true,
+ "Formatter": true,
+ "Fprint": true,
+ "Fprintf": true,
+ "Fprintln": true,
+ "Fscan": true,
+ "Fscanf": true,
+ "Fscanln": true,
+ "GoStringer": true,
+ "Print": true,
+ "Printf": true,
+ "Println": true,
+ "Scan": true,
+ "ScanState": true,
+ "Scanf": true,
+ "Scanln": true,
+ "Scanner": true,
+ "Sprint": true,
+ "Sprintf": true,
+ "Sprintln": true,
+ "Sscan": true,
+ "Sscanf": true,
+ "Sscanln": true,
+ "State": true,
+ "Stringer": true,
+ },
+ "go/ast": map[string]bool{
+ "ArrayType": true,
+ "AssignStmt": true,
+ "Bad": true,
+ "BadDecl": true,
+ "BadExpr": true,
+ "BadStmt": true,
+ "BasicLit": true,
+ "BinaryExpr": true,
+ "BlockStmt": true,
+ "BranchStmt": true,
+ "CallExpr": true,
+ "CaseClause": true,
+ "ChanDir": true,
+ "ChanType": true,
+ "CommClause": true,
+ "Comment": true,
+ "CommentGroup": true,
+ "CommentMap": true,
+ "CompositeLit": true,
+ "Con": true,
+ "DeclStmt": true,
+ "DeferStmt": true,
+ "Ellipsis": true,
+ "EmptyStmt": true,
+ "ExprStmt": true,
+ "Field": true,
+ "FieldFilter": true,
+ "FieldList": true,
+ "File": true,
+ "FileExports": true,
+ "Filter": true,
+ "FilterDecl": true,
+ "FilterFile": true,
+ "FilterFuncDuplicates": true,
+ "FilterImportDuplicates": true,
+ "FilterPackage": true,
+ "FilterUnassociatedComments": true,
+ "ForStmt": true,
+ "Fprint": true,
+ "Fun": true,
+ "FuncDecl": true,
+ "FuncLit": true,
+ "FuncType": true,
+ "GenDecl": true,
+ "GoStmt": true,
+ "Ident": true,
+ "IfStmt": true,
+ "ImportSpec": true,
+ "Importer": true,
+ "IncDecStmt": true,
+ "IndexExpr": true,
+ "Inspect": true,
+ "InterfaceType": true,
+ "IsExported": true,
+ "KeyValueExpr": true,
+ "LabeledStmt": true,
+ "Lbl": true,
+ "MapType": true,
+ "MergeMode": true,
+ "MergePackageFiles": true,
+ "NewCommentMap": true,
+ "NewIdent": true,
+ "NewObj": true,
+ "NewPackage": true,
+ "NewScope": true,
+ "Node": true,
+ "NotNilFilter": true,
+ "ObjKind": true,
+ "Object": true,
+ "Package": true,
+ "PackageExports": true,
+ "ParenExpr": true,
+ "Pkg": true,
+ "Print": true,
+ "RECV": true,
+ "RangeStmt": true,
+ "ReturnStmt": true,
+ "SEND": true,
+ "Scope": true,
+ "SelectStmt": true,
+ "SelectorExpr": true,
+ "SendStmt": true,
+ "SliceExpr": true,
+ "SortImports": true,
+ "StarExpr": true,
+ "StructType": true,
+ "SwitchStmt": true,
+ "Typ": true,
+ "TypeAssertExpr": true,
+ "TypeSpec": true,
+ "TypeSwitchStmt": true,
+ "UnaryExpr": true,
+ "ValueSpec": true,
+ "Var": true,
+ "Visitor": true,
+ "Walk": true,
+ },
+ "go/build": map[string]bool{
+ "AllowBinary": true,
+ "ArchChar": true,
+ "Context": true,
+ "Default": true,
+ "FindOnly": true,
+ "IgnoreVendor": true,
+ "Import": true,
+ "ImportComment": true,
+ "ImportDir": true,
+ "ImportMode": true,
+ "IsLocalImport": true,
+ "MultiplePackageError": true,
+ "NoGoError": true,
+ "Package": true,
+ "ToolDir": true,
+ },
+ "go/constant": map[string]bool{
+ "BinaryOp": true,
+ "BitLen": true,
+ "Bool": true,
+ "BoolVal": true,
+ "Bytes": true,
+ "Compare": true,
+ "Complex": true,
+ "Denom": true,
+ "Float": true,
+ "Float32Val": true,
+ "Float64Val": true,
+ "Imag": true,
+ "Int": true,
+ "Int64Val": true,
+ "Kind": true,
+ "MakeBool": true,
+ "MakeFloat64": true,
+ "MakeFromBytes": true,
+ "MakeFromLiteral": true,
+ "MakeImag": true,
+ "MakeInt64": true,
+ "MakeString": true,
+ "MakeUint64": true,
+ "MakeUnknown": true,
+ "Num": true,
+ "Real": true,
+ "Shift": true,
+ "Sign": true,
+ "String": true,
+ "StringVal": true,
+ "ToComplex": true,
+ "ToFloat": true,
+ "ToInt": true,
+ "Uint64Val": true,
+ "UnaryOp": true,
+ "Unknown": true,
+ },
+ "go/doc": map[string]bool{
+ "AllDecls": true,
+ "AllMethods": true,
+ "Example": true,
+ "Examples": true,
+ "Filter": true,
+ "Func": true,
+ "IllegalPrefixes": true,
+ "IsPredeclared": true,
+ "Mode": true,
+ "New": true,
+ "Note": true,
+ "Package": true,
+ "PreserveAST": true,
+ "Synopsis": true,
+ "ToHTML": true,
+ "ToText": true,
+ "Type": true,
+ "Value": true,
+ },
+ "go/format": map[string]bool{
+ "Node": true,
+ "Source": true,
+ },
+ "go/importer": map[string]bool{
+ "Default": true,
+ "For": true,
+ "ForCompiler": true,
+ "Lookup": true,
+ },
+ "go/parser": map[string]bool{
+ "AllErrors": true,
+ "DeclarationErrors": true,
+ "ImportsOnly": true,
+ "Mode": true,
+ "PackageClauseOnly": true,
+ "ParseComments": true,
+ "ParseDir": true,
+ "ParseExpr": true,
+ "ParseExprFrom": true,
+ "ParseFile": true,
+ "SpuriousErrors": true,
+ "Trace": true,
+ },
+ "go/printer": map[string]bool{
+ "CommentedNode": true,
+ "Config": true,
+ "Fprint": true,
+ "Mode": true,
+ "RawFormat": true,
+ "SourcePos": true,
+ "TabIndent": true,
+ "UseSpaces": true,
+ },
+ "go/scanner": map[string]bool{
+ "Error": true,
+ "ErrorHandler": true,
+ "ErrorList": true,
+ "Mode": true,
+ "PrintError": true,
+ "ScanComments": true,
+ "Scanner": true,
+ },
+ "go/token": map[string]bool{
+ "ADD": true,
+ "ADD_ASSIGN": true,
+ "AND": true,
+ "AND_ASSIGN": true,
+ "AND_NOT": true,
+ "AND_NOT_ASSIGN": true,
+ "ARROW": true,
+ "ASSIGN": true,
+ "BREAK": true,
+ "CASE": true,
+ "CHAN": true,
+ "CHAR": true,
+ "COLON": true,
+ "COMMA": true,
+ "COMMENT": true,
+ "CONST": true,
+ "CONTINUE": true,
+ "DEC": true,
+ "DEFAULT": true,
+ "DEFER": true,
+ "DEFINE": true,
+ "ELLIPSIS": true,
+ "ELSE": true,
+ "EOF": true,
+ "EQL": true,
+ "FALLTHROUGH": true,
+ "FLOAT": true,
+ "FOR": true,
+ "FUNC": true,
+ "File": true,
+ "FileSet": true,
+ "GEQ": true,
+ "GO": true,
+ "GOTO": true,
+ "GTR": true,
+ "HighestPrec": true,
+ "IDENT": true,
+ "IF": true,
+ "ILLEGAL": true,
+ "IMAG": true,
+ "IMPORT": true,
+ "INC": true,
+ "INT": true,
+ "INTERFACE": true,
+ "LAND": true,
+ "LBRACE": true,
+ "LBRACK": true,
+ "LEQ": true,
+ "LOR": true,
+ "LPAREN": true,
+ "LSS": true,
+ "Lookup": true,
+ "LowestPrec": true,
+ "MAP": true,
+ "MUL": true,
+ "MUL_ASSIGN": true,
+ "NEQ": true,
+ "NOT": true,
+ "NewFileSet": true,
+ "NoPos": true,
+ "OR": true,
+ "OR_ASSIGN": true,
+ "PACKAGE": true,
+ "PERIOD": true,
+ "Pos": true,
+ "Position": true,
+ "QUO": true,
+ "QUO_ASSIGN": true,
+ "RANGE": true,
+ "RBRACE": true,
+ "RBRACK": true,
+ "REM": true,
+ "REM_ASSIGN": true,
+ "RETURN": true,
+ "RPAREN": true,
+ "SELECT": true,
+ "SEMICOLON": true,
+ "SHL": true,
+ "SHL_ASSIGN": true,
+ "SHR": true,
+ "SHR_ASSIGN": true,
+ "STRING": true,
+ "STRUCT": true,
+ "SUB": true,
+ "SUB_ASSIGN": true,
+ "SWITCH": true,
+ "TYPE": true,
+ "Token": true,
+ "UnaryPrec": true,
+ "VAR": true,
+ "XOR": true,
+ "XOR_ASSIGN": true,
+ },
+ "go/types": map[string]bool{
+ "Array": true,
+ "AssertableTo": true,
+ "AssignableTo": true,
+ "Basic": true,
+ "BasicInfo": true,
+ "BasicKind": true,
+ "Bool": true,
+ "Builtin": true,
+ "Byte": true,
+ "Chan": true,
+ "ChanDir": true,
+ "Checker": true,
+ "Comparable": true,
+ "Complex128": true,
+ "Complex64": true,
+ "Config": true,
+ "Const": true,
+ "ConvertibleTo": true,
+ "DefPredeclaredTestFuncs": true,
+ "Default": true,
+ "Error": true,
+ "Eval": true,
+ "ExprString": true,
+ "FieldVal": true,
+ "Float32": true,
+ "Float64": true,
+ "Func": true,
+ "Id": true,
+ "Identical": true,
+ "IdenticalIgnoreTags": true,
+ "Implements": true,
+ "ImportMode": true,
+ "Importer": true,
+ "ImporterFrom": true,
+ "Info": true,
+ "Initializer": true,
+ "Int": true,
+ "Int16": true,
+ "Int32": true,
+ "Int64": true,
+ "Int8": true,
+ "Interface": true,
+ "Invalid": true,
+ "IsBoolean": true,
+ "IsComplex": true,
+ "IsConstType": true,
+ "IsFloat": true,
+ "IsInteger": true,
+ "IsInterface": true,
+ "IsNumeric": true,
+ "IsOrdered": true,
+ "IsString": true,
+ "IsUnsigned": true,
+ "IsUntyped": true,
+ "Label": true,
+ "LookupFieldOrMethod": true,
+ "Map": true,
+ "MethodExpr": true,
+ "MethodSet": true,
+ "MethodVal": true,
+ "MissingMethod": true,
+ "Named": true,
+ "NewArray": true,
+ "NewChan": true,
+ "NewChecker": true,
+ "NewConst": true,
+ "NewField": true,
+ "NewFunc": true,
+ "NewInterface": true,
+ "NewInterfaceType": true,
+ "NewLabel": true,
+ "NewMap": true,
+ "NewMethodSet": true,
+ "NewNamed": true,
+ "NewPackage": true,
+ "NewParam": true,
+ "NewPkgName": true,
+ "NewPointer": true,
+ "NewScope": true,
+ "NewSignature": true,
+ "NewSlice": true,
+ "NewStruct": true,
+ "NewTuple": true,
+ "NewTypeName": true,
+ "NewVar": true,
+ "Nil": true,
+ "ObjectString": true,
+ "Package": true,
+ "PkgName": true,
+ "Pointer": true,
+ "Qualifier": true,
+ "RecvOnly": true,
+ "RelativeTo": true,
+ "Rune": true,
+ "Scope": true,
+ "Selection": true,
+ "SelectionKind": true,
+ "SelectionString": true,
+ "SendOnly": true,
+ "SendRecv": true,
+ "Signature": true,
+ "Sizes": true,
+ "SizesFor": true,
+ "Slice": true,
+ "StdSizes": true,
+ "String": true,
+ "Struct": true,
+ "Tuple": true,
+ "Typ": true,
+ "Type": true,
+ "TypeAndValue": true,
+ "TypeName": true,
+ "TypeString": true,
+ "Uint": true,
+ "Uint16": true,
+ "Uint32": true,
+ "Uint64": true,
+ "Uint8": true,
+ "Uintptr": true,
+ "Universe": true,
+ "Unsafe": true,
+ "UnsafePointer": true,
+ "UntypedBool": true,
+ "UntypedComplex": true,
+ "UntypedFloat": true,
+ "UntypedInt": true,
+ "UntypedNil": true,
+ "UntypedRune": true,
+ "UntypedString": true,
+ "Var": true,
+ "WriteExpr": true,
+ "WriteSignature": true,
+ "WriteType": true,
+ },
+ "hash": map[string]bool{
+ "Hash": true,
+ "Hash32": true,
+ "Hash64": true,
+ },
+ "hash/adler32": map[string]bool{
+ "Checksum": true,
+ "New": true,
+ "Size": true,
+ },
+ "hash/crc32": map[string]bool{
+ "Castagnoli": true,
+ "Checksum": true,
+ "ChecksumIEEE": true,
+ "IEEE": true,
+ "IEEETable": true,
+ "Koopman": true,
+ "MakeTable": true,
+ "New": true,
+ "NewIEEE": true,
+ "Size": true,
+ "Table": true,
+ "Update": true,
+ },
+ "hash/crc64": map[string]bool{
+ "Checksum": true,
+ "ECMA": true,
+ "ISO": true,
+ "MakeTable": true,
+ "New": true,
+ "Size": true,
+ "Table": true,
+ "Update": true,
+ },
+ "hash/fnv": map[string]bool{
+ "New128": true,
+ "New128a": true,
+ "New32": true,
+ "New32a": true,
+ "New64": true,
+ "New64a": true,
+ },
+ "html": map[string]bool{
+ "EscapeString": true,
+ "UnescapeString": true,
+ },
+ "html/template": map[string]bool{
+ "CSS": true,
+ "ErrAmbigContext": true,
+ "ErrBadHTML": true,
+ "ErrBranchEnd": true,
+ "ErrEndContext": true,
+ "ErrNoSuchTemplate": true,
+ "ErrOutputContext": true,
+ "ErrPartialCharset": true,
+ "ErrPartialEscape": true,
+ "ErrPredefinedEscaper": true,
+ "ErrRangeLoopReentry": true,
+ "ErrSlashAmbig": true,
+ "Error": true,
+ "ErrorCode": true,
+ "FuncMap": true,
+ "HTML": true,
+ "HTMLAttr": true,
+ "HTMLEscape": true,
+ "HTMLEscapeString": true,
+ "HTMLEscaper": true,
+ "IsTrue": true,
+ "JS": true,
+ "JSEscape": true,
+ "JSEscapeString": true,
+ "JSEscaper": true,
+ "JSStr": true,
+ "Must": true,
+ "New": true,
+ "OK": true,
+ "ParseFiles": true,
+ "ParseGlob": true,
+ "Srcset": true,
+ "Template": true,
+ "URL": true,
+ "URLQueryEscaper": true,
+ },
+ "image": map[string]bool{
+ "Alpha": true,
+ "Alpha16": true,
+ "Black": true,
+ "CMYK": true,
+ "Config": true,
+ "Decode": true,
+ "DecodeConfig": true,
+ "ErrFormat": true,
+ "Gray": true,
+ "Gray16": true,
+ "Image": true,
+ "NRGBA": true,
+ "NRGBA64": true,
+ "NYCbCrA": true,
+ "NewAlpha": true,
+ "NewAlpha16": true,
+ "NewCMYK": true,
+ "NewGray": true,
+ "NewGray16": true,
+ "NewNRGBA": true,
+ "NewNRGBA64": true,
+ "NewNYCbCrA": true,
+ "NewPaletted": true,
+ "NewRGBA": true,
+ "NewRGBA64": true,
+ "NewUniform": true,
+ "NewYCbCr": true,
+ "Opaque": true,
+ "Paletted": true,
+ "PalettedImage": true,
+ "Point": true,
+ "Pt": true,
+ "RGBA": true,
+ "RGBA64": true,
+ "Rect": true,
+ "Rectangle": true,
+ "RegisterFormat": true,
+ "Transparent": true,
+ "Uniform": true,
+ "White": true,
+ "YCbCr": true,
+ "YCbCrSubsampleRatio": true,
+ "YCbCrSubsampleRatio410": true,
+ "YCbCrSubsampleRatio411": true,
+ "YCbCrSubsampleRatio420": true,
+ "YCbCrSubsampleRatio422": true,
+ "YCbCrSubsampleRatio440": true,
+ "YCbCrSubsampleRatio444": true,
+ "ZP": true,
+ "ZR": true,
+ },
+ "image/color": map[string]bool{
+ "Alpha": true,
+ "Alpha16": true,
+ "Alpha16Model": true,
+ "AlphaModel": true,
+ "Black": true,
+ "CMYK": true,
+ "CMYKModel": true,
+ "CMYKToRGB": true,
+ "Color": true,
+ "Gray": true,
+ "Gray16": true,
+ "Gray16Model": true,
+ "GrayModel": true,
+ "Model": true,
+ "ModelFunc": true,
+ "NRGBA": true,
+ "NRGBA64": true,
+ "NRGBA64Model": true,
+ "NRGBAModel": true,
+ "NYCbCrA": true,
+ "NYCbCrAModel": true,
+ "Opaque": true,
+ "Palette": true,
+ "RGBA": true,
+ "RGBA64": true,
+ "RGBA64Model": true,
+ "RGBAModel": true,
+ "RGBToCMYK": true,
+ "RGBToYCbCr": true,
+ "Transparent": true,
+ "White": true,
+ "YCbCr": true,
+ "YCbCrModel": true,
+ "YCbCrToRGB": true,
+ },
+ "image/color/palette": map[string]bool{
+ "Plan9": true,
+ "WebSafe": true,
+ },
+ "image/draw": map[string]bool{
+ "Draw": true,
+ "DrawMask": true,
+ "Drawer": true,
+ "FloydSteinberg": true,
+ "Image": true,
+ "Op": true,
+ "Over": true,
+ "Quantizer": true,
+ "Src": true,
+ },
+ "image/gif": map[string]bool{
+ "Decode": true,
+ "DecodeAll": true,
+ "DecodeConfig": true,
+ "DisposalBackground": true,
+ "DisposalNone": true,
+ "DisposalPrevious": true,
+ "Encode": true,
+ "EncodeAll": true,
+ "GIF": true,
+ "Options": true,
+ },
+ "image/jpeg": map[string]bool{
+ "Decode": true,
+ "DecodeConfig": true,
+ "DefaultQuality": true,
+ "Encode": true,
+ "FormatError": true,
+ "Options": true,
+ "Reader": true,
+ "UnsupportedError": true,
+ },
+ "image/png": map[string]bool{
+ "BestCompression": true,
+ "BestSpeed": true,
+ "CompressionLevel": true,
+ "Decode": true,
+ "DecodeConfig": true,
+ "DefaultCompression": true,
+ "Encode": true,
+ "Encoder": true,
+ "EncoderBuffer": true,
+ "EncoderBufferPool": true,
+ "FormatError": true,
+ "NoCompression": true,
+ "UnsupportedError": true,
+ },
+ "index/suffixarray": map[string]bool{
+ "Index": true,
+ "New": true,
+ },
+ "io": map[string]bool{
+ "ByteReader": true,
+ "ByteScanner": true,
+ "ByteWriter": true,
+ "Closer": true,
+ "Copy": true,
+ "CopyBuffer": true,
+ "CopyN": true,
+ "EOF": true,
+ "ErrClosedPipe": true,
+ "ErrNoProgress": true,
+ "ErrShortBuffer": true,
+ "ErrShortWrite": true,
+ "ErrUnexpectedEOF": true,
+ "LimitReader": true,
+ "LimitedReader": true,
+ "MultiReader": true,
+ "MultiWriter": true,
+ "NewSectionReader": true,
+ "Pipe": true,
+ "PipeReader": true,
+ "PipeWriter": true,
+ "ReadAtLeast": true,
+ "ReadCloser": true,
+ "ReadFull": true,
+ "ReadSeeker": true,
+ "ReadWriteCloser": true,
+ "ReadWriteSeeker": true,
+ "ReadWriter": true,
+ "Reader": true,
+ "ReaderAt": true,
+ "ReaderFrom": true,
+ "RuneReader": true,
+ "RuneScanner": true,
+ "SectionReader": true,
+ "SeekCurrent": true,
+ "SeekEnd": true,
+ "SeekStart": true,
+ "Seeker": true,
+ "StringWriter": true,
+ "TeeReader": true,
+ "WriteCloser": true,
+ "WriteSeeker": true,
+ "WriteString": true,
+ "Writer": true,
+ "WriterAt": true,
+ "WriterTo": true,
+ },
+ "io/ioutil": map[string]bool{
+ "Discard": true,
+ "NopCloser": true,
+ "ReadAll": true,
+ "ReadDir": true,
+ "ReadFile": true,
+ "TempDir": true,
+ "TempFile": true,
+ "WriteFile": true,
+ },
+ "log": map[string]bool{
+ "Fatal": true,
+ "Fatalf": true,
+ "Fatalln": true,
+ "Flags": true,
+ "LUTC": true,
+ "Ldate": true,
+ "Llongfile": true,
+ "Lmicroseconds": true,
+ "Logger": true,
+ "Lshortfile": true,
+ "LstdFlags": true,
+ "Ltime": true,
+ "New": true,
+ "Output": true,
+ "Panic": true,
+ "Panicf": true,
+ "Panicln": true,
+ "Prefix": true,
+ "Print": true,
+ "Printf": true,
+ "Println": true,
+ "SetFlags": true,
+ "SetOutput": true,
+ "SetPrefix": true,
+ },
+ "log/syslog": map[string]bool{
+ "Dial": true,
+ "LOG_ALERT": true,
+ "LOG_AUTH": true,
+ "LOG_AUTHPRIV": true,
+ "LOG_CRIT": true,
+ "LOG_CRON": true,
+ "LOG_DAEMON": true,
+ "LOG_DEBUG": true,
+ "LOG_EMERG": true,
+ "LOG_ERR": true,
+ "LOG_FTP": true,
+ "LOG_INFO": true,
+ "LOG_KERN": true,
+ "LOG_LOCAL0": true,
+ "LOG_LOCAL1": true,
+ "LOG_LOCAL2": true,
+ "LOG_LOCAL3": true,
+ "LOG_LOCAL4": true,
+ "LOG_LOCAL5": true,
+ "LOG_LOCAL6": true,
+ "LOG_LOCAL7": true,
+ "LOG_LPR": true,
+ "LOG_MAIL": true,
+ "LOG_NEWS": true,
+ "LOG_NOTICE": true,
+ "LOG_SYSLOG": true,
+ "LOG_USER": true,
+ "LOG_UUCP": true,
+ "LOG_WARNING": true,
+ "New": true,
+ "NewLogger": true,
+ "Priority": true,
+ "Writer": true,
+ },
+ "math": map[string]bool{
+ "Abs": true,
+ "Acos": true,
+ "Acosh": true,
+ "Asin": true,
+ "Asinh": true,
+ "Atan": true,
+ "Atan2": true,
+ "Atanh": true,
+ "Cbrt": true,
+ "Ceil": true,
+ "Copysign": true,
+ "Cos": true,
+ "Cosh": true,
+ "Dim": true,
+ "E": true,
+ "Erf": true,
+ "Erfc": true,
+ "Erfcinv": true,
+ "Erfinv": true,
+ "Exp": true,
+ "Exp2": true,
+ "Expm1": true,
+ "Float32bits": true,
+ "Float32frombits": true,
+ "Float64bits": true,
+ "Float64frombits": true,
+ "Floor": true,
+ "Frexp": true,
+ "Gamma": true,
+ "Hypot": true,
+ "Ilogb": true,
+ "Inf": true,
+ "IsInf": true,
+ "IsNaN": true,
+ "J0": true,
+ "J1": true,
+ "Jn": true,
+ "Ldexp": true,
+ "Lgamma": true,
+ "Ln10": true,
+ "Ln2": true,
+ "Log": true,
+ "Log10": true,
+ "Log10E": true,
+ "Log1p": true,
+ "Log2": true,
+ "Log2E": true,
+ "Logb": true,
+ "Max": true,
+ "MaxFloat32": true,
+ "MaxFloat64": true,
+ "MaxInt16": true,
+ "MaxInt32": true,
+ "MaxInt64": true,
+ "MaxInt8": true,
+ "MaxUint16": true,
+ "MaxUint32": true,
+ "MaxUint64": true,
+ "MaxUint8": true,
+ "Min": true,
+ "MinInt16": true,
+ "MinInt32": true,
+ "MinInt64": true,
+ "MinInt8": true,
+ "Mod": true,
+ "Modf": true,
+ "NaN": true,
+ "Nextafter": true,
+ "Nextafter32": true,
+ "Phi": true,
+ "Pi": true,
+ "Pow": true,
+ "Pow10": true,
+ "Remainder": true,
+ "Round": true,
+ "RoundToEven": true,
+ "Signbit": true,
+ "Sin": true,
+ "Sincos": true,
+ "Sinh": true,
+ "SmallestNonzeroFloat32": true,
+ "SmallestNonzeroFloat64": true,
+ "Sqrt": true,
+ "Sqrt2": true,
+ "SqrtE": true,
+ "SqrtPhi": true,
+ "SqrtPi": true,
+ "Tan": true,
+ "Tanh": true,
+ "Trunc": true,
+ "Y0": true,
+ "Y1": true,
+ "Yn": true,
+ },
+ "math/big": map[string]bool{
+ "Above": true,
+ "Accuracy": true,
+ "AwayFromZero": true,
+ "Below": true,
+ "ErrNaN": true,
+ "Exact": true,
+ "Float": true,
+ "Int": true,
+ "Jacobi": true,
+ "MaxBase": true,
+ "MaxExp": true,
+ "MaxPrec": true,
+ "MinExp": true,
+ "NewFloat": true,
+ "NewInt": true,
+ "NewRat": true,
+ "ParseFloat": true,
+ "Rat": true,
+ "RoundingMode": true,
+ "ToNearestAway": true,
+ "ToNearestEven": true,
+ "ToNegativeInf": true,
+ "ToPositiveInf": true,
+ "ToZero": true,
+ "Word": true,
+ },
+ "math/bits": map[string]bool{
+ "Add": true,
+ "Add32": true,
+ "Add64": true,
+ "Div": true,
+ "Div32": true,
+ "Div64": true,
+ "LeadingZeros": true,
+ "LeadingZeros16": true,
+ "LeadingZeros32": true,
+ "LeadingZeros64": true,
+ "LeadingZeros8": true,
+ "Len": true,
+ "Len16": true,
+ "Len32": true,
+ "Len64": true,
+ "Len8": true,
+ "Mul": true,
+ "Mul32": true,
+ "Mul64": true,
+ "OnesCount": true,
+ "OnesCount16": true,
+ "OnesCount32": true,
+ "OnesCount64": true,
+ "OnesCount8": true,
+ "Reverse": true,
+ "Reverse16": true,
+ "Reverse32": true,
+ "Reverse64": true,
+ "Reverse8": true,
+ "ReverseBytes": true,
+ "ReverseBytes16": true,
+ "ReverseBytes32": true,
+ "ReverseBytes64": true,
+ "RotateLeft": true,
+ "RotateLeft16": true,
+ "RotateLeft32": true,
+ "RotateLeft64": true,
+ "RotateLeft8": true,
+ "Sub": true,
+ "Sub32": true,
+ "Sub64": true,
+ "TrailingZeros": true,
+ "TrailingZeros16": true,
+ "TrailingZeros32": true,
+ "TrailingZeros64": true,
+ "TrailingZeros8": true,
+ "UintSize": true,
+ },
+ "math/cmplx": map[string]bool{
+ "Abs": true,
+ "Acos": true,
+ "Acosh": true,
+ "Asin": true,
+ "Asinh": true,
+ "Atan": true,
+ "Atanh": true,
+ "Conj": true,
+ "Cos": true,
+ "Cosh": true,
+ "Cot": true,
+ "Exp": true,
+ "Inf": true,
+ "IsInf": true,
+ "IsNaN": true,
+ "Log": true,
+ "Log10": true,
+ "NaN": true,
+ "Phase": true,
+ "Polar": true,
+ "Pow": true,
+ "Rect": true,
+ "Sin": true,
+ "Sinh": true,
+ "Sqrt": true,
+ "Tan": true,
+ "Tanh": true,
+ },
+ "math/rand": map[string]bool{
+ "ExpFloat64": true,
+ "Float32": true,
+ "Float64": true,
+ "Int": true,
+ "Int31": true,
+ "Int31n": true,
+ "Int63": true,
+ "Int63n": true,
+ "Intn": true,
+ "New": true,
+ "NewSource": true,
+ "NewZipf": true,
+ "NormFloat64": true,
+ "Perm": true,
+ "Rand": true,
+ "Read": true,
+ "Seed": true,
+ "Shuffle": true,
+ "Source": true,
+ "Source64": true,
+ "Uint32": true,
+ "Uint64": true,
+ "Zipf": true,
+ },
+ "mime": map[string]bool{
+ "AddExtensionType": true,
+ "BEncoding": true,
+ "ErrInvalidMediaParameter": true,
+ "ExtensionsByType": true,
+ "FormatMediaType": true,
+ "ParseMediaType": true,
+ "QEncoding": true,
+ "TypeByExtension": true,
+ "WordDecoder": true,
+ "WordEncoder": true,
+ },
+ "mime/multipart": map[string]bool{
+ "ErrMessageTooLarge": true,
+ "File": true,
+ "FileHeader": true,
+ "Form": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Part": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "mime/quotedprintable": map[string]bool{
+ "NewReader": true,
+ "NewWriter": true,
+ "Reader": true,
+ "Writer": true,
+ },
+ "net": map[string]bool{
+ "Addr": true,
+ "AddrError": true,
+ "Buffers": true,
+ "CIDRMask": true,
+ "Conn": true,
+ "DNSConfigError": true,
+ "DNSError": true,
+ "DefaultResolver": true,
+ "Dial": true,
+ "DialIP": true,
+ "DialTCP": true,
+ "DialTimeout": true,
+ "DialUDP": true,
+ "DialUnix": true,
+ "Dialer": true,
+ "ErrWriteToConnected": true,
+ "Error": true,
+ "FileConn": true,
+ "FileListener": true,
+ "FilePacketConn": true,
+ "FlagBroadcast": true,
+ "FlagLoopback": true,
+ "FlagMulticast": true,
+ "FlagPointToPoint": true,
+ "FlagUp": true,
+ "Flags": true,
+ "HardwareAddr": true,
+ "IP": true,
+ "IPAddr": true,
+ "IPConn": true,
+ "IPMask": true,
+ "IPNet": true,
+ "IPv4": true,
+ "IPv4Mask": true,
+ "IPv4allrouter": true,
+ "IPv4allsys": true,
+ "IPv4bcast": true,
+ "IPv4len": true,
+ "IPv4zero": true,
+ "IPv6interfacelocalallnodes": true,
+ "IPv6len": true,
+ "IPv6linklocalallnodes": true,
+ "IPv6linklocalallrouters": true,
+ "IPv6loopback": true,
+ "IPv6unspecified": true,
+ "IPv6zero": true,
+ "Interface": true,
+ "InterfaceAddrs": true,
+ "InterfaceByIndex": true,
+ "InterfaceByName": true,
+ "Interfaces": true,
+ "InvalidAddrError": true,
+ "JoinHostPort": true,
+ "Listen": true,
+ "ListenConfig": true,
+ "ListenIP": true,
+ "ListenMulticastUDP": true,
+ "ListenPacket": true,
+ "ListenTCP": true,
+ "ListenUDP": true,
+ "ListenUnix": true,
+ "ListenUnixgram": true,
+ "Listener": true,
+ "LookupAddr": true,
+ "LookupCNAME": true,
+ "LookupHost": true,
+ "LookupIP": true,
+ "LookupMX": true,
+ "LookupNS": true,
+ "LookupPort": true,
+ "LookupSRV": true,
+ "LookupTXT": true,
+ "MX": true,
+ "NS": true,
+ "OpError": true,
+ "PacketConn": true,
+ "ParseCIDR": true,
+ "ParseError": true,
+ "ParseIP": true,
+ "ParseMAC": true,
+ "Pipe": true,
+ "ResolveIPAddr": true,
+ "ResolveTCPAddr": true,
+ "ResolveUDPAddr": true,
+ "ResolveUnixAddr": true,
+ "Resolver": true,
+ "SRV": true,
+ "SplitHostPort": true,
+ "TCPAddr": true,
+ "TCPConn": true,
+ "TCPListener": true,
+ "UDPAddr": true,
+ "UDPConn": true,
+ "UnixAddr": true,
+ "UnixConn": true,
+ "UnixListener": true,
+ "UnknownNetworkError": true,
+ },
+ "net/http": map[string]bool{
+ "CanonicalHeaderKey": true,
+ "Client": true,
+ "CloseNotifier": true,
+ "ConnState": true,
+ "Cookie": true,
+ "CookieJar": true,
+ "DefaultClient": true,
+ "DefaultMaxHeaderBytes": true,
+ "DefaultMaxIdleConnsPerHost": true,
+ "DefaultServeMux": true,
+ "DefaultTransport": true,
+ "DetectContentType": true,
+ "Dir": true,
+ "ErrAbortHandler": true,
+ "ErrBodyNotAllowed": true,
+ "ErrBodyReadAfterClose": true,
+ "ErrContentLength": true,
+ "ErrHandlerTimeout": true,
+ "ErrHeaderTooLong": true,
+ "ErrHijacked": true,
+ "ErrLineTooLong": true,
+ "ErrMissingBoundary": true,
+ "ErrMissingContentLength": true,
+ "ErrMissingFile": true,
+ "ErrNoCookie": true,
+ "ErrNoLocation": true,
+ "ErrNotMultipart": true,
+ "ErrNotSupported": true,
+ "ErrServerClosed": true,
+ "ErrShortBody": true,
+ "ErrSkipAltProtocol": true,
+ "ErrUnexpectedTrailer": true,
+ "ErrUseLastResponse": true,
+ "ErrWriteAfterFlush": true,
+ "Error": true,
+ "File": true,
+ "FileServer": true,
+ "FileSystem": true,
+ "Flusher": true,
+ "Get": true,
+ "Handle": true,
+ "HandleFunc": true,
+ "Handler": true,
+ "HandlerFunc": true,
+ "Head": true,
+ "Header": true,
+ "Hijacker": true,
+ "ListenAndServe": true,
+ "ListenAndServeTLS": true,
+ "LocalAddrContextKey": true,
+ "MaxBytesReader": true,
+ "MethodConnect": true,
+ "MethodDelete": true,
+ "MethodGet": true,
+ "MethodHead": true,
+ "MethodOptions": true,
+ "MethodPatch": true,
+ "MethodPost": true,
+ "MethodPut": true,
+ "MethodTrace": true,
+ "NewFileTransport": true,
+ "NewRequest": true,
+ "NewServeMux": true,
+ "NoBody": true,
+ "NotFound": true,
+ "NotFoundHandler": true,
+ "ParseHTTPVersion": true,
+ "ParseTime": true,
+ "Post": true,
+ "PostForm": true,
+ "ProtocolError": true,
+ "ProxyFromEnvironment": true,
+ "ProxyURL": true,
+ "PushOptions": true,
+ "Pusher": true,
+ "ReadRequest": true,
+ "ReadResponse": true,
+ "Redirect": true,
+ "RedirectHandler": true,
+ "Request": true,
+ "Response": true,
+ "ResponseWriter": true,
+ "RoundTripper": true,
+ "SameSite": true,
+ "SameSiteDefaultMode": true,
+ "SameSiteLaxMode": true,
+ "SameSiteStrictMode": true,
+ "Serve": true,
+ "ServeContent": true,
+ "ServeFile": true,
+ "ServeMux": true,
+ "ServeTLS": true,
+ "Server": true,
+ "ServerContextKey": true,
+ "SetCookie": true,
+ "StateActive": true,
+ "StateClosed": true,
+ "StateHijacked": true,
+ "StateIdle": true,
+ "StateNew": true,
+ "StatusAccepted": true,
+ "StatusAlreadyReported": true,
+ "StatusBadGateway": true,
+ "StatusBadRequest": true,
+ "StatusConflict": true,
+ "StatusContinue": true,
+ "StatusCreated": true,
+ "StatusExpectationFailed": true,
+ "StatusFailedDependency": true,
+ "StatusForbidden": true,
+ "StatusFound": true,
+ "StatusGatewayTimeout": true,
+ "StatusGone": true,
+ "StatusHTTPVersionNotSupported": true,
+ "StatusIMUsed": true,
+ "StatusInsufficientStorage": true,
+ "StatusInternalServerError": true,
+ "StatusLengthRequired": true,
+ "StatusLocked": true,
+ "StatusLoopDetected": true,
+ "StatusMethodNotAllowed": true,
+ "StatusMisdirectedRequest": true,
+ "StatusMovedPermanently": true,
+ "StatusMultiStatus": true,
+ "StatusMultipleChoices": true,
+ "StatusNetworkAuthenticationRequired": true,
+ "StatusNoContent": true,
+ "StatusNonAuthoritativeInfo": true,
+ "StatusNotAcceptable": true,
+ "StatusNotExtended": true,
+ "StatusNotFound": true,
+ "StatusNotImplemented": true,
+ "StatusNotModified": true,
+ "StatusOK": true,
+ "StatusPartialContent": true,
+ "StatusPaymentRequired": true,
+ "StatusPermanentRedirect": true,
+ "StatusPreconditionFailed": true,
+ "StatusPreconditionRequired": true,
+ "StatusProcessing": true,
+ "StatusProxyAuthRequired": true,
+ "StatusRequestEntityTooLarge": true,
+ "StatusRequestHeaderFieldsTooLarge": true,
+ "StatusRequestTimeout": true,
+ "StatusRequestURITooLong": true,
+ "StatusRequestedRangeNotSatisfiable": true,
+ "StatusResetContent": true,
+ "StatusSeeOther": true,
+ "StatusServiceUnavailable": true,
+ "StatusSwitchingProtocols": true,
+ "StatusTeapot": true,
+ "StatusTemporaryRedirect": true,
+ "StatusText": true,
+ "StatusTooEarly": true,
+ "StatusTooManyRequests": true,
+ "StatusUnauthorized": true,
+ "StatusUnavailableForLegalReasons": true,
+ "StatusUnprocessableEntity": true,
+ "StatusUnsupportedMediaType": true,
+ "StatusUpgradeRequired": true,
+ "StatusUseProxy": true,
+ "StatusVariantAlsoNegotiates": true,
+ "StripPrefix": true,
+ "TimeFormat": true,
+ "TimeoutHandler": true,
+ "TrailerPrefix": true,
+ "Transport": true,
+ },
+ "net/http/cgi": map[string]bool{
+ "Handler": true,
+ "Request": true,
+ "RequestFromMap": true,
+ "Serve": true,
+ },
+ "net/http/cookiejar": map[string]bool{
+ "Jar": true,
+ "New": true,
+ "Options": true,
+ "PublicSuffixList": true,
+ },
+ "net/http/fcgi": map[string]bool{
+ "ErrConnClosed": true,
+ "ErrRequestAborted": true,
+ "ProcessEnv": true,
+ "Serve": true,
+ },
+ "net/http/httptest": map[string]bool{
+ "DefaultRemoteAddr": true,
+ "NewRecorder": true,
+ "NewRequest": true,
+ "NewServer": true,
+ "NewTLSServer": true,
+ "NewUnstartedServer": true,
+ "ResponseRecorder": true,
+ "Server": true,
+ },
+ "net/http/httptrace": map[string]bool{
+ "ClientTrace": true,
+ "ContextClientTrace": true,
+ "DNSDoneInfo": true,
+ "DNSStartInfo": true,
+ "GotConnInfo": true,
+ "WithClientTrace": true,
+ "WroteRequestInfo": true,
+ },
+ "net/http/httputil": map[string]bool{
+ "BufferPool": true,
+ "ClientConn": true,
+ "DumpRequest": true,
+ "DumpRequestOut": true,
+ "DumpResponse": true,
+ "ErrClosed": true,
+ "ErrLineTooLong": true,
+ "ErrPersistEOF": true,
+ "ErrPipeline": true,
+ "NewChunkedReader": true,
+ "NewChunkedWriter": true,
+ "NewClientConn": true,
+ "NewProxyClientConn": true,
+ "NewServerConn": true,
+ "NewSingleHostReverseProxy": true,
+ "ReverseProxy": true,
+ "ServerConn": true,
+ },
+ "net/http/pprof": map[string]bool{
+ "Cmdline": true,
+ "Handler": true,
+ "Index": true,
+ "Profile": true,
+ "Symbol": true,
+ "Trace": true,
+ },
+ "net/mail": map[string]bool{
+ "Address": true,
+ "AddressParser": true,
+ "ErrHeaderNotPresent": true,
+ "Header": true,
+ "Message": true,
+ "ParseAddress": true,
+ "ParseAddressList": true,
+ "ParseDate": true,
+ "ReadMessage": true,
+ },
+ "net/rpc": map[string]bool{
+ "Accept": true,
+ "Call": true,
+ "Client": true,
+ "ClientCodec": true,
+ "DefaultDebugPath": true,
+ "DefaultRPCPath": true,
+ "DefaultServer": true,
+ "Dial": true,
+ "DialHTTP": true,
+ "DialHTTPPath": true,
+ "ErrShutdown": true,
+ "HandleHTTP": true,
+ "NewClient": true,
+ "NewClientWithCodec": true,
+ "NewServer": true,
+ "Register": true,
+ "RegisterName": true,
+ "Request": true,
+ "Response": true,
+ "ServeCodec": true,
+ "ServeConn": true,
+ "ServeRequest": true,
+ "Server": true,
+ "ServerCodec": true,
+ "ServerError": true,
+ },
+ "net/rpc/jsonrpc": map[string]bool{
+ "Dial": true,
+ "NewClient": true,
+ "NewClientCodec": true,
+ "NewServerCodec": true,
+ "ServeConn": true,
+ },
+ "net/smtp": map[string]bool{
+ "Auth": true,
+ "CRAMMD5Auth": true,
+ "Client": true,
+ "Dial": true,
+ "NewClient": true,
+ "PlainAuth": true,
+ "SendMail": true,
+ "ServerInfo": true,
+ },
+ "net/textproto": map[string]bool{
+ "CanonicalMIMEHeaderKey": true,
+ "Conn": true,
+ "Dial": true,
+ "Error": true,
+ "MIMEHeader": true,
+ "NewConn": true,
+ "NewReader": true,
+ "NewWriter": true,
+ "Pipeline": true,
+ "ProtocolError": true,
+ "Reader": true,
+ "TrimBytes": true,
+ "TrimString": true,
+ "Writer": true,
+ },
+ "net/url": map[string]bool{
+ "Error": true,
+ "EscapeError": true,
+ "InvalidHostError": true,
+ "Parse": true,
+ "ParseQuery": true,
+ "ParseRequestURI": true,
+ "PathEscape": true,
+ "PathUnescape": true,
+ "QueryEscape": true,
+ "QueryUnescape": true,
+ "URL": true,
+ "User": true,
+ "UserPassword": true,
+ "Userinfo": true,
+ "Values": true,
+ },
+ "os": map[string]bool{
+ "Args": true,
+ "Chdir": true,
+ "Chmod": true,
+ "Chown": true,
+ "Chtimes": true,
+ "Clearenv": true,
+ "Create": true,
+ "DevNull": true,
+ "Environ": true,
+ "ErrClosed": true,
+ "ErrExist": true,
+ "ErrInvalid": true,
+ "ErrNoDeadline": true,
+ "ErrNotExist": true,
+ "ErrPermission": true,
+ "Executable": true,
+ "Exit": true,
+ "Expand": true,
+ "ExpandEnv": true,
+ "File": true,
+ "FileInfo": true,
+ "FileMode": true,
+ "FindProcess": true,
+ "Getegid": true,
+ "Getenv": true,
+ "Geteuid": true,
+ "Getgid": true,
+ "Getgroups": true,
+ "Getpagesize": true,
+ "Getpid": true,
+ "Getppid": true,
+ "Getuid": true,
+ "Getwd": true,
+ "Hostname": true,
+ "Interrupt": true,
+ "IsExist": true,
+ "IsNotExist": true,
+ "IsPathSeparator": true,
+ "IsPermission": true,
+ "IsTimeout": true,
+ "Kill": true,
+ "Lchown": true,
+ "Link": true,
+ "LinkError": true,
+ "LookupEnv": true,
+ "Lstat": true,
+ "Mkdir": true,
+ "MkdirAll": true,
+ "ModeAppend": true,
+ "ModeCharDevice": true,
+ "ModeDevice": true,
+ "ModeDir": true,
+ "ModeExclusive": true,
+ "ModeIrregular": true,
+ "ModeNamedPipe": true,
+ "ModePerm": true,
+ "ModeSetgid": true,
+ "ModeSetuid": true,
+ "ModeSocket": true,
+ "ModeSticky": true,
+ "ModeSymlink": true,
+ "ModeTemporary": true,
+ "ModeType": true,
+ "NewFile": true,
+ "NewSyscallError": true,
+ "O_APPEND": true,
+ "O_CREATE": true,
+ "O_EXCL": true,
+ "O_RDONLY": true,
+ "O_RDWR": true,
+ "O_SYNC": true,
+ "O_TRUNC": true,
+ "O_WRONLY": true,
+ "Open": true,
+ "OpenFile": true,
+ "PathError": true,
+ "PathListSeparator": true,
+ "PathSeparator": true,
+ "Pipe": true,
+ "ProcAttr": true,
+ "Process": true,
+ "ProcessState": true,
+ "Readlink": true,
+ "Remove": true,
+ "RemoveAll": true,
+ "Rename": true,
+ "SEEK_CUR": true,
+ "SEEK_END": true,
+ "SEEK_SET": true,
+ "SameFile": true,
+ "Setenv": true,
+ "Signal": true,
+ "StartProcess": true,
+ "Stat": true,
+ "Stderr": true,
+ "Stdin": true,
+ "Stdout": true,
+ "Symlink": true,
+ "SyscallError": true,
+ "TempDir": true,
+ "Truncate": true,
+ "Unsetenv": true,
+ "UserCacheDir": true,
+ "UserHomeDir": true,
+ },
+ "os/exec": map[string]bool{
+ "Cmd": true,
+ "Command": true,
+ "CommandContext": true,
+ "ErrNotFound": true,
+ "Error": true,
+ "ExitError": true,
+ "LookPath": true,
+ },
+ "os/signal": map[string]bool{
+ "Ignore": true,
+ "Ignored": true,
+ "Notify": true,
+ "Reset": true,
+ "Stop": true,
+ },
+ "os/user": map[string]bool{
+ "Current": true,
+ "Group": true,
+ "Lookup": true,
+ "LookupGroup": true,
+ "LookupGroupId": true,
+ "LookupId": true,
+ "UnknownGroupError": true,
+ "UnknownGroupIdError": true,
+ "UnknownUserError": true,
+ "UnknownUserIdError": true,
+ "User": true,
+ },
+ "path": map[string]bool{
+ "Base": true,
+ "Clean": true,
+ "Dir": true,
+ "ErrBadPattern": true,
+ "Ext": true,
+ "IsAbs": true,
+ "Join": true,
+ "Match": true,
+ "Split": true,
+ },
+ "path/filepath": map[string]bool{
+ "Abs": true,
+ "Base": true,
+ "Clean": true,
+ "Dir": true,
+ "ErrBadPattern": true,
+ "EvalSymlinks": true,
+ "Ext": true,
+ "FromSlash": true,
+ "Glob": true,
+ "HasPrefix": true,
+ "IsAbs": true,
+ "Join": true,
+ "ListSeparator": true,
+ "Match": true,
+ "Rel": true,
+ "Separator": true,
+ "SkipDir": true,
+ "Split": true,
+ "SplitList": true,
+ "ToSlash": true,
+ "VolumeName": true,
+ "Walk": true,
+ "WalkFunc": true,
+ },
+ "plugin": map[string]bool{
+ "Open": true,
+ "Plugin": true,
+ "Symbol": true,
+ },
+ "reflect": map[string]bool{
+ "Append": true,
+ "AppendSlice": true,
+ "Array": true,
+ "ArrayOf": true,
+ "Bool": true,
+ "BothDir": true,
+ "Chan": true,
+ "ChanDir": true,
+ "ChanOf": true,
+ "Complex128": true,
+ "Complex64": true,
+ "Copy": true,
+ "DeepEqual": true,
+ "Float32": true,
+ "Float64": true,
+ "Func": true,
+ "FuncOf": true,
+ "Indirect": true,
+ "Int": true,
+ "Int16": true,
+ "Int32": true,
+ "Int64": true,
+ "Int8": true,
+ "Interface": true,
+ "Invalid": true,
+ "Kind": true,
+ "MakeChan": true,
+ "MakeFunc": true,
+ "MakeMap": true,
+ "MakeMapWithSize": true,
+ "MakeSlice": true,
+ "Map": true,
+ "MapIter": true,
+ "MapOf": true,
+ "Method": true,
+ "New": true,
+ "NewAt": true,
+ "Ptr": true,
+ "PtrTo": true,
+ "RecvDir": true,
+ "Select": true,
+ "SelectCase": true,
+ "SelectDefault": true,
+ "SelectDir": true,
+ "SelectRecv": true,
+ "SelectSend": true,
+ "SendDir": true,
+ "Slice": true,
+ "SliceHeader": true,
+ "SliceOf": true,
+ "String": true,
+ "StringHeader": true,
+ "Struct": true,
+ "StructField": true,
+ "StructOf": true,
+ "StructTag": true,
+ "Swapper": true,
+ "TypeOf": true,
+ "Uint": true,
+ "Uint16": true,
+ "Uint32": true,
+ "Uint64": true,
+ "Uint8": true,
+ "Uintptr": true,
+ "UnsafePointer": true,
+ "Value": true,
+ "ValueError": true,
+ "ValueOf": true,
+ "Zero": true,
+ },
+ "regexp": map[string]bool{
+ "Compile": true,
+ "CompilePOSIX": true,
+ "Match": true,
+ "MatchReader": true,
+ "MatchString": true,
+ "MustCompile": true,
+ "MustCompilePOSIX": true,
+ "QuoteMeta": true,
+ "Regexp": true,
+ },
+ "regexp/syntax": map[string]bool{
+ "ClassNL": true,
+ "Compile": true,
+ "DotNL": true,
+ "EmptyBeginLine": true,
+ "EmptyBeginText": true,
+ "EmptyEndLine": true,
+ "EmptyEndText": true,
+ "EmptyNoWordBoundary": true,
+ "EmptyOp": true,
+ "EmptyOpContext": true,
+ "EmptyWordBoundary": true,
+ "ErrInternalError": true,
+ "ErrInvalidCharClass": true,
+ "ErrInvalidCharRange": true,
+ "ErrInvalidEscape": true,
+ "ErrInvalidNamedCapture": true,
+ "ErrInvalidPerlOp": true,
+ "ErrInvalidRepeatOp": true,
+ "ErrInvalidRepeatSize": true,
+ "ErrInvalidUTF8": true,
+ "ErrMissingBracket": true,
+ "ErrMissingParen": true,
+ "ErrMissingRepeatArgument": true,
+ "ErrTrailingBackslash": true,
+ "ErrUnexpectedParen": true,
+ "Error": true,
+ "ErrorCode": true,
+ "Flags": true,
+ "FoldCase": true,
+ "Inst": true,
+ "InstAlt": true,
+ "InstAltMatch": true,
+ "InstCapture": true,
+ "InstEmptyWidth": true,
+ "InstFail": true,
+ "InstMatch": true,
+ "InstNop": true,
+ "InstOp": true,
+ "InstRune": true,
+ "InstRune1": true,
+ "InstRuneAny": true,
+ "InstRuneAnyNotNL": true,
+ "IsWordChar": true,
+ "Literal": true,
+ "MatchNL": true,
+ "NonGreedy": true,
+ "OneLine": true,
+ "Op": true,
+ "OpAlternate": true,
+ "OpAnyChar": true,
+ "OpAnyCharNotNL": true,
+ "OpBeginLine": true,
+ "OpBeginText": true,
+ "OpCapture": true,
+ "OpCharClass": true,
+ "OpConcat": true,
+ "OpEmptyMatch": true,
+ "OpEndLine": true,
+ "OpEndText": true,
+ "OpLiteral": true,
+ "OpNoMatch": true,
+ "OpNoWordBoundary": true,
+ "OpPlus": true,
+ "OpQuest": true,
+ "OpRepeat": true,
+ "OpStar": true,
+ "OpWordBoundary": true,
+ "POSIX": true,
+ "Parse": true,
+ "Perl": true,
+ "PerlX": true,
+ "Prog": true,
+ "Regexp": true,
+ "Simple": true,
+ "UnicodeGroups": true,
+ "WasDollar": true,
+ },
+ "runtime": map[string]bool{
+ "BlockProfile": true,
+ "BlockProfileRecord": true,
+ "Breakpoint": true,
+ "CPUProfile": true,
+ "Caller": true,
+ "Callers": true,
+ "CallersFrames": true,
+ "Compiler": true,
+ "Error": true,
+ "Frame": true,
+ "Frames": true,
+ "Func": true,
+ "FuncForPC": true,
+ "GC": true,
+ "GOARCH": true,
+ "GOMAXPROCS": true,
+ "GOOS": true,
+ "GOROOT": true,
+ "Goexit": true,
+ "GoroutineProfile": true,
+ "Gosched": true,
+ "KeepAlive": true,
+ "LockOSThread": true,
+ "MemProfile": true,
+ "MemProfileRate": true,
+ "MemProfileRecord": true,
+ "MemStats": true,
+ "MutexProfile": true,
+ "NumCPU": true,
+ "NumCgoCall": true,
+ "NumGoroutine": true,
+ "ReadMemStats": true,
+ "ReadTrace": true,
+ "SetBlockProfileRate": true,
+ "SetCPUProfileRate": true,
+ "SetCgoTraceback": true,
+ "SetFinalizer": true,
+ "SetMutexProfileFraction": true,
+ "Stack": true,
+ "StackRecord": true,
+ "StartTrace": true,
+ "StopTrace": true,
+ "ThreadCreateProfile": true,
+ "TypeAssertionError": true,
+ "UnlockOSThread": true,
+ "Version": true,
+ },
+ "runtime/debug": map[string]bool{
+ "BuildInfo": true,
+ "FreeOSMemory": true,
+ "GCStats": true,
+ "Module": true,
+ "PrintStack": true,
+ "ReadBuildInfo": true,
+ "ReadGCStats": true,
+ "SetGCPercent": true,
+ "SetMaxStack": true,
+ "SetMaxThreads": true,
+ "SetPanicOnFault": true,
+ "SetTraceback": true,
+ "Stack": true,
+ "WriteHeapDump": true,
+ },
+ "runtime/pprof": map[string]bool{
+ "Do": true,
+ "ForLabels": true,
+ "Label": true,
+ "LabelSet": true,
+ "Labels": true,
+ "Lookup": true,
+ "NewProfile": true,
+ "Profile": true,
+ "Profiles": true,
+ "SetGoroutineLabels": true,
+ "StartCPUProfile": true,
+ "StopCPUProfile": true,
+ "WithLabels": true,
+ "WriteHeapProfile": true,
+ },
+ "runtime/trace": map[string]bool{
+ "IsEnabled": true,
+ "Log": true,
+ "Logf": true,
+ "NewTask": true,
+ "Region": true,
+ "Start": true,
+ "StartRegion": true,
+ "Stop": true,
+ "Task": true,
+ "WithRegion": true,
+ },
+ "sort": map[string]bool{
+ "Float64Slice": true,
+ "Float64s": true,
+ "Float64sAreSorted": true,
+ "IntSlice": true,
+ "Interface": true,
+ "Ints": true,
+ "IntsAreSorted": true,
+ "IsSorted": true,
+ "Reverse": true,
+ "Search": true,
+ "SearchFloat64s": true,
+ "SearchInts": true,
+ "SearchStrings": true,
+ "Slice": true,
+ "SliceIsSorted": true,
+ "SliceStable": true,
+ "Sort": true,
+ "Stable": true,
+ "StringSlice": true,
+ "Strings": true,
+ "StringsAreSorted": true,
+ },
+ "strconv": map[string]bool{
+ "AppendBool": true,
+ "AppendFloat": true,
+ "AppendInt": true,
+ "AppendQuote": true,
+ "AppendQuoteRune": true,
+ "AppendQuoteRuneToASCII": true,
+ "AppendQuoteRuneToGraphic": true,
+ "AppendQuoteToASCII": true,
+ "AppendQuoteToGraphic": true,
+ "AppendUint": true,
+ "Atoi": true,
+ "CanBackquote": true,
+ "ErrRange": true,
+ "ErrSyntax": true,
+ "FormatBool": true,
+ "FormatFloat": true,
+ "FormatInt": true,
+ "FormatUint": true,
+ "IntSize": true,
+ "IsGraphic": true,
+ "IsPrint": true,
+ "Itoa": true,
+ "NumError": true,
+ "ParseBool": true,
+ "ParseFloat": true,
+ "ParseInt": true,
+ "ParseUint": true,
+ "Quote": true,
+ "QuoteRune": true,
+ "QuoteRuneToASCII": true,
+ "QuoteRuneToGraphic": true,
+ "QuoteToASCII": true,
+ "QuoteToGraphic": true,
+ "Unquote": true,
+ "UnquoteChar": true,
+ },
+ "strings": map[string]bool{
+ "Builder": true,
+ "Compare": true,
+ "Contains": true,
+ "ContainsAny": true,
+ "ContainsRune": true,
+ "Count": true,
+ "EqualFold": true,
+ "Fields": true,
+ "FieldsFunc": true,
+ "HasPrefix": true,
+ "HasSuffix": true,
+ "Index": true,
+ "IndexAny": true,
+ "IndexByte": true,
+ "IndexFunc": true,
+ "IndexRune": true,
+ "Join": true,
+ "LastIndex": true,
+ "LastIndexAny": true,
+ "LastIndexByte": true,
+ "LastIndexFunc": true,
+ "Map": true,
+ "NewReader": true,
+ "NewReplacer": true,
+ "Reader": true,
+ "Repeat": true,
+ "Replace": true,
+ "ReplaceAll": true,
+ "Replacer": true,
+ "Split": true,
+ "SplitAfter": true,
+ "SplitAfterN": true,
+ "SplitN": true,
+ "Title": true,
+ "ToLower": true,
+ "ToLowerSpecial": true,
+ "ToTitle": true,
+ "ToTitleSpecial": true,
+ "ToUpper": true,
+ "ToUpperSpecial": true,
+ "Trim": true,
+ "TrimFunc": true,
+ "TrimLeft": true,
+ "TrimLeftFunc": true,
+ "TrimPrefix": true,
+ "TrimRight": true,
+ "TrimRightFunc": true,
+ "TrimSpace": true,
+ "TrimSuffix": true,
+ },
+ "sync": map[string]bool{
+ "Cond": true,
+ "Locker": true,
+ "Map": true,
+ "Mutex": true,
+ "NewCond": true,
+ "Once": true,
+ "Pool": true,
+ "RWMutex": true,
+ "WaitGroup": true,
+ },
+ "sync/atomic": map[string]bool{
+ "AddInt32": true,
+ "AddInt64": true,
+ "AddUint32": true,
+ "AddUint64": true,
+ "AddUintptr": true,
+ "CompareAndSwapInt32": true,
+ "CompareAndSwapInt64": true,
+ "CompareAndSwapPointer": true,
+ "CompareAndSwapUint32": true,
+ "CompareAndSwapUint64": true,
+ "CompareAndSwapUintptr": true,
+ "LoadInt32": true,
+ "LoadInt64": true,
+ "LoadPointer": true,
+ "LoadUint32": true,
+ "LoadUint64": true,
+ "LoadUintptr": true,
+ "StoreInt32": true,
+ "StoreInt64": true,
+ "StorePointer": true,
+ "StoreUint32": true,
+ "StoreUint64": true,
+ "StoreUintptr": true,
+ "SwapInt32": true,
+ "SwapInt64": true,
+ "SwapPointer": true,
+ "SwapUint32": true,
+ "SwapUint64": true,
+ "SwapUintptr": true,
+ "Value": true,
+ },
+ "syscall": map[string]bool{
+ "AF_ALG": true,
+ "AF_APPLETALK": true,
+ "AF_ARP": true,
+ "AF_ASH": true,
+ "AF_ATM": true,
+ "AF_ATMPVC": true,
+ "AF_ATMSVC": true,
+ "AF_AX25": true,
+ "AF_BLUETOOTH": true,
+ "AF_BRIDGE": true,
+ "AF_CAIF": true,
+ "AF_CAN": true,
+ "AF_CCITT": true,
+ "AF_CHAOS": true,
+ "AF_CNT": true,
+ "AF_COIP": true,
+ "AF_DATAKIT": true,
+ "AF_DECnet": true,
+ "AF_DLI": true,
+ "AF_E164": true,
+ "AF_ECMA": true,
+ "AF_ECONET": true,
+ "AF_ENCAP": true,
+ "AF_FILE": true,
+ "AF_HYLINK": true,
+ "AF_IEEE80211": true,
+ "AF_IEEE802154": true,
+ "AF_IMPLINK": true,
+ "AF_INET": true,
+ "AF_INET6": true,
+ "AF_INET6_SDP": true,
+ "AF_INET_SDP": true,
+ "AF_IPX": true,
+ "AF_IRDA": true,
+ "AF_ISDN": true,
+ "AF_ISO": true,
+ "AF_IUCV": true,
+ "AF_KEY": true,
+ "AF_LAT": true,
+ "AF_LINK": true,
+ "AF_LLC": true,
+ "AF_LOCAL": true,
+ "AF_MAX": true,
+ "AF_MPLS": true,
+ "AF_NATM": true,
+ "AF_NDRV": true,
+ "AF_NETBEUI": true,
+ "AF_NETBIOS": true,
+ "AF_NETGRAPH": true,
+ "AF_NETLINK": true,
+ "AF_NETROM": true,
+ "AF_NS": true,
+ "AF_OROUTE": true,
+ "AF_OSI": true,
+ "AF_PACKET": true,
+ "AF_PHONET": true,
+ "AF_PPP": true,
+ "AF_PPPOX": true,
+ "AF_PUP": true,
+ "AF_RDS": true,
+ "AF_RESERVED_36": true,
+ "AF_ROSE": true,
+ "AF_ROUTE": true,
+ "AF_RXRPC": true,
+ "AF_SCLUSTER": true,
+ "AF_SECURITY": true,
+ "AF_SIP": true,
+ "AF_SLOW": true,
+ "AF_SNA": true,
+ "AF_SYSTEM": true,
+ "AF_TIPC": true,
+ "AF_UNIX": true,
+ "AF_UNSPEC": true,
+ "AF_VENDOR00": true,
+ "AF_VENDOR01": true,
+ "AF_VENDOR02": true,
+ "AF_VENDOR03": true,
+ "AF_VENDOR04": true,
+ "AF_VENDOR05": true,
+ "AF_VENDOR06": true,
+ "AF_VENDOR07": true,
+ "AF_VENDOR08": true,
+ "AF_VENDOR09": true,
+ "AF_VENDOR10": true,
+ "AF_VENDOR11": true,
+ "AF_VENDOR12": true,
+ "AF_VENDOR13": true,
+ "AF_VENDOR14": true,
+ "AF_VENDOR15": true,
+ "AF_VENDOR16": true,
+ "AF_VENDOR17": true,
+ "AF_VENDOR18": true,
+ "AF_VENDOR19": true,
+ "AF_VENDOR20": true,
+ "AF_VENDOR21": true,
+ "AF_VENDOR22": true,
+ "AF_VENDOR23": true,
+ "AF_VENDOR24": true,
+ "AF_VENDOR25": true,
+ "AF_VENDOR26": true,
+ "AF_VENDOR27": true,
+ "AF_VENDOR28": true,
+ "AF_VENDOR29": true,
+ "AF_VENDOR30": true,
+ "AF_VENDOR31": true,
+ "AF_VENDOR32": true,
+ "AF_VENDOR33": true,
+ "AF_VENDOR34": true,
+ "AF_VENDOR35": true,
+ "AF_VENDOR36": true,
+ "AF_VENDOR37": true,
+ "AF_VENDOR38": true,
+ "AF_VENDOR39": true,
+ "AF_VENDOR40": true,
+ "AF_VENDOR41": true,
+ "AF_VENDOR42": true,
+ "AF_VENDOR43": true,
+ "AF_VENDOR44": true,
+ "AF_VENDOR45": true,
+ "AF_VENDOR46": true,
+ "AF_VENDOR47": true,
+ "AF_WANPIPE": true,
+ "AF_X25": true,
+ "AI_CANONNAME": true,
+ "AI_NUMERICHOST": true,
+ "AI_PASSIVE": true,
+ "APPLICATION_ERROR": true,
+ "ARPHRD_ADAPT": true,
+ "ARPHRD_APPLETLK": true,
+ "ARPHRD_ARCNET": true,
+ "ARPHRD_ASH": true,
+ "ARPHRD_ATM": true,
+ "ARPHRD_AX25": true,
+ "ARPHRD_BIF": true,
+ "ARPHRD_CHAOS": true,
+ "ARPHRD_CISCO": true,
+ "ARPHRD_CSLIP": true,
+ "ARPHRD_CSLIP6": true,
+ "ARPHRD_DDCMP": true,
+ "ARPHRD_DLCI": true,
+ "ARPHRD_ECONET": true,
+ "ARPHRD_EETHER": true,
+ "ARPHRD_ETHER": true,
+ "ARPHRD_EUI64": true,
+ "ARPHRD_FCAL": true,
+ "ARPHRD_FCFABRIC": true,
+ "ARPHRD_FCPL": true,
+ "ARPHRD_FCPP": true,
+ "ARPHRD_FDDI": true,
+ "ARPHRD_FRAD": true,
+ "ARPHRD_FRELAY": true,
+ "ARPHRD_HDLC": true,
+ "ARPHRD_HIPPI": true,
+ "ARPHRD_HWX25": true,
+ "ARPHRD_IEEE1394": true,
+ "ARPHRD_IEEE802": true,
+ "ARPHRD_IEEE80211": true,
+ "ARPHRD_IEEE80211_PRISM": true,
+ "ARPHRD_IEEE80211_RADIOTAP": true,
+ "ARPHRD_IEEE802154": true,
+ "ARPHRD_IEEE802154_PHY": true,
+ "ARPHRD_IEEE802_TR": true,
+ "ARPHRD_INFINIBAND": true,
+ "ARPHRD_IPDDP": true,
+ "ARPHRD_IPGRE": true,
+ "ARPHRD_IRDA": true,
+ "ARPHRD_LAPB": true,
+ "ARPHRD_LOCALTLK": true,
+ "ARPHRD_LOOPBACK": true,
+ "ARPHRD_METRICOM": true,
+ "ARPHRD_NETROM": true,
+ "ARPHRD_NONE": true,
+ "ARPHRD_PIMREG": true,
+ "ARPHRD_PPP": true,
+ "ARPHRD_PRONET": true,
+ "ARPHRD_RAWHDLC": true,
+ "ARPHRD_ROSE": true,
+ "ARPHRD_RSRVD": true,
+ "ARPHRD_SIT": true,
+ "ARPHRD_SKIP": true,
+ "ARPHRD_SLIP": true,
+ "ARPHRD_SLIP6": true,
+ "ARPHRD_STRIP": true,
+ "ARPHRD_TUNNEL": true,
+ "ARPHRD_TUNNEL6": true,
+ "ARPHRD_VOID": true,
+ "ARPHRD_X25": true,
+ "AUTHTYPE_CLIENT": true,
+ "AUTHTYPE_SERVER": true,
+ "Accept": true,
+ "Accept4": true,
+ "AcceptEx": true,
+ "Access": true,
+ "Acct": true,
+ "AddrinfoW": true,
+ "Adjtime": true,
+ "Adjtimex": true,
+ "AttachLsf": true,
+ "B0": true,
+ "B1000000": true,
+ "B110": true,
+ "B115200": true,
+ "B1152000": true,
+ "B1200": true,
+ "B134": true,
+ "B14400": true,
+ "B150": true,
+ "B1500000": true,
+ "B1800": true,
+ "B19200": true,
+ "B200": true,
+ "B2000000": true,
+ "B230400": true,
+ "B2400": true,
+ "B2500000": true,
+ "B28800": true,
+ "B300": true,
+ "B3000000": true,
+ "B3500000": true,
+ "B38400": true,
+ "B4000000": true,
+ "B460800": true,
+ "B4800": true,
+ "B50": true,
+ "B500000": true,
+ "B57600": true,
+ "B576000": true,
+ "B600": true,
+ "B7200": true,
+ "B75": true,
+ "B76800": true,
+ "B921600": true,
+ "B9600": true,
+ "BASE_PROTOCOL": true,
+ "BIOCFEEDBACK": true,
+ "BIOCFLUSH": true,
+ "BIOCGBLEN": true,
+ "BIOCGDIRECTION": true,
+ "BIOCGDIRFILT": true,
+ "BIOCGDLT": true,
+ "BIOCGDLTLIST": true,
+ "BIOCGETBUFMODE": true,
+ "BIOCGETIF": true,
+ "BIOCGETZMAX": true,
+ "BIOCGFEEDBACK": true,
+ "BIOCGFILDROP": true,
+ "BIOCGHDRCMPLT": true,
+ "BIOCGRSIG": true,
+ "BIOCGRTIMEOUT": true,
+ "BIOCGSEESENT": true,
+ "BIOCGSTATS": true,
+ "BIOCGSTATSOLD": true,
+ "BIOCGTSTAMP": true,
+ "BIOCIMMEDIATE": true,
+ "BIOCLOCK": true,
+ "BIOCPROMISC": true,
+ "BIOCROTZBUF": true,
+ "BIOCSBLEN": true,
+ "BIOCSDIRECTION": true,
+ "BIOCSDIRFILT": true,
+ "BIOCSDLT": true,
+ "BIOCSETBUFMODE": true,
+ "BIOCSETF": true,
+ "BIOCSETFNR": true,
+ "BIOCSETIF": true,
+ "BIOCSETWF": true,
+ "BIOCSETZBUF": true,
+ "BIOCSFEEDBACK": true,
+ "BIOCSFILDROP": true,
+ "BIOCSHDRCMPLT": true,
+ "BIOCSRSIG": true,
+ "BIOCSRTIMEOUT": true,
+ "BIOCSSEESENT": true,
+ "BIOCSTCPF": true,
+ "BIOCSTSTAMP": true,
+ "BIOCSUDPF": true,
+ "BIOCVERSION": true,
+ "BPF_A": true,
+ "BPF_ABS": true,
+ "BPF_ADD": true,
+ "BPF_ALIGNMENT": true,
+ "BPF_ALIGNMENT32": true,
+ "BPF_ALU": true,
+ "BPF_AND": true,
+ "BPF_B": true,
+ "BPF_BUFMODE_BUFFER": true,
+ "BPF_BUFMODE_ZBUF": true,
+ "BPF_DFLTBUFSIZE": true,
+ "BPF_DIRECTION_IN": true,
+ "BPF_DIRECTION_OUT": true,
+ "BPF_DIV": true,
+ "BPF_H": true,
+ "BPF_IMM": true,
+ "BPF_IND": true,
+ "BPF_JA": true,
+ "BPF_JEQ": true,
+ "BPF_JGE": true,
+ "BPF_JGT": true,
+ "BPF_JMP": true,
+ "BPF_JSET": true,
+ "BPF_K": true,
+ "BPF_LD": true,
+ "BPF_LDX": true,
+ "BPF_LEN": true,
+ "BPF_LSH": true,
+ "BPF_MAJOR_VERSION": true,
+ "BPF_MAXBUFSIZE": true,
+ "BPF_MAXINSNS": true,
+ "BPF_MEM": true,
+ "BPF_MEMWORDS": true,
+ "BPF_MINBUFSIZE": true,
+ "BPF_MINOR_VERSION": true,
+ "BPF_MISC": true,
+ "BPF_MSH": true,
+ "BPF_MUL": true,
+ "BPF_NEG": true,
+ "BPF_OR": true,
+ "BPF_RELEASE": true,
+ "BPF_RET": true,
+ "BPF_RSH": true,
+ "BPF_ST": true,
+ "BPF_STX": true,
+ "BPF_SUB": true,
+ "BPF_TAX": true,
+ "BPF_TXA": true,
+ "BPF_T_BINTIME": true,
+ "BPF_T_BINTIME_FAST": true,
+ "BPF_T_BINTIME_MONOTONIC": true,
+ "BPF_T_BINTIME_MONOTONIC_FAST": true,
+ "BPF_T_FAST": true,
+ "BPF_T_FLAG_MASK": true,
+ "BPF_T_FORMAT_MASK": true,
+ "BPF_T_MICROTIME": true,
+ "BPF_T_MICROTIME_FAST": true,
+ "BPF_T_MICROTIME_MONOTONIC": true,
+ "BPF_T_MICROTIME_MONOTONIC_FAST": true,
+ "BPF_T_MONOTONIC": true,
+ "BPF_T_MONOTONIC_FAST": true,
+ "BPF_T_NANOTIME": true,
+ "BPF_T_NANOTIME_FAST": true,
+ "BPF_T_NANOTIME_MONOTONIC": true,
+ "BPF_T_NANOTIME_MONOTONIC_FAST": true,
+ "BPF_T_NONE": true,
+ "BPF_T_NORMAL": true,
+ "BPF_W": true,
+ "BPF_X": true,
+ "BRKINT": true,
+ "Bind": true,
+ "BindToDevice": true,
+ "BpfBuflen": true,
+ "BpfDatalink": true,
+ "BpfHdr": true,
+ "BpfHeadercmpl": true,
+ "BpfInsn": true,
+ "BpfInterface": true,
+ "BpfJump": true,
+ "BpfProgram": true,
+ "BpfStat": true,
+ "BpfStats": true,
+ "BpfStmt": true,
+ "BpfTimeout": true,
+ "BpfTimeval": true,
+ "BpfVersion": true,
+ "BpfZbuf": true,
+ "BpfZbufHeader": true,
+ "ByHandleFileInformation": true,
+ "BytePtrFromString": true,
+ "ByteSliceFromString": true,
+ "CCR0_FLUSH": true,
+ "CERT_CHAIN_POLICY_AUTHENTICODE": true,
+ "CERT_CHAIN_POLICY_AUTHENTICODE_TS": true,
+ "CERT_CHAIN_POLICY_BASE": true,
+ "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": true,
+ "CERT_CHAIN_POLICY_EV": true,
+ "CERT_CHAIN_POLICY_MICROSOFT_ROOT": true,
+ "CERT_CHAIN_POLICY_NT_AUTH": true,
+ "CERT_CHAIN_POLICY_SSL": true,
+ "CERT_E_CN_NO_MATCH": true,
+ "CERT_E_EXPIRED": true,
+ "CERT_E_PURPOSE": true,
+ "CERT_E_ROLE": true,
+ "CERT_E_UNTRUSTEDROOT": true,
+ "CERT_STORE_ADD_ALWAYS": true,
+ "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": true,
+ "CERT_STORE_PROV_MEMORY": true,
+ "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": true,
+ "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": true,
+ "CERT_TRUST_INVALID_BASIC_CONSTRAINTS": true,
+ "CERT_TRUST_INVALID_EXTENSION": true,
+ "CERT_TRUST_INVALID_NAME_CONSTRAINTS": true,
+ "CERT_TRUST_INVALID_POLICY_CONSTRAINTS": true,
+ "CERT_TRUST_IS_CYCLIC": true,
+ "CERT_TRUST_IS_EXPLICIT_DISTRUST": true,
+ "CERT_TRUST_IS_NOT_SIGNATURE_VALID": true,
+ "CERT_TRUST_IS_NOT_TIME_VALID": true,
+ "CERT_TRUST_IS_NOT_VALID_FOR_USAGE": true,
+ "CERT_TRUST_IS_OFFLINE_REVOCATION": true,
+ "CERT_TRUST_IS_REVOKED": true,
+ "CERT_TRUST_IS_UNTRUSTED_ROOT": true,
+ "CERT_TRUST_NO_ERROR": true,
+ "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": true,
+ "CERT_TRUST_REVOCATION_STATUS_UNKNOWN": true,
+ "CFLUSH": true,
+ "CLOCAL": true,
+ "CLONE_CHILD_CLEARTID": true,
+ "CLONE_CHILD_SETTID": true,
+ "CLONE_CSIGNAL": true,
+ "CLONE_DETACHED": true,
+ "CLONE_FILES": true,
+ "CLONE_FS": true,
+ "CLONE_IO": true,
+ "CLONE_NEWIPC": true,
+ "CLONE_NEWNET": true,
+ "CLONE_NEWNS": true,
+ "CLONE_NEWPID": true,
+ "CLONE_NEWUSER": true,
+ "CLONE_NEWUTS": true,
+ "CLONE_PARENT": true,
+ "CLONE_PARENT_SETTID": true,
+ "CLONE_PID": true,
+ "CLONE_PTRACE": true,
+ "CLONE_SETTLS": true,
+ "CLONE_SIGHAND": true,
+ "CLONE_SYSVSEM": true,
+ "CLONE_THREAD": true,
+ "CLONE_UNTRACED": true,
+ "CLONE_VFORK": true,
+ "CLONE_VM": true,
+ "CPUID_CFLUSH": true,
+ "CREAD": true,
+ "CREATE_ALWAYS": true,
+ "CREATE_NEW": true,
+ "CREATE_NEW_PROCESS_GROUP": true,
+ "CREATE_UNICODE_ENVIRONMENT": true,
+ "CRYPT_DEFAULT_CONTAINER_OPTIONAL": true,
+ "CRYPT_DELETEKEYSET": true,
+ "CRYPT_MACHINE_KEYSET": true,
+ "CRYPT_NEWKEYSET": true,
+ "CRYPT_SILENT": true,
+ "CRYPT_VERIFYCONTEXT": true,
+ "CS5": true,
+ "CS6": true,
+ "CS7": true,
+ "CS8": true,
+ "CSIZE": true,
+ "CSTART": true,
+ "CSTATUS": true,
+ "CSTOP": true,
+ "CSTOPB": true,
+ "CSUSP": true,
+ "CTL_MAXNAME": true,
+ "CTL_NET": true,
+ "CTL_QUERY": true,
+ "CTRL_BREAK_EVENT": true,
+ "CTRL_C_EVENT": true,
+ "CancelIo": true,
+ "CancelIoEx": true,
+ "CertAddCertificateContextToStore": true,
+ "CertChainContext": true,
+ "CertChainElement": true,
+ "CertChainPara": true,
+ "CertChainPolicyPara": true,
+ "CertChainPolicyStatus": true,
+ "CertCloseStore": true,
+ "CertContext": true,
+ "CertCreateCertificateContext": true,
+ "CertEnhKeyUsage": true,
+ "CertEnumCertificatesInStore": true,
+ "CertFreeCertificateChain": true,
+ "CertFreeCertificateContext": true,
+ "CertGetCertificateChain": true,
+ "CertInfo": true,
+ "CertOpenStore": true,
+ "CertOpenSystemStore": true,
+ "CertRevocationCrlInfo": true,
+ "CertRevocationInfo": true,
+ "CertSimpleChain": true,
+ "CertTrustListInfo": true,
+ "CertTrustStatus": true,
+ "CertUsageMatch": true,
+ "CertVerifyCertificateChainPolicy": true,
+ "Chdir": true,
+ "CheckBpfVersion": true,
+ "Chflags": true,
+ "Chmod": true,
+ "Chown": true,
+ "Chroot": true,
+ "Clearenv": true,
+ "Close": true,
+ "CloseHandle": true,
+ "CloseOnExec": true,
+ "Closesocket": true,
+ "CmsgLen": true,
+ "CmsgSpace": true,
+ "Cmsghdr": true,
+ "CommandLineToArgv": true,
+ "ComputerName": true,
+ "Conn": true,
+ "Connect": true,
+ "ConnectEx": true,
+ "ConvertSidToStringSid": true,
+ "ConvertStringSidToSid": true,
+ "CopySid": true,
+ "Creat": true,
+ "CreateDirectory": true,
+ "CreateFile": true,
+ "CreateFileMapping": true,
+ "CreateHardLink": true,
+ "CreateIoCompletionPort": true,
+ "CreatePipe": true,
+ "CreateProcess": true,
+ "CreateProcessAsUser": true,
+ "CreateSymbolicLink": true,
+ "CreateToolhelp32Snapshot": true,
+ "Credential": true,
+ "CryptAcquireContext": true,
+ "CryptGenRandom": true,
+ "CryptReleaseContext": true,
+ "DIOCBSFLUSH": true,
+ "DIOCOSFPFLUSH": true,
+ "DLL": true,
+ "DLLError": true,
+ "DLT_A429": true,
+ "DLT_A653_ICM": true,
+ "DLT_AIRONET_HEADER": true,
+ "DLT_AOS": true,
+ "DLT_APPLE_IP_OVER_IEEE1394": true,
+ "DLT_ARCNET": true,
+ "DLT_ARCNET_LINUX": true,
+ "DLT_ATM_CLIP": true,
+ "DLT_ATM_RFC1483": true,
+ "DLT_AURORA": true,
+ "DLT_AX25": true,
+ "DLT_AX25_KISS": true,
+ "DLT_BACNET_MS_TP": true,
+ "DLT_BLUETOOTH_HCI_H4": true,
+ "DLT_BLUETOOTH_HCI_H4_WITH_PHDR": true,
+ "DLT_CAN20B": true,
+ "DLT_CAN_SOCKETCAN": true,
+ "DLT_CHAOS": true,
+ "DLT_CHDLC": true,
+ "DLT_CISCO_IOS": true,
+ "DLT_C_HDLC": true,
+ "DLT_C_HDLC_WITH_DIR": true,
+ "DLT_DBUS": true,
+ "DLT_DECT": true,
+ "DLT_DOCSIS": true,
+ "DLT_DVB_CI": true,
+ "DLT_ECONET": true,
+ "DLT_EN10MB": true,
+ "DLT_EN3MB": true,
+ "DLT_ENC": true,
+ "DLT_ERF": true,
+ "DLT_ERF_ETH": true,
+ "DLT_ERF_POS": true,
+ "DLT_FC_2": true,
+ "DLT_FC_2_WITH_FRAME_DELIMS": true,
+ "DLT_FDDI": true,
+ "DLT_FLEXRAY": true,
+ "DLT_FRELAY": true,
+ "DLT_FRELAY_WITH_DIR": true,
+ "DLT_GCOM_SERIAL": true,
+ "DLT_GCOM_T1E1": true,
+ "DLT_GPF_F": true,
+ "DLT_GPF_T": true,
+ "DLT_GPRS_LLC": true,
+ "DLT_GSMTAP_ABIS": true,
+ "DLT_GSMTAP_UM": true,
+ "DLT_HDLC": true,
+ "DLT_HHDLC": true,
+ "DLT_HIPPI": true,
+ "DLT_IBM_SN": true,
+ "DLT_IBM_SP": true,
+ "DLT_IEEE802": true,
+ "DLT_IEEE802_11": true,
+ "DLT_IEEE802_11_RADIO": true,
+ "DLT_IEEE802_11_RADIO_AVS": true,
+ "DLT_IEEE802_15_4": true,
+ "DLT_IEEE802_15_4_LINUX": true,
+ "DLT_IEEE802_15_4_NOFCS": true,
+ "DLT_IEEE802_15_4_NONASK_PHY": true,
+ "DLT_IEEE802_16_MAC_CPS": true,
+ "DLT_IEEE802_16_MAC_CPS_RADIO": true,
+ "DLT_IPFILTER": true,
+ "DLT_IPMB": true,
+ "DLT_IPMB_LINUX": true,
+ "DLT_IPNET": true,
+ "DLT_IPOIB": true,
+ "DLT_IPV4": true,
+ "DLT_IPV6": true,
+ "DLT_IP_OVER_FC": true,
+ "DLT_JUNIPER_ATM1": true,
+ "DLT_JUNIPER_ATM2": true,
+ "DLT_JUNIPER_ATM_CEMIC": true,
+ "DLT_JUNIPER_CHDLC": true,
+ "DLT_JUNIPER_ES": true,
+ "DLT_JUNIPER_ETHER": true,
+ "DLT_JUNIPER_FIBRECHANNEL": true,
+ "DLT_JUNIPER_FRELAY": true,
+ "DLT_JUNIPER_GGSN": true,
+ "DLT_JUNIPER_ISM": true,
+ "DLT_JUNIPER_MFR": true,
+ "DLT_JUNIPER_MLFR": true,
+ "DLT_JUNIPER_MLPPP": true,
+ "DLT_JUNIPER_MONITOR": true,
+ "DLT_JUNIPER_PIC_PEER": true,
+ "DLT_JUNIPER_PPP": true,
+ "DLT_JUNIPER_PPPOE": true,
+ "DLT_JUNIPER_PPPOE_ATM": true,
+ "DLT_JUNIPER_SERVICES": true,
+ "DLT_JUNIPER_SRX_E2E": true,
+ "DLT_JUNIPER_ST": true,
+ "DLT_JUNIPER_VP": true,
+ "DLT_JUNIPER_VS": true,
+ "DLT_LAPB_WITH_DIR": true,
+ "DLT_LAPD": true,
+ "DLT_LIN": true,
+ "DLT_LINUX_EVDEV": true,
+ "DLT_LINUX_IRDA": true,
+ "DLT_LINUX_LAPD": true,
+ "DLT_LINUX_PPP_WITHDIRECTION": true,
+ "DLT_LINUX_SLL": true,
+ "DLT_LOOP": true,
+ "DLT_LTALK": true,
+ "DLT_MATCHING_MAX": true,
+ "DLT_MATCHING_MIN": true,
+ "DLT_MFR": true,
+ "DLT_MOST": true,
+ "DLT_MPEG_2_TS": true,
+ "DLT_MPLS": true,
+ "DLT_MTP2": true,
+ "DLT_MTP2_WITH_PHDR": true,
+ "DLT_MTP3": true,
+ "DLT_MUX27010": true,
+ "DLT_NETANALYZER": true,
+ "DLT_NETANALYZER_TRANSPARENT": true,
+ "DLT_NFC_LLCP": true,
+ "DLT_NFLOG": true,
+ "DLT_NG40": true,
+ "DLT_NULL": true,
+ "DLT_PCI_EXP": true,
+ "DLT_PFLOG": true,
+ "DLT_PFSYNC": true,
+ "DLT_PPI": true,
+ "DLT_PPP": true,
+ "DLT_PPP_BSDOS": true,
+ "DLT_PPP_ETHER": true,
+ "DLT_PPP_PPPD": true,
+ "DLT_PPP_SERIAL": true,
+ "DLT_PPP_WITH_DIR": true,
+ "DLT_PPP_WITH_DIRECTION": true,
+ "DLT_PRISM_HEADER": true,
+ "DLT_PRONET": true,
+ "DLT_RAIF1": true,
+ "DLT_RAW": true,
+ "DLT_RAWAF_MASK": true,
+ "DLT_RIO": true,
+ "DLT_SCCP": true,
+ "DLT_SITA": true,
+ "DLT_SLIP": true,
+ "DLT_SLIP_BSDOS": true,
+ "DLT_STANAG_5066_D_PDU": true,
+ "DLT_SUNATM": true,
+ "DLT_SYMANTEC_FIREWALL": true,
+ "DLT_TZSP": true,
+ "DLT_USB": true,
+ "DLT_USB_LINUX": true,
+ "DLT_USB_LINUX_MMAPPED": true,
+ "DLT_USER0": true,
+ "DLT_USER1": true,
+ "DLT_USER10": true,
+ "DLT_USER11": true,
+ "DLT_USER12": true,
+ "DLT_USER13": true,
+ "DLT_USER14": true,
+ "DLT_USER15": true,
+ "DLT_USER2": true,
+ "DLT_USER3": true,
+ "DLT_USER4": true,
+ "DLT_USER5": true,
+ "DLT_USER6": true,
+ "DLT_USER7": true,
+ "DLT_USER8": true,
+ "DLT_USER9": true,
+ "DLT_WIHART": true,
+ "DLT_X2E_SERIAL": true,
+ "DLT_X2E_XORAYA": true,
+ "DNSMXData": true,
+ "DNSPTRData": true,
+ "DNSRecord": true,
+ "DNSSRVData": true,
+ "DNSTXTData": true,
+ "DNS_INFO_NO_RECORDS": true,
+ "DNS_TYPE_A": true,
+ "DNS_TYPE_A6": true,
+ "DNS_TYPE_AAAA": true,
+ "DNS_TYPE_ADDRS": true,
+ "DNS_TYPE_AFSDB": true,
+ "DNS_TYPE_ALL": true,
+ "DNS_TYPE_ANY": true,
+ "DNS_TYPE_ATMA": true,
+ "DNS_TYPE_AXFR": true,
+ "DNS_TYPE_CERT": true,
+ "DNS_TYPE_CNAME": true,
+ "DNS_TYPE_DHCID": true,
+ "DNS_TYPE_DNAME": true,
+ "DNS_TYPE_DNSKEY": true,
+ "DNS_TYPE_DS": true,
+ "DNS_TYPE_EID": true,
+ "DNS_TYPE_GID": true,
+ "DNS_TYPE_GPOS": true,
+ "DNS_TYPE_HINFO": true,
+ "DNS_TYPE_ISDN": true,
+ "DNS_TYPE_IXFR": true,
+ "DNS_TYPE_KEY": true,
+ "DNS_TYPE_KX": true,
+ "DNS_TYPE_LOC": true,
+ "DNS_TYPE_MAILA": true,
+ "DNS_TYPE_MAILB": true,
+ "DNS_TYPE_MB": true,
+ "DNS_TYPE_MD": true,
+ "DNS_TYPE_MF": true,
+ "DNS_TYPE_MG": true,
+ "DNS_TYPE_MINFO": true,
+ "DNS_TYPE_MR": true,
+ "DNS_TYPE_MX": true,
+ "DNS_TYPE_NAPTR": true,
+ "DNS_TYPE_NBSTAT": true,
+ "DNS_TYPE_NIMLOC": true,
+ "DNS_TYPE_NS": true,
+ "DNS_TYPE_NSAP": true,
+ "DNS_TYPE_NSAPPTR": true,
+ "DNS_TYPE_NSEC": true,
+ "DNS_TYPE_NULL": true,
+ "DNS_TYPE_NXT": true,
+ "DNS_TYPE_OPT": true,
+ "DNS_TYPE_PTR": true,
+ "DNS_TYPE_PX": true,
+ "DNS_TYPE_RP": true,
+ "DNS_TYPE_RRSIG": true,
+ "DNS_TYPE_RT": true,
+ "DNS_TYPE_SIG": true,
+ "DNS_TYPE_SINK": true,
+ "DNS_TYPE_SOA": true,
+ "DNS_TYPE_SRV": true,
+ "DNS_TYPE_TEXT": true,
+ "DNS_TYPE_TKEY": true,
+ "DNS_TYPE_TSIG": true,
+ "DNS_TYPE_UID": true,
+ "DNS_TYPE_UINFO": true,
+ "DNS_TYPE_UNSPEC": true,
+ "DNS_TYPE_WINS": true,
+ "DNS_TYPE_WINSR": true,
+ "DNS_TYPE_WKS": true,
+ "DNS_TYPE_X25": true,
+ "DT_BLK": true,
+ "DT_CHR": true,
+ "DT_DIR": true,
+ "DT_FIFO": true,
+ "DT_LNK": true,
+ "DT_REG": true,
+ "DT_SOCK": true,
+ "DT_UNKNOWN": true,
+ "DT_WHT": true,
+ "DUPLICATE_CLOSE_SOURCE": true,
+ "DUPLICATE_SAME_ACCESS": true,
+ "DeleteFile": true,
+ "DetachLsf": true,
+ "DeviceIoControl": true,
+ "Dirent": true,
+ "DnsNameCompare": true,
+ "DnsQuery": true,
+ "DnsRecordListFree": true,
+ "DnsSectionAdditional": true,
+ "DnsSectionAnswer": true,
+ "DnsSectionAuthority": true,
+ "DnsSectionQuestion": true,
+ "Dup": true,
+ "Dup2": true,
+ "Dup3": true,
+ "DuplicateHandle": true,
+ "E2BIG": true,
+ "EACCES": true,
+ "EADDRINUSE": true,
+ "EADDRNOTAVAIL": true,
+ "EADV": true,
+ "EAFNOSUPPORT": true,
+ "EAGAIN": true,
+ "EALREADY": true,
+ "EAUTH": true,
+ "EBADARCH": true,
+ "EBADE": true,
+ "EBADEXEC": true,
+ "EBADF": true,
+ "EBADFD": true,
+ "EBADMACHO": true,
+ "EBADMSG": true,
+ "EBADR": true,
+ "EBADRPC": true,
+ "EBADRQC": true,
+ "EBADSLT": true,
+ "EBFONT": true,
+ "EBUSY": true,
+ "ECANCELED": true,
+ "ECAPMODE": true,
+ "ECHILD": true,
+ "ECHO": true,
+ "ECHOCTL": true,
+ "ECHOE": true,
+ "ECHOK": true,
+ "ECHOKE": true,
+ "ECHONL": true,
+ "ECHOPRT": true,
+ "ECHRNG": true,
+ "ECOMM": true,
+ "ECONNABORTED": true,
+ "ECONNREFUSED": true,
+ "ECONNRESET": true,
+ "EDEADLK": true,
+ "EDEADLOCK": true,
+ "EDESTADDRREQ": true,
+ "EDEVERR": true,
+ "EDOM": true,
+ "EDOOFUS": true,
+ "EDOTDOT": true,
+ "EDQUOT": true,
+ "EEXIST": true,
+ "EFAULT": true,
+ "EFBIG": true,
+ "EFER_LMA": true,
+ "EFER_LME": true,
+ "EFER_NXE": true,
+ "EFER_SCE": true,
+ "EFTYPE": true,
+ "EHOSTDOWN": true,
+ "EHOSTUNREACH": true,
+ "EHWPOISON": true,
+ "EIDRM": true,
+ "EILSEQ": true,
+ "EINPROGRESS": true,
+ "EINTR": true,
+ "EINVAL": true,
+ "EIO": true,
+ "EIPSEC": true,
+ "EISCONN": true,
+ "EISDIR": true,
+ "EISNAM": true,
+ "EKEYEXPIRED": true,
+ "EKEYREJECTED": true,
+ "EKEYREVOKED": true,
+ "EL2HLT": true,
+ "EL2NSYNC": true,
+ "EL3HLT": true,
+ "EL3RST": true,
+ "ELAST": true,
+ "ELF_NGREG": true,
+ "ELF_PRARGSZ": true,
+ "ELIBACC": true,
+ "ELIBBAD": true,
+ "ELIBEXEC": true,
+ "ELIBMAX": true,
+ "ELIBSCN": true,
+ "ELNRNG": true,
+ "ELOOP": true,
+ "EMEDIUMTYPE": true,
+ "EMFILE": true,
+ "EMLINK": true,
+ "EMSGSIZE": true,
+ "EMT_TAGOVF": true,
+ "EMULTIHOP": true,
+ "EMUL_ENABLED": true,
+ "EMUL_LINUX": true,
+ "EMUL_LINUX32": true,
+ "EMUL_MAXID": true,
+ "EMUL_NATIVE": true,
+ "ENAMETOOLONG": true,
+ "ENAVAIL": true,
+ "ENDRUNDISC": true,
+ "ENEEDAUTH": true,
+ "ENETDOWN": true,
+ "ENETRESET": true,
+ "ENETUNREACH": true,
+ "ENFILE": true,
+ "ENOANO": true,
+ "ENOATTR": true,
+ "ENOBUFS": true,
+ "ENOCSI": true,
+ "ENODATA": true,
+ "ENODEV": true,
+ "ENOENT": true,
+ "ENOEXEC": true,
+ "ENOKEY": true,
+ "ENOLCK": true,
+ "ENOLINK": true,
+ "ENOMEDIUM": true,
+ "ENOMEM": true,
+ "ENOMSG": true,
+ "ENONET": true,
+ "ENOPKG": true,
+ "ENOPOLICY": true,
+ "ENOPROTOOPT": true,
+ "ENOSPC": true,
+ "ENOSR": true,
+ "ENOSTR": true,
+ "ENOSYS": true,
+ "ENOTBLK": true,
+ "ENOTCAPABLE": true,
+ "ENOTCONN": true,
+ "ENOTDIR": true,
+ "ENOTEMPTY": true,
+ "ENOTNAM": true,
+ "ENOTRECOVERABLE": true,
+ "ENOTSOCK": true,
+ "ENOTSUP": true,
+ "ENOTTY": true,
+ "ENOTUNIQ": true,
+ "ENXIO": true,
+ "EN_SW_CTL_INF": true,
+ "EN_SW_CTL_PREC": true,
+ "EN_SW_CTL_ROUND": true,
+ "EN_SW_DATACHAIN": true,
+ "EN_SW_DENORM": true,
+ "EN_SW_INVOP": true,
+ "EN_SW_OVERFLOW": true,
+ "EN_SW_PRECLOSS": true,
+ "EN_SW_UNDERFLOW": true,
+ "EN_SW_ZERODIV": true,
+ "EOPNOTSUPP": true,
+ "EOVERFLOW": true,
+ "EOWNERDEAD": true,
+ "EPERM": true,
+ "EPFNOSUPPORT": true,
+ "EPIPE": true,
+ "EPOLLERR": true,
+ "EPOLLET": true,
+ "EPOLLHUP": true,
+ "EPOLLIN": true,
+ "EPOLLMSG": true,
+ "EPOLLONESHOT": true,
+ "EPOLLOUT": true,
+ "EPOLLPRI": true,
+ "EPOLLRDBAND": true,
+ "EPOLLRDHUP": true,
+ "EPOLLRDNORM": true,
+ "EPOLLWRBAND": true,
+ "EPOLLWRNORM": true,
+ "EPOLL_CLOEXEC": true,
+ "EPOLL_CTL_ADD": true,
+ "EPOLL_CTL_DEL": true,
+ "EPOLL_CTL_MOD": true,
+ "EPOLL_NONBLOCK": true,
+ "EPROCLIM": true,
+ "EPROCUNAVAIL": true,
+ "EPROGMISMATCH": true,
+ "EPROGUNAVAIL": true,
+ "EPROTO": true,
+ "EPROTONOSUPPORT": true,
+ "EPROTOTYPE": true,
+ "EPWROFF": true,
+ "ERANGE": true,
+ "EREMCHG": true,
+ "EREMOTE": true,
+ "EREMOTEIO": true,
+ "ERESTART": true,
+ "ERFKILL": true,
+ "EROFS": true,
+ "ERPCMISMATCH": true,
+ "ERROR_ACCESS_DENIED": true,
+ "ERROR_ALREADY_EXISTS": true,
+ "ERROR_BROKEN_PIPE": true,
+ "ERROR_BUFFER_OVERFLOW": true,
+ "ERROR_DIR_NOT_EMPTY": true,
+ "ERROR_ENVVAR_NOT_FOUND": true,
+ "ERROR_FILE_EXISTS": true,
+ "ERROR_FILE_NOT_FOUND": true,
+ "ERROR_HANDLE_EOF": true,
+ "ERROR_INSUFFICIENT_BUFFER": true,
+ "ERROR_IO_PENDING": true,
+ "ERROR_MOD_NOT_FOUND": true,
+ "ERROR_MORE_DATA": true,
+ "ERROR_NETNAME_DELETED": true,
+ "ERROR_NOT_FOUND": true,
+ "ERROR_NO_MORE_FILES": true,
+ "ERROR_OPERATION_ABORTED": true,
+ "ERROR_PATH_NOT_FOUND": true,
+ "ERROR_PRIVILEGE_NOT_HELD": true,
+ "ERROR_PROC_NOT_FOUND": true,
+ "ESHLIBVERS": true,
+ "ESHUTDOWN": true,
+ "ESOCKTNOSUPPORT": true,
+ "ESPIPE": true,
+ "ESRCH": true,
+ "ESRMNT": true,
+ "ESTALE": true,
+ "ESTRPIPE": true,
+ "ETHERCAP_JUMBO_MTU": true,
+ "ETHERCAP_VLAN_HWTAGGING": true,
+ "ETHERCAP_VLAN_MTU": true,
+ "ETHERMIN": true,
+ "ETHERMTU": true,
+ "ETHERMTU_JUMBO": true,
+ "ETHERTYPE_8023": true,
+ "ETHERTYPE_AARP": true,
+ "ETHERTYPE_ACCTON": true,
+ "ETHERTYPE_AEONIC": true,
+ "ETHERTYPE_ALPHA": true,
+ "ETHERTYPE_AMBER": true,
+ "ETHERTYPE_AMOEBA": true,
+ "ETHERTYPE_AOE": true,
+ "ETHERTYPE_APOLLO": true,
+ "ETHERTYPE_APOLLODOMAIN": true,
+ "ETHERTYPE_APPLETALK": true,
+ "ETHERTYPE_APPLITEK": true,
+ "ETHERTYPE_ARGONAUT": true,
+ "ETHERTYPE_ARP": true,
+ "ETHERTYPE_AT": true,
+ "ETHERTYPE_ATALK": true,
+ "ETHERTYPE_ATOMIC": true,
+ "ETHERTYPE_ATT": true,
+ "ETHERTYPE_ATTSTANFORD": true,
+ "ETHERTYPE_AUTOPHON": true,
+ "ETHERTYPE_AXIS": true,
+ "ETHERTYPE_BCLOOP": true,
+ "ETHERTYPE_BOFL": true,
+ "ETHERTYPE_CABLETRON": true,
+ "ETHERTYPE_CHAOS": true,
+ "ETHERTYPE_COMDESIGN": true,
+ "ETHERTYPE_COMPUGRAPHIC": true,
+ "ETHERTYPE_COUNTERPOINT": true,
+ "ETHERTYPE_CRONUS": true,
+ "ETHERTYPE_CRONUSVLN": true,
+ "ETHERTYPE_DCA": true,
+ "ETHERTYPE_DDE": true,
+ "ETHERTYPE_DEBNI": true,
+ "ETHERTYPE_DECAM": true,
+ "ETHERTYPE_DECCUST": true,
+ "ETHERTYPE_DECDIAG": true,
+ "ETHERTYPE_DECDNS": true,
+ "ETHERTYPE_DECDTS": true,
+ "ETHERTYPE_DECEXPER": true,
+ "ETHERTYPE_DECLAST": true,
+ "ETHERTYPE_DECLTM": true,
+ "ETHERTYPE_DECMUMPS": true,
+ "ETHERTYPE_DECNETBIOS": true,
+ "ETHERTYPE_DELTACON": true,
+ "ETHERTYPE_DIDDLE": true,
+ "ETHERTYPE_DLOG1": true,
+ "ETHERTYPE_DLOG2": true,
+ "ETHERTYPE_DN": true,
+ "ETHERTYPE_DOGFIGHT": true,
+ "ETHERTYPE_DSMD": true,
+ "ETHERTYPE_ECMA": true,
+ "ETHERTYPE_ENCRYPT": true,
+ "ETHERTYPE_ES": true,
+ "ETHERTYPE_EXCELAN": true,
+ "ETHERTYPE_EXPERDATA": true,
+ "ETHERTYPE_FLIP": true,
+ "ETHERTYPE_FLOWCONTROL": true,
+ "ETHERTYPE_FRARP": true,
+ "ETHERTYPE_GENDYN": true,
+ "ETHERTYPE_HAYES": true,
+ "ETHERTYPE_HIPPI_FP": true,
+ "ETHERTYPE_HITACHI": true,
+ "ETHERTYPE_HP": true,
+ "ETHERTYPE_IEEEPUP": true,
+ "ETHERTYPE_IEEEPUPAT": true,
+ "ETHERTYPE_IMLBL": true,
+ "ETHERTYPE_IMLBLDIAG": true,
+ "ETHERTYPE_IP": true,
+ "ETHERTYPE_IPAS": true,
+ "ETHERTYPE_IPV6": true,
+ "ETHERTYPE_IPX": true,
+ "ETHERTYPE_IPXNEW": true,
+ "ETHERTYPE_KALPANA": true,
+ "ETHERTYPE_LANBRIDGE": true,
+ "ETHERTYPE_LANPROBE": true,
+ "ETHERTYPE_LAT": true,
+ "ETHERTYPE_LBACK": true,
+ "ETHERTYPE_LITTLE": true,
+ "ETHERTYPE_LLDP": true,
+ "ETHERTYPE_LOGICRAFT": true,
+ "ETHERTYPE_LOOPBACK": true,
+ "ETHERTYPE_MATRA": true,
+ "ETHERTYPE_MAX": true,
+ "ETHERTYPE_MERIT": true,
+ "ETHERTYPE_MICP": true,
+ "ETHERTYPE_MOPDL": true,
+ "ETHERTYPE_MOPRC": true,
+ "ETHERTYPE_MOTOROLA": true,
+ "ETHERTYPE_MPLS": true,
+ "ETHERTYPE_MPLS_MCAST": true,
+ "ETHERTYPE_MUMPS": true,
+ "ETHERTYPE_NBPCC": true,
+ "ETHERTYPE_NBPCLAIM": true,
+ "ETHERTYPE_NBPCLREQ": true,
+ "ETHERTYPE_NBPCLRSP": true,
+ "ETHERTYPE_NBPCREQ": true,
+ "ETHERTYPE_NBPCRSP": true,
+ "ETHERTYPE_NBPDG": true,
+ "ETHERTYPE_NBPDGB": true,
+ "ETHERTYPE_NBPDLTE": true,
+ "ETHERTYPE_NBPRAR": true,
+ "ETHERTYPE_NBPRAS": true,
+ "ETHERTYPE_NBPRST": true,
+ "ETHERTYPE_NBPSCD": true,
+ "ETHERTYPE_NBPVCD": true,
+ "ETHERTYPE_NBS": true,
+ "ETHERTYPE_NCD": true,
+ "ETHERTYPE_NESTAR": true,
+ "ETHERTYPE_NETBEUI": true,
+ "ETHERTYPE_NOVELL": true,
+ "ETHERTYPE_NS": true,
+ "ETHERTYPE_NSAT": true,
+ "ETHERTYPE_NSCOMPAT": true,
+ "ETHERTYPE_NTRAILER": true,
+ "ETHERTYPE_OS9": true,
+ "ETHERTYPE_OS9NET": true,
+ "ETHERTYPE_PACER": true,
+ "ETHERTYPE_PAE": true,
+ "ETHERTYPE_PCS": true,
+ "ETHERTYPE_PLANNING": true,
+ "ETHERTYPE_PPP": true,
+ "ETHERTYPE_PPPOE": true,
+ "ETHERTYPE_PPPOEDISC": true,
+ "ETHERTYPE_PRIMENTS": true,
+ "ETHERTYPE_PUP": true,
+ "ETHERTYPE_PUPAT": true,
+ "ETHERTYPE_QINQ": true,
+ "ETHERTYPE_RACAL": true,
+ "ETHERTYPE_RATIONAL": true,
+ "ETHERTYPE_RAWFR": true,
+ "ETHERTYPE_RCL": true,
+ "ETHERTYPE_RDP": true,
+ "ETHERTYPE_RETIX": true,
+ "ETHERTYPE_REVARP": true,
+ "ETHERTYPE_SCA": true,
+ "ETHERTYPE_SECTRA": true,
+ "ETHERTYPE_SECUREDATA": true,
+ "ETHERTYPE_SGITW": true,
+ "ETHERTYPE_SG_BOUNCE": true,
+ "ETHERTYPE_SG_DIAG": true,
+ "ETHERTYPE_SG_NETGAMES": true,
+ "ETHERTYPE_SG_RESV": true,
+ "ETHERTYPE_SIMNET": true,
+ "ETHERTYPE_SLOW": true,
+ "ETHERTYPE_SLOWPROTOCOLS": true,
+ "ETHERTYPE_SNA": true,
+ "ETHERTYPE_SNMP": true,
+ "ETHERTYPE_SONIX": true,
+ "ETHERTYPE_SPIDER": true,
+ "ETHERTYPE_SPRITE": true,
+ "ETHERTYPE_STP": true,
+ "ETHERTYPE_TALARIS": true,
+ "ETHERTYPE_TALARISMC": true,
+ "ETHERTYPE_TCPCOMP": true,
+ "ETHERTYPE_TCPSM": true,
+ "ETHERTYPE_TEC": true,
+ "ETHERTYPE_TIGAN": true,
+ "ETHERTYPE_TRAIL": true,
+ "ETHERTYPE_TRANSETHER": true,
+ "ETHERTYPE_TYMSHARE": true,
+ "ETHERTYPE_UBBST": true,
+ "ETHERTYPE_UBDEBUG": true,
+ "ETHERTYPE_UBDIAGLOOP": true,
+ "ETHERTYPE_UBDL": true,
+ "ETHERTYPE_UBNIU": true,
+ "ETHERTYPE_UBNMC": true,
+ "ETHERTYPE_VALID": true,
+ "ETHERTYPE_VARIAN": true,
+ "ETHERTYPE_VAXELN": true,
+ "ETHERTYPE_VEECO": true,
+ "ETHERTYPE_VEXP": true,
+ "ETHERTYPE_VGLAB": true,
+ "ETHERTYPE_VINES": true,
+ "ETHERTYPE_VINESECHO": true,
+ "ETHERTYPE_VINESLOOP": true,
+ "ETHERTYPE_VITAL": true,
+ "ETHERTYPE_VLAN": true,
+ "ETHERTYPE_VLTLMAN": true,
+ "ETHERTYPE_VPROD": true,
+ "ETHERTYPE_VURESERVED": true,
+ "ETHERTYPE_WATERLOO": true,
+ "ETHERTYPE_WELLFLEET": true,
+ "ETHERTYPE_X25": true,
+ "ETHERTYPE_X75": true,
+ "ETHERTYPE_XNSSM": true,
+ "ETHERTYPE_XTP": true,
+ "ETHER_ADDR_LEN": true,
+ "ETHER_ALIGN": true,
+ "ETHER_CRC_LEN": true,
+ "ETHER_CRC_POLY_BE": true,
+ "ETHER_CRC_POLY_LE": true,
+ "ETHER_HDR_LEN": true,
+ "ETHER_MAX_DIX_LEN": true,
+ "ETHER_MAX_LEN": true,
+ "ETHER_MAX_LEN_JUMBO": true,
+ "ETHER_MIN_LEN": true,
+ "ETHER_PPPOE_ENCAP_LEN": true,
+ "ETHER_TYPE_LEN": true,
+ "ETHER_VLAN_ENCAP_LEN": true,
+ "ETH_P_1588": true,
+ "ETH_P_8021Q": true,
+ "ETH_P_802_2": true,
+ "ETH_P_802_3": true,
+ "ETH_P_AARP": true,
+ "ETH_P_ALL": true,
+ "ETH_P_AOE": true,
+ "ETH_P_ARCNET": true,
+ "ETH_P_ARP": true,
+ "ETH_P_ATALK": true,
+ "ETH_P_ATMFATE": true,
+ "ETH_P_ATMMPOA": true,
+ "ETH_P_AX25": true,
+ "ETH_P_BPQ": true,
+ "ETH_P_CAIF": true,
+ "ETH_P_CAN": true,
+ "ETH_P_CONTROL": true,
+ "ETH_P_CUST": true,
+ "ETH_P_DDCMP": true,
+ "ETH_P_DEC": true,
+ "ETH_P_DIAG": true,
+ "ETH_P_DNA_DL": true,
+ "ETH_P_DNA_RC": true,
+ "ETH_P_DNA_RT": true,
+ "ETH_P_DSA": true,
+ "ETH_P_ECONET": true,
+ "ETH_P_EDSA": true,
+ "ETH_P_FCOE": true,
+ "ETH_P_FIP": true,
+ "ETH_P_HDLC": true,
+ "ETH_P_IEEE802154": true,
+ "ETH_P_IEEEPUP": true,
+ "ETH_P_IEEEPUPAT": true,
+ "ETH_P_IP": true,
+ "ETH_P_IPV6": true,
+ "ETH_P_IPX": true,
+ "ETH_P_IRDA": true,
+ "ETH_P_LAT": true,
+ "ETH_P_LINK_CTL": true,
+ "ETH_P_LOCALTALK": true,
+ "ETH_P_LOOP": true,
+ "ETH_P_MOBITEX": true,
+ "ETH_P_MPLS_MC": true,
+ "ETH_P_MPLS_UC": true,
+ "ETH_P_PAE": true,
+ "ETH_P_PAUSE": true,
+ "ETH_P_PHONET": true,
+ "ETH_P_PPPTALK": true,
+ "ETH_P_PPP_DISC": true,
+ "ETH_P_PPP_MP": true,
+ "ETH_P_PPP_SES": true,
+ "ETH_P_PUP": true,
+ "ETH_P_PUPAT": true,
+ "ETH_P_RARP": true,
+ "ETH_P_SCA": true,
+ "ETH_P_SLOW": true,
+ "ETH_P_SNAP": true,
+ "ETH_P_TEB": true,
+ "ETH_P_TIPC": true,
+ "ETH_P_TRAILER": true,
+ "ETH_P_TR_802_2": true,
+ "ETH_P_WAN_PPP": true,
+ "ETH_P_WCCP": true,
+ "ETH_P_X25": true,
+ "ETIME": true,
+ "ETIMEDOUT": true,
+ "ETOOMANYREFS": true,
+ "ETXTBSY": true,
+ "EUCLEAN": true,
+ "EUNATCH": true,
+ "EUSERS": true,
+ "EVFILT_AIO": true,
+ "EVFILT_FS": true,
+ "EVFILT_LIO": true,
+ "EVFILT_MACHPORT": true,
+ "EVFILT_PROC": true,
+ "EVFILT_READ": true,
+ "EVFILT_SIGNAL": true,
+ "EVFILT_SYSCOUNT": true,
+ "EVFILT_THREADMARKER": true,
+ "EVFILT_TIMER": true,
+ "EVFILT_USER": true,
+ "EVFILT_VM": true,
+ "EVFILT_VNODE": true,
+ "EVFILT_WRITE": true,
+ "EV_ADD": true,
+ "EV_CLEAR": true,
+ "EV_DELETE": true,
+ "EV_DISABLE": true,
+ "EV_DISPATCH": true,
+ "EV_DROP": true,
+ "EV_ENABLE": true,
+ "EV_EOF": true,
+ "EV_ERROR": true,
+ "EV_FLAG0": true,
+ "EV_FLAG1": true,
+ "EV_ONESHOT": true,
+ "EV_OOBAND": true,
+ "EV_POLL": true,
+ "EV_RECEIPT": true,
+ "EV_SYSFLAGS": true,
+ "EWINDOWS": true,
+ "EWOULDBLOCK": true,
+ "EXDEV": true,
+ "EXFULL": true,
+ "EXTA": true,
+ "EXTB": true,
+ "EXTPROC": true,
+ "Environ": true,
+ "EpollCreate": true,
+ "EpollCreate1": true,
+ "EpollCtl": true,
+ "EpollEvent": true,
+ "EpollWait": true,
+ "Errno": true,
+ "EscapeArg": true,
+ "Exchangedata": true,
+ "Exec": true,
+ "Exit": true,
+ "ExitProcess": true,
+ "FD_CLOEXEC": true,
+ "FD_SETSIZE": true,
+ "FILE_ACTION_ADDED": true,
+ "FILE_ACTION_MODIFIED": true,
+ "FILE_ACTION_REMOVED": true,
+ "FILE_ACTION_RENAMED_NEW_NAME": true,
+ "FILE_ACTION_RENAMED_OLD_NAME": true,
+ "FILE_APPEND_DATA": true,
+ "FILE_ATTRIBUTE_ARCHIVE": true,
+ "FILE_ATTRIBUTE_DIRECTORY": true,
+ "FILE_ATTRIBUTE_HIDDEN": true,
+ "FILE_ATTRIBUTE_NORMAL": true,
+ "FILE_ATTRIBUTE_READONLY": true,
+ "FILE_ATTRIBUTE_REPARSE_POINT": true,
+ "FILE_ATTRIBUTE_SYSTEM": true,
+ "FILE_BEGIN": true,
+ "FILE_CURRENT": true,
+ "FILE_END": true,
+ "FILE_FLAG_BACKUP_SEMANTICS": true,
+ "FILE_FLAG_OPEN_REPARSE_POINT": true,
+ "FILE_FLAG_OVERLAPPED": true,
+ "FILE_LIST_DIRECTORY": true,
+ "FILE_MAP_COPY": true,
+ "FILE_MAP_EXECUTE": true,
+ "FILE_MAP_READ": true,
+ "FILE_MAP_WRITE": true,
+ "FILE_NOTIFY_CHANGE_ATTRIBUTES": true,
+ "FILE_NOTIFY_CHANGE_CREATION": true,
+ "FILE_NOTIFY_CHANGE_DIR_NAME": true,
+ "FILE_NOTIFY_CHANGE_FILE_NAME": true,
+ "FILE_NOTIFY_CHANGE_LAST_ACCESS": true,
+ "FILE_NOTIFY_CHANGE_LAST_WRITE": true,
+ "FILE_NOTIFY_CHANGE_SIZE": true,
+ "FILE_SHARE_DELETE": true,
+ "FILE_SHARE_READ": true,
+ "FILE_SHARE_WRITE": true,
+ "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": true,
+ "FILE_SKIP_SET_EVENT_ON_HANDLE": true,
+ "FILE_TYPE_CHAR": true,
+ "FILE_TYPE_DISK": true,
+ "FILE_TYPE_PIPE": true,
+ "FILE_TYPE_REMOTE": true,
+ "FILE_TYPE_UNKNOWN": true,
+ "FILE_WRITE_ATTRIBUTES": true,
+ "FLUSHO": true,
+ "FORMAT_MESSAGE_ALLOCATE_BUFFER": true,
+ "FORMAT_MESSAGE_ARGUMENT_ARRAY": true,
+ "FORMAT_MESSAGE_FROM_HMODULE": true,
+ "FORMAT_MESSAGE_FROM_STRING": true,
+ "FORMAT_MESSAGE_FROM_SYSTEM": true,
+ "FORMAT_MESSAGE_IGNORE_INSERTS": true,
+ "FORMAT_MESSAGE_MAX_WIDTH_MASK": true,
+ "FSCTL_GET_REPARSE_POINT": true,
+ "F_ADDFILESIGS": true,
+ "F_ADDSIGS": true,
+ "F_ALLOCATEALL": true,
+ "F_ALLOCATECONTIG": true,
+ "F_CANCEL": true,
+ "F_CHKCLEAN": true,
+ "F_CLOSEM": true,
+ "F_DUP2FD": true,
+ "F_DUP2FD_CLOEXEC": true,
+ "F_DUPFD": true,
+ "F_DUPFD_CLOEXEC": true,
+ "F_EXLCK": true,
+ "F_FLUSH_DATA": true,
+ "F_FREEZE_FS": true,
+ "F_FSCTL": true,
+ "F_FSDIRMASK": true,
+ "F_FSIN": true,
+ "F_FSINOUT": true,
+ "F_FSOUT": true,
+ "F_FSPRIV": true,
+ "F_FSVOID": true,
+ "F_FULLFSYNC": true,
+ "F_GETFD": true,
+ "F_GETFL": true,
+ "F_GETLEASE": true,
+ "F_GETLK": true,
+ "F_GETLK64": true,
+ "F_GETLKPID": true,
+ "F_GETNOSIGPIPE": true,
+ "F_GETOWN": true,
+ "F_GETOWN_EX": true,
+ "F_GETPATH": true,
+ "F_GETPATH_MTMINFO": true,
+ "F_GETPIPE_SZ": true,
+ "F_GETPROTECTIONCLASS": true,
+ "F_GETSIG": true,
+ "F_GLOBAL_NOCACHE": true,
+ "F_LOCK": true,
+ "F_LOG2PHYS": true,
+ "F_LOG2PHYS_EXT": true,
+ "F_MARKDEPENDENCY": true,
+ "F_MAXFD": true,
+ "F_NOCACHE": true,
+ "F_NODIRECT": true,
+ "F_NOTIFY": true,
+ "F_OGETLK": true,
+ "F_OK": true,
+ "F_OSETLK": true,
+ "F_OSETLKW": true,
+ "F_PARAM_MASK": true,
+ "F_PARAM_MAX": true,
+ "F_PATHPKG_CHECK": true,
+ "F_PEOFPOSMODE": true,
+ "F_PREALLOCATE": true,
+ "F_RDADVISE": true,
+ "F_RDAHEAD": true,
+ "F_RDLCK": true,
+ "F_READAHEAD": true,
+ "F_READBOOTSTRAP": true,
+ "F_SETBACKINGSTORE": true,
+ "F_SETFD": true,
+ "F_SETFL": true,
+ "F_SETLEASE": true,
+ "F_SETLK": true,
+ "F_SETLK64": true,
+ "F_SETLKW": true,
+ "F_SETLKW64": true,
+ "F_SETLK_REMOTE": true,
+ "F_SETNOSIGPIPE": true,
+ "F_SETOWN": true,
+ "F_SETOWN_EX": true,
+ "F_SETPIPE_SZ": true,
+ "F_SETPROTECTIONCLASS": true,
+ "F_SETSIG": true,
+ "F_SETSIZE": true,
+ "F_SHLCK": true,
+ "F_TEST": true,
+ "F_THAW_FS": true,
+ "F_TLOCK": true,
+ "F_ULOCK": true,
+ "F_UNLCK": true,
+ "F_UNLCKSYS": true,
+ "F_VOLPOSMODE": true,
+ "F_WRITEBOOTSTRAP": true,
+ "F_WRLCK": true,
+ "Faccessat": true,
+ "Fallocate": true,
+ "Fbootstraptransfer_t": true,
+ "Fchdir": true,
+ "Fchflags": true,
+ "Fchmod": true,
+ "Fchmodat": true,
+ "Fchown": true,
+ "Fchownat": true,
+ "FcntlFlock": true,
+ "FdSet": true,
+ "Fdatasync": true,
+ "FileNotifyInformation": true,
+ "Filetime": true,
+ "FindClose": true,
+ "FindFirstFile": true,
+ "FindNextFile": true,
+ "Flock": true,
+ "Flock_t": true,
+ "FlushBpf": true,
+ "FlushFileBuffers": true,
+ "FlushViewOfFile": true,
+ "ForkExec": true,
+ "ForkLock": true,
+ "FormatMessage": true,
+ "Fpathconf": true,
+ "FreeAddrInfoW": true,
+ "FreeEnvironmentStrings": true,
+ "FreeLibrary": true,
+ "Fsid": true,
+ "Fstat": true,
+ "Fstatat": true,
+ "Fstatfs": true,
+ "Fstore_t": true,
+ "Fsync": true,
+ "Ftruncate": true,
+ "FullPath": true,
+ "Futimes": true,
+ "Futimesat": true,
+ "GENERIC_ALL": true,
+ "GENERIC_EXECUTE": true,
+ "GENERIC_READ": true,
+ "GENERIC_WRITE": true,
+ "GUID": true,
+ "GetAcceptExSockaddrs": true,
+ "GetAdaptersInfo": true,
+ "GetAddrInfoW": true,
+ "GetCommandLine": true,
+ "GetComputerName": true,
+ "GetConsoleMode": true,
+ "GetCurrentDirectory": true,
+ "GetCurrentProcess": true,
+ "GetEnvironmentStrings": true,
+ "GetEnvironmentVariable": true,
+ "GetExitCodeProcess": true,
+ "GetFileAttributes": true,
+ "GetFileAttributesEx": true,
+ "GetFileExInfoStandard": true,
+ "GetFileExMaxInfoLevel": true,
+ "GetFileInformationByHandle": true,
+ "GetFileType": true,
+ "GetFullPathName": true,
+ "GetHostByName": true,
+ "GetIfEntry": true,
+ "GetLastError": true,
+ "GetLengthSid": true,
+ "GetLongPathName": true,
+ "GetProcAddress": true,
+ "GetProcessTimes": true,
+ "GetProtoByName": true,
+ "GetQueuedCompletionStatus": true,
+ "GetServByName": true,
+ "GetShortPathName": true,
+ "GetStartupInfo": true,
+ "GetStdHandle": true,
+ "GetSystemTimeAsFileTime": true,
+ "GetTempPath": true,
+ "GetTimeZoneInformation": true,
+ "GetTokenInformation": true,
+ "GetUserNameEx": true,
+ "GetUserProfileDirectory": true,
+ "GetVersion": true,
+ "Getcwd": true,
+ "Getdents": true,
+ "Getdirentries": true,
+ "Getdtablesize": true,
+ "Getegid": true,
+ "Getenv": true,
+ "Geteuid": true,
+ "Getfsstat": true,
+ "Getgid": true,
+ "Getgroups": true,
+ "Getpagesize": true,
+ "Getpeername": true,
+ "Getpgid": true,
+ "Getpgrp": true,
+ "Getpid": true,
+ "Getppid": true,
+ "Getpriority": true,
+ "Getrlimit": true,
+ "Getrusage": true,
+ "Getsid": true,
+ "Getsockname": true,
+ "Getsockopt": true,
+ "GetsockoptByte": true,
+ "GetsockoptICMPv6Filter": true,
+ "GetsockoptIPMreq": true,
+ "GetsockoptIPMreqn": true,
+ "GetsockoptIPv6MTUInfo": true,
+ "GetsockoptIPv6Mreq": true,
+ "GetsockoptInet4Addr": true,
+ "GetsockoptInt": true,
+ "GetsockoptUcred": true,
+ "Gettid": true,
+ "Gettimeofday": true,
+ "Getuid": true,
+ "Getwd": true,
+ "Getxattr": true,
+ "HANDLE_FLAG_INHERIT": true,
+ "HKEY_CLASSES_ROOT": true,
+ "HKEY_CURRENT_CONFIG": true,
+ "HKEY_CURRENT_USER": true,
+ "HKEY_DYN_DATA": true,
+ "HKEY_LOCAL_MACHINE": true,
+ "HKEY_PERFORMANCE_DATA": true,
+ "HKEY_USERS": true,
+ "HUPCL": true,
+ "Handle": true,
+ "Hostent": true,
+ "ICANON": true,
+ "ICMP6_FILTER": true,
+ "ICMPV6_FILTER": true,
+ "ICMPv6Filter": true,
+ "ICRNL": true,
+ "IEXTEN": true,
+ "IFAN_ARRIVAL": true,
+ "IFAN_DEPARTURE": true,
+ "IFA_ADDRESS": true,
+ "IFA_ANYCAST": true,
+ "IFA_BROADCAST": true,
+ "IFA_CACHEINFO": true,
+ "IFA_F_DADFAILED": true,
+ "IFA_F_DEPRECATED": true,
+ "IFA_F_HOMEADDRESS": true,
+ "IFA_F_NODAD": true,
+ "IFA_F_OPTIMISTIC": true,
+ "IFA_F_PERMANENT": true,
+ "IFA_F_SECONDARY": true,
+ "IFA_F_TEMPORARY": true,
+ "IFA_F_TENTATIVE": true,
+ "IFA_LABEL": true,
+ "IFA_LOCAL": true,
+ "IFA_MAX": true,
+ "IFA_MULTICAST": true,
+ "IFA_ROUTE": true,
+ "IFA_UNSPEC": true,
+ "IFF_ALLMULTI": true,
+ "IFF_ALTPHYS": true,
+ "IFF_AUTOMEDIA": true,
+ "IFF_BROADCAST": true,
+ "IFF_CANTCHANGE": true,
+ "IFF_CANTCONFIG": true,
+ "IFF_DEBUG": true,
+ "IFF_DRV_OACTIVE": true,
+ "IFF_DRV_RUNNING": true,
+ "IFF_DYING": true,
+ "IFF_DYNAMIC": true,
+ "IFF_LINK0": true,
+ "IFF_LINK1": true,
+ "IFF_LINK2": true,
+ "IFF_LOOPBACK": true,
+ "IFF_MASTER": true,
+ "IFF_MONITOR": true,
+ "IFF_MULTICAST": true,
+ "IFF_NOARP": true,
+ "IFF_NOTRAILERS": true,
+ "IFF_NO_PI": true,
+ "IFF_OACTIVE": true,
+ "IFF_ONE_QUEUE": true,
+ "IFF_POINTOPOINT": true,
+ "IFF_POINTTOPOINT": true,
+ "IFF_PORTSEL": true,
+ "IFF_PPROMISC": true,
+ "IFF_PROMISC": true,
+ "IFF_RENAMING": true,
+ "IFF_RUNNING": true,
+ "IFF_SIMPLEX": true,
+ "IFF_SLAVE": true,
+ "IFF_SMART": true,
+ "IFF_STATICARP": true,
+ "IFF_TAP": true,
+ "IFF_TUN": true,
+ "IFF_TUN_EXCL": true,
+ "IFF_UP": true,
+ "IFF_VNET_HDR": true,
+ "IFLA_ADDRESS": true,
+ "IFLA_BROADCAST": true,
+ "IFLA_COST": true,
+ "IFLA_IFALIAS": true,
+ "IFLA_IFNAME": true,
+ "IFLA_LINK": true,
+ "IFLA_LINKINFO": true,
+ "IFLA_LINKMODE": true,
+ "IFLA_MAP": true,
+ "IFLA_MASTER": true,
+ "IFLA_MAX": true,
+ "IFLA_MTU": true,
+ "IFLA_NET_NS_PID": true,
+ "IFLA_OPERSTATE": true,
+ "IFLA_PRIORITY": true,
+ "IFLA_PROTINFO": true,
+ "IFLA_QDISC": true,
+ "IFLA_STATS": true,
+ "IFLA_TXQLEN": true,
+ "IFLA_UNSPEC": true,
+ "IFLA_WEIGHT": true,
+ "IFLA_WIRELESS": true,
+ "IFNAMSIZ": true,
+ "IFT_1822": true,
+ "IFT_A12MPPSWITCH": true,
+ "IFT_AAL2": true,
+ "IFT_AAL5": true,
+ "IFT_ADSL": true,
+ "IFT_AFLANE8023": true,
+ "IFT_AFLANE8025": true,
+ "IFT_ARAP": true,
+ "IFT_ARCNET": true,
+ "IFT_ARCNETPLUS": true,
+ "IFT_ASYNC": true,
+ "IFT_ATM": true,
+ "IFT_ATMDXI": true,
+ "IFT_ATMFUNI": true,
+ "IFT_ATMIMA": true,
+ "IFT_ATMLOGICAL": true,
+ "IFT_ATMRADIO": true,
+ "IFT_ATMSUBINTERFACE": true,
+ "IFT_ATMVCIENDPT": true,
+ "IFT_ATMVIRTUAL": true,
+ "IFT_BGPPOLICYACCOUNTING": true,
+ "IFT_BLUETOOTH": true,
+ "IFT_BRIDGE": true,
+ "IFT_BSC": true,
+ "IFT_CARP": true,
+ "IFT_CCTEMUL": true,
+ "IFT_CELLULAR": true,
+ "IFT_CEPT": true,
+ "IFT_CES": true,
+ "IFT_CHANNEL": true,
+ "IFT_CNR": true,
+ "IFT_COFFEE": true,
+ "IFT_COMPOSITELINK": true,
+ "IFT_DCN": true,
+ "IFT_DIGITALPOWERLINE": true,
+ "IFT_DIGITALWRAPPEROVERHEADCHANNEL": true,
+ "IFT_DLSW": true,
+ "IFT_DOCSCABLEDOWNSTREAM": true,
+ "IFT_DOCSCABLEMACLAYER": true,
+ "IFT_DOCSCABLEUPSTREAM": true,
+ "IFT_DOCSCABLEUPSTREAMCHANNEL": true,
+ "IFT_DS0": true,
+ "IFT_DS0BUNDLE": true,
+ "IFT_DS1FDL": true,
+ "IFT_DS3": true,
+ "IFT_DTM": true,
+ "IFT_DUMMY": true,
+ "IFT_DVBASILN": true,
+ "IFT_DVBASIOUT": true,
+ "IFT_DVBRCCDOWNSTREAM": true,
+ "IFT_DVBRCCMACLAYER": true,
+ "IFT_DVBRCCUPSTREAM": true,
+ "IFT_ECONET": true,
+ "IFT_ENC": true,
+ "IFT_EON": true,
+ "IFT_EPLRS": true,
+ "IFT_ESCON": true,
+ "IFT_ETHER": true,
+ "IFT_FAITH": true,
+ "IFT_FAST": true,
+ "IFT_FASTETHER": true,
+ "IFT_FASTETHERFX": true,
+ "IFT_FDDI": true,
+ "IFT_FIBRECHANNEL": true,
+ "IFT_FRAMERELAYINTERCONNECT": true,
+ "IFT_FRAMERELAYMPI": true,
+ "IFT_FRDLCIENDPT": true,
+ "IFT_FRELAY": true,
+ "IFT_FRELAYDCE": true,
+ "IFT_FRF16MFRBUNDLE": true,
+ "IFT_FRFORWARD": true,
+ "IFT_G703AT2MB": true,
+ "IFT_G703AT64K": true,
+ "IFT_GIF": true,
+ "IFT_GIGABITETHERNET": true,
+ "IFT_GR303IDT": true,
+ "IFT_GR303RDT": true,
+ "IFT_H323GATEKEEPER": true,
+ "IFT_H323PROXY": true,
+ "IFT_HDH1822": true,
+ "IFT_HDLC": true,
+ "IFT_HDSL2": true,
+ "IFT_HIPERLAN2": true,
+ "IFT_HIPPI": true,
+ "IFT_HIPPIINTERFACE": true,
+ "IFT_HOSTPAD": true,
+ "IFT_HSSI": true,
+ "IFT_HY": true,
+ "IFT_IBM370PARCHAN": true,
+ "IFT_IDSL": true,
+ "IFT_IEEE1394": true,
+ "IFT_IEEE80211": true,
+ "IFT_IEEE80212": true,
+ "IFT_IEEE8023ADLAG": true,
+ "IFT_IFGSN": true,
+ "IFT_IMT": true,
+ "IFT_INFINIBAND": true,
+ "IFT_INTERLEAVE": true,
+ "IFT_IP": true,
+ "IFT_IPFORWARD": true,
+ "IFT_IPOVERATM": true,
+ "IFT_IPOVERCDLC": true,
+ "IFT_IPOVERCLAW": true,
+ "IFT_IPSWITCH": true,
+ "IFT_IPXIP": true,
+ "IFT_ISDN": true,
+ "IFT_ISDNBASIC": true,
+ "IFT_ISDNPRIMARY": true,
+ "IFT_ISDNS": true,
+ "IFT_ISDNU": true,
+ "IFT_ISO88022LLC": true,
+ "IFT_ISO88023": true,
+ "IFT_ISO88024": true,
+ "IFT_ISO88025": true,
+ "IFT_ISO88025CRFPINT": true,
+ "IFT_ISO88025DTR": true,
+ "IFT_ISO88025FIBER": true,
+ "IFT_ISO88026": true,
+ "IFT_ISUP": true,
+ "IFT_L2VLAN": true,
+ "IFT_L3IPVLAN": true,
+ "IFT_L3IPXVLAN": true,
+ "IFT_LAPB": true,
+ "IFT_LAPD": true,
+ "IFT_LAPF": true,
+ "IFT_LINEGROUP": true,
+ "IFT_LOCALTALK": true,
+ "IFT_LOOP": true,
+ "IFT_MEDIAMAILOVERIP": true,
+ "IFT_MFSIGLINK": true,
+ "IFT_MIOX25": true,
+ "IFT_MODEM": true,
+ "IFT_MPC": true,
+ "IFT_MPLS": true,
+ "IFT_MPLSTUNNEL": true,
+ "IFT_MSDSL": true,
+ "IFT_MVL": true,
+ "IFT_MYRINET": true,
+ "IFT_NFAS": true,
+ "IFT_NSIP": true,
+ "IFT_OPTICALCHANNEL": true,
+ "IFT_OPTICALTRANSPORT": true,
+ "IFT_OTHER": true,
+ "IFT_P10": true,
+ "IFT_P80": true,
+ "IFT_PARA": true,
+ "IFT_PDP": true,
+ "IFT_PFLOG": true,
+ "IFT_PFLOW": true,
+ "IFT_PFSYNC": true,
+ "IFT_PLC": true,
+ "IFT_PON155": true,
+ "IFT_PON622": true,
+ "IFT_POS": true,
+ "IFT_PPP": true,
+ "IFT_PPPMULTILINKBUNDLE": true,
+ "IFT_PROPATM": true,
+ "IFT_PROPBWAP2MP": true,
+ "IFT_PROPCNLS": true,
+ "IFT_PROPDOCSWIRELESSDOWNSTREAM": true,
+ "IFT_PROPDOCSWIRELESSMACLAYER": true,
+ "IFT_PROPDOCSWIRELESSUPSTREAM": true,
+ "IFT_PROPMUX": true,
+ "IFT_PROPVIRTUAL": true,
+ "IFT_PROPWIRELESSP2P": true,
+ "IFT_PTPSERIAL": true,
+ "IFT_PVC": true,
+ "IFT_Q2931": true,
+ "IFT_QLLC": true,
+ "IFT_RADIOMAC": true,
+ "IFT_RADSL": true,
+ "IFT_REACHDSL": true,
+ "IFT_RFC1483": true,
+ "IFT_RS232": true,
+ "IFT_RSRB": true,
+ "IFT_SDLC": true,
+ "IFT_SDSL": true,
+ "IFT_SHDSL": true,
+ "IFT_SIP": true,
+ "IFT_SIPSIG": true,
+ "IFT_SIPTG": true,
+ "IFT_SLIP": true,
+ "IFT_SMDSDXI": true,
+ "IFT_SMDSICIP": true,
+ "IFT_SONET": true,
+ "IFT_SONETOVERHEADCHANNEL": true,
+ "IFT_SONETPATH": true,
+ "IFT_SONETVT": true,
+ "IFT_SRP": true,
+ "IFT_SS7SIGLINK": true,
+ "IFT_STACKTOSTACK": true,
+ "IFT_STARLAN": true,
+ "IFT_STF": true,
+ "IFT_T1": true,
+ "IFT_TDLC": true,
+ "IFT_TELINK": true,
+ "IFT_TERMPAD": true,
+ "IFT_TR008": true,
+ "IFT_TRANSPHDLC": true,
+ "IFT_TUNNEL": true,
+ "IFT_ULTRA": true,
+ "IFT_USB": true,
+ "IFT_V11": true,
+ "IFT_V35": true,
+ "IFT_V36": true,
+ "IFT_V37": true,
+ "IFT_VDSL": true,
+ "IFT_VIRTUALIPADDRESS": true,
+ "IFT_VIRTUALTG": true,
+ "IFT_VOICEDID": true,
+ "IFT_VOICEEM": true,
+ "IFT_VOICEEMFGD": true,
+ "IFT_VOICEENCAP": true,
+ "IFT_VOICEFGDEANA": true,
+ "IFT_VOICEFXO": true,
+ "IFT_VOICEFXS": true,
+ "IFT_VOICEOVERATM": true,
+ "IFT_VOICEOVERCABLE": true,
+ "IFT_VOICEOVERFRAMERELAY": true,
+ "IFT_VOICEOVERIP": true,
+ "IFT_X213": true,
+ "IFT_X25": true,
+ "IFT_X25DDN": true,
+ "IFT_X25HUNTGROUP": true,
+ "IFT_X25MLP": true,
+ "IFT_X25PLE": true,
+ "IFT_XETHER": true,
+ "IGNBRK": true,
+ "IGNCR": true,
+ "IGNORE": true,
+ "IGNPAR": true,
+ "IMAXBEL": true,
+ "INFINITE": true,
+ "INLCR": true,
+ "INPCK": true,
+ "INVALID_FILE_ATTRIBUTES": true,
+ "IN_ACCESS": true,
+ "IN_ALL_EVENTS": true,
+ "IN_ATTRIB": true,
+ "IN_CLASSA_HOST": true,
+ "IN_CLASSA_MAX": true,
+ "IN_CLASSA_NET": true,
+ "IN_CLASSA_NSHIFT": true,
+ "IN_CLASSB_HOST": true,
+ "IN_CLASSB_MAX": true,
+ "IN_CLASSB_NET": true,
+ "IN_CLASSB_NSHIFT": true,
+ "IN_CLASSC_HOST": true,
+ "IN_CLASSC_NET": true,
+ "IN_CLASSC_NSHIFT": true,
+ "IN_CLASSD_HOST": true,
+ "IN_CLASSD_NET": true,
+ "IN_CLASSD_NSHIFT": true,
+ "IN_CLOEXEC": true,
+ "IN_CLOSE": true,
+ "IN_CLOSE_NOWRITE": true,
+ "IN_CLOSE_WRITE": true,
+ "IN_CREATE": true,
+ "IN_DELETE": true,
+ "IN_DELETE_SELF": true,
+ "IN_DONT_FOLLOW": true,
+ "IN_EXCL_UNLINK": true,
+ "IN_IGNORED": true,
+ "IN_ISDIR": true,
+ "IN_LINKLOCALNETNUM": true,
+ "IN_LOOPBACKNET": true,
+ "IN_MASK_ADD": true,
+ "IN_MODIFY": true,
+ "IN_MOVE": true,
+ "IN_MOVED_FROM": true,
+ "IN_MOVED_TO": true,
+ "IN_MOVE_SELF": true,
+ "IN_NONBLOCK": true,
+ "IN_ONESHOT": true,
+ "IN_ONLYDIR": true,
+ "IN_OPEN": true,
+ "IN_Q_OVERFLOW": true,
+ "IN_RFC3021_HOST": true,
+ "IN_RFC3021_MASK": true,
+ "IN_RFC3021_NET": true,
+ "IN_RFC3021_NSHIFT": true,
+ "IN_UNMOUNT": true,
+ "IOC_IN": true,
+ "IOC_INOUT": true,
+ "IOC_OUT": true,
+ "IOC_VENDOR": true,
+ "IOC_WS2": true,
+ "IO_REPARSE_TAG_SYMLINK": true,
+ "IPMreq": true,
+ "IPMreqn": true,
+ "IPPROTO_3PC": true,
+ "IPPROTO_ADFS": true,
+ "IPPROTO_AH": true,
+ "IPPROTO_AHIP": true,
+ "IPPROTO_APES": true,
+ "IPPROTO_ARGUS": true,
+ "IPPROTO_AX25": true,
+ "IPPROTO_BHA": true,
+ "IPPROTO_BLT": true,
+ "IPPROTO_BRSATMON": true,
+ "IPPROTO_CARP": true,
+ "IPPROTO_CFTP": true,
+ "IPPROTO_CHAOS": true,
+ "IPPROTO_CMTP": true,
+ "IPPROTO_COMP": true,
+ "IPPROTO_CPHB": true,
+ "IPPROTO_CPNX": true,
+ "IPPROTO_DCCP": true,
+ "IPPROTO_DDP": true,
+ "IPPROTO_DGP": true,
+ "IPPROTO_DIVERT": true,
+ "IPPROTO_DIVERT_INIT": true,
+ "IPPROTO_DIVERT_RESP": true,
+ "IPPROTO_DONE": true,
+ "IPPROTO_DSTOPTS": true,
+ "IPPROTO_EGP": true,
+ "IPPROTO_EMCON": true,
+ "IPPROTO_ENCAP": true,
+ "IPPROTO_EON": true,
+ "IPPROTO_ESP": true,
+ "IPPROTO_ETHERIP": true,
+ "IPPROTO_FRAGMENT": true,
+ "IPPROTO_GGP": true,
+ "IPPROTO_GMTP": true,
+ "IPPROTO_GRE": true,
+ "IPPROTO_HELLO": true,
+ "IPPROTO_HMP": true,
+ "IPPROTO_HOPOPTS": true,
+ "IPPROTO_ICMP": true,
+ "IPPROTO_ICMPV6": true,
+ "IPPROTO_IDP": true,
+ "IPPROTO_IDPR": true,
+ "IPPROTO_IDRP": true,
+ "IPPROTO_IGMP": true,
+ "IPPROTO_IGP": true,
+ "IPPROTO_IGRP": true,
+ "IPPROTO_IL": true,
+ "IPPROTO_INLSP": true,
+ "IPPROTO_INP": true,
+ "IPPROTO_IP": true,
+ "IPPROTO_IPCOMP": true,
+ "IPPROTO_IPCV": true,
+ "IPPROTO_IPEIP": true,
+ "IPPROTO_IPIP": true,
+ "IPPROTO_IPPC": true,
+ "IPPROTO_IPV4": true,
+ "IPPROTO_IPV6": true,
+ "IPPROTO_IPV6_ICMP": true,
+ "IPPROTO_IRTP": true,
+ "IPPROTO_KRYPTOLAN": true,
+ "IPPROTO_LARP": true,
+ "IPPROTO_LEAF1": true,
+ "IPPROTO_LEAF2": true,
+ "IPPROTO_MAX": true,
+ "IPPROTO_MAXID": true,
+ "IPPROTO_MEAS": true,
+ "IPPROTO_MH": true,
+ "IPPROTO_MHRP": true,
+ "IPPROTO_MICP": true,
+ "IPPROTO_MOBILE": true,
+ "IPPROTO_MPLS": true,
+ "IPPROTO_MTP": true,
+ "IPPROTO_MUX": true,
+ "IPPROTO_ND": true,
+ "IPPROTO_NHRP": true,
+ "IPPROTO_NONE": true,
+ "IPPROTO_NSP": true,
+ "IPPROTO_NVPII": true,
+ "IPPROTO_OLD_DIVERT": true,
+ "IPPROTO_OSPFIGP": true,
+ "IPPROTO_PFSYNC": true,
+ "IPPROTO_PGM": true,
+ "IPPROTO_PIGP": true,
+ "IPPROTO_PIM": true,
+ "IPPROTO_PRM": true,
+ "IPPROTO_PUP": true,
+ "IPPROTO_PVP": true,
+ "IPPROTO_RAW": true,
+ "IPPROTO_RCCMON": true,
+ "IPPROTO_RDP": true,
+ "IPPROTO_ROUTING": true,
+ "IPPROTO_RSVP": true,
+ "IPPROTO_RVD": true,
+ "IPPROTO_SATEXPAK": true,
+ "IPPROTO_SATMON": true,
+ "IPPROTO_SCCSP": true,
+ "IPPROTO_SCTP": true,
+ "IPPROTO_SDRP": true,
+ "IPPROTO_SEND": true,
+ "IPPROTO_SEP": true,
+ "IPPROTO_SKIP": true,
+ "IPPROTO_SPACER": true,
+ "IPPROTO_SRPC": true,
+ "IPPROTO_ST": true,
+ "IPPROTO_SVMTP": true,
+ "IPPROTO_SWIPE": true,
+ "IPPROTO_TCF": true,
+ "IPPROTO_TCP": true,
+ "IPPROTO_TLSP": true,
+ "IPPROTO_TP": true,
+ "IPPROTO_TPXX": true,
+ "IPPROTO_TRUNK1": true,
+ "IPPROTO_TRUNK2": true,
+ "IPPROTO_TTP": true,
+ "IPPROTO_UDP": true,
+ "IPPROTO_UDPLITE": true,
+ "IPPROTO_VINES": true,
+ "IPPROTO_VISA": true,
+ "IPPROTO_VMTP": true,
+ "IPPROTO_VRRP": true,
+ "IPPROTO_WBEXPAK": true,
+ "IPPROTO_WBMON": true,
+ "IPPROTO_WSN": true,
+ "IPPROTO_XNET": true,
+ "IPPROTO_XTP": true,
+ "IPV6_2292DSTOPTS": true,
+ "IPV6_2292HOPLIMIT": true,
+ "IPV6_2292HOPOPTS": true,
+ "IPV6_2292NEXTHOP": true,
+ "IPV6_2292PKTINFO": true,
+ "IPV6_2292PKTOPTIONS": true,
+ "IPV6_2292RTHDR": true,
+ "IPV6_ADDRFORM": true,
+ "IPV6_ADD_MEMBERSHIP": true,
+ "IPV6_AUTHHDR": true,
+ "IPV6_AUTH_LEVEL": true,
+ "IPV6_AUTOFLOWLABEL": true,
+ "IPV6_BINDANY": true,
+ "IPV6_BINDV6ONLY": true,
+ "IPV6_BOUND_IF": true,
+ "IPV6_CHECKSUM": true,
+ "IPV6_DEFAULT_MULTICAST_HOPS": true,
+ "IPV6_DEFAULT_MULTICAST_LOOP": true,
+ "IPV6_DEFHLIM": true,
+ "IPV6_DONTFRAG": true,
+ "IPV6_DROP_MEMBERSHIP": true,
+ "IPV6_DSTOPTS": true,
+ "IPV6_ESP_NETWORK_LEVEL": true,
+ "IPV6_ESP_TRANS_LEVEL": true,
+ "IPV6_FAITH": true,
+ "IPV6_FLOWINFO_MASK": true,
+ "IPV6_FLOWLABEL_MASK": true,
+ "IPV6_FRAGTTL": true,
+ "IPV6_FW_ADD": true,
+ "IPV6_FW_DEL": true,
+ "IPV6_FW_FLUSH": true,
+ "IPV6_FW_GET": true,
+ "IPV6_FW_ZERO": true,
+ "IPV6_HLIMDEC": true,
+ "IPV6_HOPLIMIT": true,
+ "IPV6_HOPOPTS": true,
+ "IPV6_IPCOMP_LEVEL": true,
+ "IPV6_IPSEC_POLICY": true,
+ "IPV6_JOIN_ANYCAST": true,
+ "IPV6_JOIN_GROUP": true,
+ "IPV6_LEAVE_ANYCAST": true,
+ "IPV6_LEAVE_GROUP": true,
+ "IPV6_MAXHLIM": true,
+ "IPV6_MAXOPTHDR": true,
+ "IPV6_MAXPACKET": true,
+ "IPV6_MAX_GROUP_SRC_FILTER": true,
+ "IPV6_MAX_MEMBERSHIPS": true,
+ "IPV6_MAX_SOCK_SRC_FILTER": true,
+ "IPV6_MIN_MEMBERSHIPS": true,
+ "IPV6_MMTU": true,
+ "IPV6_MSFILTER": true,
+ "IPV6_MTU": true,
+ "IPV6_MTU_DISCOVER": true,
+ "IPV6_MULTICAST_HOPS": true,
+ "IPV6_MULTICAST_IF": true,
+ "IPV6_MULTICAST_LOOP": true,
+ "IPV6_NEXTHOP": true,
+ "IPV6_OPTIONS": true,
+ "IPV6_PATHMTU": true,
+ "IPV6_PIPEX": true,
+ "IPV6_PKTINFO": true,
+ "IPV6_PMTUDISC_DO": true,
+ "IPV6_PMTUDISC_DONT": true,
+ "IPV6_PMTUDISC_PROBE": true,
+ "IPV6_PMTUDISC_WANT": true,
+ "IPV6_PORTRANGE": true,
+ "IPV6_PORTRANGE_DEFAULT": true,
+ "IPV6_PORTRANGE_HIGH": true,
+ "IPV6_PORTRANGE_LOW": true,
+ "IPV6_PREFER_TEMPADDR": true,
+ "IPV6_RECVDSTOPTS": true,
+ "IPV6_RECVDSTPORT": true,
+ "IPV6_RECVERR": true,
+ "IPV6_RECVHOPLIMIT": true,
+ "IPV6_RECVHOPOPTS": true,
+ "IPV6_RECVPATHMTU": true,
+ "IPV6_RECVPKTINFO": true,
+ "IPV6_RECVRTHDR": true,
+ "IPV6_RECVTCLASS": true,
+ "IPV6_ROUTER_ALERT": true,
+ "IPV6_RTABLE": true,
+ "IPV6_RTHDR": true,
+ "IPV6_RTHDRDSTOPTS": true,
+ "IPV6_RTHDR_LOOSE": true,
+ "IPV6_RTHDR_STRICT": true,
+ "IPV6_RTHDR_TYPE_0": true,
+ "IPV6_RXDSTOPTS": true,
+ "IPV6_RXHOPOPTS": true,
+ "IPV6_SOCKOPT_RESERVED1": true,
+ "IPV6_TCLASS": true,
+ "IPV6_UNICAST_HOPS": true,
+ "IPV6_USE_MIN_MTU": true,
+ "IPV6_V6ONLY": true,
+ "IPV6_VERSION": true,
+ "IPV6_VERSION_MASK": true,
+ "IPV6_XFRM_POLICY": true,
+ "IP_ADD_MEMBERSHIP": true,
+ "IP_ADD_SOURCE_MEMBERSHIP": true,
+ "IP_AUTH_LEVEL": true,
+ "IP_BINDANY": true,
+ "IP_BLOCK_SOURCE": true,
+ "IP_BOUND_IF": true,
+ "IP_DEFAULT_MULTICAST_LOOP": true,
+ "IP_DEFAULT_MULTICAST_TTL": true,
+ "IP_DF": true,
+ "IP_DIVERTFL": true,
+ "IP_DONTFRAG": true,
+ "IP_DROP_MEMBERSHIP": true,
+ "IP_DROP_SOURCE_MEMBERSHIP": true,
+ "IP_DUMMYNET3": true,
+ "IP_DUMMYNET_CONFIGURE": true,
+ "IP_DUMMYNET_DEL": true,
+ "IP_DUMMYNET_FLUSH": true,
+ "IP_DUMMYNET_GET": true,
+ "IP_EF": true,
+ "IP_ERRORMTU": true,
+ "IP_ESP_NETWORK_LEVEL": true,
+ "IP_ESP_TRANS_LEVEL": true,
+ "IP_FAITH": true,
+ "IP_FREEBIND": true,
+ "IP_FW3": true,
+ "IP_FW_ADD": true,
+ "IP_FW_DEL": true,
+ "IP_FW_FLUSH": true,
+ "IP_FW_GET": true,
+ "IP_FW_NAT_CFG": true,
+ "IP_FW_NAT_DEL": true,
+ "IP_FW_NAT_GET_CONFIG": true,
+ "IP_FW_NAT_GET_LOG": true,
+ "IP_FW_RESETLOG": true,
+ "IP_FW_TABLE_ADD": true,
+ "IP_FW_TABLE_DEL": true,
+ "IP_FW_TABLE_FLUSH": true,
+ "IP_FW_TABLE_GETSIZE": true,
+ "IP_FW_TABLE_LIST": true,
+ "IP_FW_ZERO": true,
+ "IP_HDRINCL": true,
+ "IP_IPCOMP_LEVEL": true,
+ "IP_IPSECFLOWINFO": true,
+ "IP_IPSEC_LOCAL_AUTH": true,
+ "IP_IPSEC_LOCAL_CRED": true,
+ "IP_IPSEC_LOCAL_ID": true,
+ "IP_IPSEC_POLICY": true,
+ "IP_IPSEC_REMOTE_AUTH": true,
+ "IP_IPSEC_REMOTE_CRED": true,
+ "IP_IPSEC_REMOTE_ID": true,
+ "IP_MAXPACKET": true,
+ "IP_MAX_GROUP_SRC_FILTER": true,
+ "IP_MAX_MEMBERSHIPS": true,
+ "IP_MAX_SOCK_MUTE_FILTER": true,
+ "IP_MAX_SOCK_SRC_FILTER": true,
+ "IP_MAX_SOURCE_FILTER": true,
+ "IP_MF": true,
+ "IP_MINFRAGSIZE": true,
+ "IP_MINTTL": true,
+ "IP_MIN_MEMBERSHIPS": true,
+ "IP_MSFILTER": true,
+ "IP_MSS": true,
+ "IP_MTU": true,
+ "IP_MTU_DISCOVER": true,
+ "IP_MULTICAST_IF": true,
+ "IP_MULTICAST_IFINDEX": true,
+ "IP_MULTICAST_LOOP": true,
+ "IP_MULTICAST_TTL": true,
+ "IP_MULTICAST_VIF": true,
+ "IP_NAT__XXX": true,
+ "IP_OFFMASK": true,
+ "IP_OLD_FW_ADD": true,
+ "IP_OLD_FW_DEL": true,
+ "IP_OLD_FW_FLUSH": true,
+ "IP_OLD_FW_GET": true,
+ "IP_OLD_FW_RESETLOG": true,
+ "IP_OLD_FW_ZERO": true,
+ "IP_ONESBCAST": true,
+ "IP_OPTIONS": true,
+ "IP_ORIGDSTADDR": true,
+ "IP_PASSSEC": true,
+ "IP_PIPEX": true,
+ "IP_PKTINFO": true,
+ "IP_PKTOPTIONS": true,
+ "IP_PMTUDISC": true,
+ "IP_PMTUDISC_DO": true,
+ "IP_PMTUDISC_DONT": true,
+ "IP_PMTUDISC_PROBE": true,
+ "IP_PMTUDISC_WANT": true,
+ "IP_PORTRANGE": true,
+ "IP_PORTRANGE_DEFAULT": true,
+ "IP_PORTRANGE_HIGH": true,
+ "IP_PORTRANGE_LOW": true,
+ "IP_RECVDSTADDR": true,
+ "IP_RECVDSTPORT": true,
+ "IP_RECVERR": true,
+ "IP_RECVIF": true,
+ "IP_RECVOPTS": true,
+ "IP_RECVORIGDSTADDR": true,
+ "IP_RECVPKTINFO": true,
+ "IP_RECVRETOPTS": true,
+ "IP_RECVRTABLE": true,
+ "IP_RECVTOS": true,
+ "IP_RECVTTL": true,
+ "IP_RETOPTS": true,
+ "IP_RF": true,
+ "IP_ROUTER_ALERT": true,
+ "IP_RSVP_OFF": true,
+ "IP_RSVP_ON": true,
+ "IP_RSVP_VIF_OFF": true,
+ "IP_RSVP_VIF_ON": true,
+ "IP_RTABLE": true,
+ "IP_SENDSRCADDR": true,
+ "IP_STRIPHDR": true,
+ "IP_TOS": true,
+ "IP_TRAFFIC_MGT_BACKGROUND": true,
+ "IP_TRANSPARENT": true,
+ "IP_TTL": true,
+ "IP_UNBLOCK_SOURCE": true,
+ "IP_XFRM_POLICY": true,
+ "IPv6MTUInfo": true,
+ "IPv6Mreq": true,
+ "ISIG": true,
+ "ISTRIP": true,
+ "IUCLC": true,
+ "IUTF8": true,
+ "IXANY": true,
+ "IXOFF": true,
+ "IXON": true,
+ "IfAddrmsg": true,
+ "IfAnnounceMsghdr": true,
+ "IfData": true,
+ "IfInfomsg": true,
+ "IfMsghdr": true,
+ "IfaMsghdr": true,
+ "IfmaMsghdr": true,
+ "IfmaMsghdr2": true,
+ "ImplementsGetwd": true,
+ "Inet4Pktinfo": true,
+ "Inet6Pktinfo": true,
+ "InotifyAddWatch": true,
+ "InotifyEvent": true,
+ "InotifyInit": true,
+ "InotifyInit1": true,
+ "InotifyRmWatch": true,
+ "InterfaceAddrMessage": true,
+ "InterfaceAnnounceMessage": true,
+ "InterfaceInfo": true,
+ "InterfaceMessage": true,
+ "InterfaceMulticastAddrMessage": true,
+ "InvalidHandle": true,
+ "Ioperm": true,
+ "Iopl": true,
+ "Iovec": true,
+ "IpAdapterInfo": true,
+ "IpAddrString": true,
+ "IpAddressString": true,
+ "IpMaskString": true,
+ "Issetugid": true,
+ "KEY_ALL_ACCESS": true,
+ "KEY_CREATE_LINK": true,
+ "KEY_CREATE_SUB_KEY": true,
+ "KEY_ENUMERATE_SUB_KEYS": true,
+ "KEY_EXECUTE": true,
+ "KEY_NOTIFY": true,
+ "KEY_QUERY_VALUE": true,
+ "KEY_READ": true,
+ "KEY_SET_VALUE": true,
+ "KEY_WOW64_32KEY": true,
+ "KEY_WOW64_64KEY": true,
+ "KEY_WRITE": true,
+ "Kevent": true,
+ "Kevent_t": true,
+ "Kill": true,
+ "Klogctl": true,
+ "Kqueue": true,
+ "LANG_ENGLISH": true,
+ "LAYERED_PROTOCOL": true,
+ "LCNT_OVERLOAD_FLUSH": true,
+ "LINUX_REBOOT_CMD_CAD_OFF": true,
+ "LINUX_REBOOT_CMD_CAD_ON": true,
+ "LINUX_REBOOT_CMD_HALT": true,
+ "LINUX_REBOOT_CMD_KEXEC": true,
+ "LINUX_REBOOT_CMD_POWER_OFF": true,
+ "LINUX_REBOOT_CMD_RESTART": true,
+ "LINUX_REBOOT_CMD_RESTART2": true,
+ "LINUX_REBOOT_CMD_SW_SUSPEND": true,
+ "LINUX_REBOOT_MAGIC1": true,
+ "LINUX_REBOOT_MAGIC2": true,
+ "LOCK_EX": true,
+ "LOCK_NB": true,
+ "LOCK_SH": true,
+ "LOCK_UN": true,
+ "LazyDLL": true,
+ "LazyProc": true,
+ "Lchown": true,
+ "Linger": true,
+ "Link": true,
+ "Listen": true,
+ "Listxattr": true,
+ "LoadCancelIoEx": true,
+ "LoadConnectEx": true,
+ "LoadCreateSymbolicLink": true,
+ "LoadDLL": true,
+ "LoadGetAddrInfo": true,
+ "LoadLibrary": true,
+ "LoadSetFileCompletionNotificationModes": true,
+ "LocalFree": true,
+ "Log2phys_t": true,
+ "LookupAccountName": true,
+ "LookupAccountSid": true,
+ "LookupSID": true,
+ "LsfJump": true,
+ "LsfSocket": true,
+ "LsfStmt": true,
+ "Lstat": true,
+ "MADV_AUTOSYNC": true,
+ "MADV_CAN_REUSE": true,
+ "MADV_CORE": true,
+ "MADV_DOFORK": true,
+ "MADV_DONTFORK": true,
+ "MADV_DONTNEED": true,
+ "MADV_FREE": true,
+ "MADV_FREE_REUSABLE": true,
+ "MADV_FREE_REUSE": true,
+ "MADV_HUGEPAGE": true,
+ "MADV_HWPOISON": true,
+ "MADV_MERGEABLE": true,
+ "MADV_NOCORE": true,
+ "MADV_NOHUGEPAGE": true,
+ "MADV_NORMAL": true,
+ "MADV_NOSYNC": true,
+ "MADV_PROTECT": true,
+ "MADV_RANDOM": true,
+ "MADV_REMOVE": true,
+ "MADV_SEQUENTIAL": true,
+ "MADV_SPACEAVAIL": true,
+ "MADV_UNMERGEABLE": true,
+ "MADV_WILLNEED": true,
+ "MADV_ZERO_WIRED_PAGES": true,
+ "MAP_32BIT": true,
+ "MAP_ALIGNED_SUPER": true,
+ "MAP_ALIGNMENT_16MB": true,
+ "MAP_ALIGNMENT_1TB": true,
+ "MAP_ALIGNMENT_256TB": true,
+ "MAP_ALIGNMENT_4GB": true,
+ "MAP_ALIGNMENT_64KB": true,
+ "MAP_ALIGNMENT_64PB": true,
+ "MAP_ALIGNMENT_MASK": true,
+ "MAP_ALIGNMENT_SHIFT": true,
+ "MAP_ANON": true,
+ "MAP_ANONYMOUS": true,
+ "MAP_COPY": true,
+ "MAP_DENYWRITE": true,
+ "MAP_EXECUTABLE": true,
+ "MAP_FILE": true,
+ "MAP_FIXED": true,
+ "MAP_FLAGMASK": true,
+ "MAP_GROWSDOWN": true,
+ "MAP_HASSEMAPHORE": true,
+ "MAP_HUGETLB": true,
+ "MAP_INHERIT": true,
+ "MAP_INHERIT_COPY": true,
+ "MAP_INHERIT_DEFAULT": true,
+ "MAP_INHERIT_DONATE_COPY": true,
+ "MAP_INHERIT_NONE": true,
+ "MAP_INHERIT_SHARE": true,
+ "MAP_JIT": true,
+ "MAP_LOCKED": true,
+ "MAP_NOCACHE": true,
+ "MAP_NOCORE": true,
+ "MAP_NOEXTEND": true,
+ "MAP_NONBLOCK": true,
+ "MAP_NORESERVE": true,
+ "MAP_NOSYNC": true,
+ "MAP_POPULATE": true,
+ "MAP_PREFAULT_READ": true,
+ "MAP_PRIVATE": true,
+ "MAP_RENAME": true,
+ "MAP_RESERVED0080": true,
+ "MAP_RESERVED0100": true,
+ "MAP_SHARED": true,
+ "MAP_STACK": true,
+ "MAP_TRYFIXED": true,
+ "MAP_TYPE": true,
+ "MAP_WIRED": true,
+ "MAXIMUM_REPARSE_DATA_BUFFER_SIZE": true,
+ "MAXLEN_IFDESCR": true,
+ "MAXLEN_PHYSADDR": true,
+ "MAX_ADAPTER_ADDRESS_LENGTH": true,
+ "MAX_ADAPTER_DESCRIPTION_LENGTH": true,
+ "MAX_ADAPTER_NAME_LENGTH": true,
+ "MAX_COMPUTERNAME_LENGTH": true,
+ "MAX_INTERFACE_NAME_LEN": true,
+ "MAX_LONG_PATH": true,
+ "MAX_PATH": true,
+ "MAX_PROTOCOL_CHAIN": true,
+ "MCL_CURRENT": true,
+ "MCL_FUTURE": true,
+ "MNT_DETACH": true,
+ "MNT_EXPIRE": true,
+ "MNT_FORCE": true,
+ "MSG_BCAST": true,
+ "MSG_CMSG_CLOEXEC": true,
+ "MSG_COMPAT": true,
+ "MSG_CONFIRM": true,
+ "MSG_CONTROLMBUF": true,
+ "MSG_CTRUNC": true,
+ "MSG_DONTROUTE": true,
+ "MSG_DONTWAIT": true,
+ "MSG_EOF": true,
+ "MSG_EOR": true,
+ "MSG_ERRQUEUE": true,
+ "MSG_FASTOPEN": true,
+ "MSG_FIN": true,
+ "MSG_FLUSH": true,
+ "MSG_HAVEMORE": true,
+ "MSG_HOLD": true,
+ "MSG_IOVUSRSPACE": true,
+ "MSG_LENUSRSPACE": true,
+ "MSG_MCAST": true,
+ "MSG_MORE": true,
+ "MSG_NAMEMBUF": true,
+ "MSG_NBIO": true,
+ "MSG_NEEDSA": true,
+ "MSG_NOSIGNAL": true,
+ "MSG_NOTIFICATION": true,
+ "MSG_OOB": true,
+ "MSG_PEEK": true,
+ "MSG_PROXY": true,
+ "MSG_RCVMORE": true,
+ "MSG_RST": true,
+ "MSG_SEND": true,
+ "MSG_SYN": true,
+ "MSG_TRUNC": true,
+ "MSG_TRYHARD": true,
+ "MSG_USERFLAGS": true,
+ "MSG_WAITALL": true,
+ "MSG_WAITFORONE": true,
+ "MSG_WAITSTREAM": true,
+ "MS_ACTIVE": true,
+ "MS_ASYNC": true,
+ "MS_BIND": true,
+ "MS_DEACTIVATE": true,
+ "MS_DIRSYNC": true,
+ "MS_INVALIDATE": true,
+ "MS_I_VERSION": true,
+ "MS_KERNMOUNT": true,
+ "MS_KILLPAGES": true,
+ "MS_MANDLOCK": true,
+ "MS_MGC_MSK": true,
+ "MS_MGC_VAL": true,
+ "MS_MOVE": true,
+ "MS_NOATIME": true,
+ "MS_NODEV": true,
+ "MS_NODIRATIME": true,
+ "MS_NOEXEC": true,
+ "MS_NOSUID": true,
+ "MS_NOUSER": true,
+ "MS_POSIXACL": true,
+ "MS_PRIVATE": true,
+ "MS_RDONLY": true,
+ "MS_REC": true,
+ "MS_RELATIME": true,
+ "MS_REMOUNT": true,
+ "MS_RMT_MASK": true,
+ "MS_SHARED": true,
+ "MS_SILENT": true,
+ "MS_SLAVE": true,
+ "MS_STRICTATIME": true,
+ "MS_SYNC": true,
+ "MS_SYNCHRONOUS": true,
+ "MS_UNBINDABLE": true,
+ "Madvise": true,
+ "MapViewOfFile": true,
+ "MaxTokenInfoClass": true,
+ "Mclpool": true,
+ "MibIfRow": true,
+ "Mkdir": true,
+ "Mkdirat": true,
+ "Mkfifo": true,
+ "Mknod": true,
+ "Mknodat": true,
+ "Mlock": true,
+ "Mlockall": true,
+ "Mmap": true,
+ "Mount": true,
+ "MoveFile": true,
+ "Mprotect": true,
+ "Msghdr": true,
+ "Munlock": true,
+ "Munlockall": true,
+ "Munmap": true,
+ "MustLoadDLL": true,
+ "NAME_MAX": true,
+ "NETLINK_ADD_MEMBERSHIP": true,
+ "NETLINK_AUDIT": true,
+ "NETLINK_BROADCAST_ERROR": true,
+ "NETLINK_CONNECTOR": true,
+ "NETLINK_DNRTMSG": true,
+ "NETLINK_DROP_MEMBERSHIP": true,
+ "NETLINK_ECRYPTFS": true,
+ "NETLINK_FIB_LOOKUP": true,
+ "NETLINK_FIREWALL": true,
+ "NETLINK_GENERIC": true,
+ "NETLINK_INET_DIAG": true,
+ "NETLINK_IP6_FW": true,
+ "NETLINK_ISCSI": true,
+ "NETLINK_KOBJECT_UEVENT": true,
+ "NETLINK_NETFILTER": true,
+ "NETLINK_NFLOG": true,
+ "NETLINK_NO_ENOBUFS": true,
+ "NETLINK_PKTINFO": true,
+ "NETLINK_RDMA": true,
+ "NETLINK_ROUTE": true,
+ "NETLINK_SCSITRANSPORT": true,
+ "NETLINK_SELINUX": true,
+ "NETLINK_UNUSED": true,
+ "NETLINK_USERSOCK": true,
+ "NETLINK_XFRM": true,
+ "NET_RT_DUMP": true,
+ "NET_RT_DUMP2": true,
+ "NET_RT_FLAGS": true,
+ "NET_RT_IFLIST": true,
+ "NET_RT_IFLIST2": true,
+ "NET_RT_IFLISTL": true,
+ "NET_RT_IFMALIST": true,
+ "NET_RT_MAXID": true,
+ "NET_RT_OIFLIST": true,
+ "NET_RT_OOIFLIST": true,
+ "NET_RT_STAT": true,
+ "NET_RT_STATS": true,
+ "NET_RT_TABLE": true,
+ "NET_RT_TRASH": true,
+ "NLA_ALIGNTO": true,
+ "NLA_F_NESTED": true,
+ "NLA_F_NET_BYTEORDER": true,
+ "NLA_HDRLEN": true,
+ "NLMSG_ALIGNTO": true,
+ "NLMSG_DONE": true,
+ "NLMSG_ERROR": true,
+ "NLMSG_HDRLEN": true,
+ "NLMSG_MIN_TYPE": true,
+ "NLMSG_NOOP": true,
+ "NLMSG_OVERRUN": true,
+ "NLM_F_ACK": true,
+ "NLM_F_APPEND": true,
+ "NLM_F_ATOMIC": true,
+ "NLM_F_CREATE": true,
+ "NLM_F_DUMP": true,
+ "NLM_F_ECHO": true,
+ "NLM_F_EXCL": true,
+ "NLM_F_MATCH": true,
+ "NLM_F_MULTI": true,
+ "NLM_F_REPLACE": true,
+ "NLM_F_REQUEST": true,
+ "NLM_F_ROOT": true,
+ "NOFLSH": true,
+ "NOTE_ABSOLUTE": true,
+ "NOTE_ATTRIB": true,
+ "NOTE_CHILD": true,
+ "NOTE_DELETE": true,
+ "NOTE_EOF": true,
+ "NOTE_EXEC": true,
+ "NOTE_EXIT": true,
+ "NOTE_EXITSTATUS": true,
+ "NOTE_EXTEND": true,
+ "NOTE_FFAND": true,
+ "NOTE_FFCOPY": true,
+ "NOTE_FFCTRLMASK": true,
+ "NOTE_FFLAGSMASK": true,
+ "NOTE_FFNOP": true,
+ "NOTE_FFOR": true,
+ "NOTE_FORK": true,
+ "NOTE_LINK": true,
+ "NOTE_LOWAT": true,
+ "NOTE_NONE": true,
+ "NOTE_NSECONDS": true,
+ "NOTE_PCTRLMASK": true,
+ "NOTE_PDATAMASK": true,
+ "NOTE_REAP": true,
+ "NOTE_RENAME": true,
+ "NOTE_RESOURCEEND": true,
+ "NOTE_REVOKE": true,
+ "NOTE_SECONDS": true,
+ "NOTE_SIGNAL": true,
+ "NOTE_TRACK": true,
+ "NOTE_TRACKERR": true,
+ "NOTE_TRIGGER": true,
+ "NOTE_TRUNCATE": true,
+ "NOTE_USECONDS": true,
+ "NOTE_VM_ERROR": true,
+ "NOTE_VM_PRESSURE": true,
+ "NOTE_VM_PRESSURE_SUDDEN_TERMINATE": true,
+ "NOTE_VM_PRESSURE_TERMINATE": true,
+ "NOTE_WRITE": true,
+ "NameCanonical": true,
+ "NameCanonicalEx": true,
+ "NameDisplay": true,
+ "NameDnsDomain": true,
+ "NameFullyQualifiedDN": true,
+ "NameSamCompatible": true,
+ "NameServicePrincipal": true,
+ "NameUniqueId": true,
+ "NameUnknown": true,
+ "NameUserPrincipal": true,
+ "Nanosleep": true,
+ "NetApiBufferFree": true,
+ "NetGetJoinInformation": true,
+ "NetSetupDomainName": true,
+ "NetSetupUnjoined": true,
+ "NetSetupUnknownStatus": true,
+ "NetSetupWorkgroupName": true,
+ "NetUserGetInfo": true,
+ "NetlinkMessage": true,
+ "NetlinkRIB": true,
+ "NetlinkRouteAttr": true,
+ "NetlinkRouteRequest": true,
+ "NewCallback": true,
+ "NewCallbackCDecl": true,
+ "NewLazyDLL": true,
+ "NlAttr": true,
+ "NlMsgerr": true,
+ "NlMsghdr": true,
+ "NsecToFiletime": true,
+ "NsecToTimespec": true,
+ "NsecToTimeval": true,
+ "Ntohs": true,
+ "OCRNL": true,
+ "OFDEL": true,
+ "OFILL": true,
+ "OFIOGETBMAP": true,
+ "OID_PKIX_KP_SERVER_AUTH": true,
+ "OID_SERVER_GATED_CRYPTO": true,
+ "OID_SGC_NETSCAPE": true,
+ "OLCUC": true,
+ "ONLCR": true,
+ "ONLRET": true,
+ "ONOCR": true,
+ "ONOEOT": true,
+ "OPEN_ALWAYS": true,
+ "OPEN_EXISTING": true,
+ "OPOST": true,
+ "O_ACCMODE": true,
+ "O_ALERT": true,
+ "O_ALT_IO": true,
+ "O_APPEND": true,
+ "O_ASYNC": true,
+ "O_CLOEXEC": true,
+ "O_CREAT": true,
+ "O_DIRECT": true,
+ "O_DIRECTORY": true,
+ "O_DSYNC": true,
+ "O_EVTONLY": true,
+ "O_EXCL": true,
+ "O_EXEC": true,
+ "O_EXLOCK": true,
+ "O_FSYNC": true,
+ "O_LARGEFILE": true,
+ "O_NDELAY": true,
+ "O_NOATIME": true,
+ "O_NOCTTY": true,
+ "O_NOFOLLOW": true,
+ "O_NONBLOCK": true,
+ "O_NOSIGPIPE": true,
+ "O_POPUP": true,
+ "O_RDONLY": true,
+ "O_RDWR": true,
+ "O_RSYNC": true,
+ "O_SHLOCK": true,
+ "O_SYMLINK": true,
+ "O_SYNC": true,
+ "O_TRUNC": true,
+ "O_TTY_INIT": true,
+ "O_WRONLY": true,
+ "Open": true,
+ "OpenCurrentProcessToken": true,
+ "OpenProcess": true,
+ "OpenProcessToken": true,
+ "Openat": true,
+ "Overlapped": true,
+ "PACKET_ADD_MEMBERSHIP": true,
+ "PACKET_BROADCAST": true,
+ "PACKET_DROP_MEMBERSHIP": true,
+ "PACKET_FASTROUTE": true,
+ "PACKET_HOST": true,
+ "PACKET_LOOPBACK": true,
+ "PACKET_MR_ALLMULTI": true,
+ "PACKET_MR_MULTICAST": true,
+ "PACKET_MR_PROMISC": true,
+ "PACKET_MULTICAST": true,
+ "PACKET_OTHERHOST": true,
+ "PACKET_OUTGOING": true,
+ "PACKET_RECV_OUTPUT": true,
+ "PACKET_RX_RING": true,
+ "PACKET_STATISTICS": true,
+ "PAGE_EXECUTE_READ": true,
+ "PAGE_EXECUTE_READWRITE": true,
+ "PAGE_EXECUTE_WRITECOPY": true,
+ "PAGE_READONLY": true,
+ "PAGE_READWRITE": true,
+ "PAGE_WRITECOPY": true,
+ "PARENB": true,
+ "PARMRK": true,
+ "PARODD": true,
+ "PENDIN": true,
+ "PFL_HIDDEN": true,
+ "PFL_MATCHES_PROTOCOL_ZERO": true,
+ "PFL_MULTIPLE_PROTO_ENTRIES": true,
+ "PFL_NETWORKDIRECT_PROVIDER": true,
+ "PFL_RECOMMENDED_PROTO_ENTRY": true,
+ "PF_FLUSH": true,
+ "PKCS_7_ASN_ENCODING": true,
+ "PMC5_PIPELINE_FLUSH": true,
+ "PRIO_PGRP": true,
+ "PRIO_PROCESS": true,
+ "PRIO_USER": true,
+ "PRI_IOFLUSH": true,
+ "PROCESS_QUERY_INFORMATION": true,
+ "PROCESS_TERMINATE": true,
+ "PROT_EXEC": true,
+ "PROT_GROWSDOWN": true,
+ "PROT_GROWSUP": true,
+ "PROT_NONE": true,
+ "PROT_READ": true,
+ "PROT_WRITE": true,
+ "PROV_DH_SCHANNEL": true,
+ "PROV_DSS": true,
+ "PROV_DSS_DH": true,
+ "PROV_EC_ECDSA_FULL": true,
+ "PROV_EC_ECDSA_SIG": true,
+ "PROV_EC_ECNRA_FULL": true,
+ "PROV_EC_ECNRA_SIG": true,
+ "PROV_FORTEZZA": true,
+ "PROV_INTEL_SEC": true,
+ "PROV_MS_EXCHANGE": true,
+ "PROV_REPLACE_OWF": true,
+ "PROV_RNG": true,
+ "PROV_RSA_AES": true,
+ "PROV_RSA_FULL": true,
+ "PROV_RSA_SCHANNEL": true,
+ "PROV_RSA_SIG": true,
+ "PROV_SPYRUS_LYNKS": true,
+ "PROV_SSL": true,
+ "PR_CAPBSET_DROP": true,
+ "PR_CAPBSET_READ": true,
+ "PR_CLEAR_SECCOMP_FILTER": true,
+ "PR_ENDIAN_BIG": true,
+ "PR_ENDIAN_LITTLE": true,
+ "PR_ENDIAN_PPC_LITTLE": true,
+ "PR_FPEMU_NOPRINT": true,
+ "PR_FPEMU_SIGFPE": true,
+ "PR_FP_EXC_ASYNC": true,
+ "PR_FP_EXC_DISABLED": true,
+ "PR_FP_EXC_DIV": true,
+ "PR_FP_EXC_INV": true,
+ "PR_FP_EXC_NONRECOV": true,
+ "PR_FP_EXC_OVF": true,
+ "PR_FP_EXC_PRECISE": true,
+ "PR_FP_EXC_RES": true,
+ "PR_FP_EXC_SW_ENABLE": true,
+ "PR_FP_EXC_UND": true,
+ "PR_GET_DUMPABLE": true,
+ "PR_GET_ENDIAN": true,
+ "PR_GET_FPEMU": true,
+ "PR_GET_FPEXC": true,
+ "PR_GET_KEEPCAPS": true,
+ "PR_GET_NAME": true,
+ "PR_GET_PDEATHSIG": true,
+ "PR_GET_SECCOMP": true,
+ "PR_GET_SECCOMP_FILTER": true,
+ "PR_GET_SECUREBITS": true,
+ "PR_GET_TIMERSLACK": true,
+ "PR_GET_TIMING": true,
+ "PR_GET_TSC": true,
+ "PR_GET_UNALIGN": true,
+ "PR_MCE_KILL": true,
+ "PR_MCE_KILL_CLEAR": true,
+ "PR_MCE_KILL_DEFAULT": true,
+ "PR_MCE_KILL_EARLY": true,
+ "PR_MCE_KILL_GET": true,
+ "PR_MCE_KILL_LATE": true,
+ "PR_MCE_KILL_SET": true,
+ "PR_SECCOMP_FILTER_EVENT": true,
+ "PR_SECCOMP_FILTER_SYSCALL": true,
+ "PR_SET_DUMPABLE": true,
+ "PR_SET_ENDIAN": true,
+ "PR_SET_FPEMU": true,
+ "PR_SET_FPEXC": true,
+ "PR_SET_KEEPCAPS": true,
+ "PR_SET_NAME": true,
+ "PR_SET_PDEATHSIG": true,
+ "PR_SET_PTRACER": true,
+ "PR_SET_SECCOMP": true,
+ "PR_SET_SECCOMP_FILTER": true,
+ "PR_SET_SECUREBITS": true,
+ "PR_SET_TIMERSLACK": true,
+ "PR_SET_TIMING": true,
+ "PR_SET_TSC": true,
+ "PR_SET_UNALIGN": true,
+ "PR_TASK_PERF_EVENTS_DISABLE": true,
+ "PR_TASK_PERF_EVENTS_ENABLE": true,
+ "PR_TIMING_STATISTICAL": true,
+ "PR_TIMING_TIMESTAMP": true,
+ "PR_TSC_ENABLE": true,
+ "PR_TSC_SIGSEGV": true,
+ "PR_UNALIGN_NOPRINT": true,
+ "PR_UNALIGN_SIGBUS": true,
+ "PTRACE_ARCH_PRCTL": true,
+ "PTRACE_ATTACH": true,
+ "PTRACE_CONT": true,
+ "PTRACE_DETACH": true,
+ "PTRACE_EVENT_CLONE": true,
+ "PTRACE_EVENT_EXEC": true,
+ "PTRACE_EVENT_EXIT": true,
+ "PTRACE_EVENT_FORK": true,
+ "PTRACE_EVENT_VFORK": true,
+ "PTRACE_EVENT_VFORK_DONE": true,
+ "PTRACE_GETCRUNCHREGS": true,
+ "PTRACE_GETEVENTMSG": true,
+ "PTRACE_GETFPREGS": true,
+ "PTRACE_GETFPXREGS": true,
+ "PTRACE_GETHBPREGS": true,
+ "PTRACE_GETREGS": true,
+ "PTRACE_GETREGSET": true,
+ "PTRACE_GETSIGINFO": true,
+ "PTRACE_GETVFPREGS": true,
+ "PTRACE_GETWMMXREGS": true,
+ "PTRACE_GET_THREAD_AREA": true,
+ "PTRACE_KILL": true,
+ "PTRACE_OLDSETOPTIONS": true,
+ "PTRACE_O_MASK": true,
+ "PTRACE_O_TRACECLONE": true,
+ "PTRACE_O_TRACEEXEC": true,
+ "PTRACE_O_TRACEEXIT": true,
+ "PTRACE_O_TRACEFORK": true,
+ "PTRACE_O_TRACESYSGOOD": true,
+ "PTRACE_O_TRACEVFORK": true,
+ "PTRACE_O_TRACEVFORKDONE": true,
+ "PTRACE_PEEKDATA": true,
+ "PTRACE_PEEKTEXT": true,
+ "PTRACE_PEEKUSR": true,
+ "PTRACE_POKEDATA": true,
+ "PTRACE_POKETEXT": true,
+ "PTRACE_POKEUSR": true,
+ "PTRACE_SETCRUNCHREGS": true,
+ "PTRACE_SETFPREGS": true,
+ "PTRACE_SETFPXREGS": true,
+ "PTRACE_SETHBPREGS": true,
+ "PTRACE_SETOPTIONS": true,
+ "PTRACE_SETREGS": true,
+ "PTRACE_SETREGSET": true,
+ "PTRACE_SETSIGINFO": true,
+ "PTRACE_SETVFPREGS": true,
+ "PTRACE_SETWMMXREGS": true,
+ "PTRACE_SET_SYSCALL": true,
+ "PTRACE_SET_THREAD_AREA": true,
+ "PTRACE_SINGLEBLOCK": true,
+ "PTRACE_SINGLESTEP": true,
+ "PTRACE_SYSCALL": true,
+ "PTRACE_SYSEMU": true,
+ "PTRACE_SYSEMU_SINGLESTEP": true,
+ "PTRACE_TRACEME": true,
+ "PT_ATTACH": true,
+ "PT_ATTACHEXC": true,
+ "PT_CONTINUE": true,
+ "PT_DATA_ADDR": true,
+ "PT_DENY_ATTACH": true,
+ "PT_DETACH": true,
+ "PT_FIRSTMACH": true,
+ "PT_FORCEQUOTA": true,
+ "PT_KILL": true,
+ "PT_MASK": true,
+ "PT_READ_D": true,
+ "PT_READ_I": true,
+ "PT_READ_U": true,
+ "PT_SIGEXC": true,
+ "PT_STEP": true,
+ "PT_TEXT_ADDR": true,
+ "PT_TEXT_END_ADDR": true,
+ "PT_THUPDATE": true,
+ "PT_TRACE_ME": true,
+ "PT_WRITE_D": true,
+ "PT_WRITE_I": true,
+ "PT_WRITE_U": true,
+ "ParseDirent": true,
+ "ParseNetlinkMessage": true,
+ "ParseNetlinkRouteAttr": true,
+ "ParseRoutingMessage": true,
+ "ParseRoutingSockaddr": true,
+ "ParseSocketControlMessage": true,
+ "ParseUnixCredentials": true,
+ "ParseUnixRights": true,
+ "PathMax": true,
+ "Pathconf": true,
+ "Pause": true,
+ "Pipe": true,
+ "Pipe2": true,
+ "PivotRoot": true,
+ "Pointer": true,
+ "PostQueuedCompletionStatus": true,
+ "Pread": true,
+ "Proc": true,
+ "ProcAttr": true,
+ "Process32First": true,
+ "Process32Next": true,
+ "ProcessEntry32": true,
+ "ProcessInformation": true,
+ "Protoent": true,
+ "PtraceAttach": true,
+ "PtraceCont": true,
+ "PtraceDetach": true,
+ "PtraceGetEventMsg": true,
+ "PtraceGetRegs": true,
+ "PtracePeekData": true,
+ "PtracePeekText": true,
+ "PtracePokeData": true,
+ "PtracePokeText": true,
+ "PtraceRegs": true,
+ "PtraceSetOptions": true,
+ "PtraceSetRegs": true,
+ "PtraceSingleStep": true,
+ "PtraceSyscall": true,
+ "Pwrite": true,
+ "REG_BINARY": true,
+ "REG_DWORD": true,
+ "REG_DWORD_BIG_ENDIAN": true,
+ "REG_DWORD_LITTLE_ENDIAN": true,
+ "REG_EXPAND_SZ": true,
+ "REG_FULL_RESOURCE_DESCRIPTOR": true,
+ "REG_LINK": true,
+ "REG_MULTI_SZ": true,
+ "REG_NONE": true,
+ "REG_QWORD": true,
+ "REG_QWORD_LITTLE_ENDIAN": true,
+ "REG_RESOURCE_LIST": true,
+ "REG_RESOURCE_REQUIREMENTS_LIST": true,
+ "REG_SZ": true,
+ "RLIMIT_AS": true,
+ "RLIMIT_CORE": true,
+ "RLIMIT_CPU": true,
+ "RLIMIT_DATA": true,
+ "RLIMIT_FSIZE": true,
+ "RLIMIT_NOFILE": true,
+ "RLIMIT_STACK": true,
+ "RLIM_INFINITY": true,
+ "RTAX_ADVMSS": true,
+ "RTAX_AUTHOR": true,
+ "RTAX_BRD": true,
+ "RTAX_CWND": true,
+ "RTAX_DST": true,
+ "RTAX_FEATURES": true,
+ "RTAX_FEATURE_ALLFRAG": true,
+ "RTAX_FEATURE_ECN": true,
+ "RTAX_FEATURE_SACK": true,
+ "RTAX_FEATURE_TIMESTAMP": true,
+ "RTAX_GATEWAY": true,
+ "RTAX_GENMASK": true,
+ "RTAX_HOPLIMIT": true,
+ "RTAX_IFA": true,
+ "RTAX_IFP": true,
+ "RTAX_INITCWND": true,
+ "RTAX_INITRWND": true,
+ "RTAX_LABEL": true,
+ "RTAX_LOCK": true,
+ "RTAX_MAX": true,
+ "RTAX_MTU": true,
+ "RTAX_NETMASK": true,
+ "RTAX_REORDERING": true,
+ "RTAX_RTO_MIN": true,
+ "RTAX_RTT": true,
+ "RTAX_RTTVAR": true,
+ "RTAX_SRC": true,
+ "RTAX_SRCMASK": true,
+ "RTAX_SSTHRESH": true,
+ "RTAX_TAG": true,
+ "RTAX_UNSPEC": true,
+ "RTAX_WINDOW": true,
+ "RTA_ALIGNTO": true,
+ "RTA_AUTHOR": true,
+ "RTA_BRD": true,
+ "RTA_CACHEINFO": true,
+ "RTA_DST": true,
+ "RTA_FLOW": true,
+ "RTA_GATEWAY": true,
+ "RTA_GENMASK": true,
+ "RTA_IFA": true,
+ "RTA_IFP": true,
+ "RTA_IIF": true,
+ "RTA_LABEL": true,
+ "RTA_MAX": true,
+ "RTA_METRICS": true,
+ "RTA_MULTIPATH": true,
+ "RTA_NETMASK": true,
+ "RTA_OIF": true,
+ "RTA_PREFSRC": true,
+ "RTA_PRIORITY": true,
+ "RTA_SRC": true,
+ "RTA_SRCMASK": true,
+ "RTA_TABLE": true,
+ "RTA_TAG": true,
+ "RTA_UNSPEC": true,
+ "RTCF_DIRECTSRC": true,
+ "RTCF_DOREDIRECT": true,
+ "RTCF_LOG": true,
+ "RTCF_MASQ": true,
+ "RTCF_NAT": true,
+ "RTCF_VALVE": true,
+ "RTF_ADDRCLASSMASK": true,
+ "RTF_ADDRCONF": true,
+ "RTF_ALLONLINK": true,
+ "RTF_ANNOUNCE": true,
+ "RTF_BLACKHOLE": true,
+ "RTF_BROADCAST": true,
+ "RTF_CACHE": true,
+ "RTF_CLONED": true,
+ "RTF_CLONING": true,
+ "RTF_CONDEMNED": true,
+ "RTF_DEFAULT": true,
+ "RTF_DELCLONE": true,
+ "RTF_DONE": true,
+ "RTF_DYNAMIC": true,
+ "RTF_FLOW": true,
+ "RTF_FMASK": true,
+ "RTF_GATEWAY": true,
+ "RTF_GWFLAG_COMPAT": true,
+ "RTF_HOST": true,
+ "RTF_IFREF": true,
+ "RTF_IFSCOPE": true,
+ "RTF_INTERFACE": true,
+ "RTF_IRTT": true,
+ "RTF_LINKRT": true,
+ "RTF_LLDATA": true,
+ "RTF_LLINFO": true,
+ "RTF_LOCAL": true,
+ "RTF_MASK": true,
+ "RTF_MODIFIED": true,
+ "RTF_MPATH": true,
+ "RTF_MPLS": true,
+ "RTF_MSS": true,
+ "RTF_MTU": true,
+ "RTF_MULTICAST": true,
+ "RTF_NAT": true,
+ "RTF_NOFORWARD": true,
+ "RTF_NONEXTHOP": true,
+ "RTF_NOPMTUDISC": true,
+ "RTF_PERMANENT_ARP": true,
+ "RTF_PINNED": true,
+ "RTF_POLICY": true,
+ "RTF_PRCLONING": true,
+ "RTF_PROTO1": true,
+ "RTF_PROTO2": true,
+ "RTF_PROTO3": true,
+ "RTF_REINSTATE": true,
+ "RTF_REJECT": true,
+ "RTF_RNH_LOCKED": true,
+ "RTF_SOURCE": true,
+ "RTF_SRC": true,
+ "RTF_STATIC": true,
+ "RTF_STICKY": true,
+ "RTF_THROW": true,
+ "RTF_TUNNEL": true,
+ "RTF_UP": true,
+ "RTF_USETRAILERS": true,
+ "RTF_WASCLONED": true,
+ "RTF_WINDOW": true,
+ "RTF_XRESOLVE": true,
+ "RTM_ADD": true,
+ "RTM_BASE": true,
+ "RTM_CHANGE": true,
+ "RTM_CHGADDR": true,
+ "RTM_DELACTION": true,
+ "RTM_DELADDR": true,
+ "RTM_DELADDRLABEL": true,
+ "RTM_DELETE": true,
+ "RTM_DELLINK": true,
+ "RTM_DELMADDR": true,
+ "RTM_DELNEIGH": true,
+ "RTM_DELQDISC": true,
+ "RTM_DELROUTE": true,
+ "RTM_DELRULE": true,
+ "RTM_DELTCLASS": true,
+ "RTM_DELTFILTER": true,
+ "RTM_DESYNC": true,
+ "RTM_F_CLONED": true,
+ "RTM_F_EQUALIZE": true,
+ "RTM_F_NOTIFY": true,
+ "RTM_F_PREFIX": true,
+ "RTM_GET": true,
+ "RTM_GET2": true,
+ "RTM_GETACTION": true,
+ "RTM_GETADDR": true,
+ "RTM_GETADDRLABEL": true,
+ "RTM_GETANYCAST": true,
+ "RTM_GETDCB": true,
+ "RTM_GETLINK": true,
+ "RTM_GETMULTICAST": true,
+ "RTM_GETNEIGH": true,
+ "RTM_GETNEIGHTBL": true,
+ "RTM_GETQDISC": true,
+ "RTM_GETROUTE": true,
+ "RTM_GETRULE": true,
+ "RTM_GETTCLASS": true,
+ "RTM_GETTFILTER": true,
+ "RTM_IEEE80211": true,
+ "RTM_IFANNOUNCE": true,
+ "RTM_IFINFO": true,
+ "RTM_IFINFO2": true,
+ "RTM_LLINFO_UPD": true,
+ "RTM_LOCK": true,
+ "RTM_LOSING": true,
+ "RTM_MAX": true,
+ "RTM_MAXSIZE": true,
+ "RTM_MISS": true,
+ "RTM_NEWACTION": true,
+ "RTM_NEWADDR": true,
+ "RTM_NEWADDRLABEL": true,
+ "RTM_NEWLINK": true,
+ "RTM_NEWMADDR": true,
+ "RTM_NEWMADDR2": true,
+ "RTM_NEWNDUSEROPT": true,
+ "RTM_NEWNEIGH": true,
+ "RTM_NEWNEIGHTBL": true,
+ "RTM_NEWPREFIX": true,
+ "RTM_NEWQDISC": true,
+ "RTM_NEWROUTE": true,
+ "RTM_NEWRULE": true,
+ "RTM_NEWTCLASS": true,
+ "RTM_NEWTFILTER": true,
+ "RTM_NR_FAMILIES": true,
+ "RTM_NR_MSGTYPES": true,
+ "RTM_OIFINFO": true,
+ "RTM_OLDADD": true,
+ "RTM_OLDDEL": true,
+ "RTM_OOIFINFO": true,
+ "RTM_REDIRECT": true,
+ "RTM_RESOLVE": true,
+ "RTM_RTTUNIT": true,
+ "RTM_SETDCB": true,
+ "RTM_SETGATE": true,
+ "RTM_SETLINK": true,
+ "RTM_SETNEIGHTBL": true,
+ "RTM_VERSION": true,
+ "RTNH_ALIGNTO": true,
+ "RTNH_F_DEAD": true,
+ "RTNH_F_ONLINK": true,
+ "RTNH_F_PERVASIVE": true,
+ "RTNLGRP_IPV4_IFADDR": true,
+ "RTNLGRP_IPV4_MROUTE": true,
+ "RTNLGRP_IPV4_ROUTE": true,
+ "RTNLGRP_IPV4_RULE": true,
+ "RTNLGRP_IPV6_IFADDR": true,
+ "RTNLGRP_IPV6_IFINFO": true,
+ "RTNLGRP_IPV6_MROUTE": true,
+ "RTNLGRP_IPV6_PREFIX": true,
+ "RTNLGRP_IPV6_ROUTE": true,
+ "RTNLGRP_IPV6_RULE": true,
+ "RTNLGRP_LINK": true,
+ "RTNLGRP_ND_USEROPT": true,
+ "RTNLGRP_NEIGH": true,
+ "RTNLGRP_NONE": true,
+ "RTNLGRP_NOTIFY": true,
+ "RTNLGRP_TC": true,
+ "RTN_ANYCAST": true,
+ "RTN_BLACKHOLE": true,
+ "RTN_BROADCAST": true,
+ "RTN_LOCAL": true,
+ "RTN_MAX": true,
+ "RTN_MULTICAST": true,
+ "RTN_NAT": true,
+ "RTN_PROHIBIT": true,
+ "RTN_THROW": true,
+ "RTN_UNICAST": true,
+ "RTN_UNREACHABLE": true,
+ "RTN_UNSPEC": true,
+ "RTN_XRESOLVE": true,
+ "RTPROT_BIRD": true,
+ "RTPROT_BOOT": true,
+ "RTPROT_DHCP": true,
+ "RTPROT_DNROUTED": true,
+ "RTPROT_GATED": true,
+ "RTPROT_KERNEL": true,
+ "RTPROT_MRT": true,
+ "RTPROT_NTK": true,
+ "RTPROT_RA": true,
+ "RTPROT_REDIRECT": true,
+ "RTPROT_STATIC": true,
+ "RTPROT_UNSPEC": true,
+ "RTPROT_XORP": true,
+ "RTPROT_ZEBRA": true,
+ "RTV_EXPIRE": true,
+ "RTV_HOPCOUNT": true,
+ "RTV_MTU": true,
+ "RTV_RPIPE": true,
+ "RTV_RTT": true,
+ "RTV_RTTVAR": true,
+ "RTV_SPIPE": true,
+ "RTV_SSTHRESH": true,
+ "RTV_WEIGHT": true,
+ "RT_CACHING_CONTEXT": true,
+ "RT_CLASS_DEFAULT": true,
+ "RT_CLASS_LOCAL": true,
+ "RT_CLASS_MAIN": true,
+ "RT_CLASS_MAX": true,
+ "RT_CLASS_UNSPEC": true,
+ "RT_DEFAULT_FIB": true,
+ "RT_NORTREF": true,
+ "RT_SCOPE_HOST": true,
+ "RT_SCOPE_LINK": true,
+ "RT_SCOPE_NOWHERE": true,
+ "RT_SCOPE_SITE": true,
+ "RT_SCOPE_UNIVERSE": true,
+ "RT_TABLEID_MAX": true,
+ "RT_TABLE_COMPAT": true,
+ "RT_TABLE_DEFAULT": true,
+ "RT_TABLE_LOCAL": true,
+ "RT_TABLE_MAIN": true,
+ "RT_TABLE_MAX": true,
+ "RT_TABLE_UNSPEC": true,
+ "RUSAGE_CHILDREN": true,
+ "RUSAGE_SELF": true,
+ "RUSAGE_THREAD": true,
+ "Radvisory_t": true,
+ "RawConn": true,
+ "RawSockaddr": true,
+ "RawSockaddrAny": true,
+ "RawSockaddrDatalink": true,
+ "RawSockaddrInet4": true,
+ "RawSockaddrInet6": true,
+ "RawSockaddrLinklayer": true,
+ "RawSockaddrNetlink": true,
+ "RawSockaddrUnix": true,
+ "RawSyscall": true,
+ "RawSyscall6": true,
+ "Read": true,
+ "ReadConsole": true,
+ "ReadDirectoryChanges": true,
+ "ReadDirent": true,
+ "ReadFile": true,
+ "Readlink": true,
+ "Reboot": true,
+ "Recvfrom": true,
+ "Recvmsg": true,
+ "RegCloseKey": true,
+ "RegEnumKeyEx": true,
+ "RegOpenKeyEx": true,
+ "RegQueryInfoKey": true,
+ "RegQueryValueEx": true,
+ "RemoveDirectory": true,
+ "Removexattr": true,
+ "Rename": true,
+ "Renameat": true,
+ "Revoke": true,
+ "Rlimit": true,
+ "Rmdir": true,
+ "RouteMessage": true,
+ "RouteRIB": true,
+ "RtAttr": true,
+ "RtGenmsg": true,
+ "RtMetrics": true,
+ "RtMsg": true,
+ "RtMsghdr": true,
+ "RtNexthop": true,
+ "Rusage": true,
+ "SCM_BINTIME": true,
+ "SCM_CREDENTIALS": true,
+ "SCM_CREDS": true,
+ "SCM_RIGHTS": true,
+ "SCM_TIMESTAMP": true,
+ "SCM_TIMESTAMPING": true,
+ "SCM_TIMESTAMPNS": true,
+ "SCM_TIMESTAMP_MONOTONIC": true,
+ "SHUT_RD": true,
+ "SHUT_RDWR": true,
+ "SHUT_WR": true,
+ "SID": true,
+ "SIDAndAttributes": true,
+ "SIGABRT": true,
+ "SIGALRM": true,
+ "SIGBUS": true,
+ "SIGCHLD": true,
+ "SIGCLD": true,
+ "SIGCONT": true,
+ "SIGEMT": true,
+ "SIGFPE": true,
+ "SIGHUP": true,
+ "SIGILL": true,
+ "SIGINFO": true,
+ "SIGINT": true,
+ "SIGIO": true,
+ "SIGIOT": true,
+ "SIGKILL": true,
+ "SIGLIBRT": true,
+ "SIGLWP": true,
+ "SIGPIPE": true,
+ "SIGPOLL": true,
+ "SIGPROF": true,
+ "SIGPWR": true,
+ "SIGQUIT": true,
+ "SIGSEGV": true,
+ "SIGSTKFLT": true,
+ "SIGSTOP": true,
+ "SIGSYS": true,
+ "SIGTERM": true,
+ "SIGTHR": true,
+ "SIGTRAP": true,
+ "SIGTSTP": true,
+ "SIGTTIN": true,
+ "SIGTTOU": true,
+ "SIGUNUSED": true,
+ "SIGURG": true,
+ "SIGUSR1": true,
+ "SIGUSR2": true,
+ "SIGVTALRM": true,
+ "SIGWINCH": true,
+ "SIGXCPU": true,
+ "SIGXFSZ": true,
+ "SIOCADDDLCI": true,
+ "SIOCADDMULTI": true,
+ "SIOCADDRT": true,
+ "SIOCAIFADDR": true,
+ "SIOCAIFGROUP": true,
+ "SIOCALIFADDR": true,
+ "SIOCARPIPLL": true,
+ "SIOCATMARK": true,
+ "SIOCAUTOADDR": true,
+ "SIOCAUTONETMASK": true,
+ "SIOCBRDGADD": true,
+ "SIOCBRDGADDS": true,
+ "SIOCBRDGARL": true,
+ "SIOCBRDGDADDR": true,
+ "SIOCBRDGDEL": true,
+ "SIOCBRDGDELS": true,
+ "SIOCBRDGFLUSH": true,
+ "SIOCBRDGFRL": true,
+ "SIOCBRDGGCACHE": true,
+ "SIOCBRDGGFD": true,
+ "SIOCBRDGGHT": true,
+ "SIOCBRDGGIFFLGS": true,
+ "SIOCBRDGGMA": true,
+ "SIOCBRDGGPARAM": true,
+ "SIOCBRDGGPRI": true,
+ "SIOCBRDGGRL": true,
+ "SIOCBRDGGSIFS": true,
+ "SIOCBRDGGTO": true,
+ "SIOCBRDGIFS": true,
+ "SIOCBRDGRTS": true,
+ "SIOCBRDGSADDR": true,
+ "SIOCBRDGSCACHE": true,
+ "SIOCBRDGSFD": true,
+ "SIOCBRDGSHT": true,
+ "SIOCBRDGSIFCOST": true,
+ "SIOCBRDGSIFFLGS": true,
+ "SIOCBRDGSIFPRIO": true,
+ "SIOCBRDGSMA": true,
+ "SIOCBRDGSPRI": true,
+ "SIOCBRDGSPROTO": true,
+ "SIOCBRDGSTO": true,
+ "SIOCBRDGSTXHC": true,
+ "SIOCDARP": true,
+ "SIOCDELDLCI": true,
+ "SIOCDELMULTI": true,
+ "SIOCDELRT": true,
+ "SIOCDEVPRIVATE": true,
+ "SIOCDIFADDR": true,
+ "SIOCDIFGROUP": true,
+ "SIOCDIFPHYADDR": true,
+ "SIOCDLIFADDR": true,
+ "SIOCDRARP": true,
+ "SIOCGARP": true,
+ "SIOCGDRVSPEC": true,
+ "SIOCGETKALIVE": true,
+ "SIOCGETLABEL": true,
+ "SIOCGETPFLOW": true,
+ "SIOCGETPFSYNC": true,
+ "SIOCGETSGCNT": true,
+ "SIOCGETVIFCNT": true,
+ "SIOCGETVLAN": true,
+ "SIOCGHIWAT": true,
+ "SIOCGIFADDR": true,
+ "SIOCGIFADDRPREF": true,
+ "SIOCGIFALIAS": true,
+ "SIOCGIFALTMTU": true,
+ "SIOCGIFASYNCMAP": true,
+ "SIOCGIFBOND": true,
+ "SIOCGIFBR": true,
+ "SIOCGIFBRDADDR": true,
+ "SIOCGIFCAP": true,
+ "SIOCGIFCONF": true,
+ "SIOCGIFCOUNT": true,
+ "SIOCGIFDATA": true,
+ "SIOCGIFDESCR": true,
+ "SIOCGIFDEVMTU": true,
+ "SIOCGIFDLT": true,
+ "SIOCGIFDSTADDR": true,
+ "SIOCGIFENCAP": true,
+ "SIOCGIFFIB": true,
+ "SIOCGIFFLAGS": true,
+ "SIOCGIFGATTR": true,
+ "SIOCGIFGENERIC": true,
+ "SIOCGIFGMEMB": true,
+ "SIOCGIFGROUP": true,
+ "SIOCGIFHARDMTU": true,
+ "SIOCGIFHWADDR": true,
+ "SIOCGIFINDEX": true,
+ "SIOCGIFKPI": true,
+ "SIOCGIFMAC": true,
+ "SIOCGIFMAP": true,
+ "SIOCGIFMEDIA": true,
+ "SIOCGIFMEM": true,
+ "SIOCGIFMETRIC": true,
+ "SIOCGIFMTU": true,
+ "SIOCGIFNAME": true,
+ "SIOCGIFNETMASK": true,
+ "SIOCGIFPDSTADDR": true,
+ "SIOCGIFPFLAGS": true,
+ "SIOCGIFPHYS": true,
+ "SIOCGIFPRIORITY": true,
+ "SIOCGIFPSRCADDR": true,
+ "SIOCGIFRDOMAIN": true,
+ "SIOCGIFRTLABEL": true,
+ "SIOCGIFSLAVE": true,
+ "SIOCGIFSTATUS": true,
+ "SIOCGIFTIMESLOT": true,
+ "SIOCGIFTXQLEN": true,
+ "SIOCGIFVLAN": true,
+ "SIOCGIFWAKEFLAGS": true,
+ "SIOCGIFXFLAGS": true,
+ "SIOCGLIFADDR": true,
+ "SIOCGLIFPHYADDR": true,
+ "SIOCGLIFPHYRTABLE": true,
+ "SIOCGLIFPHYTTL": true,
+ "SIOCGLINKSTR": true,
+ "SIOCGLOWAT": true,
+ "SIOCGPGRP": true,
+ "SIOCGPRIVATE_0": true,
+ "SIOCGPRIVATE_1": true,
+ "SIOCGRARP": true,
+ "SIOCGSPPPPARAMS": true,
+ "SIOCGSTAMP": true,
+ "SIOCGSTAMPNS": true,
+ "SIOCGVH": true,
+ "SIOCGVNETID": true,
+ "SIOCIFCREATE": true,
+ "SIOCIFCREATE2": true,
+ "SIOCIFDESTROY": true,
+ "SIOCIFGCLONERS": true,
+ "SIOCINITIFADDR": true,
+ "SIOCPROTOPRIVATE": true,
+ "SIOCRSLVMULTI": true,
+ "SIOCRTMSG": true,
+ "SIOCSARP": true,
+ "SIOCSDRVSPEC": true,
+ "SIOCSETKALIVE": true,
+ "SIOCSETLABEL": true,
+ "SIOCSETPFLOW": true,
+ "SIOCSETPFSYNC": true,
+ "SIOCSETVLAN": true,
+ "SIOCSHIWAT": true,
+ "SIOCSIFADDR": true,
+ "SIOCSIFADDRPREF": true,
+ "SIOCSIFALTMTU": true,
+ "SIOCSIFASYNCMAP": true,
+ "SIOCSIFBOND": true,
+ "SIOCSIFBR": true,
+ "SIOCSIFBRDADDR": true,
+ "SIOCSIFCAP": true,
+ "SIOCSIFDESCR": true,
+ "SIOCSIFDSTADDR": true,
+ "SIOCSIFENCAP": true,
+ "SIOCSIFFIB": true,
+ "SIOCSIFFLAGS": true,
+ "SIOCSIFGATTR": true,
+ "SIOCSIFGENERIC": true,
+ "SIOCSIFHWADDR": true,
+ "SIOCSIFHWBROADCAST": true,
+ "SIOCSIFKPI": true,
+ "SIOCSIFLINK": true,
+ "SIOCSIFLLADDR": true,
+ "SIOCSIFMAC": true,
+ "SIOCSIFMAP": true,
+ "SIOCSIFMEDIA": true,
+ "SIOCSIFMEM": true,
+ "SIOCSIFMETRIC": true,
+ "SIOCSIFMTU": true,
+ "SIOCSIFNAME": true,
+ "SIOCSIFNETMASK": true,
+ "SIOCSIFPFLAGS": true,
+ "SIOCSIFPHYADDR": true,
+ "SIOCSIFPHYS": true,
+ "SIOCSIFPRIORITY": true,
+ "SIOCSIFRDOMAIN": true,
+ "SIOCSIFRTLABEL": true,
+ "SIOCSIFRVNET": true,
+ "SIOCSIFSLAVE": true,
+ "SIOCSIFTIMESLOT": true,
+ "SIOCSIFTXQLEN": true,
+ "SIOCSIFVLAN": true,
+ "SIOCSIFVNET": true,
+ "SIOCSIFXFLAGS": true,
+ "SIOCSLIFPHYADDR": true,
+ "SIOCSLIFPHYRTABLE": true,
+ "SIOCSLIFPHYTTL": true,
+ "SIOCSLINKSTR": true,
+ "SIOCSLOWAT": true,
+ "SIOCSPGRP": true,
+ "SIOCSRARP": true,
+ "SIOCSSPPPPARAMS": true,
+ "SIOCSVH": true,
+ "SIOCSVNETID": true,
+ "SIOCZIFDATA": true,
+ "SIO_GET_EXTENSION_FUNCTION_POINTER": true,
+ "SIO_GET_INTERFACE_LIST": true,
+ "SIO_KEEPALIVE_VALS": true,
+ "SIO_UDP_CONNRESET": true,
+ "SOCK_CLOEXEC": true,
+ "SOCK_DCCP": true,
+ "SOCK_DGRAM": true,
+ "SOCK_FLAGS_MASK": true,
+ "SOCK_MAXADDRLEN": true,
+ "SOCK_NONBLOCK": true,
+ "SOCK_NOSIGPIPE": true,
+ "SOCK_PACKET": true,
+ "SOCK_RAW": true,
+ "SOCK_RDM": true,
+ "SOCK_SEQPACKET": true,
+ "SOCK_STREAM": true,
+ "SOL_AAL": true,
+ "SOL_ATM": true,
+ "SOL_DECNET": true,
+ "SOL_ICMPV6": true,
+ "SOL_IP": true,
+ "SOL_IPV6": true,
+ "SOL_IRDA": true,
+ "SOL_PACKET": true,
+ "SOL_RAW": true,
+ "SOL_SOCKET": true,
+ "SOL_TCP": true,
+ "SOL_X25": true,
+ "SOMAXCONN": true,
+ "SO_ACCEPTCONN": true,
+ "SO_ACCEPTFILTER": true,
+ "SO_ATTACH_FILTER": true,
+ "SO_BINDANY": true,
+ "SO_BINDTODEVICE": true,
+ "SO_BINTIME": true,
+ "SO_BROADCAST": true,
+ "SO_BSDCOMPAT": true,
+ "SO_DEBUG": true,
+ "SO_DETACH_FILTER": true,
+ "SO_DOMAIN": true,
+ "SO_DONTROUTE": true,
+ "SO_DONTTRUNC": true,
+ "SO_ERROR": true,
+ "SO_KEEPALIVE": true,
+ "SO_LABEL": true,
+ "SO_LINGER": true,
+ "SO_LINGER_SEC": true,
+ "SO_LISTENINCQLEN": true,
+ "SO_LISTENQLEN": true,
+ "SO_LISTENQLIMIT": true,
+ "SO_MARK": true,
+ "SO_NETPROC": true,
+ "SO_NKE": true,
+ "SO_NOADDRERR": true,
+ "SO_NOHEADER": true,
+ "SO_NOSIGPIPE": true,
+ "SO_NOTIFYCONFLICT": true,
+ "SO_NO_CHECK": true,
+ "SO_NO_DDP": true,
+ "SO_NO_OFFLOAD": true,
+ "SO_NP_EXTENSIONS": true,
+ "SO_NREAD": true,
+ "SO_NWRITE": true,
+ "SO_OOBINLINE": true,
+ "SO_OVERFLOWED": true,
+ "SO_PASSCRED": true,
+ "SO_PASSSEC": true,
+ "SO_PEERCRED": true,
+ "SO_PEERLABEL": true,
+ "SO_PEERNAME": true,
+ "SO_PEERSEC": true,
+ "SO_PRIORITY": true,
+ "SO_PROTOCOL": true,
+ "SO_PROTOTYPE": true,
+ "SO_RANDOMPORT": true,
+ "SO_RCVBUF": true,
+ "SO_RCVBUFFORCE": true,
+ "SO_RCVLOWAT": true,
+ "SO_RCVTIMEO": true,
+ "SO_RESTRICTIONS": true,
+ "SO_RESTRICT_DENYIN": true,
+ "SO_RESTRICT_DENYOUT": true,
+ "SO_RESTRICT_DENYSET": true,
+ "SO_REUSEADDR": true,
+ "SO_REUSEPORT": true,
+ "SO_REUSESHAREUID": true,
+ "SO_RTABLE": true,
+ "SO_RXQ_OVFL": true,
+ "SO_SECURITY_AUTHENTICATION": true,
+ "SO_SECURITY_ENCRYPTION_NETWORK": true,
+ "SO_SECURITY_ENCRYPTION_TRANSPORT": true,
+ "SO_SETFIB": true,
+ "SO_SNDBUF": true,
+ "SO_SNDBUFFORCE": true,
+ "SO_SNDLOWAT": true,
+ "SO_SNDTIMEO": true,
+ "SO_SPLICE": true,
+ "SO_TIMESTAMP": true,
+ "SO_TIMESTAMPING": true,
+ "SO_TIMESTAMPNS": true,
+ "SO_TIMESTAMP_MONOTONIC": true,
+ "SO_TYPE": true,
+ "SO_UPCALLCLOSEWAIT": true,
+ "SO_UPDATE_ACCEPT_CONTEXT": true,
+ "SO_UPDATE_CONNECT_CONTEXT": true,
+ "SO_USELOOPBACK": true,
+ "SO_USER_COOKIE": true,
+ "SO_VENDOR": true,
+ "SO_WANTMORE": true,
+ "SO_WANTOOBFLAG": true,
+ "SSLExtraCertChainPolicyPara": true,
+ "STANDARD_RIGHTS_ALL": true,
+ "STANDARD_RIGHTS_EXECUTE": true,
+ "STANDARD_RIGHTS_READ": true,
+ "STANDARD_RIGHTS_REQUIRED": true,
+ "STANDARD_RIGHTS_WRITE": true,
+ "STARTF_USESHOWWINDOW": true,
+ "STARTF_USESTDHANDLES": true,
+ "STD_ERROR_HANDLE": true,
+ "STD_INPUT_HANDLE": true,
+ "STD_OUTPUT_HANDLE": true,
+ "SUBLANG_ENGLISH_US": true,
+ "SW_FORCEMINIMIZE": true,
+ "SW_HIDE": true,
+ "SW_MAXIMIZE": true,
+ "SW_MINIMIZE": true,
+ "SW_NORMAL": true,
+ "SW_RESTORE": true,
+ "SW_SHOW": true,
+ "SW_SHOWDEFAULT": true,
+ "SW_SHOWMAXIMIZED": true,
+ "SW_SHOWMINIMIZED": true,
+ "SW_SHOWMINNOACTIVE": true,
+ "SW_SHOWNA": true,
+ "SW_SHOWNOACTIVATE": true,
+ "SW_SHOWNORMAL": true,
+ "SYMBOLIC_LINK_FLAG_DIRECTORY": true,
+ "SYNCHRONIZE": true,
+ "SYSCTL_VERSION": true,
+ "SYSCTL_VERS_0": true,
+ "SYSCTL_VERS_1": true,
+ "SYSCTL_VERS_MASK": true,
+ "SYS_ABORT2": true,
+ "SYS_ACCEPT": true,
+ "SYS_ACCEPT4": true,
+ "SYS_ACCEPT_NOCANCEL": true,
+ "SYS_ACCESS": true,
+ "SYS_ACCESS_EXTENDED": true,
+ "SYS_ACCT": true,
+ "SYS_ADD_KEY": true,
+ "SYS_ADD_PROFIL": true,
+ "SYS_ADJFREQ": true,
+ "SYS_ADJTIME": true,
+ "SYS_ADJTIMEX": true,
+ "SYS_AFS_SYSCALL": true,
+ "SYS_AIO_CANCEL": true,
+ "SYS_AIO_ERROR": true,
+ "SYS_AIO_FSYNC": true,
+ "SYS_AIO_READ": true,
+ "SYS_AIO_RETURN": true,
+ "SYS_AIO_SUSPEND": true,
+ "SYS_AIO_SUSPEND_NOCANCEL": true,
+ "SYS_AIO_WRITE": true,
+ "SYS_ALARM": true,
+ "SYS_ARCH_PRCTL": true,
+ "SYS_ARM_FADVISE64_64": true,
+ "SYS_ARM_SYNC_FILE_RANGE": true,
+ "SYS_ATGETMSG": true,
+ "SYS_ATPGETREQ": true,
+ "SYS_ATPGETRSP": true,
+ "SYS_ATPSNDREQ": true,
+ "SYS_ATPSNDRSP": true,
+ "SYS_ATPUTMSG": true,
+ "SYS_ATSOCKET": true,
+ "SYS_AUDIT": true,
+ "SYS_AUDITCTL": true,
+ "SYS_AUDITON": true,
+ "SYS_AUDIT_SESSION_JOIN": true,
+ "SYS_AUDIT_SESSION_PORT": true,
+ "SYS_AUDIT_SESSION_SELF": true,
+ "SYS_BDFLUSH": true,
+ "SYS_BIND": true,
+ "SYS_BINDAT": true,
+ "SYS_BREAK": true,
+ "SYS_BRK": true,
+ "SYS_BSDTHREAD_CREATE": true,
+ "SYS_BSDTHREAD_REGISTER": true,
+ "SYS_BSDTHREAD_TERMINATE": true,
+ "SYS_CAPGET": true,
+ "SYS_CAPSET": true,
+ "SYS_CAP_ENTER": true,
+ "SYS_CAP_FCNTLS_GET": true,
+ "SYS_CAP_FCNTLS_LIMIT": true,
+ "SYS_CAP_GETMODE": true,
+ "SYS_CAP_GETRIGHTS": true,
+ "SYS_CAP_IOCTLS_GET": true,
+ "SYS_CAP_IOCTLS_LIMIT": true,
+ "SYS_CAP_NEW": true,
+ "SYS_CAP_RIGHTS_GET": true,
+ "SYS_CAP_RIGHTS_LIMIT": true,
+ "SYS_CHDIR": true,
+ "SYS_CHFLAGS": true,
+ "SYS_CHFLAGSAT": true,
+ "SYS_CHMOD": true,
+ "SYS_CHMOD_EXTENDED": true,
+ "SYS_CHOWN": true,
+ "SYS_CHOWN32": true,
+ "SYS_CHROOT": true,
+ "SYS_CHUD": true,
+ "SYS_CLOCK_ADJTIME": true,
+ "SYS_CLOCK_GETCPUCLOCKID2": true,
+ "SYS_CLOCK_GETRES": true,
+ "SYS_CLOCK_GETTIME": true,
+ "SYS_CLOCK_NANOSLEEP": true,
+ "SYS_CLOCK_SETTIME": true,
+ "SYS_CLONE": true,
+ "SYS_CLOSE": true,
+ "SYS_CLOSEFROM": true,
+ "SYS_CLOSE_NOCANCEL": true,
+ "SYS_CONNECT": true,
+ "SYS_CONNECTAT": true,
+ "SYS_CONNECT_NOCANCEL": true,
+ "SYS_COPYFILE": true,
+ "SYS_CPUSET": true,
+ "SYS_CPUSET_GETAFFINITY": true,
+ "SYS_CPUSET_GETID": true,
+ "SYS_CPUSET_SETAFFINITY": true,
+ "SYS_CPUSET_SETID": true,
+ "SYS_CREAT": true,
+ "SYS_CREATE_MODULE": true,
+ "SYS_CSOPS": true,
+ "SYS_DELETE": true,
+ "SYS_DELETE_MODULE": true,
+ "SYS_DUP": true,
+ "SYS_DUP2": true,
+ "SYS_DUP3": true,
+ "SYS_EACCESS": true,
+ "SYS_EPOLL_CREATE": true,
+ "SYS_EPOLL_CREATE1": true,
+ "SYS_EPOLL_CTL": true,
+ "SYS_EPOLL_CTL_OLD": true,
+ "SYS_EPOLL_PWAIT": true,
+ "SYS_EPOLL_WAIT": true,
+ "SYS_EPOLL_WAIT_OLD": true,
+ "SYS_EVENTFD": true,
+ "SYS_EVENTFD2": true,
+ "SYS_EXCHANGEDATA": true,
+ "SYS_EXECVE": true,
+ "SYS_EXIT": true,
+ "SYS_EXIT_GROUP": true,
+ "SYS_EXTATTRCTL": true,
+ "SYS_EXTATTR_DELETE_FD": true,
+ "SYS_EXTATTR_DELETE_FILE": true,
+ "SYS_EXTATTR_DELETE_LINK": true,
+ "SYS_EXTATTR_GET_FD": true,
+ "SYS_EXTATTR_GET_FILE": true,
+ "SYS_EXTATTR_GET_LINK": true,
+ "SYS_EXTATTR_LIST_FD": true,
+ "SYS_EXTATTR_LIST_FILE": true,
+ "SYS_EXTATTR_LIST_LINK": true,
+ "SYS_EXTATTR_SET_FD": true,
+ "SYS_EXTATTR_SET_FILE": true,
+ "SYS_EXTATTR_SET_LINK": true,
+ "SYS_FACCESSAT": true,
+ "SYS_FADVISE64": true,
+ "SYS_FADVISE64_64": true,
+ "SYS_FALLOCATE": true,
+ "SYS_FANOTIFY_INIT": true,
+ "SYS_FANOTIFY_MARK": true,
+ "SYS_FCHDIR": true,
+ "SYS_FCHFLAGS": true,
+ "SYS_FCHMOD": true,
+ "SYS_FCHMODAT": true,
+ "SYS_FCHMOD_EXTENDED": true,
+ "SYS_FCHOWN": true,
+ "SYS_FCHOWN32": true,
+ "SYS_FCHOWNAT": true,
+ "SYS_FCHROOT": true,
+ "SYS_FCNTL": true,
+ "SYS_FCNTL64": true,
+ "SYS_FCNTL_NOCANCEL": true,
+ "SYS_FDATASYNC": true,
+ "SYS_FEXECVE": true,
+ "SYS_FFCLOCK_GETCOUNTER": true,
+ "SYS_FFCLOCK_GETESTIMATE": true,
+ "SYS_FFCLOCK_SETESTIMATE": true,
+ "SYS_FFSCTL": true,
+ "SYS_FGETATTRLIST": true,
+ "SYS_FGETXATTR": true,
+ "SYS_FHOPEN": true,
+ "SYS_FHSTAT": true,
+ "SYS_FHSTATFS": true,
+ "SYS_FILEPORT_MAKEFD": true,
+ "SYS_FILEPORT_MAKEPORT": true,
+ "SYS_FKTRACE": true,
+ "SYS_FLISTXATTR": true,
+ "SYS_FLOCK": true,
+ "SYS_FORK": true,
+ "SYS_FPATHCONF": true,
+ "SYS_FREEBSD6_FTRUNCATE": true,
+ "SYS_FREEBSD6_LSEEK": true,
+ "SYS_FREEBSD6_MMAP": true,
+ "SYS_FREEBSD6_PREAD": true,
+ "SYS_FREEBSD6_PWRITE": true,
+ "SYS_FREEBSD6_TRUNCATE": true,
+ "SYS_FREMOVEXATTR": true,
+ "SYS_FSCTL": true,
+ "SYS_FSETATTRLIST": true,
+ "SYS_FSETXATTR": true,
+ "SYS_FSGETPATH": true,
+ "SYS_FSTAT": true,
+ "SYS_FSTAT64": true,
+ "SYS_FSTAT64_EXTENDED": true,
+ "SYS_FSTATAT": true,
+ "SYS_FSTATAT64": true,
+ "SYS_FSTATFS": true,
+ "SYS_FSTATFS64": true,
+ "SYS_FSTATV": true,
+ "SYS_FSTATVFS1": true,
+ "SYS_FSTAT_EXTENDED": true,
+ "SYS_FSYNC": true,
+ "SYS_FSYNC_NOCANCEL": true,
+ "SYS_FSYNC_RANGE": true,
+ "SYS_FTIME": true,
+ "SYS_FTRUNCATE": true,
+ "SYS_FTRUNCATE64": true,
+ "SYS_FUTEX": true,
+ "SYS_FUTIMENS": true,
+ "SYS_FUTIMES": true,
+ "SYS_FUTIMESAT": true,
+ "SYS_GETATTRLIST": true,
+ "SYS_GETAUDIT": true,
+ "SYS_GETAUDIT_ADDR": true,
+ "SYS_GETAUID": true,
+ "SYS_GETCONTEXT": true,
+ "SYS_GETCPU": true,
+ "SYS_GETCWD": true,
+ "SYS_GETDENTS": true,
+ "SYS_GETDENTS64": true,
+ "SYS_GETDIRENTRIES": true,
+ "SYS_GETDIRENTRIES64": true,
+ "SYS_GETDIRENTRIESATTR": true,
+ "SYS_GETDTABLECOUNT": true,
+ "SYS_GETDTABLESIZE": true,
+ "SYS_GETEGID": true,
+ "SYS_GETEGID32": true,
+ "SYS_GETEUID": true,
+ "SYS_GETEUID32": true,
+ "SYS_GETFH": true,
+ "SYS_GETFSSTAT": true,
+ "SYS_GETFSSTAT64": true,
+ "SYS_GETGID": true,
+ "SYS_GETGID32": true,
+ "SYS_GETGROUPS": true,
+ "SYS_GETGROUPS32": true,
+ "SYS_GETHOSTUUID": true,
+ "SYS_GETITIMER": true,
+ "SYS_GETLCID": true,
+ "SYS_GETLOGIN": true,
+ "SYS_GETLOGINCLASS": true,
+ "SYS_GETPEERNAME": true,
+ "SYS_GETPGID": true,
+ "SYS_GETPGRP": true,
+ "SYS_GETPID": true,
+ "SYS_GETPMSG": true,
+ "SYS_GETPPID": true,
+ "SYS_GETPRIORITY": true,
+ "SYS_GETRESGID": true,
+ "SYS_GETRESGID32": true,
+ "SYS_GETRESUID": true,
+ "SYS_GETRESUID32": true,
+ "SYS_GETRLIMIT": true,
+ "SYS_GETRTABLE": true,
+ "SYS_GETRUSAGE": true,
+ "SYS_GETSGROUPS": true,
+ "SYS_GETSID": true,
+ "SYS_GETSOCKNAME": true,
+ "SYS_GETSOCKOPT": true,
+ "SYS_GETTHRID": true,
+ "SYS_GETTID": true,
+ "SYS_GETTIMEOFDAY": true,
+ "SYS_GETUID": true,
+ "SYS_GETUID32": true,
+ "SYS_GETVFSSTAT": true,
+ "SYS_GETWGROUPS": true,
+ "SYS_GETXATTR": true,
+ "SYS_GET_KERNEL_SYMS": true,
+ "SYS_GET_MEMPOLICY": true,
+ "SYS_GET_ROBUST_LIST": true,
+ "SYS_GET_THREAD_AREA": true,
+ "SYS_GTTY": true,
+ "SYS_IDENTITYSVC": true,
+ "SYS_IDLE": true,
+ "SYS_INITGROUPS": true,
+ "SYS_INIT_MODULE": true,
+ "SYS_INOTIFY_ADD_WATCH": true,
+ "SYS_INOTIFY_INIT": true,
+ "SYS_INOTIFY_INIT1": true,
+ "SYS_INOTIFY_RM_WATCH": true,
+ "SYS_IOCTL": true,
+ "SYS_IOPERM": true,
+ "SYS_IOPL": true,
+ "SYS_IOPOLICYSYS": true,
+ "SYS_IOPRIO_GET": true,
+ "SYS_IOPRIO_SET": true,
+ "SYS_IO_CANCEL": true,
+ "SYS_IO_DESTROY": true,
+ "SYS_IO_GETEVENTS": true,
+ "SYS_IO_SETUP": true,
+ "SYS_IO_SUBMIT": true,
+ "SYS_IPC": true,
+ "SYS_ISSETUGID": true,
+ "SYS_JAIL": true,
+ "SYS_JAIL_ATTACH": true,
+ "SYS_JAIL_GET": true,
+ "SYS_JAIL_REMOVE": true,
+ "SYS_JAIL_SET": true,
+ "SYS_KDEBUG_TRACE": true,
+ "SYS_KENV": true,
+ "SYS_KEVENT": true,
+ "SYS_KEVENT64": true,
+ "SYS_KEXEC_LOAD": true,
+ "SYS_KEYCTL": true,
+ "SYS_KILL": true,
+ "SYS_KLDFIND": true,
+ "SYS_KLDFIRSTMOD": true,
+ "SYS_KLDLOAD": true,
+ "SYS_KLDNEXT": true,
+ "SYS_KLDSTAT": true,
+ "SYS_KLDSYM": true,
+ "SYS_KLDUNLOAD": true,
+ "SYS_KLDUNLOADF": true,
+ "SYS_KQUEUE": true,
+ "SYS_KQUEUE1": true,
+ "SYS_KTIMER_CREATE": true,
+ "SYS_KTIMER_DELETE": true,
+ "SYS_KTIMER_GETOVERRUN": true,
+ "SYS_KTIMER_GETTIME": true,
+ "SYS_KTIMER_SETTIME": true,
+ "SYS_KTRACE": true,
+ "SYS_LCHFLAGS": true,
+ "SYS_LCHMOD": true,
+ "SYS_LCHOWN": true,
+ "SYS_LCHOWN32": true,
+ "SYS_LGETFH": true,
+ "SYS_LGETXATTR": true,
+ "SYS_LINK": true,
+ "SYS_LINKAT": true,
+ "SYS_LIO_LISTIO": true,
+ "SYS_LISTEN": true,
+ "SYS_LISTXATTR": true,
+ "SYS_LLISTXATTR": true,
+ "SYS_LOCK": true,
+ "SYS_LOOKUP_DCOOKIE": true,
+ "SYS_LPATHCONF": true,
+ "SYS_LREMOVEXATTR": true,
+ "SYS_LSEEK": true,
+ "SYS_LSETXATTR": true,
+ "SYS_LSTAT": true,
+ "SYS_LSTAT64": true,
+ "SYS_LSTAT64_EXTENDED": true,
+ "SYS_LSTATV": true,
+ "SYS_LSTAT_EXTENDED": true,
+ "SYS_LUTIMES": true,
+ "SYS_MAC_SYSCALL": true,
+ "SYS_MADVISE": true,
+ "SYS_MADVISE1": true,
+ "SYS_MAXSYSCALL": true,
+ "SYS_MBIND": true,
+ "SYS_MIGRATE_PAGES": true,
+ "SYS_MINCORE": true,
+ "SYS_MINHERIT": true,
+ "SYS_MKCOMPLEX": true,
+ "SYS_MKDIR": true,
+ "SYS_MKDIRAT": true,
+ "SYS_MKDIR_EXTENDED": true,
+ "SYS_MKFIFO": true,
+ "SYS_MKFIFOAT": true,
+ "SYS_MKFIFO_EXTENDED": true,
+ "SYS_MKNOD": true,
+ "SYS_MKNODAT": true,
+ "SYS_MLOCK": true,
+ "SYS_MLOCKALL": true,
+ "SYS_MMAP": true,
+ "SYS_MMAP2": true,
+ "SYS_MODCTL": true,
+ "SYS_MODFIND": true,
+ "SYS_MODFNEXT": true,
+ "SYS_MODIFY_LDT": true,
+ "SYS_MODNEXT": true,
+ "SYS_MODSTAT": true,
+ "SYS_MODWATCH": true,
+ "SYS_MOUNT": true,
+ "SYS_MOVE_PAGES": true,
+ "SYS_MPROTECT": true,
+ "SYS_MPX": true,
+ "SYS_MQUERY": true,
+ "SYS_MQ_GETSETATTR": true,
+ "SYS_MQ_NOTIFY": true,
+ "SYS_MQ_OPEN": true,
+ "SYS_MQ_TIMEDRECEIVE": true,
+ "SYS_MQ_TIMEDSEND": true,
+ "SYS_MQ_UNLINK": true,
+ "SYS_MREMAP": true,
+ "SYS_MSGCTL": true,
+ "SYS_MSGGET": true,
+ "SYS_MSGRCV": true,
+ "SYS_MSGRCV_NOCANCEL": true,
+ "SYS_MSGSND": true,
+ "SYS_MSGSND_NOCANCEL": true,
+ "SYS_MSGSYS": true,
+ "SYS_MSYNC": true,
+ "SYS_MSYNC_NOCANCEL": true,
+ "SYS_MUNLOCK": true,
+ "SYS_MUNLOCKALL": true,
+ "SYS_MUNMAP": true,
+ "SYS_NAME_TO_HANDLE_AT": true,
+ "SYS_NANOSLEEP": true,
+ "SYS_NEWFSTATAT": true,
+ "SYS_NFSCLNT": true,
+ "SYS_NFSSERVCTL": true,
+ "SYS_NFSSVC": true,
+ "SYS_NFSTAT": true,
+ "SYS_NICE": true,
+ "SYS_NLSTAT": true,
+ "SYS_NMOUNT": true,
+ "SYS_NSTAT": true,
+ "SYS_NTP_ADJTIME": true,
+ "SYS_NTP_GETTIME": true,
+ "SYS_OABI_SYSCALL_BASE": true,
+ "SYS_OBREAK": true,
+ "SYS_OLDFSTAT": true,
+ "SYS_OLDLSTAT": true,
+ "SYS_OLDOLDUNAME": true,
+ "SYS_OLDSTAT": true,
+ "SYS_OLDUNAME": true,
+ "SYS_OPEN": true,
+ "SYS_OPENAT": true,
+ "SYS_OPENBSD_POLL": true,
+ "SYS_OPEN_BY_HANDLE_AT": true,
+ "SYS_OPEN_EXTENDED": true,
+ "SYS_OPEN_NOCANCEL": true,
+ "SYS_OVADVISE": true,
+ "SYS_PACCEPT": true,
+ "SYS_PATHCONF": true,
+ "SYS_PAUSE": true,
+ "SYS_PCICONFIG_IOBASE": true,
+ "SYS_PCICONFIG_READ": true,
+ "SYS_PCICONFIG_WRITE": true,
+ "SYS_PDFORK": true,
+ "SYS_PDGETPID": true,
+ "SYS_PDKILL": true,
+ "SYS_PERF_EVENT_OPEN": true,
+ "SYS_PERSONALITY": true,
+ "SYS_PID_HIBERNATE": true,
+ "SYS_PID_RESUME": true,
+ "SYS_PID_SHUTDOWN_SOCKETS": true,
+ "SYS_PID_SUSPEND": true,
+ "SYS_PIPE": true,
+ "SYS_PIPE2": true,
+ "SYS_PIVOT_ROOT": true,
+ "SYS_PMC_CONTROL": true,
+ "SYS_PMC_GET_INFO": true,
+ "SYS_POLL": true,
+ "SYS_POLLTS": true,
+ "SYS_POLL_NOCANCEL": true,
+ "SYS_POSIX_FADVISE": true,
+ "SYS_POSIX_FALLOCATE": true,
+ "SYS_POSIX_OPENPT": true,
+ "SYS_POSIX_SPAWN": true,
+ "SYS_PPOLL": true,
+ "SYS_PRCTL": true,
+ "SYS_PREAD": true,
+ "SYS_PREAD64": true,
+ "SYS_PREADV": true,
+ "SYS_PREAD_NOCANCEL": true,
+ "SYS_PRLIMIT64": true,
+ "SYS_PROCCTL": true,
+ "SYS_PROCESS_POLICY": true,
+ "SYS_PROCESS_VM_READV": true,
+ "SYS_PROCESS_VM_WRITEV": true,
+ "SYS_PROC_INFO": true,
+ "SYS_PROF": true,
+ "SYS_PROFIL": true,
+ "SYS_PSELECT": true,
+ "SYS_PSELECT6": true,
+ "SYS_PSET_ASSIGN": true,
+ "SYS_PSET_CREATE": true,
+ "SYS_PSET_DESTROY": true,
+ "SYS_PSYNCH_CVBROAD": true,
+ "SYS_PSYNCH_CVCLRPREPOST": true,
+ "SYS_PSYNCH_CVSIGNAL": true,
+ "SYS_PSYNCH_CVWAIT": true,
+ "SYS_PSYNCH_MUTEXDROP": true,
+ "SYS_PSYNCH_MUTEXWAIT": true,
+ "SYS_PSYNCH_RW_DOWNGRADE": true,
+ "SYS_PSYNCH_RW_LONGRDLOCK": true,
+ "SYS_PSYNCH_RW_RDLOCK": true,
+ "SYS_PSYNCH_RW_UNLOCK": true,
+ "SYS_PSYNCH_RW_UNLOCK2": true,
+ "SYS_PSYNCH_RW_UPGRADE": true,
+ "SYS_PSYNCH_RW_WRLOCK": true,
+ "SYS_PSYNCH_RW_YIELDWRLOCK": true,
+ "SYS_PTRACE": true,
+ "SYS_PUTPMSG": true,
+ "SYS_PWRITE": true,
+ "SYS_PWRITE64": true,
+ "SYS_PWRITEV": true,
+ "SYS_PWRITE_NOCANCEL": true,
+ "SYS_QUERY_MODULE": true,
+ "SYS_QUOTACTL": true,
+ "SYS_RASCTL": true,
+ "SYS_RCTL_ADD_RULE": true,
+ "SYS_RCTL_GET_LIMITS": true,
+ "SYS_RCTL_GET_RACCT": true,
+ "SYS_RCTL_GET_RULES": true,
+ "SYS_RCTL_REMOVE_RULE": true,
+ "SYS_READ": true,
+ "SYS_READAHEAD": true,
+ "SYS_READDIR": true,
+ "SYS_READLINK": true,
+ "SYS_READLINKAT": true,
+ "SYS_READV": true,
+ "SYS_READV_NOCANCEL": true,
+ "SYS_READ_NOCANCEL": true,
+ "SYS_REBOOT": true,
+ "SYS_RECV": true,
+ "SYS_RECVFROM": true,
+ "SYS_RECVFROM_NOCANCEL": true,
+ "SYS_RECVMMSG": true,
+ "SYS_RECVMSG": true,
+ "SYS_RECVMSG_NOCANCEL": true,
+ "SYS_REMAP_FILE_PAGES": true,
+ "SYS_REMOVEXATTR": true,
+ "SYS_RENAME": true,
+ "SYS_RENAMEAT": true,
+ "SYS_REQUEST_KEY": true,
+ "SYS_RESTART_SYSCALL": true,
+ "SYS_REVOKE": true,
+ "SYS_RFORK": true,
+ "SYS_RMDIR": true,
+ "SYS_RTPRIO": true,
+ "SYS_RTPRIO_THREAD": true,
+ "SYS_RT_SIGACTION": true,
+ "SYS_RT_SIGPENDING": true,
+ "SYS_RT_SIGPROCMASK": true,
+ "SYS_RT_SIGQUEUEINFO": true,
+ "SYS_RT_SIGRETURN": true,
+ "SYS_RT_SIGSUSPEND": true,
+ "SYS_RT_SIGTIMEDWAIT": true,
+ "SYS_RT_TGSIGQUEUEINFO": true,
+ "SYS_SBRK": true,
+ "SYS_SCHED_GETAFFINITY": true,
+ "SYS_SCHED_GETPARAM": true,
+ "SYS_SCHED_GETSCHEDULER": true,
+ "SYS_SCHED_GET_PRIORITY_MAX": true,
+ "SYS_SCHED_GET_PRIORITY_MIN": true,
+ "SYS_SCHED_RR_GET_INTERVAL": true,
+ "SYS_SCHED_SETAFFINITY": true,
+ "SYS_SCHED_SETPARAM": true,
+ "SYS_SCHED_SETSCHEDULER": true,
+ "SYS_SCHED_YIELD": true,
+ "SYS_SCTP_GENERIC_RECVMSG": true,
+ "SYS_SCTP_GENERIC_SENDMSG": true,
+ "SYS_SCTP_GENERIC_SENDMSG_IOV": true,
+ "SYS_SCTP_PEELOFF": true,
+ "SYS_SEARCHFS": true,
+ "SYS_SECURITY": true,
+ "SYS_SELECT": true,
+ "SYS_SELECT_NOCANCEL": true,
+ "SYS_SEMCONFIG": true,
+ "SYS_SEMCTL": true,
+ "SYS_SEMGET": true,
+ "SYS_SEMOP": true,
+ "SYS_SEMSYS": true,
+ "SYS_SEMTIMEDOP": true,
+ "SYS_SEM_CLOSE": true,
+ "SYS_SEM_DESTROY": true,
+ "SYS_SEM_GETVALUE": true,
+ "SYS_SEM_INIT": true,
+ "SYS_SEM_OPEN": true,
+ "SYS_SEM_POST": true,
+ "SYS_SEM_TRYWAIT": true,
+ "SYS_SEM_UNLINK": true,
+ "SYS_SEM_WAIT": true,
+ "SYS_SEM_WAIT_NOCANCEL": true,
+ "SYS_SEND": true,
+ "SYS_SENDFILE": true,
+ "SYS_SENDFILE64": true,
+ "SYS_SENDMMSG": true,
+ "SYS_SENDMSG": true,
+ "SYS_SENDMSG_NOCANCEL": true,
+ "SYS_SENDTO": true,
+ "SYS_SENDTO_NOCANCEL": true,
+ "SYS_SETATTRLIST": true,
+ "SYS_SETAUDIT": true,
+ "SYS_SETAUDIT_ADDR": true,
+ "SYS_SETAUID": true,
+ "SYS_SETCONTEXT": true,
+ "SYS_SETDOMAINNAME": true,
+ "SYS_SETEGID": true,
+ "SYS_SETEUID": true,
+ "SYS_SETFIB": true,
+ "SYS_SETFSGID": true,
+ "SYS_SETFSGID32": true,
+ "SYS_SETFSUID": true,
+ "SYS_SETFSUID32": true,
+ "SYS_SETGID": true,
+ "SYS_SETGID32": true,
+ "SYS_SETGROUPS": true,
+ "SYS_SETGROUPS32": true,
+ "SYS_SETHOSTNAME": true,
+ "SYS_SETITIMER": true,
+ "SYS_SETLCID": true,
+ "SYS_SETLOGIN": true,
+ "SYS_SETLOGINCLASS": true,
+ "SYS_SETNS": true,
+ "SYS_SETPGID": true,
+ "SYS_SETPRIORITY": true,
+ "SYS_SETPRIVEXEC": true,
+ "SYS_SETREGID": true,
+ "SYS_SETREGID32": true,
+ "SYS_SETRESGID": true,
+ "SYS_SETRESGID32": true,
+ "SYS_SETRESUID": true,
+ "SYS_SETRESUID32": true,
+ "SYS_SETREUID": true,
+ "SYS_SETREUID32": true,
+ "SYS_SETRLIMIT": true,
+ "SYS_SETRTABLE": true,
+ "SYS_SETSGROUPS": true,
+ "SYS_SETSID": true,
+ "SYS_SETSOCKOPT": true,
+ "SYS_SETTID": true,
+ "SYS_SETTID_WITH_PID": true,
+ "SYS_SETTIMEOFDAY": true,
+ "SYS_SETUID": true,
+ "SYS_SETUID32": true,
+ "SYS_SETWGROUPS": true,
+ "SYS_SETXATTR": true,
+ "SYS_SET_MEMPOLICY": true,
+ "SYS_SET_ROBUST_LIST": true,
+ "SYS_SET_THREAD_AREA": true,
+ "SYS_SET_TID_ADDRESS": true,
+ "SYS_SGETMASK": true,
+ "SYS_SHARED_REGION_CHECK_NP": true,
+ "SYS_SHARED_REGION_MAP_AND_SLIDE_NP": true,
+ "SYS_SHMAT": true,
+ "SYS_SHMCTL": true,
+ "SYS_SHMDT": true,
+ "SYS_SHMGET": true,
+ "SYS_SHMSYS": true,
+ "SYS_SHM_OPEN": true,
+ "SYS_SHM_UNLINK": true,
+ "SYS_SHUTDOWN": true,
+ "SYS_SIGACTION": true,
+ "SYS_SIGALTSTACK": true,
+ "SYS_SIGNAL": true,
+ "SYS_SIGNALFD": true,
+ "SYS_SIGNALFD4": true,
+ "SYS_SIGPENDING": true,
+ "SYS_SIGPROCMASK": true,
+ "SYS_SIGQUEUE": true,
+ "SYS_SIGQUEUEINFO": true,
+ "SYS_SIGRETURN": true,
+ "SYS_SIGSUSPEND": true,
+ "SYS_SIGSUSPEND_NOCANCEL": true,
+ "SYS_SIGTIMEDWAIT": true,
+ "SYS_SIGWAIT": true,
+ "SYS_SIGWAITINFO": true,
+ "SYS_SOCKET": true,
+ "SYS_SOCKETCALL": true,
+ "SYS_SOCKETPAIR": true,
+ "SYS_SPLICE": true,
+ "SYS_SSETMASK": true,
+ "SYS_SSTK": true,
+ "SYS_STACK_SNAPSHOT": true,
+ "SYS_STAT": true,
+ "SYS_STAT64": true,
+ "SYS_STAT64_EXTENDED": true,
+ "SYS_STATFS": true,
+ "SYS_STATFS64": true,
+ "SYS_STATV": true,
+ "SYS_STATVFS1": true,
+ "SYS_STAT_EXTENDED": true,
+ "SYS_STIME": true,
+ "SYS_STTY": true,
+ "SYS_SWAPCONTEXT": true,
+ "SYS_SWAPCTL": true,
+ "SYS_SWAPOFF": true,
+ "SYS_SWAPON": true,
+ "SYS_SYMLINK": true,
+ "SYS_SYMLINKAT": true,
+ "SYS_SYNC": true,
+ "SYS_SYNCFS": true,
+ "SYS_SYNC_FILE_RANGE": true,
+ "SYS_SYSARCH": true,
+ "SYS_SYSCALL": true,
+ "SYS_SYSCALL_BASE": true,
+ "SYS_SYSFS": true,
+ "SYS_SYSINFO": true,
+ "SYS_SYSLOG": true,
+ "SYS_TEE": true,
+ "SYS_TGKILL": true,
+ "SYS_THREAD_SELFID": true,
+ "SYS_THR_CREATE": true,
+ "SYS_THR_EXIT": true,
+ "SYS_THR_KILL": true,
+ "SYS_THR_KILL2": true,
+ "SYS_THR_NEW": true,
+ "SYS_THR_SELF": true,
+ "SYS_THR_SET_NAME": true,
+ "SYS_THR_SUSPEND": true,
+ "SYS_THR_WAKE": true,
+ "SYS_TIME": true,
+ "SYS_TIMERFD_CREATE": true,
+ "SYS_TIMERFD_GETTIME": true,
+ "SYS_TIMERFD_SETTIME": true,
+ "SYS_TIMER_CREATE": true,
+ "SYS_TIMER_DELETE": true,
+ "SYS_TIMER_GETOVERRUN": true,
+ "SYS_TIMER_GETTIME": true,
+ "SYS_TIMER_SETTIME": true,
+ "SYS_TIMES": true,
+ "SYS_TKILL": true,
+ "SYS_TRUNCATE": true,
+ "SYS_TRUNCATE64": true,
+ "SYS_TUXCALL": true,
+ "SYS_UGETRLIMIT": true,
+ "SYS_ULIMIT": true,
+ "SYS_UMASK": true,
+ "SYS_UMASK_EXTENDED": true,
+ "SYS_UMOUNT": true,
+ "SYS_UMOUNT2": true,
+ "SYS_UNAME": true,
+ "SYS_UNDELETE": true,
+ "SYS_UNLINK": true,
+ "SYS_UNLINKAT": true,
+ "SYS_UNMOUNT": true,
+ "SYS_UNSHARE": true,
+ "SYS_USELIB": true,
+ "SYS_USTAT": true,
+ "SYS_UTIME": true,
+ "SYS_UTIMENSAT": true,
+ "SYS_UTIMES": true,
+ "SYS_UTRACE": true,
+ "SYS_UUIDGEN": true,
+ "SYS_VADVISE": true,
+ "SYS_VFORK": true,
+ "SYS_VHANGUP": true,
+ "SYS_VM86": true,
+ "SYS_VM86OLD": true,
+ "SYS_VMSPLICE": true,
+ "SYS_VM_PRESSURE_MONITOR": true,
+ "SYS_VSERVER": true,
+ "SYS_WAIT4": true,
+ "SYS_WAIT4_NOCANCEL": true,
+ "SYS_WAIT6": true,
+ "SYS_WAITEVENT": true,
+ "SYS_WAITID": true,
+ "SYS_WAITID_NOCANCEL": true,
+ "SYS_WAITPID": true,
+ "SYS_WATCHEVENT": true,
+ "SYS_WORKQ_KERNRETURN": true,
+ "SYS_WORKQ_OPEN": true,
+ "SYS_WRITE": true,
+ "SYS_WRITEV": true,
+ "SYS_WRITEV_NOCANCEL": true,
+ "SYS_WRITE_NOCANCEL": true,
+ "SYS_YIELD": true,
+ "SYS__LLSEEK": true,
+ "SYS__LWP_CONTINUE": true,
+ "SYS__LWP_CREATE": true,
+ "SYS__LWP_CTL": true,
+ "SYS__LWP_DETACH": true,
+ "SYS__LWP_EXIT": true,
+ "SYS__LWP_GETNAME": true,
+ "SYS__LWP_GETPRIVATE": true,
+ "SYS__LWP_KILL": true,
+ "SYS__LWP_PARK": true,
+ "SYS__LWP_SELF": true,
+ "SYS__LWP_SETNAME": true,
+ "SYS__LWP_SETPRIVATE": true,
+ "SYS__LWP_SUSPEND": true,
+ "SYS__LWP_UNPARK": true,
+ "SYS__LWP_UNPARK_ALL": true,
+ "SYS__LWP_WAIT": true,
+ "SYS__LWP_WAKEUP": true,
+ "SYS__NEWSELECT": true,
+ "SYS__PSET_BIND": true,
+ "SYS__SCHED_GETAFFINITY": true,
+ "SYS__SCHED_GETPARAM": true,
+ "SYS__SCHED_SETAFFINITY": true,
+ "SYS__SCHED_SETPARAM": true,
+ "SYS__SYSCTL": true,
+ "SYS__UMTX_LOCK": true,
+ "SYS__UMTX_OP": true,
+ "SYS__UMTX_UNLOCK": true,
+ "SYS___ACL_ACLCHECK_FD": true,
+ "SYS___ACL_ACLCHECK_FILE": true,
+ "SYS___ACL_ACLCHECK_LINK": true,
+ "SYS___ACL_DELETE_FD": true,
+ "SYS___ACL_DELETE_FILE": true,
+ "SYS___ACL_DELETE_LINK": true,
+ "SYS___ACL_GET_FD": true,
+ "SYS___ACL_GET_FILE": true,
+ "SYS___ACL_GET_LINK": true,
+ "SYS___ACL_SET_FD": true,
+ "SYS___ACL_SET_FILE": true,
+ "SYS___ACL_SET_LINK": true,
+ "SYS___CLONE": true,
+ "SYS___DISABLE_THREADSIGNAL": true,
+ "SYS___GETCWD": true,
+ "SYS___GETLOGIN": true,
+ "SYS___GET_TCB": true,
+ "SYS___MAC_EXECVE": true,
+ "SYS___MAC_GETFSSTAT": true,
+ "SYS___MAC_GET_FD": true,
+ "SYS___MAC_GET_FILE": true,
+ "SYS___MAC_GET_LCID": true,
+ "SYS___MAC_GET_LCTX": true,
+ "SYS___MAC_GET_LINK": true,
+ "SYS___MAC_GET_MOUNT": true,
+ "SYS___MAC_GET_PID": true,
+ "SYS___MAC_GET_PROC": true,
+ "SYS___MAC_MOUNT": true,
+ "SYS___MAC_SET_FD": true,
+ "SYS___MAC_SET_FILE": true,
+ "SYS___MAC_SET_LCTX": true,
+ "SYS___MAC_SET_LINK": true,
+ "SYS___MAC_SET_PROC": true,
+ "SYS___MAC_SYSCALL": true,
+ "SYS___OLD_SEMWAIT_SIGNAL": true,
+ "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL": true,
+ "SYS___POSIX_CHOWN": true,
+ "SYS___POSIX_FCHOWN": true,
+ "SYS___POSIX_LCHOWN": true,
+ "SYS___POSIX_RENAME": true,
+ "SYS___PTHREAD_CANCELED": true,
+ "SYS___PTHREAD_CHDIR": true,
+ "SYS___PTHREAD_FCHDIR": true,
+ "SYS___PTHREAD_KILL": true,
+ "SYS___PTHREAD_MARKCANCEL": true,
+ "SYS___PTHREAD_SIGMASK": true,
+ "SYS___QUOTACTL": true,
+ "SYS___SEMCTL": true,
+ "SYS___SEMWAIT_SIGNAL": true,
+ "SYS___SEMWAIT_SIGNAL_NOCANCEL": true,
+ "SYS___SETLOGIN": true,
+ "SYS___SETUGID": true,
+ "SYS___SET_TCB": true,
+ "SYS___SIGACTION_SIGTRAMP": true,
+ "SYS___SIGTIMEDWAIT": true,
+ "SYS___SIGWAIT": true,
+ "SYS___SIGWAIT_NOCANCEL": true,
+ "SYS___SYSCTL": true,
+ "SYS___TFORK": true,
+ "SYS___THREXIT": true,
+ "SYS___THRSIGDIVERT": true,
+ "SYS___THRSLEEP": true,
+ "SYS___THRWAKEUP": true,
+ "S_ARCH1": true,
+ "S_ARCH2": true,
+ "S_BLKSIZE": true,
+ "S_IEXEC": true,
+ "S_IFBLK": true,
+ "S_IFCHR": true,
+ "S_IFDIR": true,
+ "S_IFIFO": true,
+ "S_IFLNK": true,
+ "S_IFMT": true,
+ "S_IFREG": true,
+ "S_IFSOCK": true,
+ "S_IFWHT": true,
+ "S_IREAD": true,
+ "S_IRGRP": true,
+ "S_IROTH": true,
+ "S_IRUSR": true,
+ "S_IRWXG": true,
+ "S_IRWXO": true,
+ "S_IRWXU": true,
+ "S_ISGID": true,
+ "S_ISTXT": true,
+ "S_ISUID": true,
+ "S_ISVTX": true,
+ "S_IWGRP": true,
+ "S_IWOTH": true,
+ "S_IWRITE": true,
+ "S_IWUSR": true,
+ "S_IXGRP": true,
+ "S_IXOTH": true,
+ "S_IXUSR": true,
+ "S_LOGIN_SET": true,
+ "SecurityAttributes": true,
+ "Seek": true,
+ "Select": true,
+ "Sendfile": true,
+ "Sendmsg": true,
+ "SendmsgN": true,
+ "Sendto": true,
+ "Servent": true,
+ "SetBpf": true,
+ "SetBpfBuflen": true,
+ "SetBpfDatalink": true,
+ "SetBpfHeadercmpl": true,
+ "SetBpfImmediate": true,
+ "SetBpfInterface": true,
+ "SetBpfPromisc": true,
+ "SetBpfTimeout": true,
+ "SetCurrentDirectory": true,
+ "SetEndOfFile": true,
+ "SetEnvironmentVariable": true,
+ "SetFileAttributes": true,
+ "SetFileCompletionNotificationModes": true,
+ "SetFilePointer": true,
+ "SetFileTime": true,
+ "SetHandleInformation": true,
+ "SetKevent": true,
+ "SetLsfPromisc": true,
+ "SetNonblock": true,
+ "Setdomainname": true,
+ "Setegid": true,
+ "Setenv": true,
+ "Seteuid": true,
+ "Setfsgid": true,
+ "Setfsuid": true,
+ "Setgid": true,
+ "Setgroups": true,
+ "Sethostname": true,
+ "Setlogin": true,
+ "Setpgid": true,
+ "Setpriority": true,
+ "Setprivexec": true,
+ "Setregid": true,
+ "Setresgid": true,
+ "Setresuid": true,
+ "Setreuid": true,
+ "Setrlimit": true,
+ "Setsid": true,
+ "Setsockopt": true,
+ "SetsockoptByte": true,
+ "SetsockoptICMPv6Filter": true,
+ "SetsockoptIPMreq": true,
+ "SetsockoptIPMreqn": true,
+ "SetsockoptIPv6Mreq": true,
+ "SetsockoptInet4Addr": true,
+ "SetsockoptInt": true,
+ "SetsockoptLinger": true,
+ "SetsockoptString": true,
+ "SetsockoptTimeval": true,
+ "Settimeofday": true,
+ "Setuid": true,
+ "Setxattr": true,
+ "Shutdown": true,
+ "SidTypeAlias": true,
+ "SidTypeComputer": true,
+ "SidTypeDeletedAccount": true,
+ "SidTypeDomain": true,
+ "SidTypeGroup": true,
+ "SidTypeInvalid": true,
+ "SidTypeLabel": true,
+ "SidTypeUnknown": true,
+ "SidTypeUser": true,
+ "SidTypeWellKnownGroup": true,
+ "Signal": true,
+ "SizeofBpfHdr": true,
+ "SizeofBpfInsn": true,
+ "SizeofBpfProgram": true,
+ "SizeofBpfStat": true,
+ "SizeofBpfVersion": true,
+ "SizeofBpfZbuf": true,
+ "SizeofBpfZbufHeader": true,
+ "SizeofCmsghdr": true,
+ "SizeofICMPv6Filter": true,
+ "SizeofIPMreq": true,
+ "SizeofIPMreqn": true,
+ "SizeofIPv6MTUInfo": true,
+ "SizeofIPv6Mreq": true,
+ "SizeofIfAddrmsg": true,
+ "SizeofIfAnnounceMsghdr": true,
+ "SizeofIfData": true,
+ "SizeofIfInfomsg": true,
+ "SizeofIfMsghdr": true,
+ "SizeofIfaMsghdr": true,
+ "SizeofIfmaMsghdr": true,
+ "SizeofIfmaMsghdr2": true,
+ "SizeofInet4Pktinfo": true,
+ "SizeofInet6Pktinfo": true,
+ "SizeofInotifyEvent": true,
+ "SizeofLinger": true,
+ "SizeofMsghdr": true,
+ "SizeofNlAttr": true,
+ "SizeofNlMsgerr": true,
+ "SizeofNlMsghdr": true,
+ "SizeofRtAttr": true,
+ "SizeofRtGenmsg": true,
+ "SizeofRtMetrics": true,
+ "SizeofRtMsg": true,
+ "SizeofRtMsghdr": true,
+ "SizeofRtNexthop": true,
+ "SizeofSockFilter": true,
+ "SizeofSockFprog": true,
+ "SizeofSockaddrAny": true,
+ "SizeofSockaddrDatalink": true,
+ "SizeofSockaddrInet4": true,
+ "SizeofSockaddrInet6": true,
+ "SizeofSockaddrLinklayer": true,
+ "SizeofSockaddrNetlink": true,
+ "SizeofSockaddrUnix": true,
+ "SizeofTCPInfo": true,
+ "SizeofUcred": true,
+ "SlicePtrFromStrings": true,
+ "SockFilter": true,
+ "SockFprog": true,
+ "SockaddrDatalink": true,
+ "SockaddrGen": true,
+ "SockaddrInet4": true,
+ "SockaddrInet6": true,
+ "SockaddrLinklayer": true,
+ "SockaddrNetlink": true,
+ "SockaddrUnix": true,
+ "Socket": true,
+ "SocketControlMessage": true,
+ "SocketDisableIPv6": true,
+ "Socketpair": true,
+ "Splice": true,
+ "StartProcess": true,
+ "StartupInfo": true,
+ "Stat": true,
+ "Stat_t": true,
+ "Statfs": true,
+ "Statfs_t": true,
+ "Stderr": true,
+ "Stdin": true,
+ "Stdout": true,
+ "StringBytePtr": true,
+ "StringByteSlice": true,
+ "StringSlicePtr": true,
+ "StringToSid": true,
+ "StringToUTF16": true,
+ "StringToUTF16Ptr": true,
+ "Symlink": true,
+ "Sync": true,
+ "SyncFileRange": true,
+ "SysProcAttr": true,
+ "SysProcIDMap": true,
+ "Syscall": true,
+ "Syscall12": true,
+ "Syscall15": true,
+ "Syscall18": true,
+ "Syscall6": true,
+ "Syscall9": true,
+ "Sysctl": true,
+ "SysctlUint32": true,
+ "Sysctlnode": true,
+ "Sysinfo": true,
+ "Sysinfo_t": true,
+ "Systemtime": true,
+ "TCGETS": true,
+ "TCIFLUSH": true,
+ "TCIOFLUSH": true,
+ "TCOFLUSH": true,
+ "TCPInfo": true,
+ "TCPKeepalive": true,
+ "TCP_CA_NAME_MAX": true,
+ "TCP_CONGCTL": true,
+ "TCP_CONGESTION": true,
+ "TCP_CONNECTIONTIMEOUT": true,
+ "TCP_CORK": true,
+ "TCP_DEFER_ACCEPT": true,
+ "TCP_INFO": true,
+ "TCP_KEEPALIVE": true,
+ "TCP_KEEPCNT": true,
+ "TCP_KEEPIDLE": true,
+ "TCP_KEEPINIT": true,
+ "TCP_KEEPINTVL": true,
+ "TCP_LINGER2": true,
+ "TCP_MAXBURST": true,
+ "TCP_MAXHLEN": true,
+ "TCP_MAXOLEN": true,
+ "TCP_MAXSEG": true,
+ "TCP_MAXWIN": true,
+ "TCP_MAX_SACK": true,
+ "TCP_MAX_WINSHIFT": true,
+ "TCP_MD5SIG": true,
+ "TCP_MD5SIG_MAXKEYLEN": true,
+ "TCP_MINMSS": true,
+ "TCP_MINMSSOVERLOAD": true,
+ "TCP_MSS": true,
+ "TCP_NODELAY": true,
+ "TCP_NOOPT": true,
+ "TCP_NOPUSH": true,
+ "TCP_NSTATES": true,
+ "TCP_QUICKACK": true,
+ "TCP_RXT_CONNDROPTIME": true,
+ "TCP_RXT_FINDROP": true,
+ "TCP_SACK_ENABLE": true,
+ "TCP_SYNCNT": true,
+ "TCP_VENDOR": true,
+ "TCP_WINDOW_CLAMP": true,
+ "TCSAFLUSH": true,
+ "TCSETS": true,
+ "TF_DISCONNECT": true,
+ "TF_REUSE_SOCKET": true,
+ "TF_USE_DEFAULT_WORKER": true,
+ "TF_USE_KERNEL_APC": true,
+ "TF_USE_SYSTEM_THREAD": true,
+ "TF_WRITE_BEHIND": true,
+ "TH32CS_INHERIT": true,
+ "TH32CS_SNAPALL": true,
+ "TH32CS_SNAPHEAPLIST": true,
+ "TH32CS_SNAPMODULE": true,
+ "TH32CS_SNAPMODULE32": true,
+ "TH32CS_SNAPPROCESS": true,
+ "TH32CS_SNAPTHREAD": true,
+ "TIME_ZONE_ID_DAYLIGHT": true,
+ "TIME_ZONE_ID_STANDARD": true,
+ "TIME_ZONE_ID_UNKNOWN": true,
+ "TIOCCBRK": true,
+ "TIOCCDTR": true,
+ "TIOCCONS": true,
+ "TIOCDCDTIMESTAMP": true,
+ "TIOCDRAIN": true,
+ "TIOCDSIMICROCODE": true,
+ "TIOCEXCL": true,
+ "TIOCEXT": true,
+ "TIOCFLAG_CDTRCTS": true,
+ "TIOCFLAG_CLOCAL": true,
+ "TIOCFLAG_CRTSCTS": true,
+ "TIOCFLAG_MDMBUF": true,
+ "TIOCFLAG_PPS": true,
+ "TIOCFLAG_SOFTCAR": true,
+ "TIOCFLUSH": true,
+ "TIOCGDEV": true,
+ "TIOCGDRAINWAIT": true,
+ "TIOCGETA": true,
+ "TIOCGETD": true,
+ "TIOCGFLAGS": true,
+ "TIOCGICOUNT": true,
+ "TIOCGLCKTRMIOS": true,
+ "TIOCGLINED": true,
+ "TIOCGPGRP": true,
+ "TIOCGPTN": true,
+ "TIOCGQSIZE": true,
+ "TIOCGRANTPT": true,
+ "TIOCGRS485": true,
+ "TIOCGSERIAL": true,
+ "TIOCGSID": true,
+ "TIOCGSIZE": true,
+ "TIOCGSOFTCAR": true,
+ "TIOCGTSTAMP": true,
+ "TIOCGWINSZ": true,
+ "TIOCINQ": true,
+ "TIOCIXOFF": true,
+ "TIOCIXON": true,
+ "TIOCLINUX": true,
+ "TIOCMBIC": true,
+ "TIOCMBIS": true,
+ "TIOCMGDTRWAIT": true,
+ "TIOCMGET": true,
+ "TIOCMIWAIT": true,
+ "TIOCMODG": true,
+ "TIOCMODS": true,
+ "TIOCMSDTRWAIT": true,
+ "TIOCMSET": true,
+ "TIOCM_CAR": true,
+ "TIOCM_CD": true,
+ "TIOCM_CTS": true,
+ "TIOCM_DCD": true,
+ "TIOCM_DSR": true,
+ "TIOCM_DTR": true,
+ "TIOCM_LE": true,
+ "TIOCM_RI": true,
+ "TIOCM_RNG": true,
+ "TIOCM_RTS": true,
+ "TIOCM_SR": true,
+ "TIOCM_ST": true,
+ "TIOCNOTTY": true,
+ "TIOCNXCL": true,
+ "TIOCOUTQ": true,
+ "TIOCPKT": true,
+ "TIOCPKT_DATA": true,
+ "TIOCPKT_DOSTOP": true,
+ "TIOCPKT_FLUSHREAD": true,
+ "TIOCPKT_FLUSHWRITE": true,
+ "TIOCPKT_IOCTL": true,
+ "TIOCPKT_NOSTOP": true,
+ "TIOCPKT_START": true,
+ "TIOCPKT_STOP": true,
+ "TIOCPTMASTER": true,
+ "TIOCPTMGET": true,
+ "TIOCPTSNAME": true,
+ "TIOCPTYGNAME": true,
+ "TIOCPTYGRANT": true,
+ "TIOCPTYUNLK": true,
+ "TIOCRCVFRAME": true,
+ "TIOCREMOTE": true,
+ "TIOCSBRK": true,
+ "TIOCSCONS": true,
+ "TIOCSCTTY": true,
+ "TIOCSDRAINWAIT": true,
+ "TIOCSDTR": true,
+ "TIOCSERCONFIG": true,
+ "TIOCSERGETLSR": true,
+ "TIOCSERGETMULTI": true,
+ "TIOCSERGSTRUCT": true,
+ "TIOCSERGWILD": true,
+ "TIOCSERSETMULTI": true,
+ "TIOCSERSWILD": true,
+ "TIOCSER_TEMT": true,
+ "TIOCSETA": true,
+ "TIOCSETAF": true,
+ "TIOCSETAW": true,
+ "TIOCSETD": true,
+ "TIOCSFLAGS": true,
+ "TIOCSIG": true,
+ "TIOCSLCKTRMIOS": true,
+ "TIOCSLINED": true,
+ "TIOCSPGRP": true,
+ "TIOCSPTLCK": true,
+ "TIOCSQSIZE": true,
+ "TIOCSRS485": true,
+ "TIOCSSERIAL": true,
+ "TIOCSSIZE": true,
+ "TIOCSSOFTCAR": true,
+ "TIOCSTART": true,
+ "TIOCSTAT": true,
+ "TIOCSTI": true,
+ "TIOCSTOP": true,
+ "TIOCSTSTAMP": true,
+ "TIOCSWINSZ": true,
+ "TIOCTIMESTAMP": true,
+ "TIOCUCNTL": true,
+ "TIOCVHANGUP": true,
+ "TIOCXMTFRAME": true,
+ "TOKEN_ADJUST_DEFAULT": true,
+ "TOKEN_ADJUST_GROUPS": true,
+ "TOKEN_ADJUST_PRIVILEGES": true,
+ "TOKEN_ADJUST_SESSIONID": true,
+ "TOKEN_ALL_ACCESS": true,
+ "TOKEN_ASSIGN_PRIMARY": true,
+ "TOKEN_DUPLICATE": true,
+ "TOKEN_EXECUTE": true,
+ "TOKEN_IMPERSONATE": true,
+ "TOKEN_QUERY": true,
+ "TOKEN_QUERY_SOURCE": true,
+ "TOKEN_READ": true,
+ "TOKEN_WRITE": true,
+ "TOSTOP": true,
+ "TRUNCATE_EXISTING": true,
+ "TUNATTACHFILTER": true,
+ "TUNDETACHFILTER": true,
+ "TUNGETFEATURES": true,
+ "TUNGETIFF": true,
+ "TUNGETSNDBUF": true,
+ "TUNGETVNETHDRSZ": true,
+ "TUNSETDEBUG": true,
+ "TUNSETGROUP": true,
+ "TUNSETIFF": true,
+ "TUNSETLINK": true,
+ "TUNSETNOCSUM": true,
+ "TUNSETOFFLOAD": true,
+ "TUNSETOWNER": true,
+ "TUNSETPERSIST": true,
+ "TUNSETSNDBUF": true,
+ "TUNSETTXFILTER": true,
+ "TUNSETVNETHDRSZ": true,
+ "Tee": true,
+ "TerminateProcess": true,
+ "Termios": true,
+ "Tgkill": true,
+ "Time": true,
+ "Time_t": true,
+ "Times": true,
+ "Timespec": true,
+ "TimespecToNsec": true,
+ "Timeval": true,
+ "Timeval32": true,
+ "TimevalToNsec": true,
+ "Timex": true,
+ "Timezoneinformation": true,
+ "Tms": true,
+ "Token": true,
+ "TokenAccessInformation": true,
+ "TokenAuditPolicy": true,
+ "TokenDefaultDacl": true,
+ "TokenElevation": true,
+ "TokenElevationType": true,
+ "TokenGroups": true,
+ "TokenGroupsAndPrivileges": true,
+ "TokenHasRestrictions": true,
+ "TokenImpersonationLevel": true,
+ "TokenIntegrityLevel": true,
+ "TokenLinkedToken": true,
+ "TokenLogonSid": true,
+ "TokenMandatoryPolicy": true,
+ "TokenOrigin": true,
+ "TokenOwner": true,
+ "TokenPrimaryGroup": true,
+ "TokenPrivileges": true,
+ "TokenRestrictedSids": true,
+ "TokenSandBoxInert": true,
+ "TokenSessionId": true,
+ "TokenSessionReference": true,
+ "TokenSource": true,
+ "TokenStatistics": true,
+ "TokenType": true,
+ "TokenUIAccess": true,
+ "TokenUser": true,
+ "TokenVirtualizationAllowed": true,
+ "TokenVirtualizationEnabled": true,
+ "Tokenprimarygroup": true,
+ "Tokenuser": true,
+ "TranslateAccountName": true,
+ "TranslateName": true,
+ "TransmitFile": true,
+ "TransmitFileBuffers": true,
+ "Truncate": true,
+ "UNIX_PATH_MAX": true,
+ "USAGE_MATCH_TYPE_AND": true,
+ "USAGE_MATCH_TYPE_OR": true,
+ "UTF16FromString": true,
+ "UTF16PtrFromString": true,
+ "UTF16ToString": true,
+ "Ucred": true,
+ "Umask": true,
+ "Uname": true,
+ "Undelete": true,
+ "UnixCredentials": true,
+ "UnixRights": true,
+ "Unlink": true,
+ "Unlinkat": true,
+ "UnmapViewOfFile": true,
+ "Unmount": true,
+ "Unsetenv": true,
+ "Unshare": true,
+ "UserInfo10": true,
+ "Ustat": true,
+ "Ustat_t": true,
+ "Utimbuf": true,
+ "Utime": true,
+ "Utimes": true,
+ "UtimesNano": true,
+ "Utsname": true,
+ "VDISCARD": true,
+ "VDSUSP": true,
+ "VEOF": true,
+ "VEOL": true,
+ "VEOL2": true,
+ "VERASE": true,
+ "VERASE2": true,
+ "VINTR": true,
+ "VKILL": true,
+ "VLNEXT": true,
+ "VMIN": true,
+ "VQUIT": true,
+ "VREPRINT": true,
+ "VSTART": true,
+ "VSTATUS": true,
+ "VSTOP": true,
+ "VSUSP": true,
+ "VSWTC": true,
+ "VT0": true,
+ "VT1": true,
+ "VTDLY": true,
+ "VTIME": true,
+ "VWERASE": true,
+ "VirtualLock": true,
+ "VirtualUnlock": true,
+ "WAIT_ABANDONED": true,
+ "WAIT_FAILED": true,
+ "WAIT_OBJECT_0": true,
+ "WAIT_TIMEOUT": true,
+ "WALL": true,
+ "WALLSIG": true,
+ "WALTSIG": true,
+ "WCLONE": true,
+ "WCONTINUED": true,
+ "WCOREFLAG": true,
+ "WEXITED": true,
+ "WLINUXCLONE": true,
+ "WNOHANG": true,
+ "WNOTHREAD": true,
+ "WNOWAIT": true,
+ "WNOZOMBIE": true,
+ "WOPTSCHECKED": true,
+ "WORDSIZE": true,
+ "WSABuf": true,
+ "WSACleanup": true,
+ "WSADESCRIPTION_LEN": true,
+ "WSAData": true,
+ "WSAEACCES": true,
+ "WSAECONNABORTED": true,
+ "WSAECONNRESET": true,
+ "WSAEnumProtocols": true,
+ "WSAID_CONNECTEX": true,
+ "WSAIoctl": true,
+ "WSAPROTOCOL_LEN": true,
+ "WSAProtocolChain": true,
+ "WSAProtocolInfo": true,
+ "WSARecv": true,
+ "WSARecvFrom": true,
+ "WSASYS_STATUS_LEN": true,
+ "WSASend": true,
+ "WSASendTo": true,
+ "WSASendto": true,
+ "WSAStartup": true,
+ "WSTOPPED": true,
+ "WTRAPPED": true,
+ "WUNTRACED": true,
+ "Wait4": true,
+ "WaitForSingleObject": true,
+ "WaitStatus": true,
+ "Win32FileAttributeData": true,
+ "Win32finddata": true,
+ "Write": true,
+ "WriteConsole": true,
+ "WriteFile": true,
+ "X509_ASN_ENCODING": true,
+ "XCASE": true,
+ "XP1_CONNECTIONLESS": true,
+ "XP1_CONNECT_DATA": true,
+ "XP1_DISCONNECT_DATA": true,
+ "XP1_EXPEDITED_DATA": true,
+ "XP1_GRACEFUL_CLOSE": true,
+ "XP1_GUARANTEED_DELIVERY": true,
+ "XP1_GUARANTEED_ORDER": true,
+ "XP1_IFS_HANDLES": true,
+ "XP1_MESSAGE_ORIENTED": true,
+ "XP1_MULTIPOINT_CONTROL_PLANE": true,
+ "XP1_MULTIPOINT_DATA_PLANE": true,
+ "XP1_PARTIAL_MESSAGE": true,
+ "XP1_PSEUDO_STREAM": true,
+ "XP1_QOS_SUPPORTED": true,
+ "XP1_SAN_SUPPORT_SDP": true,
+ "XP1_SUPPORT_BROADCAST": true,
+ "XP1_SUPPORT_MULTIPOINT": true,
+ "XP1_UNI_RECV": true,
+ "XP1_UNI_SEND": true,
+ },
+ "syscall/js": map[string]bool{
+ "Error": true,
+ "Func": true,
+ "FuncOf": true,
+ "Global": true,
+ "Null": true,
+ "Type": true,
+ "TypeBoolean": true,
+ "TypeFunction": true,
+ "TypeNull": true,
+ "TypeNumber": true,
+ "TypeObject": true,
+ "TypeString": true,
+ "TypeSymbol": true,
+ "TypeUndefined": true,
+ "TypedArray": true,
+ "TypedArrayOf": true,
+ "Undefined": true,
+ "Value": true,
+ "ValueError": true,
+ "ValueOf": true,
+ "Wrapper": true,
+ },
+ "testing": map[string]bool{
+ "AllocsPerRun": true,
+ "B": true,
+ "Benchmark": true,
+ "BenchmarkResult": true,
+ "Cover": true,
+ "CoverBlock": true,
+ "CoverMode": true,
+ "Coverage": true,
+ "InternalBenchmark": true,
+ "InternalExample": true,
+ "InternalTest": true,
+ "M": true,
+ "Main": true,
+ "MainStart": true,
+ "PB": true,
+ "RegisterCover": true,
+ "RunBenchmarks": true,
+ "RunExamples": true,
+ "RunTests": true,
+ "Short": true,
+ "T": true,
+ "Verbose": true,
+ },
+ "testing/iotest": map[string]bool{
+ "DataErrReader": true,
+ "ErrTimeout": true,
+ "HalfReader": true,
+ "NewReadLogger": true,
+ "NewWriteLogger": true,
+ "OneByteReader": true,
+ "TimeoutReader": true,
+ "TruncateWriter": true,
+ },
+ "testing/quick": map[string]bool{
+ "Check": true,
+ "CheckEqual": true,
+ "CheckEqualError": true,
+ "CheckError": true,
+ "Config": true,
+ "Generator": true,
+ "SetupError": true,
+ "Value": true,
+ },
+ "text/scanner": map[string]bool{
+ "Char": true,
+ "Comment": true,
+ "EOF": true,
+ "Float": true,
+ "GoTokens": true,
+ "GoWhitespace": true,
+ "Ident": true,
+ "Int": true,
+ "Position": true,
+ "RawString": true,
+ "ScanChars": true,
+ "ScanComments": true,
+ "ScanFloats": true,
+ "ScanIdents": true,
+ "ScanInts": true,
+ "ScanRawStrings": true,
+ "ScanStrings": true,
+ "Scanner": true,
+ "SkipComments": true,
+ "String": true,
+ "TokenString": true,
+ },
+ "text/tabwriter": map[string]bool{
+ "AlignRight": true,
+ "Debug": true,
+ "DiscardEmptyColumns": true,
+ "Escape": true,
+ "FilterHTML": true,
+ "NewWriter": true,
+ "StripEscape": true,
+ "TabIndent": true,
+ "Writer": true,
+ },
+ "text/template": map[string]bool{
+ "ExecError": true,
+ "FuncMap": true,
+ "HTMLEscape": true,
+ "HTMLEscapeString": true,
+ "HTMLEscaper": true,
+ "IsTrue": true,
+ "JSEscape": true,
+ "JSEscapeString": true,
+ "JSEscaper": true,
+ "Must": true,
+ "New": true,
+ "ParseFiles": true,
+ "ParseGlob": true,
+ "Template": true,
+ "URLQueryEscaper": true,
+ },
+ "text/template/parse": map[string]bool{
+ "ActionNode": true,
+ "BoolNode": true,
+ "BranchNode": true,
+ "ChainNode": true,
+ "CommandNode": true,
+ "DotNode": true,
+ "FieldNode": true,
+ "IdentifierNode": true,
+ "IfNode": true,
+ "IsEmptyTree": true,
+ "ListNode": true,
+ "New": true,
+ "NewIdentifier": true,
+ "NilNode": true,
+ "Node": true,
+ "NodeAction": true,
+ "NodeBool": true,
+ "NodeChain": true,
+ "NodeCommand": true,
+ "NodeDot": true,
+ "NodeField": true,
+ "NodeIdentifier": true,
+ "NodeIf": true,
+ "NodeList": true,
+ "NodeNil": true,
+ "NodeNumber": true,
+ "NodePipe": true,
+ "NodeRange": true,
+ "NodeString": true,
+ "NodeTemplate": true,
+ "NodeText": true,
+ "NodeType": true,
+ "NodeVariable": true,
+ "NodeWith": true,
+ "NumberNode": true,
+ "Parse": true,
+ "PipeNode": true,
+ "Pos": true,
+ "RangeNode": true,
+ "StringNode": true,
+ "TemplateNode": true,
+ "TextNode": true,
+ "Tree": true,
+ "VariableNode": true,
+ "WithNode": true,
+ },
+ "time": map[string]bool{
+ "ANSIC": true,
+ "After": true,
+ "AfterFunc": true,
+ "April": true,
+ "August": true,
+ "Date": true,
+ "December": true,
+ "Duration": true,
+ "February": true,
+ "FixedZone": true,
+ "Friday": true,
+ "Hour": true,
+ "January": true,
+ "July": true,
+ "June": true,
+ "Kitchen": true,
+ "LoadLocation": true,
+ "LoadLocationFromTZData": true,
+ "Local": true,
+ "Location": true,
+ "March": true,
+ "May": true,
+ "Microsecond": true,
+ "Millisecond": true,
+ "Minute": true,
+ "Monday": true,
+ "Month": true,
+ "Nanosecond": true,
+ "NewTicker": true,
+ "NewTimer": true,
+ "November": true,
+ "Now": true,
+ "October": true,
+ "Parse": true,
+ "ParseDuration": true,
+ "ParseError": true,
+ "ParseInLocation": true,
+ "RFC1123": true,
+ "RFC1123Z": true,
+ "RFC3339": true,
+ "RFC3339Nano": true,
+ "RFC822": true,
+ "RFC822Z": true,
+ "RFC850": true,
+ "RubyDate": true,
+ "Saturday": true,
+ "Second": true,
+ "September": true,
+ "Since": true,
+ "Sleep": true,
+ "Stamp": true,
+ "StampMicro": true,
+ "StampMilli": true,
+ "StampNano": true,
+ "Sunday": true,
+ "Thursday": true,
+ "Tick": true,
+ "Ticker": true,
+ "Time": true,
+ "Timer": true,
+ "Tuesday": true,
+ "UTC": true,
+ "Unix": true,
+ "UnixDate": true,
+ "Until": true,
+ "Wednesday": true,
+ "Weekday": true,
+ },
+ "unicode": map[string]bool{
+ "ASCII_Hex_Digit": true,
+ "Adlam": true,
+ "Ahom": true,
+ "Anatolian_Hieroglyphs": true,
+ "Arabic": true,
+ "Armenian": true,
+ "Avestan": true,
+ "AzeriCase": true,
+ "Balinese": true,
+ "Bamum": true,
+ "Bassa_Vah": true,
+ "Batak": true,
+ "Bengali": true,
+ "Bhaiksuki": true,
+ "Bidi_Control": true,
+ "Bopomofo": true,
+ "Brahmi": true,
+ "Braille": true,
+ "Buginese": true,
+ "Buhid": true,
+ "C": true,
+ "Canadian_Aboriginal": true,
+ "Carian": true,
+ "CaseRange": true,
+ "CaseRanges": true,
+ "Categories": true,
+ "Caucasian_Albanian": true,
+ "Cc": true,
+ "Cf": true,
+ "Chakma": true,
+ "Cham": true,
+ "Cherokee": true,
+ "Co": true,
+ "Common": true,
+ "Coptic": true,
+ "Cs": true,
+ "Cuneiform": true,
+ "Cypriot": true,
+ "Cyrillic": true,
+ "Dash": true,
+ "Deprecated": true,
+ "Deseret": true,
+ "Devanagari": true,
+ "Diacritic": true,
+ "Digit": true,
+ "Duployan": true,
+ "Egyptian_Hieroglyphs": true,
+ "Elbasan": true,
+ "Ethiopic": true,
+ "Extender": true,
+ "FoldCategory": true,
+ "FoldScript": true,
+ "Georgian": true,
+ "Glagolitic": true,
+ "Gothic": true,
+ "Grantha": true,
+ "GraphicRanges": true,
+ "Greek": true,
+ "Gujarati": true,
+ "Gurmukhi": true,
+ "Han": true,
+ "Hangul": true,
+ "Hanunoo": true,
+ "Hatran": true,
+ "Hebrew": true,
+ "Hex_Digit": true,
+ "Hiragana": true,
+ "Hyphen": true,
+ "IDS_Binary_Operator": true,
+ "IDS_Trinary_Operator": true,
+ "Ideographic": true,
+ "Imperial_Aramaic": true,
+ "In": true,
+ "Inherited": true,
+ "Inscriptional_Pahlavi": true,
+ "Inscriptional_Parthian": true,
+ "Is": true,
+ "IsControl": true,
+ "IsDigit": true,
+ "IsGraphic": true,
+ "IsLetter": true,
+ "IsLower": true,
+ "IsMark": true,
+ "IsNumber": true,
+ "IsOneOf": true,
+ "IsPrint": true,
+ "IsPunct": true,
+ "IsSpace": true,
+ "IsSymbol": true,
+ "IsTitle": true,
+ "IsUpper": true,
+ "Javanese": true,
+ "Join_Control": true,
+ "Kaithi": true,
+ "Kannada": true,
+ "Katakana": true,
+ "Kayah_Li": true,
+ "Kharoshthi": true,
+ "Khmer": true,
+ "Khojki": true,
+ "Khudawadi": true,
+ "L": true,
+ "Lao": true,
+ "Latin": true,
+ "Lepcha": true,
+ "Letter": true,
+ "Limbu": true,
+ "Linear_A": true,
+ "Linear_B": true,
+ "Lisu": true,
+ "Ll": true,
+ "Lm": true,
+ "Lo": true,
+ "Logical_Order_Exception": true,
+ "Lower": true,
+ "LowerCase": true,
+ "Lt": true,
+ "Lu": true,
+ "Lycian": true,
+ "Lydian": true,
+ "M": true,
+ "Mahajani": true,
+ "Malayalam": true,
+ "Mandaic": true,
+ "Manichaean": true,
+ "Marchen": true,
+ "Mark": true,
+ "Masaram_Gondi": true,
+ "MaxASCII": true,
+ "MaxCase": true,
+ "MaxLatin1": true,
+ "MaxRune": true,
+ "Mc": true,
+ "Me": true,
+ "Meetei_Mayek": true,
+ "Mende_Kikakui": true,
+ "Meroitic_Cursive": true,
+ "Meroitic_Hieroglyphs": true,
+ "Miao": true,
+ "Mn": true,
+ "Modi": true,
+ "Mongolian": true,
+ "Mro": true,
+ "Multani": true,
+ "Myanmar": true,
+ "N": true,
+ "Nabataean": true,
+ "Nd": true,
+ "New_Tai_Lue": true,
+ "Newa": true,
+ "Nko": true,
+ "Nl": true,
+ "No": true,
+ "Noncharacter_Code_Point": true,
+ "Number": true,
+ "Nushu": true,
+ "Ogham": true,
+ "Ol_Chiki": true,
+ "Old_Hungarian": true,
+ "Old_Italic": true,
+ "Old_North_Arabian": true,
+ "Old_Permic": true,
+ "Old_Persian": true,
+ "Old_South_Arabian": true,
+ "Old_Turkic": true,
+ "Oriya": true,
+ "Osage": true,
+ "Osmanya": true,
+ "Other": true,
+ "Other_Alphabetic": true,
+ "Other_Default_Ignorable_Code_Point": true,
+ "Other_Grapheme_Extend": true,
+ "Other_ID_Continue": true,
+ "Other_ID_Start": true,
+ "Other_Lowercase": true,
+ "Other_Math": true,
+ "Other_Uppercase": true,
+ "P": true,
+ "Pahawh_Hmong": true,
+ "Palmyrene": true,
+ "Pattern_Syntax": true,
+ "Pattern_White_Space": true,
+ "Pau_Cin_Hau": true,
+ "Pc": true,
+ "Pd": true,
+ "Pe": true,
+ "Pf": true,
+ "Phags_Pa": true,
+ "Phoenician": true,
+ "Pi": true,
+ "Po": true,
+ "Prepended_Concatenation_Mark": true,
+ "PrintRanges": true,
+ "Properties": true,
+ "Ps": true,
+ "Psalter_Pahlavi": true,
+ "Punct": true,
+ "Quotation_Mark": true,
+ "Radical": true,
+ "Range16": true,
+ "Range32": true,
+ "RangeTable": true,
+ "Regional_Indicator": true,
+ "Rejang": true,
+ "ReplacementChar": true,
+ "Runic": true,
+ "S": true,
+ "STerm": true,
+ "Samaritan": true,
+ "Saurashtra": true,
+ "Sc": true,
+ "Scripts": true,
+ "Sentence_Terminal": true,
+ "Sharada": true,
+ "Shavian": true,
+ "Siddham": true,
+ "SignWriting": true,
+ "SimpleFold": true,
+ "Sinhala": true,
+ "Sk": true,
+ "Sm": true,
+ "So": true,
+ "Soft_Dotted": true,
+ "Sora_Sompeng": true,
+ "Soyombo": true,
+ "Space": true,
+ "SpecialCase": true,
+ "Sundanese": true,
+ "Syloti_Nagri": true,
+ "Symbol": true,
+ "Syriac": true,
+ "Tagalog": true,
+ "Tagbanwa": true,
+ "Tai_Le": true,
+ "Tai_Tham": true,
+ "Tai_Viet": true,
+ "Takri": true,
+ "Tamil": true,
+ "Tangut": true,
+ "Telugu": true,
+ "Terminal_Punctuation": true,
+ "Thaana": true,
+ "Thai": true,
+ "Tibetan": true,
+ "Tifinagh": true,
+ "Tirhuta": true,
+ "Title": true,
+ "TitleCase": true,
+ "To": true,
+ "ToLower": true,
+ "ToTitle": true,
+ "ToUpper": true,
+ "TurkishCase": true,
+ "Ugaritic": true,
+ "Unified_Ideograph": true,
+ "Upper": true,
+ "UpperCase": true,
+ "UpperLower": true,
+ "Vai": true,
+ "Variation_Selector": true,
+ "Version": true,
+ "Warang_Citi": true,
+ "White_Space": true,
+ "Yi": true,
+ "Z": true,
+ "Zanabazar_Square": true,
+ "Zl": true,
+ "Zp": true,
+ "Zs": true,
+ },
+ "unicode/utf16": map[string]bool{
+ "Decode": true,
+ "DecodeRune": true,
+ "Encode": true,
+ "EncodeRune": true,
+ "IsSurrogate": true,
+ },
+ "unicode/utf8": map[string]bool{
+ "DecodeLastRune": true,
+ "DecodeLastRuneInString": true,
+ "DecodeRune": true,
+ "DecodeRuneInString": true,
+ "EncodeRune": true,
+ "FullRune": true,
+ "FullRuneInString": true,
+ "MaxRune": true,
+ "RuneCount": true,
+ "RuneCountInString": true,
+ "RuneError": true,
+ "RuneLen": true,
+ "RuneSelf": true,
+ "RuneStart": true,
+ "UTFMax": true,
+ "Valid": true,
+ "ValidRune": true,
+ "ValidString": true,
+ },
+ "unsafe": map[string]bool{
+ "Alignof": true,
+ "ArbitraryType": true,
+ "Offsetof": true,
+ "Pointer": true,
+ "Sizeof": true,
+ },
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
new file mode 100644
index 000000000..7219c8e9f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -0,0 +1,196 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fastwalk provides a faster version of filepath.Walk for file system
+// scanning tools.
+package fastwalk
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+)
+
+// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// symlink named in the call may be traversed.
+var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+
+// SkipFiles is a used as a return value from WalkFuncs to indicate that the
+// callback should not be called for any other files in the current directory.
+// Child directories will still be traversed.
+var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
+
+// Walk is a faster implementation of filepath.Walk.
+//
+// filepath.Walk's design necessarily calls os.Lstat on each file,
+// even if the caller needs less info.
+// Many tools need only the type of each file.
+// On some platforms, this information is provided directly by the readdir
+// system call, avoiding the need to stat each file individually.
+// fastwalk_unix.go contains a fork of the syscall routines.
+//
+// See golang.org/issue/16399
+//
+// Walk walks the file tree rooted at root, calling walkFn for
+// each file or directory in the tree, including root.
+//
+// If fastWalk returns filepath.SkipDir, the directory is skipped.
+//
+// Unlike filepath.Walk:
+// * file stat calls must be done by the user.
+// The only provided metadata is the file type, which does not include
+// any permission bits.
+// * multiple goroutines stat the filesystem concurrently. The provided
+// walkFn must be safe for concurrent use.
+// * fastWalk can follow symlinks if walkFn returns the TraverseLink
+// sentinel error. It is the walkFn's responsibility to prevent
+// fastWalk from going into symlink cycles.
+func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
+ // TODO(bradfitz): make numWorkers configurable? We used a
+ // minimum of 4 to give the kernel more info about multiple
+ // things we want, in hopes its I/O scheduling can take
+ // advantage of that. Hopefully most are in cache. Maybe 4 is
+ // even too low of a minimum. Profile more.
+ numWorkers := 4
+ if n := runtime.NumCPU(); n > numWorkers {
+ numWorkers = n
+ }
+
+ // Make sure to wait for all workers to finish, otherwise
+ // walkFn could still be called after returning. This Wait call
+ // runs after close(e.donec) below.
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ w := &walker{
+ fn: walkFn,
+ enqueuec: make(chan walkItem, numWorkers), // buffered for performance
+ workc: make(chan walkItem, numWorkers), // buffered for performance
+ donec: make(chan struct{}),
+
+ // buffered for correctness & not leaking goroutines:
+ resc: make(chan error, numWorkers),
+ }
+ defer close(w.donec)
+
+ for i := 0; i < numWorkers; i++ {
+ wg.Add(1)
+ go w.doWork(&wg)
+ }
+ todo := []walkItem{{dir: root}}
+ out := 0
+ for {
+ workc := w.workc
+ var workItem walkItem
+ if len(todo) == 0 {
+ workc = nil
+ } else {
+ workItem = todo[len(todo)-1]
+ }
+ select {
+ case workc <- workItem:
+ todo = todo[:len(todo)-1]
+ out++
+ case it := <-w.enqueuec:
+ todo = append(todo, it)
+ case err := <-w.resc:
+ out--
+ if err != nil {
+ return err
+ }
+ if out == 0 && len(todo) == 0 {
+ // It's safe to quit here, as long as the buffered
+ // enqueue channel isn't also readable, which might
+ // happen if the worker sends both another unit of
+ // work and its result before the other select was
+ // scheduled and both w.resc and w.enqueuec were
+ // readable.
+ select {
+ case it := <-w.enqueuec:
+ todo = append(todo, it)
+ default:
+ return nil
+ }
+ }
+ }
+ }
+}
+
+// doWork reads directories as instructed (via workc) and runs the
+// user's callback function.
+func (w *walker) doWork(wg *sync.WaitGroup) {
+ defer wg.Done()
+ for {
+ select {
+ case <-w.donec:
+ return
+ case it := <-w.workc:
+ select {
+ case <-w.donec:
+ return
+ case w.resc <- w.walk(it.dir, !it.callbackDone):
+ }
+ }
+ }
+}
+
+type walker struct {
+ fn func(path string, typ os.FileMode) error
+
+ donec chan struct{} // closed on fastWalk's return
+ workc chan walkItem // to workers
+ enqueuec chan walkItem // from workers
+ resc chan error // from workers
+}
+
+type walkItem struct {
+ dir string
+ callbackDone bool // callback already called; don't do it again
+}
+
+func (w *walker) enqueue(it walkItem) {
+ select {
+ case w.enqueuec <- it:
+ case <-w.donec:
+ }
+}
+
+func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
+ joined := dirName + string(os.PathSeparator) + baseName
+ if typ == os.ModeDir {
+ w.enqueue(walkItem{dir: joined})
+ return nil
+ }
+
+ err := w.fn(joined, typ)
+ if typ == os.ModeSymlink {
+ if err == TraverseLink {
+ // Set callbackDone so we don't call it twice for both the
+ // symlink-as-symlink and the symlink-as-directory later:
+ w.enqueue(walkItem{dir: joined, callbackDone: true})
+ return nil
+ }
+ if err == filepath.SkipDir {
+ // Permit SkipDir on symlinks too.
+ return nil
+ }
+ }
+ return err
+}
+
+func (w *walker) walk(root string, runUserCallback bool) error {
+ if runUserCallback {
+ err := w.fn(root, os.ModeDir)
+ if err == filepath.SkipDir {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return readDir(root, w.onDirEnt)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
new file mode 100644
index 000000000..ccffec5ad
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd
+
+package fastwalk
+
+import "syscall"
+
+func direntInode(dirent *syscall.Dirent) uint64 {
+ return uint64(dirent.Fileno)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
new file mode 100644
index 000000000..ab7fbc0a9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin
+// +build !appengine
+
+package fastwalk
+
+import "syscall"
+
+func direntInode(dirent *syscall.Dirent) uint64 {
+ return uint64(dirent.Ino)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
new file mode 100644
index 000000000..a3b26a7ba
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd openbsd netbsd
+
+package fastwalk
+
+import "syscall"
+
+func direntNamlen(dirent *syscall.Dirent) uint64 {
+ return uint64(dirent.Namlen)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
new file mode 100644
index 000000000..e880d358b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build !appengine
+
+package fastwalk
+
+import (
+ "bytes"
+ "syscall"
+ "unsafe"
+)
+
+func direntNamlen(dirent *syscall.Dirent) uint64 {
+ const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
+ nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
+ const nameBufLen = uint16(len(nameBuf))
+ limit := dirent.Reclen - fixedHdr
+ if limit > nameBufLen {
+ limit = nameBufLen
+ }
+ nameLen := bytes.IndexByte(nameBuf[:limit], 0)
+ if nameLen < 0 {
+ panic("failed to find terminating 0 byte in dirent")
+ }
+ return uint64(nameLen)
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
new file mode 100644
index 000000000..a906b8759
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
+
+package fastwalk
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// readDir calls fn for each directory entry in dirName.
+// It does not descend into directories or follow symlinks.
+// If fn returns a non-nil error, readDir returns with that error
+// immediately.
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
+ fis, err := ioutil.ReadDir(dirName)
+ if err != nil {
+ return err
+ }
+ skipFiles := false
+ for _, fi := range fis {
+ if fi.Mode().IsRegular() && skipFiles {
+ continue
+ }
+ if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
+ if err == SkipFiles {
+ skipFiles = true
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
new file mode 100644
index 000000000..3369b1a0b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd
+// +build !appengine
+
+package fastwalk
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const blockSize = 8 << 10
+
+// unknownFileMode is a sentinel (and bogus) os.FileMode
+// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
+const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
+
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
+ fd, err := syscall.Open(dirName, 0, 0)
+ if err != nil {
+ return &os.PathError{Op: "open", Path: dirName, Err: err}
+ }
+ defer syscall.Close(fd)
+
+ // The buffer must be at least a block long.
+ buf := make([]byte, blockSize) // stack-allocated; doesn't escape
+ bufp := 0 // starting read position in buf
+ nbuf := 0 // end valid data in buf
+ skipFiles := false
+ for {
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = syscall.ReadDirent(fd, buf)
+ if err != nil {
+ return os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ return nil
+ }
+ }
+ consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
+ bufp += consumed
+ if name == "" || name == "." || name == ".." {
+ continue
+ }
+ // Fallback for filesystems (like old XFS) that don't
+ // support Dirent.Type and have DT_UNKNOWN (0) there
+ // instead.
+ if typ == unknownFileMode {
+ fi, err := os.Lstat(dirName + "/" + name)
+ if err != nil {
+ // It got deleted in the meantime.
+ if os.IsNotExist(err) {
+ continue
+ }
+ return err
+ }
+ typ = fi.Mode() & os.ModeType
+ }
+ if skipFiles && typ.IsRegular() {
+ continue
+ }
+ if err := fn(dirName, name, typ); err != nil {
+ if err == SkipFiles {
+ skipFiles = true
+ continue
+ }
+ return err
+ }
+ }
+}
+
+func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
+ // golang.org/issue/15653
+ dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
+ panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
+ }
+ if len(buf) < int(dirent.Reclen) {
+ panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
+ }
+ consumed = int(dirent.Reclen)
+ if direntInode(dirent) == 0 { // File absent in directory.
+ return
+ }
+ switch dirent.Type {
+ case syscall.DT_REG:
+ typ = 0
+ case syscall.DT_DIR:
+ typ = os.ModeDir
+ case syscall.DT_LNK:
+ typ = os.ModeSymlink
+ case syscall.DT_BLK:
+ typ = os.ModeDevice
+ case syscall.DT_FIFO:
+ typ = os.ModeNamedPipe
+ case syscall.DT_SOCK:
+ typ = os.ModeSocket
+ case syscall.DT_UNKNOWN:
+ typ = unknownFileMode
+ default:
+ // Skip weird things.
+ // It's probably a DT_WHT (http://lwn.net/Articles/325369/)
+ // or something. Revisit if/when this package is moved outside
+ // of goimports. goimports only cares about regular files,
+ // symlinks, and directories.
+ return
+ }
+
+ nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
+ nameLen := direntNamlen(dirent)
+
+ // Special cases for common things:
+ if nameLen == 1 && nameBuf[0] == '.' {
+ name = "."
+ } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
+ name = ".."
+ } else {
+ name = string(nameBuf[:nameLen])
+ }
+ return
+}
diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
new file mode 100644
index 000000000..04bb96a36
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -0,0 +1,250 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gopathwalk is like filepath.Walk but specialized for finding Go
+// packages, particularly in $GOPATH and $GOROOT.
+package gopathwalk
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/tools/internal/fastwalk"
+)
+
+// Options controls the behavior of a Walk call.
+type Options struct {
+ Debug bool // Enable debug logging
+ ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
+}
+
+// RootType indicates the type of a Root.
+type RootType int
+
+const (
+ RootUnknown RootType = iota
+ RootGOROOT
+ RootGOPATH
+ RootCurrentModule
+ RootModuleCache
+ RootOther
+)
+
+// A Root is a starting point for a Walk.
+type Root struct {
+ Path string
+ Type RootType
+}
+
+// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
+func SrcDirsRoots(ctx *build.Context) []Root {
+ var roots []Root
+ roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT})
+ for _, p := range filepath.SplitList(ctx.GOPATH) {
+ roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
+ }
+ return roots
+}
+
+// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
+// For each package found, add will be called (concurrently) with the absolute
+// paths of the containing source directory and the package directory.
+// add will be called concurrently.
+func Walk(roots []Root, add func(root Root, dir string), opts Options) {
+ for _, root := range roots {
+ walkDir(root, add, opts)
+ }
+}
+
+func walkDir(root Root, add func(Root, string), opts Options) {
+ if _, err := os.Stat(root.Path); os.IsNotExist(err) {
+ if opts.Debug {
+ log.Printf("skipping nonexistant directory: %v", root.Path)
+ }
+ return
+ }
+ if opts.Debug {
+ log.Printf("scanning %s", root.Path)
+ }
+ w := &walker{
+ root: root,
+ add: add,
+ opts: opts,
+ }
+ w.init()
+ if err := fastwalk.Walk(root.Path, w.walk); err != nil {
+ log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
+ }
+
+ if opts.Debug {
+ log.Printf("scanned %s", root.Path)
+ }
+}
+
+// walker is the callback for fastwalk.Walk.
+type walker struct {
+ root Root // The source directory to scan.
+ add func(Root, string) // The callback that will be invoked for every possible Go package dir.
+ opts Options // Options passed to Walk by the user.
+
+ ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
+}
+
+// init initializes the walker based on its Options.
+func (w *walker) init() {
+ var ignoredPaths []string
+ if w.root.Type == RootModuleCache {
+ ignoredPaths = []string{"cache"}
+ }
+ if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
+ ignoredPaths = w.getIgnoredDirs(w.root.Path)
+ ignoredPaths = append(ignoredPaths, "v", "mod")
+ }
+
+ for _, p := range ignoredPaths {
+ full := filepath.Join(w.root.Path, p)
+ if fi, err := os.Stat(full); err == nil {
+ w.ignoredDirs = append(w.ignoredDirs, fi)
+ if w.opts.Debug {
+ log.Printf("Directory added to ignore list: %s", full)
+ }
+ } else if w.opts.Debug {
+ log.Printf("Error statting ignored directory: %v", err)
+ }
+ }
+}
+
+// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
+// of relative directories to ignore when scanning for go files.
+// The provided path is one of the $GOPATH entries with "src" appended.
+func (w *walker) getIgnoredDirs(path string) []string {
+ file := filepath.Join(path, ".goimportsignore")
+ slurp, err := ioutil.ReadFile(file)
+ if w.opts.Debug {
+ if err != nil {
+ log.Print(err)
+ } else {
+ log.Printf("Read %s", file)
+ }
+ }
+ if err != nil {
+ return nil
+ }
+
+ var ignoredDirs []string
+ bs := bufio.NewScanner(bytes.NewReader(slurp))
+ for bs.Scan() {
+ line := strings.TrimSpace(bs.Text())
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+ ignoredDirs = append(ignoredDirs, line)
+ }
+ return ignoredDirs
+}
+
+func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
+ for _, ignoredDir := range w.ignoredDirs {
+ if os.SameFile(fi, ignoredDir) {
+ return true
+ }
+ }
+ return false
+}
+
+func (w *walker) walk(path string, typ os.FileMode) error {
+ dir := filepath.Dir(path)
+ if typ.IsRegular() {
+ if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
+ // Doesn't make sense to have regular files
+ // directly in your $GOPATH/src or $GOROOT/src.
+ return fastwalk.SkipFiles
+ }
+ if !strings.HasSuffix(path, ".go") {
+ return nil
+ }
+
+ w.add(w.root, dir)
+ return fastwalk.SkipFiles
+ }
+ if typ == os.ModeDir {
+ base := filepath.Base(path)
+ if base == "" || base[0] == '.' || base[0] == '_' ||
+ base == "testdata" ||
+ (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
+ (!w.opts.ModulesEnabled && base == "node_modules") {
+ return filepath.SkipDir
+ }
+ fi, err := os.Lstat(path)
+ if err == nil && w.shouldSkipDir(fi) {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if typ == os.ModeSymlink {
+ base := filepath.Base(path)
+ if strings.HasPrefix(base, ".#") {
+ // Emacs noise.
+ return nil
+ }
+ fi, err := os.Lstat(path)
+ if err != nil {
+ // Just ignore it.
+ return nil
+ }
+ if w.shouldTraverse(dir, fi) {
+ return fastwalk.TraverseLink
+ }
+ }
+ return nil
+}
+
+// shouldTraverse reports whether the symlink fi, found in dir,
+// should be followed. It makes sure symlinks were never visited
+// before to avoid symlink loops.
+func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
+ path := filepath.Join(dir, fi.Name())
+ target, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return false
+ }
+ ts, err := os.Stat(target)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return false
+ }
+ if !ts.IsDir() {
+ return false
+ }
+ if w.shouldSkipDir(ts) {
+ return false
+ }
+ // Check for symlink loops by statting each directory component
+ // and seeing if any are the same file as ts.
+ for {
+ parent := filepath.Dir(path)
+ if parent == path {
+ // Made it to the root without seeing a cycle.
+ // Use this symlink.
+ return true
+ }
+ parentInfo, err := os.Stat(parent)
+ if err != nil {
+ return false
+ }
+ if os.SameFile(ts, parentInfo) {
+ // Cycle. Don't traverse.
+ return false
+ }
+ path = parent
+ }
+
+}
diff --git a/vendor/golang.org/x/tools/internal/module/module.go b/vendor/golang.org/x/tools/internal/module/module.go
new file mode 100644
index 000000000..9a4edb9de
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/module/module.go
@@ -0,0 +1,540 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package module defines the module.Version type
+// along with support code.
+package module
+
+// IMPORTANT NOTE
+//
+// This file essentially defines the set of valid import paths for the go command.
+// There are many subtle considerations, including Unicode ambiguity,
+// security, network, and file system representations.
+//
+// This file also defines the set of valid module path and version combinations,
+// another topic with many subtle considerations.
+//
+// Changes to the semantics in this file require approval from rsc.
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/internal/semver"
+)
+
+// A Version is defined by a module path and version pair.
+type Version struct {
+ Path string
+
+ // Version is usually a semantic version in canonical form.
+ // There are two exceptions to this general rule.
+ // First, the top-level target of a build has no specific version
+ // and uses Version = "".
+ // Second, during MVS calculations the version "none" is used
+ // to represent the decision to take no version of a given module.
+ Version string `json:",omitempty"`
+}
+
+// Check checks that a given module path, version pair is valid.
+// In addition to the path being a valid module path
+// and the version being a valid semantic version,
+// the two must correspond.
+// For example, the path "yaml/v2" only corresponds to
+// semantic versions beginning with "v2.".
+func Check(path, version string) error {
+ if err := CheckPath(path); err != nil {
+ return err
+ }
+ if !semver.IsValid(version) {
+ return fmt.Errorf("malformed semantic version %v", version)
+ }
+ _, pathMajor, _ := SplitPathVersion(path)
+ if !MatchPathMajor(version, pathMajor) {
+ if pathMajor == "" {
+ pathMajor = "v0 or v1"
+ }
+ if pathMajor[0] == '.' { // .v1
+ pathMajor = pathMajor[1:]
+ }
+ return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
+ }
+ return nil
+}
+
+// firstPathOK reports whether r can appear in the first element of a module path.
+// The first element of the path must be an LDH domain name, at least for now.
+// To avoid case ambiguity, the domain name must be entirely lower case.
+func firstPathOK(r rune) bool {
+ return r == '-' || r == '.' ||
+ '0' <= r && r <= '9' ||
+ 'a' <= r && r <= 'z'
+}
+
+// pathOK reports whether r can appear in an import path element.
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
+// This matches what "go get" has historically recognized in import paths.
+// TODO(rsc): We would like to allow Unicode letters, but that requires additional
+// care in the safe encoding (see note below).
+func pathOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
+ '0' <= r && r <= '9' ||
+ 'A' <= r && r <= 'Z' ||
+ 'a' <= r && r <= 'z'
+ }
+ return false
+}
+
+// fileNameOK reports whether r can appear in a file name.
+// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
+// If we expand the set of allowed characters here, we have to
+// work harder at detecting potential case-folding and normalization collisions.
+// See note about "safe encoding" below.
+func fileNameOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ // Entire set of ASCII punctuation, from which we remove characters:
+ // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
+ // We disallow some shell special characters: " ' * < > ? ` |
+ // (Note that some of those are disallowed by the Windows file system as well.)
+ // We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
+ // We allow spaces (U+0020) in file names.
+ const allowed = "!#$%&()+,-.=@[]^_{}~ "
+ if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
+ return true
+ }
+ for i := 0; i < len(allowed); i++ {
+ if rune(allowed[i]) == r {
+ return true
+ }
+ }
+ return false
+ }
+ // It may be OK to add more ASCII punctuation here, but only carefully.
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
+ return unicode.IsLetter(r)
+}
+
+// CheckPath checks that a module path is valid.
+func CheckPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed module path %q: %v", path, err)
+ }
+ i := strings.Index(path, "/")
+ if i < 0 {
+ i = len(path)
+ }
+ if i == 0 {
+ return fmt.Errorf("malformed module path %q: leading slash", path)
+ }
+ if !strings.Contains(path[:i], ".") {
+ return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
+ }
+ if path[0] == '-' {
+ return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
+ }
+ for _, r := range path[:i] {
+ if !firstPathOK(r) {
+ return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
+ }
+ }
+ if _, _, ok := SplitPathVersion(path); !ok {
+ return fmt.Errorf("malformed module path %q: invalid version", path)
+ }
+ return nil
+}
+
+// CheckImportPath checks that an import path is valid.
+func CheckImportPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed import path %q: %v", path, err)
+ }
+ return nil
+}
+
+// checkPath checks that a general path is valid.
+// It returns an error describing why but not mentioning path.
+// Because these checks apply to both module paths and import paths,
+// the caller is expected to add the "malformed ___ path %q: " prefix.
+// fileName indicates whether the final element of the path is a file name
+// (as opposed to a directory name).
+func checkPath(path string, fileName bool) error {
+ if !utf8.ValidString(path) {
+ return fmt.Errorf("invalid UTF-8")
+ }
+ if path == "" {
+ return fmt.Errorf("empty string")
+ }
+ if strings.Contains(path, "..") {
+ return fmt.Errorf("double dot")
+ }
+ if strings.Contains(path, "//") {
+ return fmt.Errorf("double slash")
+ }
+ if path[len(path)-1] == '/' {
+ return fmt.Errorf("trailing slash")
+ }
+ elemStart := 0
+ for i, r := range path {
+ if r == '/' {
+ if err := checkElem(path[elemStart:i], fileName); err != nil {
+ return err
+ }
+ elemStart = i + 1
+ }
+ }
+ if err := checkElem(path[elemStart:], fileName); err != nil {
+ return err
+ }
+ return nil
+}
+
+// checkElem checks whether an individual path element is valid.
+// fileName indicates whether the element is a file name (not a directory name).
+func checkElem(elem string, fileName bool) error {
+ if elem == "" {
+ return fmt.Errorf("empty path element")
+ }
+ if strings.Count(elem, ".") == len(elem) {
+ return fmt.Errorf("invalid path element %q", elem)
+ }
+ if elem[0] == '.' && !fileName {
+ return fmt.Errorf("leading dot in path element")
+ }
+ if elem[len(elem)-1] == '.' {
+ return fmt.Errorf("trailing dot in path element")
+ }
+ charOK := pathOK
+ if fileName {
+ charOK = fileNameOK
+ }
+ for _, r := range elem {
+ if !charOK(r) {
+ return fmt.Errorf("invalid char %q", r)
+ }
+ }
+
+ // Windows disallows a bunch of path elements, sadly.
+ // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+ short := elem
+ if i := strings.Index(short, "."); i >= 0 {
+ short = short[:i]
+ }
+ for _, bad := range badWindowsNames {
+ if strings.EqualFold(bad, short) {
+ return fmt.Errorf("disallowed path element %q", elem)
+ }
+ }
+ return nil
+}
+
+// CheckFilePath checks whether a slash-separated file path is valid.
+func CheckFilePath(path string) error {
+ if err := checkPath(path, true); err != nil {
+ return fmt.Errorf("malformed file path %q: %v", path, err)
+ }
+ return nil
+}
+
+// badWindowsNames are the reserved file path elements on Windows.
+// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+var badWindowsNames = []string{
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+}
+
+// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
+// and version is either empty or "/vN" for N >= 2.
+// As a special case, gopkg.in paths are recognized directly;
+// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
+func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
+ if strings.HasPrefix(path, "gopkg.in/") {
+ return splitGopkgIn(path)
+ }
+
+ i := len(path)
+ dot := false
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
+ if path[i-1] == '.' {
+ dot = true
+ }
+ i--
+ }
+ if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
+ return path, "", true
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
+func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
+ if !strings.HasPrefix(path, "gopkg.in/") {
+ return path, "", false
+ }
+ i := len(path)
+ if strings.HasSuffix(path, "-unstable") {
+ i -= len("-unstable")
+ }
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
+ i--
+ }
+ if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
+ // All gopkg.in paths must end in vN for some N.
+ return path, "", false
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// MatchPathMajor reports whether the semantic version v
+// matches the path major version pathMajor.
+func MatchPathMajor(v, pathMajor string) bool {
+ if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+ pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+ }
+ if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
+ // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
+ // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
+ return true
+ }
+ m := semver.Major(v)
+ if pathMajor == "" {
+ return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
+ }
+ return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
+}
+
+// CanonicalVersion returns the canonical form of the version string v.
+// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
+func CanonicalVersion(v string) string {
+ cv := semver.Canonical(v)
+ if semver.Build(v) == "+incompatible" {
+ cv += "+incompatible"
+ }
+ return cv
+}
+
+// Sort sorts the list by Path, breaking ties by comparing Versions.
+func Sort(list []Version) {
+ sort.Slice(list, func(i, j int) bool {
+ mi := list[i]
+ mj := list[j]
+ if mi.Path != mj.Path {
+ return mi.Path < mj.Path
+ }
+ // To help go.sum formatting, allow version/file.
+ // Compare semver prefix by semver rules,
+ // file by string order.
+ vi := mi.Version
+ vj := mj.Version
+ var fi, fj string
+ if k := strings.Index(vi, "/"); k >= 0 {
+ vi, fi = vi[:k], vi[k:]
+ }
+ if k := strings.Index(vj, "/"); k >= 0 {
+ vj, fj = vj[:k], vj[k:]
+ }
+ if vi != vj {
+ return semver.Compare(vi, vj) < 0
+ }
+ return fi < fj
+ })
+}
+
+// Safe encodings
+//
+// Module paths appear as substrings of file system paths
+// (in the download cache) and of web server URLs in the proxy protocol.
+// In general we cannot rely on file systems to be case-sensitive,
+// nor can we rely on web servers, since they read from file systems.
+// That is, we cannot rely on the file system to keep rsc.io/QUOTE
+// and rsc.io/quote separate. Windows and macOS don't.
+// Instead, we must never require two different casings of a file path.
+// Because we want the download cache to match the proxy protocol,
+// and because we want the proxy protocol to be possible to serve
+// from a tree of static files (which might be stored on a case-insensitive
+// file system), the proxy protocol must never require two different casings
+// of a URL path either.
+//
+// One possibility would be to make the safe encoding be the lowercase
+// hexadecimal encoding of the actual path bytes. This would avoid ever
+// needing different casings of a file path, but it would be fairly illegible
+// to most programmers when those paths appeared in the file system
+// (including in file paths in compiler errors and stack traces)
+// in web server logs, and so on. Instead, we want a safe encoding that
+// leaves most paths unaltered.
+//
+// The safe encoding is this:
+// replace every uppercase letter with an exclamation mark
+// followed by the letter's lowercase equivalent.
+//
+// For example,
+// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
+// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
+//
+// Import paths that avoid upper-case letters are left unchanged.
+// Note that because import paths are ASCII-only and avoid various
+// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
+// and avoids the same problematic punctuation.
+//
+// Import paths have never allowed exclamation marks, so there is no
+// need to define how to encode a literal !.
+//
+// Although paths are disallowed from using Unicode (see pathOK above),
+// the eventual plan is to allow Unicode letters as well, to assume that
+// file systems and URLs are Unicode-safe (storing UTF-8), and apply
+// the !-for-uppercase convention. Note however that not all runes that
+// are different but case-fold equivalent are an upper/lower pair.
+// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
+// are considered to case-fold to each other. When we do add Unicode
+// letters, we must not assume that upper/lower are the only case-equivalent pairs.
+// Perhaps the Kelvin symbol would be disallowed entirely, for example.
+// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
+//
+// Also, it would be nice to allow Unicode marks as well as letters,
+// but marks include combining marks, and then we must deal not
+// only with case folding but also normalization: both U+00E9 ('é')
+// and U+0065 U+0301 ('e' followed by combining acute accent)
+// look the same on the page and are treated by some file systems
+// as the same path. If we do allow Unicode marks in paths, there
+// must be some kind of normalization to allow only one canonical
+// encoding of any character used in an import path.
+
+// EncodePath returns the safe encoding of the given module path.
+// It fails if the module path is invalid.
+func EncodePath(path string) (encoding string, err error) {
+ if err := CheckPath(path); err != nil {
+ return "", err
+ }
+
+ return encodeString(path)
+}
+
+// EncodeVersion returns the safe encoding of the given module version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func EncodeVersion(v string) (encoding string, err error) {
+ if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
+ return "", fmt.Errorf("disallowed version string %q", v)
+ }
+ return encodeString(v)
+}
+
+func encodeString(s string) (encoding string, err error) {
+ haveUpper := false
+ for _, r := range s {
+ if r == '!' || r >= utf8.RuneSelf {
+ // This should be disallowed by CheckPath, but diagnose anyway.
+ // The correctness of the encoding loop below depends on it.
+ return "", fmt.Errorf("internal error: inconsistency in EncodePath")
+ }
+ if 'A' <= r && r <= 'Z' {
+ haveUpper = true
+ }
+ }
+
+ if !haveUpper {
+ return s, nil
+ }
+
+ var buf []byte
+ for _, r := range s {
+ if 'A' <= r && r <= 'Z' {
+ buf = append(buf, '!', byte(r+'a'-'A'))
+ } else {
+ buf = append(buf, byte(r))
+ }
+ }
+ return string(buf), nil
+}
+
+// DecodePath returns the module path of the given safe encoding.
+// It fails if the encoding is invalid or encodes an invalid path.
+func DecodePath(encoding string) (path string, err error) {
+ path, ok := decodeString(encoding)
+ if !ok {
+ return "", fmt.Errorf("invalid module path encoding %q", encoding)
+ }
+ if err := CheckPath(path); err != nil {
+ return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
+ }
+ return path, nil
+}
+
+// DecodeVersion returns the version string for the given safe encoding.
+// It fails if the encoding is invalid or encodes an invalid version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func DecodeVersion(encoding string) (v string, err error) {
+ v, ok := decodeString(encoding)
+ if !ok {
+ return "", fmt.Errorf("invalid version encoding %q", encoding)
+ }
+ if err := checkElem(v, true); err != nil {
+ return "", fmt.Errorf("disallowed version string %q", v)
+ }
+ return v, nil
+}
+
+func decodeString(encoding string) (string, bool) {
+ var buf []byte
+
+ bang := false
+ for _, r := range encoding {
+ if r >= utf8.RuneSelf {
+ return "", false
+ }
+ if bang {
+ bang = false
+ if r < 'a' || 'z' < r {
+ return "", false
+ }
+ buf = append(buf, byte(r+'A'-'a'))
+ continue
+ }
+ if r == '!' {
+ bang = true
+ continue
+ }
+ if 'A' <= r && r <= 'Z' {
+ return "", false
+ }
+ buf = append(buf, byte(r))
+ }
+ if bang {
+ return "", false
+ }
+ return string(buf), true
+}
diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go
new file mode 100644
index 000000000..4af7118e5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/semver/semver.go
@@ -0,0 +1,388 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+// parsed returns the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+ err string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+ pv, ok1 := parse(v)
+ pw, ok2 := parse(w)
+ if !ok1 && !ok2 {
+ return 0
+ }
+ if !ok1 {
+ return -1
+ }
+ if !ok2 {
+ return +1
+ }
+ if c := compareInt(pv.major, pw.major); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.minor, pw.minor); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.patch, pw.patch); c != 0 {
+ return c
+ }
+ return comparePrerelease(pv.prerelease, pw.prerelease)
+}
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+func Max(v, w string) string {
+ v = Canonical(v)
+ w = Canonical(w)
+ if Compare(v, w) > 0 {
+ return v
+ }
+ return w
+}
+
+func parse(v string) (p parsed, ok bool) {
+ if v == "" || v[0] != 'v' {
+ p.err = "missing v prefix"
+ return
+ }
+ p.major, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad major version"
+ return
+ }
+ if v == "" {
+ p.minor = "0"
+ p.patch = "0"
+ p.short = ".0.0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad minor prefix"
+ ok = false
+ return
+ }
+ p.minor, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad minor version"
+ return
+ }
+ if v == "" {
+ p.patch = "0"
+ p.short = ".0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad patch prefix"
+ ok = false
+ return
+ }
+ p.patch, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad patch version"
+ return
+ }
+ if len(v) > 0 && v[0] == '-' {
+ p.prerelease, v, ok = parsePrerelease(v)
+ if !ok {
+ p.err = "bad prerelease"
+ return
+ }
+ }
+ if len(v) > 0 && v[0] == '+' {
+ p.build, v, ok = parseBuild(v)
+ if !ok {
+ p.err = "bad build"
+ return
+ }
+ }
+ if v != "" {
+ p.err = "junk on end"
+ ok = false
+ return
+ }
+ ok = true
+ return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+ if v == "" {
+ return
+ }
+ if v[0] < '0' || '9' < v[0] {
+ return
+ }
+ i := 1
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ if v[0] == '0' && i != 1 {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+ // "A pre-release version MAY be denoted by appending a hyphen and
+ // a series of dot separated identifiers immediately following the patch version.
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+ if v == "" || v[0] != '-' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) && v[i] != '+' {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+ if v == "" || v[0] != '+' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) {
+ if !isIdentChar(v[i]) {
+ return
+ }
+ if v[i] == '.' {
+ if start == i {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v)
+}
+
+func compareInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func comparePrerelease(x, y string) int {
+ // "When major, minor, and patch are equal, a pre-release version has
+ // lower precedence than a normal version.
+ // Example: 1.0.0-alpha < 1.0.0.
+ // Precedence for two pre-release versions with the same major, minor,
+ // and patch version MUST be determined by comparing each dot separated
+ // identifier from left to right until a difference is found as follows:
+ // identifiers consisting of only digits are compared numerically and
+ // identifiers with letters or hyphens are compared lexically in ASCII
+ // sort order. Numeric identifiers always have lower precedence than
+ // non-numeric identifiers. A larger set of pre-release fields has a
+ // higher precedence than a smaller set, if all of the preceding
+ // identifiers are equal.
+ // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+ // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+ if x == y {
+ return 0
+ }
+ if x == "" {
+ return +1
+ }
+ if y == "" {
+ return -1
+ }
+ for x != "" && y != "" {
+ x = x[1:] // skip - or .
+ y = y[1:] // skip - or .
+ var dx, dy string
+ dx, x = nextIdent(x)
+ dy, y = nextIdent(y)
+ if dx != dy {
+ ix := isNum(dx)
+ iy := isNum(dy)
+ if ix != iy {
+ if ix {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if ix {
+ if len(dx) < len(dy) {
+ return -1
+ }
+ if len(dx) > len(dy) {
+ return +1
+ }
+ }
+ if dx < dy {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ }
+ if x == "" {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func nextIdent(x string) (dx, rest string) {
+ i := 0
+ for i < len(x) && x[i] != '.' {
+ i++
+ }
+ return x[:i], x[i:]
+}
diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go
index 26548b63c..3b4afedf1 100644
--- a/vendor/gopkg.in/inf.v0/dec.go
+++ b/vendor/gopkg.in/inf.v0/dec.go
@@ -104,7 +104,7 @@ var bigInt = [...]*big.Int{
var exp10cache [64]big.Int = func() [64]big.Int {
e10, e10i := [64]big.Int{}, bigInt[1]
- for i := range e10 {
+ for i, _ := range e10 {
e10[i].Set(e10i)
e10i = new(big.Int).Mul(e10i, bigInt[10])
}
diff --git a/vendor/gopkg.in/tomb.v1/tomb.go b/vendor/gopkg.in/tomb.v1/tomb.go
index af11b2605..9aec56d82 100644
--- a/vendor/gopkg.in/tomb.v1/tomb.go
+++ b/vendor/gopkg.in/tomb.v1/tomb.go
@@ -1,10 +1,10 @@
// Copyright (c) 2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
-//
+//
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
-//
+//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
@@ -13,7 +13,7 @@
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -79,7 +79,7 @@ type Tomb struct {
var (
ErrStillAlive = errors.New("tomb: still alive")
- ErrDying = errors.New("tomb: dying")
+ ErrDying = errors.New("tomb: dying")
)
func (t *Tomb) init() {
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
index b0c436c4a..7c1f5fac3 100644
--- a/vendor/gopkg.in/yaml.v2/readerc.go
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -95,7 +95,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
- // for that to be the case, and there are tests
+ // for that to be the case, and there are tests
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
index bf830eee5..6c151db6f 100644
--- a/vendor/gopkg.in/yaml.v2/resolve.go
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -180,7 +180,7 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
- intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
index 2edd73405..4c45e660a 100644
--- a/vendor/gopkg.in/yaml.v2/sorter.go
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -52,7 +52,7 @@ func (l keyList) Less(i, j int) bool {
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
- for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index 60fbf353f..4dec6f2f8 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}
var map_DaemonSetStatus = map[string]string{
- "": "DaemonSetStatus represents the current status of a daemon set.",
+ "": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index 0d6c02ed4..e2f133b51 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}
var map_DaemonSetStatus = map[string]string{
- "": "DaemonSetStatus represents the current status of a daemon set.",
+ "": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
index b3e067a30..ec364f53e 100644
--- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
@@ -190,8 +190,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string {
}
var map_ResourceMetricSource = map[string]string{
- "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
- "name": "name is the name of the resource in question.",
+ "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
+ "name": "name is the name of the resource in question.",
"targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
"targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
}
@@ -201,8 +201,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string {
}
var map_ResourceMetricStatus = map[string]string{
- "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
- "name": "name is the name of the resource in question.",
+ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+ "name": "name is the name of the resource in question.",
"currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
"currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
}
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
index 30d9862aa..12cfa7649 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
@@ -191,8 +191,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string {
}
var map_ResourceMetricSource = map[string]string{
- "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
- "name": "name is the name of the resource in question.",
+ "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
+ "name": "name is the name of the resource in question.",
"targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
"targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
}
@@ -202,8 +202,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string {
}
var map_ResourceMetricStatus = map[string]string{
- "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
- "name": "name is the name of the resource in question.",
+ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+ "name": "name is the name of the resource in question.",
"currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
"currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
}
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 87da71975..83433be48 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -1631,7 +1631,7 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
}
var map_Probe = map[string]string{
- "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
+ "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
"initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
@@ -2216,23 +2216,23 @@ var map_VolumeSource = map[string]string{
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md",
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
- "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
- "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
- "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
- "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
- "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
- "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
- "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
- "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
- "configMap": "ConfigMap represents a configMap that should populate this volume",
- "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
- "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
- "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
- "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
- "projected": "Items for all in one resources secrets, configmaps, and downward API",
- "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
- "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
- "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+ "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
+ "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "configMap": "ConfigMap represents a configMap that should populate this volume",
+ "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ "projected": "Items for all in one resources secrets, configmaps, and downward API",
+ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
}
func (VolumeSource) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 6ad4f9345..236d934fa 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -113,7 +113,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}
var map_DaemonSetStatus = map[string]string{
- "": "DaemonSetStatus represents the current status of a daemon set.",
+ "": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
index ceb96eeb1..280ae5a82 100644
--- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
@@ -28,7 +28,7 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE
var map_AggregationRule = map[string]string{
- "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+ "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
}
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
index 88f084ce2..e56cd0f10 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
@@ -28,7 +28,7 @@ package v1alpha1
// AUTO-GENERATED FUNCTIONS START HERE
var map_AggregationRule = map[string]string{
- "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+ "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
}
diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
index 43a8493ad..6180d6d43 100644
--- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
@@ -28,7 +28,7 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE
var map_AggregationRule = map[string]string{
- "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+ "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
}
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
index 398b48da3..faca8e939 100644
--- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
@@ -49,7 +49,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string {
}
var map_VolumeAttachmentSource = map[string]string{
- "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
"persistentVolumeName": "Name of the persistent volume to attach.",
}
diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
index e0ec9a82a..c9c20c453 100644
--- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
@@ -74,7 +74,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string {
}
var map_VolumeAttachmentSource = map[string]string{
- "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
"persistentVolumeName": "Name of the persistent volume to attach.",
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
index a55cf8a09..b9670071c 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
@@ -135,12 +135,12 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata
CreationTimestamp: m.GetCreationTimestamp(),
DeletionTimestamp: m.GetDeletionTimestamp(),
DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(),
- Labels: m.GetLabels(),
- Annotations: m.GetAnnotations(),
- OwnerReferences: m.GetOwnerReferences(),
- Finalizers: m.GetFinalizers(),
- ClusterName: m.GetClusterName(),
- Initializers: m.GetInitializers(),
+ Labels: m.GetLabels(),
+ Annotations: m.GetAnnotations(),
+ OwnerReferences: m.GetOwnerReferences(),
+ Finalizers: m.GetFinalizers(),
+ ClusterName: m.GetClusterName(),
+ Initializers: m.GetInitializers(),
},
}
}
diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go
index 05faf48eb..6f26b2275 100644
--- a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go
+++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go
@@ -137,7 +137,7 @@ type ChannelProtocolConfig struct {
// channels.
func NewDefaultChannelProtocols(channels []ChannelType) map[string]ChannelProtocolConfig {
return map[string]ChannelProtocolConfig{
- "": {Binary: true, Channels: channels},
+ "": {Binary: true, Channels: channels},
ChannelWebSocketProtocol: {Binary: true, Channels: channels},
Base64ChannelWebSocketProtocol: {Binary: false, Channels: channels},
}
diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go
index 4253c17cf..9dd165bfa 100644
--- a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go
+++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go
@@ -48,7 +48,7 @@ type ReaderProtocolConfig struct {
// subprotocols "", "channel.k8s.io", "base64.channel.k8s.io".
func NewDefaultReaderProtocols() map[string]ReaderProtocolConfig {
return map[string]ReaderProtocolConfig{
- "": {Binary: true},
+ "": {Binary: true},
binaryWebSocketProtocol: {Binary: true},
base64BinaryWebSocketProtocol: {Binary: false},
}
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index f29a4b331..5f8c507f9 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -86,7 +86,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve
resyncCheckPeriod: defaultEventHandlerResyncPeriod,
defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
- clock: realClock,
+ clock: realClock,
}
return sharedIndexInformer
}
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
index 7af1a5f30..fb7f5facc 100644
--- a/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -72,7 +72,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
- IsCA: true,
+ IsCA: true,
}
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
@@ -153,7 +153,7 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
- IsCA: true,
+ IsCA: true,
}
caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
diff --git a/vendor/k8s.io/kubernetes/Godeps/LICENSES b/vendor/k8s.io/kubernetes/Godeps/LICENSES
index dc81ab7d1..15af22a45 100644
--- a/vendor/k8s.io/kubernetes/Godeps/LICENSES
+++ b/vendor/k8s.io/kubernetes/Godeps/LICENSES
@@ -90416,80 +90416,80 @@ SOFTWARE.
================================================================================
= vendor/github.com/vmware/photon-controller-go-sdk/photon licensed under: =
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
-(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
-(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
-
-You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied.
-
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
= vendor/github.com/vmware/photon-controller-go-sdk/LICENSE 0de60303c844eac44e45012dac1987de
================================================================================
@@ -90498,80 +90498,80 @@ To apply the Apache License to your work, attach the following boilerplate notic
================================================================================
= vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave licensed under: =
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
-(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
-(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
-
-You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied.
-
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
= vendor/github.com/vmware/photon-controller-go-sdk/LICENSE 0de60303c844eac44e45012dac1987de
================================================================================
@@ -90580,80 +90580,80 @@ To apply the Apache License to your work, attach the following boilerplate notic
================================================================================
= vendor/github.com/vmware/photon-controller-go-sdk/SSPI licensed under: =
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
-(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
-(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
-
-You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied.
-
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
= vendor/github.com/vmware/photon-controller-go-sdk/LICENSE 0de60303c844eac44e45012dac1987de
================================================================================
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go
index 198918ca1..1b745ef93 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go
@@ -78,12 +78,12 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
}
controller := &PersistentVolumeController{
- volumes: newPersistentVolumeOrderedIndex(),
- claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
- kubeClient: p.KubeClient,
- eventRecorder: eventRecorder,
- runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
- cloud: p.Cloud,
+ volumes: newPersistentVolumeOrderedIndex(),
+ claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
+ kubeClient: p.KubeClient,
+ eventRecorder: eventRecorder,
+ runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
+ cloud: p.Cloud,
enableDynamicProvisioning: p.EnableDynamicProvisioning,
clusterName: p.ClusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
index 67227b30f..dd8f24c28 100644
--- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
+++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
@@ -284,8 +284,8 @@ func init() {
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{
- AppArmor: {Default: true, PreRelease: utilfeature.Beta},
- DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha},
+ AppArmor: {Default: true, PreRelease: utilfeature.Beta},
+ DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
Accelerators: {Default: false, PreRelease: utilfeature.Alpha},
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go
index 570455ef6..0cc0c3ae0 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go
@@ -318,10 +318,10 @@ type progressReporter struct {
// newProgressReporter creates a new progressReporter for specific image with specified reporting interval
func newProgressReporter(image string, cancel context.CancelFunc, imagePullProgressDeadline time.Duration) *progressReporter {
return &progressReporter{
- progress: newProgress(),
- image: image,
- cancel: cancel,
- stopCh: make(chan struct{}),
+ progress: newProgress(),
+ image: image,
+ cancel: cancel,
+ stopCh: make(chan struct{}),
imagePullProgressDeadline: imagePullProgressDeadline,
}
}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
index cf916593d..f19229ff1 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -504,8 +504,8 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
},
}
container = &v1.Container{
- Name: l.ContainerName,
- Ports: a.ContainerPorts,
+ Name: l.ContainerName,
+ Ports: a.ContainerPorts,
TerminationMessagePath: a.TerminationMessagePath,
}
if a.PreStopHandler != nil {
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go
index c56af6c32..919ed5a78 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go
@@ -58,9 +58,9 @@ func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder P
streamChan: streamChan,
streamPairs: make(map[string]*httpStreamPair),
streamCreationTimeout: streamCreationTimeout,
- pod: podName,
- uid: uid,
- forwarder: portForwarder,
+ pod: podName,
+ uid: uid,
+ forwarder: portForwarder,
}
h.run()
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
index 7df41bed0..a8caabdde 100644
--- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
@@ -72,11 +72,11 @@ func NewOperationGenerator(kubeClient clientset.Interface,
blkUtil volumepathhandler.BlockVolumePathHandler) OperationGenerator {
return &operationGenerator{
- kubeClient: kubeClient,
- volumePluginMgr: volumePluginMgr,
- recorder: recorder,
+ kubeClient: kubeClient,
+ volumePluginMgr: volumePluginMgr,
+ recorder: recorder,
checkNodeCapabilitiesBeforeMount: checkNodeCapabilitiesBeforeMount,
- blkUtil: blkUtil,
+ blkUtil: blkUtil,
}
}