summaryrefslogtreecommitdiff
path: root/vendor/helm.sh
diff options
context:
space:
mode:
authorzhu733756 <zhu733756@kubesphere.io>2022-03-03 17:01:05 +0800
committerzhu733756 <zhu733756@kubesphere.io>2022-03-03 17:03:17 +0800
commit231a52442bde834132fae929a0e5974647e56404 (patch)
tree1282d9695eaafb35c3bc9ca1639027d43664ffa1 /vendor/helm.sh
parentremove iptablesmgr static file (diff)
downloadkubeedge-231a52442bde834132fae929a0e5974647e56404.tar.gz
fix vendor/licenses && go.mod
Signed-off-by: zhu733756 <zhu733756@kubesphere.io>
Diffstat (limited to 'vendor/helm.sh')
-rw-r--r--vendor/helm.sh/helm/v3/LICENSE202
-rw-r--r--vendor/helm.sh/helm/v3/internal/experimental/registry/client.go477
-rw-r--r--vendor/helm.sh/helm/v3/internal/experimental/registry/constants.go37
-rw-r--r--vendor/helm.sh/helm/v3/internal/experimental/registry/util.go56
-rw-r--r--vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go51
-rw-r--r--vendor/helm.sh/helm/v3/internal/ignore/doc.go67
-rw-r--r--vendor/helm.sh/helm/v3/internal/ignore/rules.go228
-rw-r--r--vendor/helm.sh/helm/v3/internal/resolver/resolver.go235
-rw-r--r--vendor/helm.sh/helm/v3/internal/sympath/walk.go119
-rw-r--r--vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go372
-rw-r--r--vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go58
-rw-r--r--vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go69
-rw-r--r--vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go178
-rw-r--r--vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go58
-rw-r--r--vendor/helm.sh/helm/v3/internal/tlsutil/tls.go76
-rw-r--r--vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go73
-rw-r--r--vendor/helm.sh/helm/v3/internal/version/version.go81
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/action.go420
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/dependency.go230
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/doc.go22
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/get.go47
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/get_values.go60
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/history.go58
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/hooks.go151
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/install.go770
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/lazyclient.go197
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/lint.go118
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/list.go323
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/package.go182
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/pull.go170
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/release_testing.go138
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/resource_policy.go46
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/rollback.go241
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/show.go144
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/status.go51
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/uninstall.go222
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/upgrade.go570
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/validate.go184
-rw-r--r--vendor/helm.sh/helm/v3/pkg/action/verify.go59
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/chart.go173
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/dependency.go79
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/errors.go30
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/file.go27
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go196
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go120
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/loader/load.go200
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chart/metadata.go160
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go126
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go93
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go206
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go34
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/create.go687
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go285
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/doc.go44
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/errors.go35
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/expand.go91
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go87
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/save.go244
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go107
-rw-r--r--vendor/helm.sh/helm/v3/pkg/chartutil/values.go212
-rw-r--r--vendor/helm.sh/helm/v3/pkg/cli/environment.go186
-rw-r--r--vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go384
-rw-r--r--vendor/helm.sh/helm/v3/pkg/downloader/doc.go23
-rw-r--r--vendor/helm.sh/helm/v3/pkg/downloader/manager.go903
-rw-r--r--vendor/helm.sh/helm/v3/pkg/engine/doc.go23
-rw-r--r--vendor/helm.sh/helm/v3/pkg/engine/engine.go401
-rw-r--r--vendor/helm.sh/helm/v3/pkg/engine/files.go160
-rw-r--r--vendor/helm.sh/helm/v3/pkg/engine/funcs.go177
-rw-r--r--vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go124
-rw-r--r--vendor/helm.sh/helm/v3/pkg/gates/doc.go20
-rw-r--r--vendor/helm.sh/helm/v3/pkg/gates/gates.go38
-rw-r--r--vendor/helm.sh/helm/v3/pkg/getter/doc.go21
-rw-r--r--vendor/helm.sh/helm/v3/pkg/getter/getter.go184
-rw-r--r--vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go145
-rw-r--r--vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go86
-rw-r--r--vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go102
-rw-r--r--vendor/helm.sh/helm/v3/pkg/helmpath/home.go44
-rw-r--r--vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go72
-rw-r--r--vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go35
-rw-r--r--vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go46
-rw-r--r--vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go24
-rw-r--r--vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go34
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/client.go663
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/config.go30
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/converter.go69
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/factory.go38
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go117
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go110
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/interface.go82
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/ready.go397
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/resource.go85
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go26
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/result.go28
-rw-r--r--vendor/helm.sh/helm/v3/pkg/kube/wait.go128
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/lint.go37
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go210
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go82
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go95
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/rules/template.go333
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/rules/values.go87
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/support/doc.go22
-rw-r--r--vendor/helm.sh/helm/v3/pkg/lint/support/message.go76
-rw-r--r--vendor/helm.sh/helm/v3/pkg/plugin/hooks.go29
-rw-r--r--vendor/helm.sh/helm/v3/pkg/plugin/plugin.go282
-rw-r--r--vendor/helm.sh/helm/v3/pkg/postrender/exec.go108
-rw-r--r--vendor/helm.sh/helm/v3/pkg/postrender/postrender.go29
-rw-r--r--vendor/helm.sh/helm/v3/pkg/provenance/doc.go37
-rw-r--r--vendor/helm.sh/helm/v3/pkg/provenance/sign.go409
-rw-r--r--vendor/helm.sh/helm/v3/pkg/release/hook.go106
-rw-r--r--vendor/helm.sh/helm/v3/pkg/release/info.go36
-rw-r--r--vendor/helm.sh/helm/v3/pkg/release/mock.go116
-rw-r--r--vendor/helm.sh/helm/v3/pkg/release/release.go49
-rw-r--r--vendor/helm.sh/helm/v3/pkg/release/responses.go24
-rw-r--r--vendor/helm.sh/helm/v3/pkg/release/status.go49
-rw-r--r--vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go78
-rw-r--r--vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go156
-rw-r--r--vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go72
-rw-r--r--vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go233
-rw-r--r--vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go78
-rw-r--r--vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go309
-rw-r--r--vendor/helm.sh/helm/v3/pkg/repo/doc.go93
-rw-r--r--vendor/helm.sh/helm/v3/pkg/repo/index.go356
-rw-r--r--vendor/helm.sh/helm/v3/pkg/repo/repo.go123
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go257
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go105
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go48
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go240
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/records.go124
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go250
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go496
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/driver/util.go85
-rw-r--r--vendor/helm.sh/helm/v3/pkg/storage/storage.go266
-rw-r--r--vendor/helm.sh/helm/v3/pkg/strvals/doc.go32
-rw-r--r--vendor/helm.sh/helm/v3/pkg/strvals/parser.go446
-rw-r--r--vendor/helm.sh/helm/v3/pkg/time/time.go91
135 files changed, 21235 insertions, 0 deletions
diff --git a/vendor/helm.sh/helm/v3/LICENSE b/vendor/helm.sh/helm/v3/LICENSE
new file mode 100644
index 000000000..21c57fae2
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The Kubernetes Authors All Rights Reserved
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/helm.sh/helm/v3/internal/experimental/registry/client.go b/vendor/helm.sh/helm/v3/internal/experimental/registry/client.go
new file mode 100644
index 000000000..cc9e1fe79
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/experimental/registry/client.go
@@ -0,0 +1,477 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v3/internal/experimental/registry"
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/containerd/containerd/remotes"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "oras.land/oras-go/pkg/auth"
+ dockerauth "oras.land/oras-go/pkg/auth/docker"
+ "oras.land/oras-go/pkg/content"
+ "oras.land/oras-go/pkg/oras"
+
+ "helm.sh/helm/v3/internal/version"
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/helmpath"
+)
+
+type (
+ // Client works with OCI-compliant registries
+ Client struct {
+ debug bool
+ // path to repository config file e.g. ~/.docker/config.json
+ credentialsFile string
+ out io.Writer
+ authorizer auth.Client
+ resolver remotes.Resolver
+ }
+
+ // ClientOption allows specifying various settings configurable by the user for overriding the defaults
+ // used when creating a new default client
+ ClientOption func(*Client)
+)
+
+// NewClient returns a new registry client with config
+func NewClient(options ...ClientOption) (*Client, error) {
+ client := &Client{
+ out: ioutil.Discard,
+ }
+ for _, option := range options {
+ option(client)
+ }
+ if client.credentialsFile == "" {
+ client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename)
+ }
+ if client.authorizer == nil {
+ authClient, err := dockerauth.NewClient(client.credentialsFile)
+ if err != nil {
+ return nil, err
+ }
+ client.authorizer = authClient
+ }
+ if client.resolver == nil {
+ headers := http.Header{}
+ headers.Set("User-Agent", version.GetUserAgent())
+ opts := []auth.ResolverOption{auth.WithResolverHeaders(headers)}
+ resolver, err := client.authorizer.ResolverWithOpts(opts...)
+ if err != nil {
+ return nil, err
+ }
+ client.resolver = resolver
+ }
+ return client, nil
+}
+
+// ClientOptDebug returns a function that sets the debug setting on client options set
+func ClientOptDebug(debug bool) ClientOption {
+ return func(client *Client) {
+ client.debug = debug
+ }
+}
+
+// ClientOptWriter returns a function that sets the writer setting on client options set
+func ClientOptWriter(out io.Writer) ClientOption {
+ return func(client *Client) {
+ client.out = out
+ }
+}
+
+// ClientOptCredentialsFile returns a function that sets the credentialsFile setting on a client options set
+func ClientOptCredentialsFile(credentialsFile string) ClientOption {
+ return func(client *Client) {
+ client.credentialsFile = credentialsFile
+ }
+}
+
+type (
+ // LoginOption allows specifying various settings on login
+ LoginOption func(*loginOperation)
+
+ loginOperation struct {
+ username string
+ password string
+ insecure bool
+ }
+)
+
+// Login logs into a registry
+func (c *Client) Login(host string, options ...LoginOption) error {
+ operation := &loginOperation{}
+ for _, option := range options {
+ option(operation)
+ }
+ authorizerLoginOpts := []auth.LoginOption{
+ auth.WithLoginContext(ctx(c.out, c.debug)),
+ auth.WithLoginHostname(host),
+ auth.WithLoginUsername(operation.username),
+ auth.WithLoginSecret(operation.password),
+ auth.WithLoginUserAgent(version.GetUserAgent()),
+ }
+ if operation.insecure {
+ authorizerLoginOpts = append(authorizerLoginOpts, auth.WithLoginInsecure())
+ }
+ if err := c.authorizer.LoginWithOpts(authorizerLoginOpts...); err != nil {
+ return err
+ }
+ fmt.Fprintln(c.out, "Login Succeeded")
+ return nil
+}
+
+// LoginOptBasicAuth returns a function that sets the username/password settings on login
+func LoginOptBasicAuth(username string, password string) LoginOption {
+ return func(operation *loginOperation) {
+ operation.username = username
+ operation.password = password
+ }
+}
+
+// LoginOptInsecure returns a function that sets the insecure setting on login
+func LoginOptInsecure(insecure bool) LoginOption {
+ return func(operation *loginOperation) {
+ operation.insecure = insecure
+ }
+}
+
+type (
+ // LogoutOption allows specifying various settings on logout
+ LogoutOption func(*logoutOperation)
+
+ logoutOperation struct{}
+)
+
+// Logout logs out of a registry
+func (c *Client) Logout(host string, opts ...LogoutOption) error {
+ operation := &logoutOperation{}
+ for _, opt := range opts {
+ opt(operation)
+ }
+ if err := c.authorizer.Logout(ctx(c.out, c.debug), host); err != nil {
+ return err
+ }
+ fmt.Fprintf(c.out, "Removing login credentials for %s\n", host)
+ return nil
+}
+
+type (
+ // PullOption allows specifying various settings on pull
+ PullOption func(*pullOperation)
+
+ // PullResult is the result returned upon successful pull.
+ PullResult struct {
+ Manifest *descriptorPullSummary `json:"manifest"`
+ Config *descriptorPullSummary `json:"config"`
+ Chart *descriptorPullSummaryWithMeta `json:"chart"`
+ Prov *descriptorPullSummary `json:"prov"`
+ Ref string `json:"ref"`
+ }
+
+ descriptorPullSummary struct {
+ Data []byte `json:"-"`
+ Digest string `json:"digest"`
+ Size int64 `json:"size"`
+ }
+
+ descriptorPullSummaryWithMeta struct {
+ descriptorPullSummary
+ Meta *chart.Metadata `json:"meta"`
+ }
+
+ pullOperation struct {
+ withChart bool
+ withProv bool
+ ignoreMissingProv bool
+ }
+)
+
+// Pull downloads a chart from a registry
+func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
+ operation := &pullOperation{
+ withChart: true, // By default, always download the chart layer
+ }
+ for _, option := range options {
+ option(operation)
+ }
+ if !operation.withChart && !operation.withProv {
+ return nil, errors.New(
+ "must specify at least one layer to pull (chart/prov)")
+ }
+ store := content.NewMemoryStore()
+ allowedMediaTypes := []string{
+ ConfigMediaType,
+ }
+ minNumDescriptors := 1 // 1 for the config
+ if operation.withChart {
+ minNumDescriptors++
+ allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType)
+ }
+ if operation.withProv {
+ if !operation.ignoreMissingProv {
+ minNumDescriptors++
+ }
+ allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType)
+ }
+ manifest, descriptors, err := oras.Pull(ctx(c.out, c.debug), c.resolver, ref, store,
+ oras.WithPullEmptyNameAllowed(),
+ oras.WithAllowedMediaTypes(allowedMediaTypes))
+ if err != nil {
+ return nil, err
+ }
+ numDescriptors := len(descriptors)
+ if numDescriptors < minNumDescriptors {
+ return nil, errors.New(
+ fmt.Sprintf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d",
+ minNumDescriptors, numDescriptors))
+ }
+ var configDescriptor *ocispec.Descriptor
+ var chartDescriptor *ocispec.Descriptor
+ var provDescriptor *ocispec.Descriptor
+ for _, descriptor := range descriptors {
+ d := descriptor
+ switch d.MediaType {
+ case ConfigMediaType:
+ configDescriptor = &d
+ case ChartLayerMediaType:
+ chartDescriptor = &d
+ case ProvLayerMediaType:
+ provDescriptor = &d
+ case LegacyChartLayerMediaType:
+ chartDescriptor = &d
+ fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType)
+ }
+ }
+ if configDescriptor == nil {
+ return nil, errors.New(
+ fmt.Sprintf("could not load config with mediatype %s", ConfigMediaType))
+ }
+ if operation.withChart && chartDescriptor == nil {
+ return nil, errors.New(
+ fmt.Sprintf("manifest does not contain a layer with mediatype %s",
+ ChartLayerMediaType))
+ }
+ var provMissing bool
+ if operation.withProv && provDescriptor == nil {
+ if operation.ignoreMissingProv {
+ provMissing = true
+ } else {
+ return nil, errors.New(
+ fmt.Sprintf("manifest does not contain a layer with mediatype %s",
+ ProvLayerMediaType))
+ }
+ }
+ result := &PullResult{
+ Manifest: &descriptorPullSummary{
+ Digest: manifest.Digest.String(),
+ Size: manifest.Size,
+ },
+ Config: &descriptorPullSummary{
+ Digest: configDescriptor.Digest.String(),
+ Size: configDescriptor.Size,
+ },
+ Chart: &descriptorPullSummaryWithMeta{},
+ Prov: &descriptorPullSummary{},
+ Ref: ref,
+ }
+ var getManifestErr error
+ if _, manifestData, ok := store.Get(manifest); !ok {
+ getManifestErr = errors.Errorf("Unable to retrieve blob with digest %s", manifest.Digest)
+ } else {
+ result.Manifest.Data = manifestData
+ }
+ if getManifestErr != nil {
+ return nil, getManifestErr
+ }
+ var getConfigDescriptorErr error
+ if _, configData, ok := store.Get(*configDescriptor); !ok {
+ getConfigDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", configDescriptor.Digest)
+ } else {
+ result.Config.Data = configData
+ var meta *chart.Metadata
+ if err := json.Unmarshal(configData, &meta); err != nil {
+ return nil, err
+ }
+ result.Chart.Meta = meta
+ }
+ if getConfigDescriptorErr != nil {
+ return nil, getConfigDescriptorErr
+ }
+ if operation.withChart {
+ var getChartDescriptorErr error
+ if _, chartData, ok := store.Get(*chartDescriptor); !ok {
+ getChartDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", chartDescriptor.Digest)
+ } else {
+ result.Chart.Data = chartData
+ result.Chart.Digest = chartDescriptor.Digest.String()
+ result.Chart.Size = chartDescriptor.Size
+ }
+ if getChartDescriptorErr != nil {
+ return nil, getChartDescriptorErr
+ }
+ }
+ if operation.withProv && !provMissing {
+ var getProvDescriptorErr error
+ if _, provData, ok := store.Get(*provDescriptor); !ok {
+ getProvDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", provDescriptor.Digest)
+ } else {
+ result.Prov.Data = provData
+ result.Prov.Digest = provDescriptor.Digest.String()
+ result.Prov.Size = provDescriptor.Size
+ }
+ if getProvDescriptorErr != nil {
+ return nil, getProvDescriptorErr
+ }
+ }
+ fmt.Fprintf(c.out, "Pulled: %s\n", result.Ref)
+ fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+ return result, nil
+}
+
+// PullOptWithChart returns a function that sets the withChart setting on pull
+func PullOptWithChart(withChart bool) PullOption {
+ return func(operation *pullOperation) {
+ operation.withChart = withChart
+ }
+}
+
+// PullOptWithProv returns a function that sets the withProv setting on pull
+func PullOptWithProv(withProv bool) PullOption {
+ return func(operation *pullOperation) {
+ operation.withProv = withProv
+ }
+}
+
+// PullOptIgnoreMissingProv returns a function that sets the ignoreMissingProv setting on pull
+func PullOptIgnoreMissingProv(ignoreMissingProv bool) PullOption {
+ return func(operation *pullOperation) {
+ operation.ignoreMissingProv = ignoreMissingProv
+ }
+}
+
+type (
+ // PushOption allows specifying various settings on push
+ PushOption func(*pushOperation)
+
+ // PushResult is the result returned upon successful push.
+ PushResult struct {
+ Manifest *descriptorPushSummary `json:"manifest"`
+ Config *descriptorPushSummary `json:"config"`
+ Chart *descriptorPushSummaryWithMeta `json:"chart"`
+ Prov *descriptorPushSummary `json:"prov"`
+ Ref string `json:"ref"`
+ }
+
+ descriptorPushSummary struct {
+ Digest string `json:"digest"`
+ Size int64 `json:"size"`
+ }
+
+ descriptorPushSummaryWithMeta struct {
+ descriptorPushSummary
+ Meta *chart.Metadata `json:"meta"`
+ }
+
+ pushOperation struct {
+ provData []byte
+ strictMode bool
+ }
+)
+
+// Push uploads a chart to a registry.
+func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResult, error) {
+ operation := &pushOperation{
+ strictMode: true, // By default, enable strict mode
+ }
+ for _, option := range options {
+ option(operation)
+ }
+ meta, err := extractChartMeta(data)
+ if err != nil {
+ return nil, err
+ }
+ if operation.strictMode {
+ if !strings.HasSuffix(ref, fmt.Sprintf("/%s:%s", meta.Name, meta.Version)) {
+ return nil, errors.New(
+ "strict mode enabled, ref basename and tag must match the chart name and version")
+ }
+ }
+ store := content.NewMemoryStore()
+ chartDescriptor := store.Add("", ChartLayerMediaType, data)
+ configData, err := json.Marshal(meta)
+ if err != nil {
+ return nil, err
+ }
+ configDescriptor := store.Add("", ConfigMediaType, configData)
+ descriptors := []ocispec.Descriptor{chartDescriptor}
+ var provDescriptor ocispec.Descriptor
+ if operation.provData != nil {
+ provDescriptor = store.Add("", ProvLayerMediaType, operation.provData)
+ descriptors = append(descriptors, provDescriptor)
+ }
+ manifest, err := oras.Push(ctx(c.out, c.debug), c.resolver, ref, store, descriptors,
+ oras.WithConfig(configDescriptor), oras.WithNameValidation(nil))
+ if err != nil {
+ return nil, err
+ }
+ chartSummary := &descriptorPushSummaryWithMeta{
+ Meta: meta,
+ }
+ chartSummary.Digest = chartDescriptor.Digest.String()
+ chartSummary.Size = chartDescriptor.Size
+ result := &PushResult{
+ Manifest: &descriptorPushSummary{
+ Digest: manifest.Digest.String(),
+ Size: manifest.Size,
+ },
+ Config: &descriptorPushSummary{
+ Digest: configDescriptor.Digest.String(),
+ Size: configDescriptor.Size,
+ },
+ Chart: chartSummary,
+ Prov: &descriptorPushSummary{}, // prevent nil references
+ Ref: ref,
+ }
+ if operation.provData != nil {
+ result.Prov = &descriptorPushSummary{
+ Digest: provDescriptor.Digest.String(),
+ Size: provDescriptor.Size,
+ }
+ }
+ fmt.Fprintf(c.out, "Pushed: %s\n", result.Ref)
+ fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+ return result, err
+}
+
+// PushOptProvData returns a function that sets the prov bytes setting on push
+func PushOptProvData(provData []byte) PushOption {
+ return func(operation *pushOperation) {
+ operation.provData = provData
+ }
+}
+
+// PushOptStrictMode returns a function that sets the strictMode setting on push
+func PushOptStrictMode(strictMode bool) PushOption {
+ return func(operation *pushOperation) {
+ operation.strictMode = strictMode
+ }
+}
diff --git a/vendor/helm.sh/helm/v3/internal/experimental/registry/constants.go b/vendor/helm.sh/helm/v3/internal/experimental/registry/constants.go
new file mode 100644
index 000000000..876e4dc13
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/experimental/registry/constants.go
@@ -0,0 +1,37 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v3/internal/experimental/registry"
+
+const (
+ // OCIScheme is the URL scheme for OCI-based requests
+ OCIScheme = "oci"
+
+ // CredentialsFileBasename is the filename for auth credentials file
+ CredentialsFileBasename = "registry.json"
+
+ // ConfigMediaType is the reserved media type for the Helm chart manifest config
+ ConfigMediaType = "application/vnd.cncf.helm.config.v1+json"
+
+ // ChartLayerMediaType is the reserved media type for Helm chart package content
+ ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
+
+ // ProvLayerMediaType is the reserved media type for Helm chart provenance files
+ ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov"
+
+ // LegacyChartLayerMediaType is the legacy reserved media type for Helm chart package content.
+ LegacyChartLayerMediaType = "application/tar+gzip"
+)
diff --git a/vendor/helm.sh/helm/v3/internal/experimental/registry/util.go b/vendor/helm.sh/helm/v3/internal/experimental/registry/util.go
new file mode 100644
index 000000000..257e7af87
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/experimental/registry/util.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v3/internal/experimental/registry"
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ orascontext "oras.land/oras-go/pkg/context"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// IsOCI determines whether or not a URL is to be treated as an OCI URL
+func IsOCI(url string) bool {
+ return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme))
+}
+
+// extractChartMeta is used to extract a chart metadata from a byte array
+func extractChartMeta(chartData []byte) (*chart.Metadata, error) {
+ ch, err := loader.LoadArchive(bytes.NewReader(chartData))
+ if err != nil {
+ return nil, err
+ }
+ return ch.Metadata, nil
+}
+
+// ctx retrieves a fresh context.
+// disable verbose logging coming from ORAS (unless debug is enabled)
+func ctx(out io.Writer, debug bool) context.Context {
+ if !debug {
+ return orascontext.Background()
+ }
+ ctx := orascontext.WithLoggerFromWriter(context.Background(), out)
+ orascontext.GetLogger(ctx).Logger.SetLevel(logrus.DebugLevel)
+ return ctx
+}
diff --git a/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go b/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go
new file mode 100644
index 000000000..739093f3b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileutil
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v3/internal/third_party/dep/fs"
+)
+
+// AtomicWriteFile atomically (as atomic as os.Rename allows) writes a file to a
+// disk.
+func AtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error {
+ tempFile, err := ioutil.TempFile(filepath.Split(filename))
+ if err != nil {
+ return err
+ }
+ tempName := tempFile.Name()
+
+ if _, err := io.Copy(tempFile, reader); err != nil {
+ tempFile.Close() // return value is ignored as we are already on error path
+ return err
+ }
+
+ if err := tempFile.Close(); err != nil {
+ return err
+ }
+
+ if err := os.Chmod(tempName, mode); err != nil {
+ return err
+ }
+
+ return fs.RenameWithFallback(tempName, filename)
+}
diff --git a/vendor/helm.sh/helm/v3/internal/ignore/doc.go b/vendor/helm.sh/helm/v3/internal/ignore/doc.go
new file mode 100644
index 000000000..e6a6a6c7b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/ignore/doc.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package ignore provides tools for writing ignore files (a la .gitignore).
+
+This provides both an ignore parser and a file-aware processor.
+
+The format of ignore files closely follows, but does not exactly match, the
+format for .gitignore files (https://git-scm.com/docs/gitignore).
+
+The formatting rules are as follows:
+
+ - Parsing is line-by-line
+ - Empty lines are ignored
+ - Lines the begin with # (comments) will be ignored
+ - Leading and trailing spaces are always ignored
+ - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment)
+ - There is no support for multi-line patterns
+ - Shell glob patterns are supported. See Go's "path/filepath".Match
+ - If a pattern begins with a leading !, the match will be negated.
+ - If a pattern begins with a leading /, only paths relatively rooted will match.
+ - If the pattern ends with a trailing /, only directories will match
+ - If a pattern contains no slashes, file basenames are tested (not paths)
+ - The pattern sequence "**", while legal in a glob, will cause an error here
+ (to indicate incompatibility with .gitignore).
+
+Example:
+
+ # Match any file named foo.txt
+ foo.txt
+
+ # Match any text file
+ *.txt
+
+ # Match only directories named mydir
+ mydir/
+
+ # Match only text files in the top-level directory
+ /*.txt
+
+ # Match only the file foo.txt in the top-level directory
+ /foo.txt
+
+ # Match any file named ab.txt, ac.txt, or ad.txt
+ a[b-d].txt
+
+Notable differences from .gitignore:
+ - The '**' syntax is not supported.
+ - The globbing library is Go's 'filepath.Match', not fnmatch(3)
+ - Trailing spaces are always ignored (there is no supported escape sequence)
+ - The evaluation of escape sequences has not been tested for compatibility
+ - There is no support for '\!' as a special leading sequence.
+*/
+package ignore // import "helm.sh/helm/v3/internal/ignore"
diff --git a/vendor/helm.sh/helm/v3/internal/ignore/rules.go b/vendor/helm.sh/helm/v3/internal/ignore/rules.go
new file mode 100644
index 000000000..a80923baf
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/ignore/rules.go
@@ -0,0 +1,228 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ignore
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// HelmIgnore default name of an ignorefile.
+const HelmIgnore = ".helmignore"
+
+// Rules is a collection of path matching rules.
+//
+// Parse() and ParseFile() will construct and populate new Rules.
+// Empty() will create an immutable empty ruleset.
+type Rules struct {
+ patterns []*pattern
+}
+
+// Empty builds an empty ruleset.
+func Empty() *Rules {
+ return &Rules{patterns: []*pattern{}}
+}
+
+// AddDefaults adds default ignore patterns.
+//
+// Ignore all dotfiles in "templates/"
+func (r *Rules) AddDefaults() {
+ r.parseRule(`templates/.?*`)
+}
+
+// ParseFile parses a helmignore file and returns the *Rules.
+func ParseFile(file string) (*Rules, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return Parse(f)
+}
+
+// Parse parses a rules file
+func Parse(file io.Reader) (*Rules, error) {
+ r := &Rules{patterns: []*pattern{}}
+
+ s := bufio.NewScanner(file)
+ currentLine := 0
+ utf8bom := []byte{0xEF, 0xBB, 0xBF}
+ for s.Scan() {
+ scannedBytes := s.Bytes()
+ // We trim UTF8 BOM
+ if currentLine == 0 {
+ scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
+ }
+ line := string(scannedBytes)
+ currentLine++
+
+ if err := r.parseRule(line); err != nil {
+ return r, err
+ }
+ }
+ return r, s.Err()
+}
+
+// Ignore evaluates the file at the given path, and returns true if it should be ignored.
+//
+// Ignore evaluates path against the rules in order. Evaluation stops when a match
+// is found. Matching a negative rule will stop evaluation.
+func (r *Rules) Ignore(path string, fi os.FileInfo) bool {
+ // Don't match on empty dirs.
+ if path == "" {
+ return false
+ }
+
+ // Disallow ignoring the current working directory.
+ // See issue:
+ // 1776 (New York City) Hamilton: "Pardon me, are you Aaron Burr, sir?"
+ if path == "." || path == "./" {
+ return false
+ }
+ for _, p := range r.patterns {
+ if p.match == nil {
+ log.Printf("ignore: no matcher supplied for %q", p.raw)
+ return false
+ }
+
+ // For negative rules, we need to capture and return non-matches,
+ // and continue for matches.
+ if p.negate {
+ if p.mustDir && !fi.IsDir() {
+ return true
+ }
+ if !p.match(path, fi) {
+ return true
+ }
+ continue
+ }
+
+ // If the rule is looking for directories, and this is not a directory,
+ // skip it.
+ if p.mustDir && !fi.IsDir() {
+ continue
+ }
+ if p.match(path, fi) {
+ return true
+ }
+ }
+ return false
+}
+
+// parseRule parses a rule string and creates a pattern, which is then stored in the Rules object.
+func (r *Rules) parseRule(rule string) error {
+ rule = strings.TrimSpace(rule)
+
+ // Ignore blank lines
+ if rule == "" {
+ return nil
+ }
+ // Comment
+ if strings.HasPrefix(rule, "#") {
+ return nil
+ }
+
+ // Fail any rules that contain **
+ if strings.Contains(rule, "**") {
+ return errors.New("double-star (**) syntax is not supported")
+ }
+
+ // Fail any patterns that can't compile. A non-empty string must be
+ // given to Match() to avoid optimization that skips rule evaluation.
+ if _, err := filepath.Match(rule, "abc"); err != nil {
+ return err
+ }
+
+ p := &pattern{raw: rule}
+
+ // Negation is handled at a higher level, so strip the leading ! from the
+ // string.
+ if strings.HasPrefix(rule, "!") {
+ p.negate = true
+ rule = rule[1:]
+ }
+
+ // Directory verification is handled by a higher level, so the trailing /
+ // is removed from the rule. That way, a directory named "foo" matches,
+ // even if the supplied string does not contain a literal slash character.
+ if strings.HasSuffix(rule, "/") {
+ p.mustDir = true
+ rule = strings.TrimSuffix(rule, "/")
+ }
+
+ if strings.HasPrefix(rule, "/") {
+ // Require path matches the root path.
+ p.match = func(n string, fi os.FileInfo) bool {
+ rule = strings.TrimPrefix(rule, "/")
+ ok, err := filepath.Match(rule, n)
+ if err != nil {
+ log.Printf("Failed to compile %q: %s", rule, err)
+ return false
+ }
+ return ok
+ }
+ } else if strings.Contains(rule, "/") {
+ // require structural match.
+ p.match = func(n string, fi os.FileInfo) bool {
+ ok, err := filepath.Match(rule, n)
+ if err != nil {
+ log.Printf("Failed to compile %q: %s", rule, err)
+ return false
+ }
+ return ok
+ }
+ } else {
+ p.match = func(n string, fi os.FileInfo) bool {
+ // When there is no slash in the pattern, we evaluate ONLY the
+ // filename.
+ n = filepath.Base(n)
+ ok, err := filepath.Match(rule, n)
+ if err != nil {
+ log.Printf("Failed to compile %q: %s", rule, err)
+ return false
+ }
+ return ok
+ }
+ }
+
+ r.patterns = append(r.patterns, p)
+ return nil
+}
+
+// matcher is a function capable of computing a match.
+//
+// It returns true if the rule matches.
+type matcher func(name string, fi os.FileInfo) bool
+
+// pattern describes a pattern to be matched in a rule set.
+type pattern struct {
+ // raw is the unparsed string, with nothing stripped.
+ raw string
+ // match is the matcher function.
+ match matcher
+ // negate indicates that the rule's outcome should be negated.
+ negate bool
+ // mustDir indicates that the matched file must be a directory.
+ mustDir bool
+}
diff --git a/vendor/helm.sh/helm/v3/internal/resolver/resolver.go b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go
new file mode 100644
index 000000000..70ce6a55b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go
@@ -0,0 +1,235 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resolver
+
+import (
+ "bytes"
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/gates"
+ "helm.sh/helm/v3/pkg/helmpath"
+ "helm.sh/helm/v3/pkg/provenance"
+ "helm.sh/helm/v3/pkg/repo"
+)
+
+const FeatureGateOCI = gates.Gate("HELM_EXPERIMENTAL_OCI")
+
+// Resolver resolves dependencies from semantic version ranges to a particular version.
+type Resolver struct {
+ chartpath string
+ cachepath string
+}
+
+// New creates a new resolver for a given chart and a given helm home.
+func New(chartpath, cachepath string) *Resolver {
+ return &Resolver{
+ chartpath: chartpath,
+ cachepath: cachepath,
+ }
+}
+
+// Resolve resolves dependencies and returns a lock file with the resolution.
+func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) {
+
+ // Now we clone the dependencies, locking as we go.
+ locked := make([]*chart.Dependency, len(reqs))
+ missing := []string{}
+ for i, d := range reqs {
+ constraint, err := semver.NewConstraint(d.Version)
+ if err != nil {
+ return nil, errors.Wrapf(err, "dependency %q has an invalid version/constraint format", d.Name)
+ }
+
+ if d.Repository == "" {
+ // Local chart subfolder
+ if _, err := GetLocalPath(filepath.Join("charts", d.Name), r.chartpath); err != nil {
+ return nil, err
+ }
+
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: "",
+ Version: d.Version,
+ }
+ continue
+ }
+ if strings.HasPrefix(d.Repository, "file://") {
+
+ chartpath, err := GetLocalPath(d.Repository, r.chartpath)
+ if err != nil {
+ return nil, err
+ }
+
+ ch, err := loader.LoadDir(chartpath)
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := semver.NewVersion(ch.Metadata.Version)
+ if err != nil {
+ // Not a legit entry.
+ continue
+ }
+
+ if !constraint.Check(v) {
+ missing = append(missing, d.Name)
+ continue
+ }
+
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: d.Repository,
+ Version: ch.Metadata.Version,
+ }
+ continue
+ }
+
+ repoName := repoNames[d.Name]
+ // if the repository was not defined, but the dependency defines a repository url, bypass the cache
+ if repoName == "" && d.Repository != "" {
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: d.Repository,
+ Version: d.Version,
+ }
+ continue
+ }
+
+ var vs repo.ChartVersions
+ var version string
+ var ok bool
+ found := true
+ if !registry.IsOCI(d.Repository) {
+ repoIndex, err := repo.LoadIndexFile(filepath.Join(r.cachepath, helmpath.CacheIndexFile(repoName)))
+ if err != nil {
+ return nil, errors.Wrapf(err, "no cached repository for %s found. (try 'helm repo update')", repoName)
+ }
+
+ vs, ok = repoIndex.Entries[d.Name]
+ if !ok {
+ return nil, errors.Errorf("%s chart not found in repo %s", d.Name, d.Repository)
+ }
+ found = false
+ } else {
+ version = d.Version
+ if !FeatureGateOCI.IsEnabled() {
+ return nil, errors.Wrapf(FeatureGateOCI.Error(),
+ "repository %s is an OCI registry", d.Repository)
+ }
+ }
+
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: d.Repository,
+ Version: version,
+ }
+ // The version are already sorted and hence the first one to satisfy the constraint is used
+ for _, ver := range vs {
+ v, err := semver.NewVersion(ver.Version)
+ if err != nil || len(ver.URLs) == 0 {
+ // Not a legit entry.
+ continue
+ }
+ if constraint.Check(v) {
+ found = true
+ locked[i].Version = v.Original()
+ break
+ }
+ }
+
+ if !found {
+ missing = append(missing, d.Name)
+ }
+ }
+ if len(missing) > 0 {
+ return nil, errors.Errorf("can't get a valid version for repositories %s. Try changing the version constraint in Chart.yaml", strings.Join(missing, ", "))
+ }
+
+ digest, err := HashReq(reqs, locked)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chart.Lock{
+ Generated: time.Now(),
+ Digest: digest,
+ Dependencies: locked,
+ }, nil
+}
+
+// HashReq generates a hash of the dependencies.
+//
+// This should be used only to compare against another hash generated by this
+// function.
+func HashReq(req, lock []*chart.Dependency) (string, error) {
+ data, err := json.Marshal([2][]*chart.Dependency{req, lock})
+ if err != nil {
+ return "", err
+ }
+ s, err := provenance.Digest(bytes.NewBuffer(data))
+ return "sha256:" + s, err
+}
+
+// HashV2Req generates a hash of requirements generated in Helm v2.
+//
+// This should be used only to compare against another hash generated by the
+// Helm v2 hash function. It is to handle issue:
+// https://github.com/helm/helm/issues/7233
+func HashV2Req(req []*chart.Dependency) (string, error) {
+ dep := make(map[string][]*chart.Dependency)
+ dep["dependencies"] = req
+ data, err := json.Marshal(dep)
+ if err != nil {
+ return "", err
+ }
+ s, err := provenance.Digest(bytes.NewBuffer(data))
+ return "sha256:" + s, err
+}
+
+// GetLocalPath generates absolute local path when use
+// "file://" in repository of dependencies
+func GetLocalPath(repo, chartpath string) (string, error) {
+ var depPath string
+ var err error
+ p := strings.TrimPrefix(repo, "file://")
+
+ // root path is absolute
+ if strings.HasPrefix(p, "/") {
+ if depPath, err = filepath.Abs(p); err != nil {
+ return "", err
+ }
+ } else {
+ depPath = filepath.Join(chartpath, p)
+ }
+
+ if _, err = os.Stat(depPath); os.IsNotExist(err) {
+ return "", errors.Errorf("directory %s not found", depPath)
+ } else if err != nil {
+ return "", err
+ }
+
+ return depPath, nil
+}
diff --git a/vendor/helm.sh/helm/v3/internal/sympath/walk.go b/vendor/helm.sh/helm/v3/internal/sympath/walk.go
new file mode 100644
index 000000000..752526fe9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/sympath/walk.go
@@ -0,0 +1,119 @@
+/*
+Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are
+provided under the BSD license.
+
+https://github.com/golang/go/blob/master/LICENSE
+
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sympath
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/pkg/errors"
+)
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or directory
+// in the tree, including root. All errors that arise visiting files and directories
+// are filtered by walkFn. The files are walked in lexical order, which makes the
+// output deterministic but means that for very large directories Walk can be
+// inefficient. Walk follows symbolic links.
+func Walk(root string, walkFn filepath.WalkFunc) error {
+ info, err := os.Lstat(root)
+ if err != nil {
+ err = walkFn(root, nil, err)
+ } else {
+ err = symwalk(root, info, walkFn)
+ }
+ if err == filepath.SkipDir {
+ return nil
+ }
+ return err
+}
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+func readDirNames(dirname string) ([]string, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// symwalk recursively descends path, calling walkFn.
+func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ // Recursively walk symlinked directories.
+ if IsSymlink(info) {
+ resolved, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating symlink %s", path)
+ }
+ log.Printf("found symbolic link in path: %s resolves to %s", path, resolved)
+ if info, err = os.Lstat(resolved); err != nil {
+ return err
+ }
+ if err := symwalk(path, info, walkFn); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ return nil
+ }
+
+ if err := walkFn(path, info, nil); err != nil {
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := os.Lstat(filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = symwalk(filename, fileInfo, walkFn)
+ if err != nil {
+ if (!fileInfo.IsDir() && !IsSymlink(fileInfo)) || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// IsSymlink is used to determine if the fileinfo is a symbolic link.
+func IsSymlink(fi os.FileInfo) bool {
+ return fi.Mode()&os.ModeSymlink != 0
+}
diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go
new file mode 100644
index 000000000..4e4eacc60
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go
@@ -0,0 +1,372 @@
+/*
+Copyright (c) for portions of fs.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep.
+// This code is copied from https://github.com/golang/dep/blob/37d6c560cdf407be7b6cd035b23dba89df9275cf/internal/fs/fs.go
+// No changes to the code were made other than removing some unused functions
+
+// RenameWithFallback attempts to rename a file or directory, but falls back to
+// copying in the event of a cross-device link error. If the fallback copy
+// succeeds, src is still removed, emulating normal rename behavior.
+func RenameWithFallback(src, dst string) error {
+ _, err := os.Stat(src)
+ if err != nil {
+ return errors.Wrapf(err, "cannot stat %s", src)
+ }
+
+ err = os.Rename(src, dst)
+ if err == nil {
+ return nil
+ }
+
+ return renameFallback(err, src, dst)
+}
+
+// renameByCopy attempts to rename a file or directory by copying it to the
+// destination and then removing the src thus emulating the rename behavior.
+func renameByCopy(src, dst string) error {
+ var cerr error
+ if dir, _ := IsDir(src); dir {
+ cerr = CopyDir(src, dst)
+ if cerr != nil {
+ cerr = errors.Wrap(cerr, "copying directory failed")
+ }
+ } else {
+ cerr = copyFile(src, dst)
+ if cerr != nil {
+ cerr = errors.Wrap(cerr, "copying file failed")
+ }
+ }
+
+ if cerr != nil {
+ return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst)
+ }
+
+ return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src)
+}
+
+var (
+ errSrcNotDir = errors.New("source is not a directory")
+ errDstExist = errors.New("destination already exists")
+)
+
+// CopyDir recursively copies a directory tree, attempting to preserve permissions.
+// Source directory must exist, destination directory must *not* exist.
+func CopyDir(src, dst string) error {
+ src = filepath.Clean(src)
+ dst = filepath.Clean(dst)
+
+ // We use os.Lstat() here to ensure we don't fall in a loop where a symlink
+ // actually links to a one of its parent directories.
+ fi, err := os.Lstat(src)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errSrcNotDir
+ }
+
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ return errDstExist
+ }
+
+ if err = os.MkdirAll(dst, fi.Mode()); err != nil {
+ return errors.Wrapf(err, "cannot mkdir %s", dst)
+ }
+
+ entries, err := os.ReadDir(src)
+ if err != nil {
+ return errors.Wrapf(err, "cannot read directory %s", dst)
+ }
+
+ for _, entry := range entries {
+ srcPath := filepath.Join(src, entry.Name())
+ dstPath := filepath.Join(dst, entry.Name())
+
+ if entry.IsDir() {
+ if err = CopyDir(srcPath, dstPath); err != nil {
+ return errors.Wrap(err, "copying directory failed")
+ }
+ } else {
+ // This will include symlinks, which is what we want when
+ // copying things.
+ if err = copyFile(srcPath, dstPath); err != nil {
+ return errors.Wrap(err, "copying file failed")
+ }
+ }
+ }
+
+ return nil
+}
+
+// copyFile copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file. The file mode will be copied from the source.
+func copyFile(src, dst string) (err error) {
+ if sym, err := IsSymlink(src); err != nil {
+ return errors.Wrap(err, "symlink check failed")
+ } else if sym {
+ if err := cloneSymlink(src, dst); err != nil {
+ if runtime.GOOS == "windows" {
+ // If cloning the symlink fails on Windows because the user
+ // does not have the required privileges, ignore the error and
+ // fall back to copying the file contents.
+ //
+ // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522):
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx
+ if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) {
+ return err
+ }
+ } else {
+ return err
+ }
+ } else {
+ return nil
+ }
+ }
+
+ in, err := os.Open(src)
+ if err != nil {
+ return
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return
+ }
+
+ if _, err = io.Copy(out, in); err != nil {
+ out.Close()
+ return
+ }
+
+ // Check for write errors on Close
+ if err = out.Close(); err != nil {
+ return
+ }
+
+ si, err := os.Stat(src)
+ if err != nil {
+ return
+ }
+
+ // Temporary fix for Go < 1.9
+ //
+ // See: https://github.com/golang/dep/issues/774
+ // and https://github.com/golang/go/issues/20829
+ if runtime.GOOS == "windows" {
+ dst = fixLongPath(dst)
+ }
+ err = os.Chmod(dst, si.Mode())
+
+ return
+}
+
+// cloneSymlink will create a new symlink that points to the resolved path of sl.
+// If sl is a relative symlink, dst will also be a relative symlink.
+func cloneSymlink(sl, dst string) error {
+ resolved, err := os.Readlink(sl)
+ if err != nil {
+ return err
+ }
+
+ return os.Symlink(resolved, dst)
+}
+
+// IsDir determines is the path given is a directory or not.
+func IsDir(name string) (bool, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return false, err
+ }
+ if !fi.IsDir() {
+ return false, errors.Errorf("%q is not a directory", name)
+ }
+ return true, nil
+}
+
+// IsSymlink determines if the given path is a symbolic link.
+func IsSymlink(path string) (bool, error) {
+ l, err := os.Lstat(path)
+ if err != nil {
+ return false, err
+ }
+
+ return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+ // Do nothing (and don't allocate) if the path is "short".
+ // Empirically (at least on the Windows Server 2013 builder),
+ // the kernel is arbitrarily okay with < 248 bytes. That
+ // matches what the docs above say:
+ // "When using an API to create a directory, the specified
+ // path cannot be so long that you cannot append an 8.3 file
+ // name (that is, the directory name cannot exceed MAX_PATH
+ // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+ //
+ // The MSDN docs appear to say that a normal path that is 248 bytes long
+ // will work; empirically the path must be less then 248 bytes long.
+ if len(path) < 248 {
+ // Don't fix. (This is how Go 1.7 and earlier worked,
+ // not automatically generating the \\?\ form)
+ return path
+ }
+
+ // The extended form begins with \\?\, as in
+ // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+ // The extended form disables evaluation of . and .. path
+ // elements and disables the interpretation of / as equivalent
+ // to \. The conversion here rewrites / to \ and elides
+ // . elements as well as trailing or duplicate separators. For
+ // simplicity it avoids the conversion entirely for relative
+ // paths or paths containing .. elements. For now,
+ // \\server\share paths are not converted to
+ // \\?\UNC\server\share paths because the rules for doing so
+ // are less well-specified.
+ if len(path) >= 2 && path[:2] == `\\` {
+ // Don't canonicalize UNC paths.
+ return path
+ }
+ if !isAbs(path) {
+ // Relative path
+ return path
+ }
+
+ const prefix = `\\?`
+
+ pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
+ copy(pathbuf, prefix)
+ n := len(path)
+ r, w := 0, len(prefix)
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty block
+ r++
+ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // /./
+ r++
+ case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // /../ is currently unhandled
+ return path
+ default:
+ pathbuf[w] = '\\'
+ w++
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ pathbuf[w] = path[r]
+ w++
+ }
+ }
+ }
+ // A drive's root directory needs a trailing \
+ if w == len(`\\?\c:`) {
+ pathbuf[w] = '\\'
+ w++
+ }
+ return string(pathbuf[:w])
+}
+
+func isAbs(path string) (b bool) {
+ v := volumeName(path)
+ if v == "" {
+ return false
+ }
+ path = path[len(v):]
+ if path == "" {
+ return false
+ }
+ return os.IsPathSeparator(path[0])
+}
+
+func volumeName(path string) (v string) {
+ if len(path) < 2 {
+ return ""
+ }
+ // with drive letter
+ c := path[0]
+ if path[1] == ':' &&
+ ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' ||
+ 'A' <= c && c <= 'Z') {
+ return path[:2]
+ }
+ // is it UNC
+ if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) &&
+ !os.IsPathSeparator(path[2]) && path[2] != '.' {
+ // first, leading `\\` and next shouldn't be `\`. its server name.
+ for n := 3; n < l-1; n++ {
+ // second, next '\' shouldn't be repeated.
+ if os.IsPathSeparator(path[n]) {
+ n++
+ // third, following something characters. its share name.
+ if !os.IsPathSeparator(path[n]) {
+ if path[n] == '.' {
+ break
+ }
+ for ; n < l; n++ {
+ if os.IsPathSeparator(path[n]) {
+ break
+ }
+ }
+ return path[:n]
+ }
+ break
+ }
+ }
+ }
+ return ""
+}
diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go
new file mode 100644
index 000000000..0bb600949
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+/*
+Copyright (c) for portions of rename.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+// renameFallback attempts to determine the appropriate fallback to failed rename
+// operation depending on the resulting error.
+// Only a cross-device link failure (*os.LinkError wrapping syscall.EXDEV)
+// is recoverable, by copying; any other error is returned to the caller.
+func renameFallback(err error, src, dst string) error {
+	// Rename may fail if src and dst are on different devices; fall back to
+	// copy if we detect that case. syscall.EXDEV is the common name for the
+	// cross device link error which has varying output text across different
+	// operating systems.
+	terr, ok := err.(*os.LinkError)
+	if !ok {
+		return err
+	} else if terr.Err != syscall.EXDEV {
+		return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst)
+	}
+
+	return renameByCopy(src, dst)
+}
diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go
new file mode 100644
index 000000000..14f017d09
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go
@@ -0,0 +1,69 @@
+// +build windows
+
+/*
+Copyright (c) for portions of rename_windows.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+// renameFallback attempts to determine the appropriate fallback to failed rename
+// operation depending on the resulting error.
+// On Windows the cross-device condition can also surface as the raw
+// ERROR_NOT_SAME_DEVICE errno (0x11) rather than syscall.EXDEV.
+func renameFallback(err error, src, dst string) error {
+	// Rename may fail if src and dst are on different devices; fall back to
+	// copy if we detect that case. syscall.EXDEV is the common name for the
+	// cross device link error which has varying output text across different
+	// operating systems.
+	terr, ok := err.(*os.LinkError)
+	if !ok {
+		return err
+	}
+
+	if terr.Err != syscall.EXDEV {
+		// In windows it can drop down to an operating system call that
+		// returns an operating system error with a different number and
+		// message. Checking for that as a fall back.
+		noerr, ok := terr.Err.(syscall.Errno)
+
+		// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
+		// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
+		if ok && noerr != 0x11 {
+			return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst)
+		}
+	}
+
+	return renameByCopy(src, dst)
+}
diff --git a/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go
new file mode 100644
index 000000000..103db35c4
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "context"
+ "sort"
+
+ apps "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/core/v1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ intstrutil "k8s.io/apimachinery/pkg/util/intstr"
+ appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
+)
+
+// deploymentutil contains a copy of a few functions from Kubernetes controller code to avoid a dependency on k8s.io/kubernetes.
+// This code is copied from https://github.com/kubernetes/kubernetes/blob/e856613dd5bb00bcfaca6974431151b5c06cbed5/pkg/controller/deployment/util/deployment_util.go
+// No changes to the code were made other than removing some unused functions
+
+// RsListFunc returns the ReplicaSet from the ReplicaSet namespace and the List metav1.ListOptions.
+type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
+
+// ListReplicaSets returns a slice of RSes the given deployment targets.
+// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
+// because only the controller itself should do that.
+// However, it does filter out anything whose ControllerRef doesn't match.
+// The relative order produced by getRSList is preserved in the result.
+func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
+	// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
+	// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
+	namespace := deployment.Namespace
+	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
+	if err != nil {
+		return nil, err
+	}
+	options := metav1.ListOptions{LabelSelector: selector.String()}
+	all, err := getRSList(namespace, options)
+	if err != nil {
+		return nil, err
+	}
+	// Only include those whose ControllerRef matches the Deployment.
+	owned := make([]*apps.ReplicaSet, 0, len(all))
+	for _, rs := range all {
+		if metav1.IsControlledBy(rs, deployment) {
+			owned = append(owned, rs)
+		}
+	}
+	return owned, nil
+}
+
+// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
+type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet
+
+// Len implements sort.Interface.
+func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
+// Swap implements sort.Interface.
+func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+// Less orders by CreationTimestamp ascending, falling back to Name on ties.
+func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
+	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
+		return o[i].Name < o[j].Name
+	}
+	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
+}
+
+// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
+// Note: rsList is sorted in place (by creation timestamp) as a side effect.
+func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
+	sort.Sort(ReplicaSetsByCreationTimestamp(rsList))
+	for i := range rsList {
+		if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
+			// In rare cases, such as after cluster upgrades, Deployment may end up with
+			// having more than one new ReplicaSets that have the same template as its template,
+			// see https://github.com/kubernetes/kubernetes/issues/40415
+			// We deterministically choose the oldest new ReplicaSet.
+			return rsList[i]
+		}
+	}
+	// new ReplicaSet does not exist.
+	return nil
+}
+
+// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
+// We ignore pod-template-hash because:
+// 1. The hash result would be different upon podTemplateSpec API changes
+//    (e.g. the addition of a new field will cause the hash code to change)
+// 2. The deployment template won't have hash labels
+func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
+	// DeepCopy so the callers' templates are never mutated by the label delete.
+	t1Copy := template1.DeepCopy()
+	t2Copy := template2.DeepCopy()
+	// Remove hash labels from template.Labels before comparing
+	delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+	delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
+}
+
+// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
+// Returns nil if the new replica set doesn't exist yet.
+// A (nil, nil) result therefore means "not found", not an error.
+func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) {
+	rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
+	if err != nil {
+		return nil, err
+	}
+	return FindNewReplicaSet(deployment, rsList), nil
+}
+
+// RsListFromClient returns an rsListFunc that wraps the given client.
+func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
+	return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
+		rsList, err := c.ReplicaSets(namespace).List(context.Background(), options)
+		if err != nil {
+			return nil, err
+		}
+		// Convert the listed items to a slice of pointers, indexing into
+		// rsList.Items so each pointer refers to the stored element.
+		var ret []*apps.ReplicaSet
+		for i := range rsList.Items {
+			ret = append(ret, &rsList.Items[i])
+		}
+		return ret, err
+	}
+}
+
+// IsRollingUpdate returns true if the deployment's strategy type is RollingUpdate.
+func IsRollingUpdate(deployment *apps.Deployment) bool {
+	return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
+}
+
+// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
+// NOTE(review): Spec.Replicas and Strategy.RollingUpdate are dereferenced
+// without nil checks — callers rely on API defaulting/validation having
+// populated them. Confirm for any non-API-server-sourced Deployment.
+func MaxUnavailable(deployment apps.Deployment) int32 {
+	if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
+		return int32(0)
+	}
+	// Error caught by validation
+	_, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
+	// Cap at the desired replica count.
+	if maxUnavailable > *deployment.Spec.Replicas {
+		return *deployment.Spec.Replicas
+	}
+	return maxUnavailable
+}
+
+// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
+// step. For example:
+//
+// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
+// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
+// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
+// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
+//
+// Percentages are resolved against desired: maxSurge rounds up and
+// maxUnavailable rounds down (the boolean passed to GetValueFromIntOrPercent).
+func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
+	surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
+	if err != nil {
+		return 0, 0, err
+	}
+	unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	if surge == 0 && unavailable == 0 {
+		// Validation should never allow the user to explicitly use zero values for both maxSurge
+		// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
+		// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
+		// theory that surge might not work due to quota.
+		unavailable = 1
+	}
+
+	return int32(surge), int32(unavailable), nil
+}
diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go b/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go
new file mode 100644
index 000000000..8b9d4329f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "os"
+
+ "github.com/pkg/errors"
+)
+
+// Options represents configurable options used to create client and server TLS configurations.
+type Options struct {
+	// CaCertFile is the path to a PEM-encoded CA bundle used to verify the peer.
+	CaCertFile string
+	// If either the KeyFile or CertFile is empty, ClientConfig() will not load them.
+	KeyFile  string
+	CertFile string
+	// Client-only options
+	InsecureSkipVerify bool
+}
+
+// ClientConfig returns a TLS configuration for use by a Helm client.
+//
+// A client certificate is loaded only when CertFile or KeyFile is set;
+// a CA pool is loaded only when verification is enabled and CaCertFile
+// is set. Either may therefore be absent from the returned config.
+func ClientConfig(opts Options) (cfg *tls.Config, err error) {
+	var cert *tls.Certificate
+	var pool *x509.CertPool
+
+	if opts.CertFile != "" || opts.KeyFile != "" {
+		if cert, err = CertFromFilePair(opts.CertFile, opts.KeyFile); err != nil {
+			if os.IsNotExist(err) {
+				return nil, errors.Wrapf(err, "could not load x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile)
+			}
+			return nil, errors.Wrapf(err, "could not read x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile)
+		}
+	}
+	if !opts.InsecureSkipVerify && opts.CaCertFile != "" {
+		if pool, err = CertPoolFromFile(opts.CaCertFile); err != nil {
+			return nil, err
+		}
+	}
+
+	cfg = &tls.Config{InsecureSkipVerify: opts.InsecureSkipVerify, RootCAs: pool}
+	// BUG FIX: the original unconditionally dereferenced cert
+	// (Certificates: []tls.Certificate{*cert}), which panics with a nil
+	// pointer dereference when neither CertFile nor KeyFile is provided.
+	// Only attach a client certificate when one was actually loaded.
+	if cert != nil {
+		cfg.Certificates = []tls.Certificate{*cert}
+	}
+	return cfg, nil
+}
diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
new file mode 100644
index 000000000..ed7795dbe
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "io/ioutil"
+
+ "github.com/pkg/errors"
+)
+
+// NewClientTLS returns tls.Config appropriate for client auth.
+// A client certificate is loaded only when both certFile and keyFile are
+// non-empty; a root CA pool only when caFile is non-empty. With all three
+// empty the zero tls.Config is returned (system roots, no client cert).
+func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
+	config := tls.Config{}
+
+	if certFile != "" && keyFile != "" {
+		cert, err := CertFromFilePair(certFile, keyFile)
+		if err != nil {
+			return nil, err
+		}
+		config.Certificates = []tls.Certificate{*cert}
+	}
+
+	if caFile != "" {
+		cp, err := CertPoolFromFile(caFile)
+		if err != nil {
+			return nil, err
+		}
+		config.RootCAs = cp
+	}
+
+	return &config, nil
+}
+
+// CertPoolFromFile returns an x509.CertPool containing the certificates
+// in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not
+// be parsed, or if the file does not contain any certificates
+func CertPoolFromFile(filename string) (*x509.CertPool, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		// NOTE(review): the underlying read error is discarded here; only
+		// the filename makes it into the returned error.
+		return nil, errors.Errorf("can't read CA file: %v", filename)
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, errors.Errorf("failed to append certificates from file: %s", filename)
+	}
+	return cp, nil
+}
+
+// CertFromFilePair returns an tls.Certificate containing the
+// certificates public/private key pair from a pair of given PEM-encoded files.
+// Returns an error if the file could not be read, a certificate could not
+// be parsed, or if the file does not contain any certificates
+func CertFromFilePair(certFile, keyFile string) (*tls.Certificate, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, errors.Wrapf(err, "can't load key pair from cert %s and key %s", certFile, keyFile)
+	}
+	// err is nil at this point; it is returned only for signature symmetry.
+	return &cert, err
+}
diff --git a/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go b/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go
new file mode 100644
index 000000000..a8cf7398c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go
@@ -0,0 +1,73 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package urlutil
+
+import (
+ "net/url"
+ "path"
+ "path/filepath"
+)
+
+// URLJoin joins a base URL to one or more path components.
+//
+// It's like filepath.Join for URLs. If the baseURL is pathish, this will still
+// perform a join.
+//
+// If the URL is unparsable, this returns an error.
+// Note that path.Join also cleans the result: duplicate slashes collapse
+// and any trailing slash is removed.
+func URLJoin(baseURL string, paths ...string) (string, error) {
+	u, err := url.Parse(baseURL)
+	if err != nil {
+		return "", err
+	}
+	// We want path instead of filepath because path always uses /.
+	all := []string{u.Path}
+	all = append(all, paths...)
+	u.Path = path.Join(all...)
+	return u.String(), nil
+}
+
+// Equal normalizes two URLs and then compares for equality.
+// NOTE(review): the path-comparison fallback runs only when a fails to
+// parse; if a parses but b does not, the result is false even when the two
+// strings would compare equal as cleaned paths.
+func Equal(a, b string) bool {
+	au, err := url.Parse(a)
+	if err != nil {
+		a = filepath.Clean(a)
+		b = filepath.Clean(b)
+		// If urls are paths, return true only if they are an exact match
+		return a == b
+	}
+	bu, err := url.Parse(b)
+	if err != nil {
+		return false
+	}
+
+	// Normalize both URLs: an empty path counts as "/", and paths are
+	// cleaned before the final string comparison.
+	for _, u := range []*url.URL{au, bu} {
+		if u.Path == "" {
+			u.Path = "/"
+		}
+		u.Path = filepath.Clean(u.Path)
+	}
+	return au.String() == bu.String()
+}
+
+// ExtractHostname returns hostname from URL.
+// url.URL.Hostname strips any port and the brackets of IPv6 literals.
+func ExtractHostname(addr string) (string, error) {
+	u, err := url.Parse(addr)
+	if err != nil {
+		return "", err
+	}
+	return u.Hostname(), nil
+}
diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go
new file mode 100644
index 000000000..201eee359
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/version/version.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version // import "helm.sh/helm/v3/internal/version"
+
+import (
+ "flag"
+ "runtime"
+ "strings"
+)
+
+var (
+	// version is the current version of Helm.
+	// Update this whenever making a new release.
+	// The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata]
+	//
+	// Increment major number for new feature additions and behavioral changes.
+	// Increment minor number for bug fixes and performance enhancements.
+	version = "v3.7"
+
+	// metadata is extra build time data
+	metadata = ""
+	// gitCommit is the git sha1
+	gitCommit = ""
+	// gitTreeState is the state of the git tree
+	gitTreeState = ""
+	// NOTE(review): metadata/gitCommit/gitTreeState appear intended to be
+	// injected at build time (e.g. via -ldflags) — confirm in the Makefile.
+)
+
+// BuildInfo describes the compile time information.
+// It is populated by Get from the package-level build variables.
+type BuildInfo struct {
+	// Version is the current semver.
+	Version string `json:"version,omitempty"`
+	// GitCommit is the git sha1.
+	GitCommit string `json:"git_commit,omitempty"`
+	// GitTreeState is the state of the git tree.
+	GitTreeState string `json:"git_tree_state,omitempty"`
+	// GoVersion is the version of the Go compiler used.
+	GoVersion string `json:"go_version,omitempty"`
+}
+
+// GetVersion returns the semver string of the version.
+// When metadata is set at build time it is appended as semver build
+// metadata ("version+metadata").
+func GetVersion() string {
+	if metadata == "" {
+		return version
+	}
+	return version + "+" + metadata
+}
+
+// GetUserAgent returns a user agent for user with an HTTP client,
+// e.g. "Helm/3.7" (the leading "v" is stripped from the version).
+func GetUserAgent() string {
+	return "Helm/" + strings.TrimPrefix(GetVersion(), "v")
+}
+
+// Get returns build info assembled from the package-level build variables
+// and the running Go runtime version.
+func Get() BuildInfo {
+	v := BuildInfo{
+		Version:      GetVersion(),
+		GitCommit:    gitCommit,
+		GitTreeState: gitTreeState,
+		GoVersion:    runtime.Version(),
+	}
+
+	// HACK(bacongobbler): strip out GoVersion during a test run for consistent test output
+	// (detected via the test.v flag that `go test` registers).
+	if flag.Lookup("test.v") != nil {
+		v.GoVersion = ""
+	}
+	return v
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/action.go b/vendor/helm.sh/helm/v3/pkg/action/action.go
new file mode 100644
index 000000000..f093ed7f8
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/action.go
@@ -0,0 +1,420 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/engine"
+ "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v3/pkg/postrender"
+ "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v3/pkg/releaseutil"
+ "helm.sh/helm/v3/pkg/storage"
+ "helm.sh/helm/v3/pkg/storage/driver"
+ "helm.sh/helm/v3/pkg/time"
+)
+
+// Timestamper is a function capable of producing a timestamp.Timestamper.
+//
+// By default, this is a time.Time function from the Helm time package. This can
+// be overridden for testing though, so that timestamps are predictable.
+// See Configuration.Now, which calls this.
+var Timestamper = time.Now
+
+// Sentinel errors shared by the actions in this package.
+var (
+	// errMissingChart indicates that a chart was not provided.
+	errMissingChart = errors.New("no chart provided")
+	// errMissingRelease indicates that a release (name) was not provided.
+	errMissingRelease = errors.New("no release provided")
+	// errInvalidRevision indicates that an invalid release revision number was provided.
+	errInvalidRevision = errors.New("invalid release revision")
+	// errPending indicates that another instance of Helm is already applying an operation on a release.
+	errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
+)
+
+// ValidName is a regular expression for resource names.
+//
+// Deprecated: This will be removed in Helm 4, and is no longer used here. See
+// pkg/lint/rules.validateMetadataNameFunc for the replacement.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial,
+// since the pattern is anchored with ^ and $).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+// Configuration injects the dependencies that all actions share.
+type Configuration struct {
+	// RESTClientGetter is an interface that loads Kubernetes clients.
+	RESTClientGetter RESTClientGetter
+
+	// Releases stores records of releases.
+	Releases *storage.Storage
+
+	// KubeClient is a Kubernetes API client.
+	KubeClient kube.Interface
+
+	// RegistryClient is a client for working with registries
+	RegistryClient *registry.Client
+
+	// Capabilities describes the capabilities of the Kubernetes cluster.
+	// It also serves as a cache: getCapabilities populates it on first use.
+	Capabilities *chartutil.Capabilities
+
+	// Log is the printf-style logging function used by the actions.
+	Log func(string, ...interface{})
+}
+
+// renderResources renders the templates in a chart
+//
+// TODO: This function is badly in need of a refactor.
+// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed
+// This code has to do with writing files to disk.
+//
+// It returns the chart's hooks, the aggregated manifest buffer (after the
+// post-renderer, if any), the rendered NOTES.txt text, and an error.
+// When outputDir is non-empty, manifests are written to files instead of
+// (only) being accumulated in the buffer.
+func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, dryRun bool) ([]*release.Hook, *bytes.Buffer, string, error) {
+	hs := []*release.Hook{}
+	b := bytes.NewBuffer(nil)
+
+	caps, err := cfg.getCapabilities()
+	if err != nil {
+		return hs, b, "", err
+	}
+
+	// Reject charts whose kubeVersion constraint excludes the target cluster.
+	if ch.Metadata.KubeVersion != "" {
+		if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) {
+			return hs, b, "", errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String())
+		}
+	}
+
+	// err2 keeps the render error distinct from the RESTConfig err above.
+	var files map[string]string
+	var err2 error
+
+	// A `helm template` or `helm install --dry-run` should not talk to the remote cluster.
+	// It will break in interesting and exotic ways because other data (e.g. discovery)
+	// is mocked. It is not up to the template author to decide when the user wants to
+	// connect to the cluster. So when the user says to dry run, respect the user's
+	// wishes and do not connect to the cluster.
+	if !dryRun && cfg.RESTClientGetter != nil {
+		restConfig, err := cfg.RESTClientGetter.ToRESTConfig()
+		if err != nil {
+			return hs, b, "", err
+		}
+		files, err2 = engine.RenderWithClient(ch, values, restConfig)
+	} else {
+		files, err2 = engine.Render(ch, values)
+	}
+
+	if err2 != nil {
+		return hs, b, "", err2
+	}
+
+	// NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource,
+	// pull it out of here into a separate file so that we can actually use the output of the rendered
+	// text file. We have to spin through this map because the file contains path information, so we
+	// look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip
+	// it in the sortHooks.
+	var notesBuffer bytes.Buffer
+	for k, v := range files {
+		if strings.HasSuffix(k, notesFileSuffix) {
+			if subNotes || (k == path.Join(ch.Name(), "templates", notesFileSuffix)) {
+				// If buffer contains data, add newline before adding more
+				if notesBuffer.Len() > 0 {
+					notesBuffer.WriteString("\n")
+				}
+				notesBuffer.WriteString(v)
+			}
+			delete(files, k)
+		}
+	}
+	notes := notesBuffer.String()
+
+	// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
+	// as partials are not used after renderer.Render. Empty manifests are also
+	// removed here.
+	hs, manifests, err := releaseutil.SortManifests(files, caps.APIVersions, releaseutil.InstallOrder)
+	if err != nil {
+		// By catching parse errors here, we can prevent bogus releases from going
+		// to Kubernetes.
+		//
+		// We return the files as a big blob of data to help the user debug parser
+		// errors.
+		for name, content := range files {
+			if strings.TrimSpace(content) == "" {
+				continue
+			}
+			fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content)
+		}
+		return hs, b, "", err
+	}
+
+	// Aggregate all valid manifests into one big doc.
+	// fileWritten tracks which output files have already been written so a
+	// second write to the same name is flagged — TODO confirm writeToFile's
+	// handling of that flag (presumably append vs. create).
+	fileWritten := make(map[string]bool)
+
+	if includeCrds {
+		for _, crd := range ch.CRDObjects() {
+			if outputDir == "" {
+				fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Name, string(crd.File.Data[:]))
+			} else {
+				err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Name])
+				if err != nil {
+					return hs, b, "", err
+				}
+				fileWritten[crd.Name] = true
+			}
+		}
+	}
+
+	for _, m := range manifests {
+		if outputDir == "" {
+			fmt.Fprintf(b, "---\n# Source: %s\n%s\n", m.Name, m.Content)
+		} else {
+			newDir := outputDir
+			if useReleaseName {
+				newDir = filepath.Join(outputDir, releaseName)
+			}
+			// NOTE: We do not have to worry about the post-renderer because
+			// output dir is only used by `helm template`. In the next major
+			// release, we should move this logic to template only as it is not
+			// used by install or upgrade
+			err = writeToFile(newDir, m.Name, m.Content, fileWritten[m.Name])
+			if err != nil {
+				return hs, b, "", err
+			}
+			fileWritten[m.Name] = true
+		}
+	}
+
+	if pr != nil {
+		b, err = pr.Run(b)
+		if err != nil {
+			return hs, b, notes, errors.Wrap(err, "error while running post render on files")
+		}
+	}
+
+	return hs, b, notes, nil
+}
+
+// RESTClientGetter gets the rest client.
+// NOTE(review): this looks like the subset of genericclioptions'
+// RESTClientGetter that Helm actions need — confirm against that package.
+type RESTClientGetter interface {
+	ToRESTConfig() (*rest.Config, error)
+	ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
+	ToRESTMapper() (meta.RESTMapper, error)
+}
+
+// DebugLog sets the logger that writes debug strings (printf-style).
+type DebugLog func(format string, v ...interface{})
+
+// getCapabilities builds a Capabilities from discovery information.
+// The result is cached on cfg.Capabilities, so discovery runs at most once
+// per Configuration.
+func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
+	if cfg.Capabilities != nil {
+		return cfg.Capabilities, nil
+	}
+	dc, err := cfg.RESTClientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get Kubernetes discovery client")
+	}
+	// force a discovery cache invalidation to always fetch the latest server version/capabilities.
+	dc.Invalidate()
+	kubeVersion, err := dc.ServerVersion()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get server version from Kubernetes")
+	}
+	// Issue #6361:
+	// Client-Go emits an error when an API service is registered but unimplemented.
+	// We trap that error here and print a warning. But since the discovery client continues
+	// building the API object, it is correctly populated with all valid APIs.
+	// See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642
+	apiVersions, err := GetVersionSet(dc)
+	if err != nil {
+		if discovery.IsGroupDiscoveryFailedError(err) {
+			cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err)
+			cfg.Log("WARNING: To fix this, kubectl delete apiservice <service-name>")
+		} else {
+			return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
+		}
+	}
+
+	cfg.Capabilities = &chartutil.Capabilities{
+		APIVersions: apiVersions,
+		KubeVersion: chartutil.KubeVersion{
+			Version: kubeVersion.GitVersion,
+			Major:   kubeVersion.Major,
+			Minor:   kubeVersion.Minor,
+		},
+	}
+	return cfg.Capabilities, nil
+}
+
+// KubernetesClientSet creates a new kubernetes ClientSet based on the
+// configuration's RESTClientGetter. A fresh client is built on every call.
+func (cfg *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
+	conf, err := cfg.RESTClientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
+	}
+
+	return kubernetes.NewForConfig(conf)
+}
+
+// Now generates a timestamp.
+//
+// NOTE(review): despite the receiver, the body calls the package-level
+// Timestamper hook (not visible in this chunk) rather than anything stored on
+// cfg — presumably Timestamper defaults to time.Now and is swapped in tests;
+// confirm against the package's declaration of Timestamper.
+func (cfg *Configuration) Now() time.Time {
+	return Timestamper()
+}
+
+// releaseContent fetches the stored release with the given name. A version
+// of zero (or any non-positive value) selects the latest revision; otherwise
+// the exact revision is loaded from release storage.
+func (cfg *Configuration) releaseContent(name string, version int) (*release.Release, error) {
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("releaseContent: Release name is invalid: %s", name)
+	}
+
+	if version <= 0 {
+		return cfg.Releases.Last(name)
+	}
+
+	return cfg.Releases.Get(name, version)
+}
+
+// GetVersionSet retrieves a set of available k8s API versions.
+//
+// The returned set contains both "group/version" strings and
+// "group/version/Kind" paths, deduplicated via a single map.
+func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) {
+	groups, resources, err := client.ServerGroupsAndResources()
+	// Group-discovery failures are tolerated here; the partial results are
+	// still usable (the caller logs the warning).
+	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
+		return chartutil.DefaultVersionSet, errors.Wrap(err, "could not get apiVersions from Kubernetes")
+	}
+
+	// FIXME: The Kubernetes test fixture for cli appears to always return nil
+	// for calls to Discovery().ServerGroupsAndResources(). So in this case, we
+	// return the default API list. This is also a safe value to return in any
+	// other odd-ball case.
+	if len(groups) == 0 && len(resources) == 0 {
+		return chartutil.DefaultVersionSet, nil
+	}
+
+	versionMap := make(map[string]interface{})
+	versions := []string{}
+
+	// Extract the groups
+	for _, g := range groups {
+		for _, gv := range g.Versions {
+			versionMap[gv.GroupVersion] = struct{}{}
+		}
+	}
+
+	// Extract the resources
+	var id string
+	var ok bool
+	for _, r := range resources {
+		for _, rl := range r.APIResources {
+
+			// A Kind at a GroupVersion can show up more than once. We only want
+			// it displayed once in the final output.
+			id = path.Join(r.GroupVersion, rl.Kind)
+			if _, ok = versionMap[id]; !ok {
+				versionMap[id] = struct{}{}
+			}
+		}
+	}
+
+	// Convert to a form that NewVersionSet can use
+	for k := range versionMap {
+		versions = append(versions, k)
+	}
+
+	return chartutil.VersionSet(versions), nil
+}
+
+// recordRelease persists r with an update operation. Failures are logged as
+// warnings and otherwise ignored (best-effort bookkeeping).
+func (cfg *Configuration) recordRelease(r *release.Release) {
+	if err := cfg.Releases.Update(r); err != nil {
+		cfg.Log("warning: Failed to update release %s: %s", r.Name, err)
+	}
+}
+
+// Init initializes the action configuration: it wires up the Kubernetes
+// client, selects and initializes the release-storage backend named by
+// helmDriver ("secret"/"secrets"/"" [default], "configmap"/"configmaps",
+// "memory", or "sql"), and installs the debug logger.
+//
+// NOTE: an unrecognized driver name, or a failure to open the SQL backend,
+// panics rather than returning an error.
+func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
+	kc := kube.New(getter)
+	kc.Log = log
+
+	// Clients are constructed lazily so no API connection is made until a
+	// driver actually needs one.
+	lazyClient := &lazyClient{
+		namespace: namespace,
+		clientFn:  kc.Factory.KubernetesClientSet,
+	}
+
+	var store *storage.Storage
+	switch helmDriver {
+	case "secret", "secrets", "":
+		d := driver.NewSecrets(newSecretClient(lazyClient))
+		d.Log = log
+		store = storage.Init(d)
+	case "configmap", "configmaps":
+		d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
+		d.Log = log
+		store = storage.Init(d)
+	case "memory":
+		var d *driver.Memory
+		if cfg.Releases != nil {
+			if mem, ok := cfg.Releases.Driver.(*driver.Memory); ok {
+				// This function can be called more than once (e.g., helm list --all-namespaces).
+				// If a memory driver was already initialized, re-use it but set the possibly new namespace.
+				// We re-use it in case some releases were already created in the existing memory driver.
+				d = mem
+			}
+		}
+		if d == nil {
+			d = driver.NewMemory()
+		}
+		d.SetNamespace(namespace)
+		store = storage.Init(d)
+	case "sql":
+		d, err := driver.NewSQL(
+			os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
+			log,
+			namespace,
+		)
+		if err != nil {
+			panic(fmt.Sprintf("Unable to instantiate SQL driver: %v", err))
+		}
+		store = storage.Init(d)
+	default:
+		// Not sure what to do here.
+		panic("Unknown driver in HELM_DRIVER: " + helmDriver)
+	}
+
+	cfg.RESTClientGetter = getter
+	cfg.KubeClient = kc
+	cfg.Releases = store
+	cfg.Log = log
+
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/dependency.go b/vendor/helm.sh/helm/v3/pkg/action/dependency.go
new file mode 100644
index 000000000..3265f1f17
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/dependency.go
@@ -0,0 +1,230 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/gosuri/uitable"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// Dependency is the action for building a given chart's dependency tree.
+//
+// It provides the implementation of 'helm dependency' and its respective subcommands.
+type Dependency struct {
+	Verify      bool   // presumably mirrors --verify; used by subcommands outside this file — confirm
+	Keyring     string // presumably mirrors --keyring — confirm against the CLI wiring
+	SkipRefresh bool   // presumably mirrors --skip-refresh — confirm against the CLI wiring
+	ColumnWidth uint   // maximum column width for the table printed by List (see printDependencies)
+}
+
+// NewDependency creates a new Dependency object with the given configuration,
+// defaulting ColumnWidth to 80.
+func NewDependency() *Dependency {
+	return &Dependency{
+		ColumnWidth: 80,
+	}
+}
+
+// List executes 'helm dependency list': it loads the chart at chartpath,
+// prints the dependency table, then warns about charts present on disk but
+// missing from Chart.yaml. A chart with no declared dependencies produces
+// only a warning line.
+func (d *Dependency) List(chartpath string, out io.Writer) error {
+	c, err := loader.Load(chartpath)
+	if err != nil {
+		return err
+	}
+
+	if c.Metadata.Dependencies == nil {
+		fmt.Fprintf(out, "WARNING: no dependencies at %s\n", filepath.Join(chartpath, "charts"))
+		return nil
+	}
+
+	d.printDependencies(chartpath, out, c)
+	fmt.Fprintln(out)
+	d.printMissing(chartpath, out, c.Metadata.Dependencies)
+	return nil
+}
+
+// dependencyStatus returns a string describing the status of a dependency
+// vis-à-vis the parent chart (e.g. "ok", "unpacked", "missing",
+// "wrong version", "too many matches").
+func (d *Dependency) dependencyStatus(chartpath string, dep *chart.Dependency, parent *chart.Chart) string {
+	filename := fmt.Sprintf("%s-%s.tgz", dep.Name, "*")
+
+	// If a chart is unpacked, this will check the unpacked chart's `charts/` directory for tarballs.
+	// Technically, this is COMPLETELY unnecessary, and should be removed in Helm 4. It is here
+	// to preserve backward compatibility. In Helm 2/3, there is a "difference" between
+	// the tgz version (which outputs "ok" if it unpacks) and the loaded version (which outputs
+	// "unpacked"). Early in Helm 2's history, this would have made a difference. But it no
+	// longer does. However, since this code shipped with Helm 3, the output must remain stable
+	// until Helm 4.
+	switch archives, err := filepath.Glob(filepath.Join(chartpath, "charts", filename)); {
+	case err != nil:
+		return "bad pattern"
+	case len(archives) > 1:
+		// See if the second part is a SemVer
+		found := []string{}
+		for _, arc := range archives {
+			// we need to strip the prefix dirs and the extension off.
+			filename = strings.TrimSuffix(filepath.Base(arc), ".tgz")
+			maybeVersion := strings.TrimPrefix(filename, fmt.Sprintf("%s-", dep.Name))
+
+			if _, err := semver.StrictNewVersion(maybeVersion); err == nil {
+				// If the version parsed without an error, it is possibly a valid
+				// version.
+				found = append(found, arc)
+			}
+		}
+
+		if l := len(found); l == 1 {
+			// If we get here, we do the same thing as in len(archives) == 1.
+			if r := statArchiveForStatus(found[0], dep); r != "" {
+				return r
+			}
+
+			// Fall through and look for directories
+		} else if l > 1 {
+			return "too many matches"
+		}
+
+		// The sanest thing to do here is to fall through and see if we have any directory
+		// matches.
+
+	case len(archives) == 1:
+		archive := archives[0]
+		if r := statArchiveForStatus(archive, dep); r != "" {
+			return r
+		}
+
+	}
+	// End unnecessary code.
+
+	// Look for the dependency among the parent's loaded (unpacked) subcharts.
+	var depChart *chart.Chart
+	for _, item := range parent.Dependencies() {
+		if item.Name() == dep.Name {
+			depChart = item
+		}
+	}
+
+	if depChart == nil {
+		return "missing"
+	}
+
+	// An exact version match is accepted as-is; otherwise the declared version
+	// is treated as a semver constraint and checked against the subchart.
+	if depChart.Metadata.Version != dep.Version {
+		constraint, err := semver.NewConstraint(dep.Version)
+		if err != nil {
+			return "invalid version"
+		}
+
+		v, err := semver.NewVersion(depChart.Metadata.Version)
+		if err != nil {
+			return "invalid version"
+		}
+
+		if !constraint.Check(v) {
+			return "wrong version"
+		}
+	}
+
+	return "unpacked"
+}
+
+// statArchiveForStatus stats an archive and returns a status message if the
+// stat is successful ("ok", "corrupt", "misnamed", "invalid version", or
+// "wrong version"); the empty string means the archive does not exist and the
+// caller should fall through to other checks.
+//
+// This is a refactor of the code originally in dependencyStatus. It is here to
+// support legacy behavior, and should be removed in Helm 4.
+func statArchiveForStatus(archive string, dep *chart.Dependency) string {
+	if _, err := os.Stat(archive); err == nil {
+		c, err := loader.Load(archive)
+		if err != nil {
+			return "corrupt"
+		}
+		if c.Name() != dep.Name {
+			return "misnamed"
+		}
+
+		// Same exact-match-then-constraint logic as dependencyStatus.
+		if c.Metadata.Version != dep.Version {
+			constraint, err := semver.NewConstraint(dep.Version)
+			if err != nil {
+				return "invalid version"
+			}
+
+			v, err := semver.NewVersion(c.Metadata.Version)
+			if err != nil {
+				return "invalid version"
+			}
+
+			if !constraint.Check(v) {
+				return "wrong version"
+			}
+		}
+		return "ok"
+	}
+	return ""
+}
+
+// printDependencies prints a table of all dependencies declared in the
+// chart's metadata, one row per dependency with its resolved status.
+func (d *Dependency) printDependencies(chartpath string, out io.Writer, c *chart.Chart) {
+	table := uitable.New()
+	table.MaxColWidth = d.ColumnWidth
+	table.AddRow("NAME", "VERSION", "REPOSITORY", "STATUS")
+	for _, row := range c.Metadata.Dependencies {
+		table.AddRow(row.Name, row.Version, row.Repository, d.dependencyStatus(chartpath, row, c))
+	}
+	fmt.Fprintln(out, table)
+}
+
+// printMissing prints warnings about charts that are present on disk, but are
+// not in Chart.yaml.
+func (d *Dependency) printMissing(chartpath string, out io.Writer, reqs []*chart.Dependency) {
+	folder := filepath.Join(chartpath, "charts/*")
+	files, err := filepath.Glob(folder)
+	if err != nil {
+		fmt.Fprintln(out, err)
+		return
+	}
+
+	for _, f := range files {
+		fi, err := os.Stat(f)
+		if err != nil {
+			// NOTE(review): only a warning is printed here, but fi is nil on
+			// error and the fi.IsDir() call below will panic. A `continue`
+			// looks intended — this should be fixed upstream in Helm, not in
+			// this vendored copy.
+			fmt.Fprintf(out, "Warning: %s\n", err)
+		}
+		// Skip anything that is not a directory and not a tgz file.
+		if !fi.IsDir() && filepath.Ext(f) != ".tgz" {
+			continue
+		}
+		c, err := loader.Load(f)
+		if err != nil {
+			fmt.Fprintf(out, "WARNING: %q is not a chart.\n", f)
+			continue
+		}
+		found := false
+		for _, d := range reqs {
+			if d.Name == c.Name() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			fmt.Fprintf(out, "WARNING: %q is not in Chart.yaml.\n", f)
+		}
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/doc.go b/vendor/helm.sh/helm/v3/pkg/action/doc.go
new file mode 100644
index 000000000..3c91bd618
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package action contains the logic for each action that Helm can perform.
+//
+// This is a library for calling top-level Helm actions like 'install',
+// 'upgrade', or 'list'. Actions approximately match the command line
+// invocations that the Helm client uses.
+package action
diff --git a/vendor/helm.sh/helm/v3/pkg/action/get.go b/vendor/helm.sh/helm/v3/pkg/action/get.go
new file mode 100644
index 000000000..f44b53307
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/get.go
@@ -0,0 +1,47 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "helm.sh/helm/v3/pkg/release"
+)
+
+// Get is the action for checking a given release's information.
+//
+// It provides the implementation of 'helm get' and its respective subcommands (except `helm get values`).
+type Get struct {
+	cfg *Configuration
+
+	// Version selects the release revision to fetch.
+	// Initializing Version to 0 will get the latest revision of the release.
+	Version int
+}
+
+// NewGet creates a new Get object with the given configuration.
+func NewGet(cfg *Configuration) *Get {
+	return &Get{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm get' against the given release: it verifies cluster
+// reachability, then returns the stored release at g.Version (latest if 0).
+func (g *Get) Run(name string) (*release.Release, error) {
+	if err := g.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	return g.cfg.releaseContent(name, g.Version)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/get_values.go b/vendor/helm.sh/helm/v3/pkg/action/get_values.go
new file mode 100644
index 000000000..9c32db213
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/get_values.go
@@ -0,0 +1,60 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "helm.sh/helm/v3/pkg/chartutil"
+)
+
+// GetValues is the action for checking a given release's values.
+//
+// It provides the implementation of 'helm get values'.
+type GetValues struct {
+	cfg *Configuration
+
+	Version   int  // release revision to read; 0 selects the latest
+	AllValues bool // when true, return chart defaults coalesced with user-supplied values
+}
+
+// NewGetValues creates a new GetValues object with the given configuration.
+func NewGetValues(cfg *Configuration) *GetValues {
+	return &GetValues{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm get values' against the given release. By default it
+// returns only the user-supplied values (rel.Config); with AllValues set it
+// returns those values coalesced with the chart's defaults.
+func (g *GetValues) Run(name string) (map[string]interface{}, error) {
+	if err := g.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	rel, err := g.cfg.releaseContent(name, g.Version)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the user wants all values, compute the values and return.
+	if g.AllValues {
+		cfg, err := chartutil.CoalesceValues(rel.Chart, rel.Config)
+		if err != nil {
+			return nil, err
+		}
+		return cfg, nil
+	}
+	return rel.Config, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/history.go b/vendor/helm.sh/helm/v3/pkg/action/history.go
new file mode 100644
index 000000000..0430aaf7a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/history.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/release"
+)
+
+// History is the action for checking the release's ledger.
+//
+// It provides the implementation of 'helm history'.
+// It returns all the revisions for a specific release.
+// To list up to one revision of every release in one specific, or in all,
+// namespaces, see the List action.
+type History struct {
+	cfg *Configuration
+
+	Max     int // presumably caps the number of revisions returned; not read in this file — confirm at call sites
+	Version int // presumably a revision selector; not read in this file — confirm at call sites
+}
+
+// NewHistory creates a new History object with the given configuration.
+func NewHistory(cfg *Configuration) *History {
+	return &History{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm history' against the given release, returning every
+// stored revision after validating cluster reachability and the release name.
+func (h *History) Run(name string) ([]*release.Release, error) {
+	if err := h.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("release name is invalid: %s", name)
+	}
+
+	h.cfg.Log("getting history for release %s", name)
+	return h.cfg.Releases.History(name)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/vendor/helm.sh/helm/v3/pkg/action/hooks.go
new file mode 100644
index 000000000..40c1ffdb6
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/hooks.go
@@ -0,0 +1,151 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "sort"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/release"
+ helmtime "helm.sh/helm/v3/pkg/time"
+)
+
+// execHook executes all of the hooks for the given hook event: it selects the
+// release's hooks subscribed to that event, sorts them by weight, and for each
+// one applies deletion policy, creates the hook resources, and watches them
+// until ready (or timeout). Hook execution state is recorded on the release.
+func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
+	executingHooks := []*release.Hook{}
+
+	for _, h := range rl.Hooks {
+		for _, e := range h.Events {
+			if e == hook {
+				executingHooks = append(executingHooks, h)
+			}
+		}
+	}
+
+	// hooks are pre-ordered by kind, so keep order stable
+	sort.Stable(hookByWeight(executingHooks))
+
+	for _, h := range executingHooks {
+		// Set default delete policy to before-hook-creation
+		// (NOTE(review): the nil check is redundant — len(nil slice) is 0.)
+		if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 {
+			// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
+			// resources. For all other resource types update in place if a
+			// resource with the same name already exists and is owned by the
+			// current release.
+			h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
+		}
+
+		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil {
+			return err
+		}
+
+		resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
+		if err != nil {
+			return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
+		}
+
+		// Record the time at which the hook was applied to the cluster
+		h.LastRun = release.HookExecution{
+			StartedAt: helmtime.Now(),
+			Phase:     release.HookPhaseRunning,
+		}
+		cfg.recordRelease(rl)
+
+		// As long as the implementation of WatchUntilReady does not panic, HookPhaseFailed or HookPhaseSucceeded
+		// should always be set by this function. If we fail to do that for any reason, then HookPhaseUnknown is
+		// the most appropriate value to surface.
+		h.LastRun.Phase = release.HookPhaseUnknown
+
+		// Create hook resources
+		if _, err := cfg.KubeClient.Create(resources); err != nil {
+			h.LastRun.CompletedAt = helmtime.Now()
+			h.LastRun.Phase = release.HookPhaseFailed
+			return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
+		}
+
+		// Watch hook resources until they have completed
+		err = cfg.KubeClient.WatchUntilReady(resources, timeout)
+		// Note the time of success/failure
+		h.LastRun.CompletedAt = helmtime.Now()
+		// Mark hook as succeeded or failed
+		if err != nil {
+			h.LastRun.Phase = release.HookPhaseFailed
+			// If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
+			// under failed condition. If so, then clear the corresponding resource object in the hook
+			if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil {
+				return err
+			}
+			return err
+		}
+		h.LastRun.Phase = release.HookPhaseSucceeded
+	}
+
+	// If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted
+	// under succeeded condition. If so, then clear the corresponding resource object in each hook
+	for _, h := range executingHooks {
+		if err := cfg.deleteHookByPolicy(h, release.HookSucceeded); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// hookByWeight is a sorter for hooks: ascending by Weight, with Name as the
+// tiebreaker for a deterministic order among equal weights.
+type hookByWeight []*release.Hook
+
+func (x hookByWeight) Len() int      { return len(x) }
+func (x hookByWeight) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x hookByWeight) Less(i, j int) bool {
+	if x[i].Weight == x[j].Weight {
+		return x[i].Name < x[j].Name
+	}
+	return x[i].Weight < x[j].Weight
+}
+
+// deleteHookByPolicy deletes a hook's resources if the hook carries the given
+// deletion policy. CustomResourceDefinitions are never deleted.
+func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy) error {
+	// Never delete CustomResourceDefinitions; this could cause lots of
+	// cascading garbage collection.
+	if h.Kind == "CustomResourceDefinition" {
+		return nil
+	}
+	if hookHasDeletePolicy(h, policy) {
+		resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), false)
+		if err != nil {
+			return errors.Wrapf(err, "unable to build kubernetes object for deleting hook %s", h.Path)
+		}
+		// Delete can fail per-resource; all failures are joined into one error.
+		_, errs := cfg.KubeClient.Delete(resources)
+		if len(errs) > 0 {
+			return errors.New(joinErrors(errs))
+		}
+	}
+	return nil
+}
+
+// hookHasDeletePolicy reports whether the hook declares the given deletion
+// policy among its DeletePolicies.
+func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool {
+	for _, v := range h.DeletePolicies {
+		if policy == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go
new file mode 100644
index 000000000..b84a57271
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/install.go
@@ -0,0 +1,770 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+
+ "github.com/Masterminds/sprig/v3"
+ "github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/cli-runtime/pkg/resource"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/cli"
+ "helm.sh/helm/v3/pkg/downloader"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/kube"
+ kubefake "helm.sh/helm/v3/pkg/kube/fake"
+ "helm.sh/helm/v3/pkg/postrender"
+ "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v3/pkg/releaseutil"
+ "helm.sh/helm/v3/pkg/repo"
+ "helm.sh/helm/v3/pkg/storage"
+ "helm.sh/helm/v3/pkg/storage/driver"
+)
+
+// releaseNameMaxLen is the maximum length of a release name.
+//
+// As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for
+// charts to add data. Effectively, that gives us 53 chars.
+// See https://github.com/helm/helm/issues/1528
+const releaseNameMaxLen = 53
+
+// notesFileSuffix identifies the notes file that we want to treat special. It
+// goes through the templating engine but it's not a yaml file (resource) hence
+// can't have hooks, etc. And the user actually wants to see this file after
+// rendering in the status command. However, it must be a suffix since there
+// can be a filepath in front of it.
+const notesFileSuffix = "NOTES.txt"
+
+// defaultDirectoryPermission is the mode used for directories created when
+// writing rendered manifests to an output dir.
+const defaultDirectoryPermission = 0755
+
+// Install performs an installation operation. Most fields mirror the
+// command-line flags of `helm install` / `helm template`.
+type Install struct {
+	cfg *Configuration
+
+	ChartPathOptions
+
+	ClientOnly               bool
+	CreateNamespace          bool
+	DryRun                   bool
+	DisableHooks             bool
+	Replace                  bool
+	Wait                     bool
+	WaitForJobs              bool
+	Devel                    bool
+	DependencyUpdate         bool
+	Timeout                  time.Duration
+	Namespace                string
+	ReleaseName              string
+	GenerateName             bool
+	NameTemplate             string
+	Description              string
+	OutputDir                string
+	Atomic                   bool
+	SkipCRDs                 bool
+	SubNotes                 bool
+	DisableOpenAPIValidation bool
+	IncludeCRDs              bool
+	// KubeVersion allows specifying a custom kubernetes version to use and
+	// APIVersions allows a manual set of supported API Versions to be passed
+	// (for things like templating). These are ignored if ClientOnly is false
+	KubeVersion *chartutil.KubeVersion
+	APIVersions chartutil.VersionSet
+	// Used by helm template to render charts with .Release.IsUpgrade. Ignored if Dry-Run is false
+	IsUpgrade bool
+	// Used by helm template to add the release as part of OutputDir path
+	// OutputDir/<ReleaseName>
+	UseReleaseName bool
+	PostRenderer   postrender.PostRenderer
+	// Lock to control race conditions when the process receives a SIGTERM
+	Lock sync.Mutex
+}
+
+// ChartPathOptions captures common options used for controlling chart paths.
+// Each field corresponds to the CLI flag noted beside it.
+type ChartPathOptions struct {
+	CaFile                string // --ca-file
+	CertFile              string // --cert-file
+	KeyFile               string // --key-file
+	InsecureSkipTLSverify bool   // --insecure-skip-verify
+	Keyring               string // --keyring
+	Password              string // --password
+	PassCredentialsAll    bool   // --pass-credentials
+	RepoURL               string // --repo
+	Username              string // --username
+	Verify                bool   // --verify
+	Version               string // --version
+}
+
+// NewInstall creates a new Install object with the given configuration.
+func NewInstall(cfg *Configuration) *Install {
+	return &Install{
+		cfg: cfg,
+	}
+}
+
+// installCRDs creates the chart's crds/ objects one file at a time, in the
+// order they were read. Pre-existing CRDs are skipped (not updated). When any
+// CRD was created, the discovery cache is invalidated and the client waits for
+// the new CRDs to be established before returning.
+func (i *Install) installCRDs(crds []chart.CRD) error {
+	// We do these one file at a time in the order they were read.
+	totalItems := []*resource.Info{}
+	for _, obj := range crds {
+		// Read in the resources
+		res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false)
+		if err != nil {
+			return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
+		}
+
+		// Send them to Kube
+		if _, err := i.cfg.KubeClient.Create(res); err != nil {
+			// If the error is CRD already exists, continue.
+			if apierrors.IsAlreadyExists(err) {
+				crdName := res[0].Name
+				i.cfg.Log("CRD %s is already present. Skipping.", crdName)
+				continue
+			}
+			return errors.Wrapf(err, "failed to install CRD %s", obj.Name)
+		}
+		totalItems = append(totalItems, res...)
+	}
+	if len(totalItems) > 0 {
+		// Invalidate the local cache, since it will not have the new CRDs
+		// present.
+		discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient()
+		if err != nil {
+			return err
+		}
+		i.cfg.Log("Clearing discovery cache")
+		discoveryClient.Invalidate()
+		// Give time for the CRD to be recognized.
+
+		if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil {
+			return err
+		}
+
+		// Make sure to force a rebuild of the cache. The result and error are
+		// intentionally discarded; the call only repopulates the cache.
+		discoveryClient.ServerGroups()
+	}
+	return nil
+}
+
+// Run executes the installation with a background context.
+//
+// If DryRun is set to true, this will prepare the release, but not install it.
+func (i *Install) Run(chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+	ctx := context.Background()
+	return i.RunWithContext(ctx, chrt, vals)
+}
+
+// Run executes the installation with Context
+func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+ // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
+ if !i.ClientOnly {
+ if err := i.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := i.availableName(); err != nil {
+ return nil, err
+ }
+
+ // Pre-install anything in the crd/ directory. We do this before Helm
+ // contacts the upstream server and builds the capabilities object.
+ if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
+ // On dry run, bail here
+ if i.DryRun {
+ i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
+ } else if err := i.installCRDs(crds); err != nil {
+ return nil, err
+ }
+ }
+
+ if i.ClientOnly {
+ // Add mock objects in here so it doesn't use Kube API server
+ // NOTE(bacongobbler): used for `helm template`
+ i.cfg.Capabilities = chartutil.DefaultCapabilities.Copy()
+ if i.KubeVersion != nil {
+ i.cfg.Capabilities.KubeVersion = *i.KubeVersion
+ }
+ i.cfg.Capabilities.APIVersions = append(i.cfg.Capabilities.APIVersions, i.APIVersions...)
+ i.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: ioutil.Discard}
+
+ mem := driver.NewMemory()
+ mem.SetNamespace(i.Namespace)
+ i.cfg.Releases = storage.Init(mem)
+ } else if !i.ClientOnly && len(i.APIVersions) > 0 {
+ i.cfg.Log("API Version list given outside of client only mode, this list will be ignored")
+ }
+
+ if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
+ return nil, err
+ }
+
+ // Make sure if Atomic is set, that wait is set as well. This makes it so
+ // the user doesn't have to specify both
+ i.Wait = i.Wait || i.Atomic
+
+ caps, err := i.cfg.getCapabilities()
+ if err != nil {
+ return nil, err
+ }
+
+ // special case for helm template --is-upgrade
+ isUpgrade := i.IsUpgrade && i.DryRun
+ options := chartutil.ReleaseOptions{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Revision: 1,
+ IsInstall: !isUpgrade,
+ IsUpgrade: isUpgrade,
+ }
+ valuesToRender, err := chartutil.ToRenderValues(chrt, vals, options, caps)
+ if err != nil {
+ return nil, err
+ }
+
+ rel := i.createRelease(chrt, vals)
+
+ var manifestDoc *bytes.Buffer
+ rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, i.DryRun)
+ // Even for errors, attach this if available
+ if manifestDoc != nil {
+ rel.Manifest = manifestDoc.String()
+ }
+ // Check error from render
+ if err != nil {
+ rel.SetStatus(release.StatusFailed, fmt.Sprintf("failed to render resource: %s", err.Error()))
+ // Return a release with partial data so that the client can show debugging information.
+ return rel, err
+ }
+
+ // Mark this release as in-progress
+ rel.SetStatus(release.StatusPendingInstall, "Initial install underway")
+
+ var toBeAdopted kube.ResourceList
+ resources, err := i.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), !i.DisableOpenAPIValidation)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to build kubernetes objects from release manifest")
+ }
+
+ // It is safe to use "force" here because these are resources currently rendered by the chart.
+ err = resources.Visit(setMetadataVisitor(rel.Name, rel.Namespace, true))
+ if err != nil {
+ return nil, err
+ }
+
+ // Install requires an extra validation step of checking that resources
+ // don't already exist before we actually create resources. If we continue
+ // forward and create the release object with resources that already exist,
+ // we'll end up in a state where we will delete those resources upon
+ // deleting the release because the manifest will be pointing at that
+ // resource
+ if !i.ClientOnly && !isUpgrade && len(resources) > 0 {
+ toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
+ if err != nil {
+ return nil, errors.Wrap(err, "rendered manifests contain a resource that already exists. Unable to continue with install")
+ }
+ }
+
+ // Bail out here if it is a dry run
+ if i.DryRun {
+ rel.Info.Description = "Dry run complete"
+ return rel, nil
+ }
+
+ if i.CreateNamespace {
+ ns := &v1.Namespace{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Namespace",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: i.Namespace,
+ Labels: map[string]string{
+ "name": i.Namespace,
+ },
+ },
+ }
+ buf, err := yaml.Marshal(ns)
+ if err != nil {
+ return nil, err
+ }
+ resourceList, err := i.cfg.KubeClient.Build(bytes.NewBuffer(buf), true)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := i.cfg.KubeClient.Create(resourceList); err != nil && !apierrors.IsAlreadyExists(err) {
+ return nil, err
+ }
+ }
+
+ // If Replace is true, we need to supercede the last release.
+ if i.Replace {
+ if err := i.replaceRelease(rel); err != nil {
+ return nil, err
+ }
+ }
+
+ // Store the release in history before continuing (new in Helm 3). We always know
+ // that this is a create operation.
+ if err := i.cfg.Releases.Create(rel); err != nil {
+ // We could try to recover gracefully here, but since nothing has been installed
+ // yet, this is probably safer than trying to continue when we know storage is
+ // not working.
+ return rel, err
+ }
+ rChan := make(chan resultMessage)
+ go i.performInstall(rChan, rel, toBeAdopted, resources)
+ go i.handleContext(ctx, rChan, rel)
+ result := <-rChan
+ //start preformInstall go routine
+ return result.r, result.e
+}
+
// performInstall executes the server-side steps of an install — hooks,
// resource creation/adoption, optional readiness waiting, and release
// bookkeeping — and reports the final (release, error) pair on c via
// reportToRun.
//
// It is started in its own goroutine by the caller, racing against
// handleContext for delivery of the result.
func (i *Install) performInstall(c chan<- resultMessage, rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) {

	// pre-install hooks
	if !i.DisableHooks {
		if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
			i.reportToRun(c, rel, fmt.Errorf("failed pre-install: %s", err))
			return
		}
	}

	// At this point, we can do the install. Note that before we were detecting whether to
	// do an update, but it's not clear whether we WANT to do an update if the re-use is set
	// to true, since that is basically an upgrade operation.
	if len(toBeAdopted) == 0 && len(resources) > 0 {
		// Nothing pre-existing to adopt: create everything fresh.
		if _, err := i.cfg.KubeClient.Create(resources); err != nil {
			i.reportToRun(c, rel, err)
			return
		}
	} else if len(resources) > 0 {
		// Some rendered resources already exist and are being adopted into
		// this release; update them in place (force=false).
		if _, err := i.cfg.KubeClient.Update(toBeAdopted, resources, false); err != nil {
			i.reportToRun(c, rel, err)
			return
		}
	}

	// Optionally block until the created resources are ready, bounded by
	// i.Timeout; WaitForJobs additionally waits for Jobs to complete.
	if i.Wait {
		if i.WaitForJobs {
			if err := i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout); err != nil {
				i.reportToRun(c, rel, err)
				return
			}
		} else {
			if err := i.cfg.KubeClient.Wait(resources, i.Timeout); err != nil {
				i.reportToRun(c, rel, err)
				return
			}
		}
	}

	// post-install hooks
	if !i.DisableHooks {
		if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
			i.reportToRun(c, rel, fmt.Errorf("failed post-install: %s", err))
			return
		}
	}

	if len(i.Description) > 0 {
		rel.SetStatus(release.StatusDeployed, i.Description)
	} else {
		rel.SetStatus(release.StatusDeployed, "Install complete")
	}

	// This is a tricky case. The release has been created, but the result
	// cannot be recorded. The truest thing to tell the user is that the
	// release was created. However, the user will not be able to do anything
	// further with this release.
	//
	// One possible strategy would be to do a timed retry to see if we can get
	// this stored in the future.
	if err := i.recordRelease(rel); err != nil {
		i.cfg.Log("failed to record the release: %s", err)
	}

	i.reportToRun(c, rel, nil)
}
+func (i *Install) handleContext(ctx context.Context, c chan<- resultMessage, rel *release.Release) {
+ go func() {
+ <-ctx.Done()
+ err := ctx.Err()
+ i.reportToRun(c, rel, err)
+ }()
+}
+func (i *Install) reportToRun(c chan<- resultMessage, rel *release.Release, err error) {
+ i.Lock.Lock()
+ if err != nil {
+ rel, err = i.failRelease(rel, err)
+ }
+ c <- resultMessage{r: rel, e: err}
+ i.Lock.Unlock()
+}
+func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
+ rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
+ if i.Atomic {
+ i.cfg.Log("Install failed and atomic is set, uninstalling release")
+ uninstall := NewUninstall(i.cfg)
+ uninstall.DisableHooks = i.DisableHooks
+ uninstall.KeepHistory = false
+ uninstall.Timeout = i.Timeout
+ if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil {
+ return rel, errors.Wrapf(uninstallErr, "an error occurred while uninstalling the release. original install error: %s", err)
+ }
+ return rel, errors.Wrapf(err, "release %s failed, and has been uninstalled due to atomic being set", i.ReleaseName)
+ }
+ i.recordRelease(rel) // Ignore the error, since we have another error to deal with.
+ return rel, err
+}
+
+// availableName tests whether a name is available
+//
+// Roughly, this will return an error if name is
+//
+// - empty
+// - too long
+// - already in use, and not deleted
+// - used by a deleted release, and i.Replace is false
+func (i *Install) availableName() error {
+ start := i.ReleaseName
+ if start == "" {
+ return errors.New("name is required")
+ }
+
+ if len(start) > releaseNameMaxLen {
+ return errors.Errorf("release name %q exceeds max length of %d", start, releaseNameMaxLen)
+ }
+
+ if i.DryRun {
+ return nil
+ }
+
+ h, err := i.cfg.Releases.History(start)
+ if err != nil || len(h) < 1 {
+ return nil
+ }
+ releaseutil.Reverse(h, releaseutil.SortByRevision)
+ rel := h[0]
+
+ if st := rel.Info.Status; i.Replace && (st == release.StatusUninstalled || st == release.StatusFailed) {
+ return nil
+ }
+ return errors.New("cannot re-use a name that is still in use")
+}
+
+// createRelease creates a new release object
+func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}) *release.Release {
+ ts := i.cfg.Now()
+ return &release.Release{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Chart: chrt,
+ Config: rawVals,
+ Info: &release.Info{
+ FirstDeployed: ts,
+ LastDeployed: ts,
+ Status: release.StatusUnknown,
+ },
+ Version: 1,
+ }
+}
+
// recordRelease persists r to release storage using an update (rather than
// create) operation, in case reuse has been set.
func (i *Install) recordRelease(r *release.Release) error {
	// This is a legacy function which has been reduced to a oneliner. Could probably
	// refactor it out.
	return i.cfg.Releases.Update(r)
}
+
+// replaceRelease replaces an older release with this one
+//
+// This allows us to re-use names by superseding an existing release with a new one
+func (i *Install) replaceRelease(rel *release.Release) error {
+ hist, err := i.cfg.Releases.History(rel.Name)
+ if err != nil || len(hist) == 0 {
+ // No releases exist for this name, so we can return early
+ return nil
+ }
+
+ releaseutil.Reverse(hist, releaseutil.SortByRevision)
+ last := hist[0]
+
+ // Update version to the next available
+ rel.Version = last.Version + 1
+
+ // Do not change the status of a failed release.
+ if last.Info.Status == release.StatusFailed {
+ return nil
+ }
+
+ // For any other status, mark it as superseded and store the old record
+ last.SetStatus(release.StatusSuperseded, "superseded by new release")
+ return i.recordRelease(last)
+}
+
+// write the <data> to <output-dir>/<name>. <append> controls if the file is created or content will be appended
+func writeToFile(outputDir string, name string, data string, append bool) error {
+ outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+
+ err := ensureDirectoryForFile(outfileName)
+ if err != nil {
+ return err
+ }
+
+ f, err := createOrOpenFile(outfileName, append)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
+
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("wrote %s\n", outfileName)
+ return nil
+}
+
+func createOrOpenFile(filename string, append bool) (*os.File, error) {
+ if append {
+ return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+ }
+ return os.Create(filename)
+}
+
// ensureDirectoryForFile creates the parent directory of file if it does
// not already exist.
//
// NOTE(review): path.Dir only understands forward slashes, but callers
// (writeToFile) build this path with the OS-specific separator — on
// Windows the parent directory may be derived as "." and never created.
// Consider filepath.Dir; confirm against the file's imports.
func ensureDirectoryForFile(file string) error {
	baseDir := path.Dir(file)
	// Any Stat error other than "does not exist" is fatal; a missing
	// directory is handled by MkdirAll below (a no-op when it exists).
	_, err := os.Stat(baseDir)
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	return os.MkdirAll(baseDir, defaultDirectoryPermission)
}
+
+// NameAndChart returns the name and chart that should be used.
+//
+// This will read the flags and handle name generation if necessary.
+func (i *Install) NameAndChart(args []string) (string, string, error) {
+ flagsNotSet := func() error {
+ if i.GenerateName {
+ return errors.New("cannot set --generate-name and also specify a name")
+ }
+ if i.NameTemplate != "" {
+ return errors.New("cannot set --name-template and also specify a name")
+ }
+ return nil
+ }
+
+ if len(args) > 2 {
+ return args[0], args[1], errors.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
+ }
+
+ if len(args) == 2 {
+ return args[0], args[1], flagsNotSet()
+ }
+
+ if i.NameTemplate != "" {
+ name, err := TemplateName(i.NameTemplate)
+ return name, args[0], err
+ }
+
+ if i.ReleaseName != "" {
+ return i.ReleaseName, args[0], nil
+ }
+
+ if !i.GenerateName {
+ return "", args[0], errors.New("must either provide a name or specify --generate-name")
+ }
+
+ base := filepath.Base(args[0])
+ if base == "." || base == "" {
+ base = "chart"
+ }
+ // if present, strip out the file extension from the name
+ if idx := strings.Index(base, "."); idx != -1 {
+ base = base[0:idx]
+ }
+
+ return fmt.Sprintf("%s-%d", base, time.Now().Unix()), args[0], nil
+}
+
+// TemplateName renders a name template, returning the name or an error.
+func TemplateName(nameTemplate string) (string, error) {
+ if nameTemplate == "" {
+ return "", nil
+ }
+
+ t, err := template.New("name-template").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
+ if err != nil {
+ return "", err
+ }
+ var b bytes.Buffer
+ if err := t.Execute(&b, nil); err != nil {
+ return "", err
+ }
+
+ return b.String(), nil
+}
+
+// CheckDependencies checks the dependencies for a chart.
+func CheckDependencies(ch *chart.Chart, reqs []*chart.Dependency) error {
+ var missing []string
+
+OUTER:
+ for _, r := range reqs {
+ for _, d := range ch.Dependencies() {
+ if d.Name() == r.Name {
+ continue OUTER
+ }
+ }
+ missing = append(missing, r.Name)
+ }
+
+ if len(missing) > 0 {
+ return errors.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
+ }
+ return nil
+}
+
+// LocateChart looks for a chart directory in known places, and returns either the full path or an error.
+//
+// This does not ensure that the chart is well-formed; only that the requested filename exists.
+//
+// Order of resolution:
+// - relative to current working directory
+// - if path is absolute or begins with '.', error out here
+// - URL
+//
+// If 'verify' was set on ChartPathOptions, this will attempt to also verify the chart.
func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (string, error) {
	name = strings.TrimSpace(name)
	version := strings.TrimSpace(c.Version)

	// First, try the name as a path on the local filesystem.
	if _, err := os.Stat(name); err == nil {
		abs, err := filepath.Abs(name)
		if err != nil {
			return abs, err
		}
		if c.Verify {
			// Verify the local chart against its provenance data.
			if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil {
				return "", err
			}
		}
		return abs, nil
	}
	// Absolute or explicitly relative paths are never treated as remote
	// chart references, so a missing file is a hard error here.
	if filepath.IsAbs(name) || strings.HasPrefix(name, ".") {
		return name, errors.Errorf("path %q not found", name)
	}

	dl := downloader.ChartDownloader{
		Out:     os.Stdout,
		Keyring: c.Keyring,
		Getters: getter.All(settings),
		Options: []getter.Option{
			getter.WithPassCredentialsAll(c.PassCredentialsAll),
			getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile),
			getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify),
		},
		RepositoryConfig: settings.RepositoryConfig,
		RepositoryCache:  settings.RepositoryCache,
	}

	// OCI references carry their version as a registry tag and cannot be
	// resolved without one.
	if registry.IsOCI(name) {
		if version == "" {
			return "", errors.New("version is explicitly required for OCI registries")
		}
		dl.Options = append(dl.Options, getter.WithTagName(version))
	}

	if c.Verify {
		dl.Verify = downloader.VerifyAlways
	}
	if c.RepoURL != "" {
		// Resolve the chart name to a concrete URL within the given repo.
		chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(c.RepoURL, c.Username, c.Password, name, version,
			c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, c.PassCredentialsAll, getter.All(settings))
		if err != nil {
			return "", err
		}
		name = chartURL

		// Only pass the user/pass on when the user has said to or when the
		// location of the chart repo and the chart are the same domain.
		u1, err := url.Parse(c.RepoURL)
		if err != nil {
			return "", err
		}
		u2, err := url.Parse(chartURL)
		if err != nil {
			return "", err
		}

		// Host on URL (returned from url.Parse) contains the port if present.
		// This check ensures credentials are not passed between different
		// services on different ports.
		if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) {
			dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password))
		} else {
			dl.Options = append(dl.Options, getter.WithBasicAuth("", ""))
		}
	} else {
		dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password))
	}

	// Make sure the cache directory exists before downloading into it.
	if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil {
		return "", err
	}

	filename, _, err := dl.DownloadTo(name, version, settings.RepositoryCache)
	if err == nil {
		lname, err := filepath.Abs(filename)
		if err != nil {
			return filename, err
		}
		return lname, nil
	} else if settings.Debug {
		// In debug mode, surface the underlying download error verbatim.
		return filename, err
	}

	// Otherwise return a friendlier, summarized error.
	atVersion := ""
	if version != "" {
		atVersion = fmt.Sprintf(" at version %q", version)
	}

	return filename, errors.Errorf("failed to download %q%s", name, atVersion)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go
new file mode 100644
index 000000000..9037782bb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go
@@ -0,0 +1,197 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "sync"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ "k8s.io/client-go/kubernetes"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
// lazyClient is a workaround to deal with Kubernetes having an unstable client API.
// In Kubernetes v1.18 the defaults where removed which broke creating a
// client without an explicit configuration. ಠ_ಠ
//
// The client is constructed on first use via init(); both the client and
// any construction error are cached for the lifetime of the struct.
type lazyClient struct {
	// initClient guards the one-time construction of client/clientErr.
	initClient sync.Once
	// client caches an initialized kubernetes client
	client kubernetes.Interface
	// clientErr caches the construction error; it is returned again on
	// every subsequent use if initialization failed.
	clientErr error

	// clientFn loads a kubernetes client
	clientFn func() (*kubernetes.Clientset, error)

	// namespace passed to each client request
	namespace string
}

// init constructs the kubernetes client exactly once; later calls return
// the cached client (or the cached construction error).
func (s *lazyClient) init() error {
	s.initClient.Do(func() {
		s.client, s.clientErr = s.clientFn()
	})
	return s.clientErr
}
+
// secretClient implements a corev1.SecretInterface on top of lazyClient.
// Every method lazily initializes the underlying kubernetes client via
// init() and then delegates to the namespaced Secrets API.
type secretClient struct{ *lazyClient }

// Compile-time check that secretClient satisfies corev1.SecretInterface.
var _ corev1.SecretInterface = (*secretClient)(nil)

// newSecretClient wraps lc in a secretClient.
func newSecretClient(lc *lazyClient) *secretClient {
	return &secretClient{lazyClient: lc}
}

// Create delegates to Secrets.Create in the configured namespace.
func (s *secretClient) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).Create(ctx, secret, opts)
}

// Update delegates to Secrets.Update in the configured namespace.
func (s *secretClient) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).Update(ctx, secret, opts)
}

// Delete delegates to Secrets.Delete in the configured namespace.
func (s *secretClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
	if err := s.init(); err != nil {
		return err
	}
	return s.client.CoreV1().Secrets(s.namespace).Delete(ctx, name, opts)
}

// DeleteCollection delegates to Secrets.DeleteCollection in the configured namespace.
func (s *secretClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
	if err := s.init(); err != nil {
		return err
	}
	return s.client.CoreV1().Secrets(s.namespace).DeleteCollection(ctx, opts, listOpts)
}

// Get delegates to Secrets.Get in the configured namespace.
func (s *secretClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).Get(ctx, name, opts)
}

// List delegates to Secrets.List in the configured namespace.
func (s *secretClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).List(ctx, opts)
}

// Watch delegates to Secrets.Watch in the configured namespace.
func (s *secretClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).Watch(ctx, opts)
}

// Patch delegates to Secrets.Patch in the configured namespace.
func (s *secretClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.Secret, error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).Patch(ctx, name, pt, data, opts, subresources...)
}

// Apply delegates to Secrets.Apply (server-side apply) in the configured namespace.
func (s *secretClient) Apply(ctx context.Context, secretConfiguration *applycorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (*v1.Secret, error) {
	if err := s.init(); err != nil {
		return nil, err
	}
	return s.client.CoreV1().Secrets(s.namespace).Apply(ctx, secretConfiguration, opts)
}
+
// configMapClient implements a corev1.ConfigMapInterface on top of
// lazyClient. Every method lazily initializes the underlying kubernetes
// client via init() and then delegates to the namespaced ConfigMaps API.
type configMapClient struct{ *lazyClient }

// Compile-time check that configMapClient satisfies corev1.ConfigMapInterface.
var _ corev1.ConfigMapInterface = (*configMapClient)(nil)

// newConfigMapClient wraps lc in a configMapClient.
func newConfigMapClient(lc *lazyClient) *configMapClient {
	return &configMapClient{lazyClient: lc}
}

// Create delegates to ConfigMaps.Create in the configured namespace.
func (c *configMapClient) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Create(ctx, configMap, opts)
}

// Update delegates to ConfigMaps.Update in the configured namespace.
func (c *configMapClient) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Update(ctx, configMap, opts)
}

// Delete delegates to ConfigMaps.Delete in the configured namespace.
func (c *configMapClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
	if err := c.init(); err != nil {
		return err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Delete(ctx, name, opts)
}

// DeleteCollection delegates to ConfigMaps.DeleteCollection in the configured namespace.
func (c *configMapClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
	if err := c.init(); err != nil {
		return err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).DeleteCollection(ctx, opts, listOpts)
}

// Get delegates to ConfigMaps.Get in the configured namespace.
func (c *configMapClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, opts)
}

// List delegates to ConfigMaps.List in the configured namespace.
func (c *configMapClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).List(ctx, opts)
}

// Watch delegates to ConfigMaps.Watch in the configured namespace.
func (c *configMapClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Watch(ctx, opts)
}

// Patch delegates to ConfigMaps.Patch in the configured namespace.
func (c *configMapClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.ConfigMap, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Patch(ctx, name, pt, data, opts, subresources...)
}

// Apply delegates to ConfigMaps.Apply (server-side apply) in the configured namespace.
func (c *configMapClient) Apply(ctx context.Context, configMap *applycorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (*v1.ConfigMap, error) {
	if err := c.init(); err != nil {
		return nil, err
	}
	return c.client.CoreV1().ConfigMaps(c.namespace).Apply(ctx, configMap, opts)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/lint.go b/vendor/helm.sh/helm/v3/pkg/action/lint.go
new file mode 100644
index 000000000..bdb93dcc2
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/lint.go
@@ -0,0 +1,118 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/lint"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
// Lint is the action for checking that the semantics of a chart are well-formed.
//
// It provides the implementation of 'helm lint'.
type Lint struct {
	// Strict lowers the failure threshold from errors to warnings
	// (see Run's severity tolerance).
	Strict bool
	// Namespace is passed through to the individual chart linters.
	Namespace string
	// WithSubcharts — not referenced in this file; presumably enables
	// linting of dependent subcharts. TODO(review): confirm against callers.
	WithSubcharts bool
}
+
// LintResult is the result of Lint
type LintResult struct {
	// TotalChartsLinted counts the charts that were successfully loaded and linted.
	TotalChartsLinted int
	// Messages holds every linter message across all linted charts.
	Messages []support.Message
	// Errors holds chart load failures plus messages at or above the
	// tolerated severity.
	Errors []error
}
+
// NewLint creates a new Lint action with default (zero-value) configuration.
func NewLint() *Lint {
	return &Lint{}
}
+
+// Run executes 'helm Lint' against the given chart.
+func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult {
+ lowestTolerance := support.ErrorSev
+ if l.Strict {
+ lowestTolerance = support.WarningSev
+ }
+ result := &LintResult{}
+ for _, path := range paths {
+ linter, err := lintChart(path, vals, l.Namespace, l.Strict)
+ if err != nil {
+ result.Errors = append(result.Errors, err)
+ continue
+ }
+
+ result.Messages = append(result.Messages, linter.Messages...)
+ result.TotalChartsLinted++
+ for _, msg := range linter.Messages {
+ if msg.Severity >= lowestTolerance {
+ result.Errors = append(result.Errors, msg.Err)
+ }
+ }
+ }
+ return result
+}
+
+func lintChart(path string, vals map[string]interface{}, namespace string, strict bool) (support.Linter, error) {
+ var chartPath string
+ linter := support.Linter{}
+
+ if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") {
+ tempDir, err := ioutil.TempDir("", "helm-lint")
+ if err != nil {
+ return linter, errors.Wrap(err, "unable to create temp dir to extract tarball")
+ }
+ defer os.RemoveAll(tempDir)
+
+ file, err := os.Open(path)
+ if err != nil {
+ return linter, errors.Wrap(err, "unable to open tarball")
+ }
+ defer file.Close()
+
+ if err = chartutil.Expand(tempDir, file); err != nil {
+ return linter, errors.Wrap(err, "unable to extract tarball")
+ }
+
+ files, err := os.ReadDir(tempDir)
+ if err != nil {
+ return linter, errors.Wrapf(err, "unable to read temporary output directory %s", tempDir)
+ }
+ if !files[0].IsDir() {
+ return linter, errors.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir)
+ }
+
+ chartPath = filepath.Join(tempDir, files[0].Name())
+ } else {
+ chartPath = path
+ }
+
+ // Guard: Error out if this is not a chart.
+ if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil {
+ return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart")
+ }
+
+ return lint.All(chartPath, vals, namespace, strict), nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/list.go b/vendor/helm.sh/helm/v3/pkg/action/list.go
new file mode 100644
index 000000000..c9e6e364a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/list.go
@@ -0,0 +1,323 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "path"
+ "regexp"
+
+ "k8s.io/apimachinery/pkg/labels"
+
+ "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v3/pkg/releaseutil"
+)
+
// ListStates represents zero or more status codes that a list item may have set
//
// Because this is used as a bitmask filter, more than one bit can be flipped
// in the ListStates.
type ListStates uint

const (
	// ListDeployed filters on status "deployed"
	ListDeployed ListStates = 1 << iota
	// ListUninstalled filters on status "uninstalled"
	ListUninstalled
	// ListUninstalling filters on status "uninstalling" (uninstall in progress)
	ListUninstalling
	// ListPendingInstall filters on status "pending" (deployment in progress)
	ListPendingInstall
	// ListPendingUpgrade filters on status "pending_upgrade" (upgrade in progress)
	ListPendingUpgrade
	// ListPendingRollback filters on status "pending_rollback" (rollback in progress)
	ListPendingRollback
	// ListSuperseded filters on status "superseded" (historical release version that is no longer deployed)
	ListSuperseded
	// ListFailed filters on status "failed" (release version not deployed because of error)
	ListFailed
	// ListUnknown filters on an unknown status; it is also the catch-all
	// value FromName returns for unrecognized state names.
	ListUnknown
)
+
+// FromName takes a state name and returns a ListStates representation.
+//
+// Currently, there are only names for individual flipped bits, so the returned
+// ListStates will only match one of the constants. However, it is possible that
+// this behavior could change in the future.
+func (s ListStates) FromName(str string) ListStates {
+ switch str {
+ case "deployed":
+ return ListDeployed
+ case "uninstalled":
+ return ListUninstalled
+ case "superseded":
+ return ListSuperseded
+ case "failed":
+ return ListFailed
+ case "uninstalling":
+ return ListUninstalling
+ case "pending-install":
+ return ListPendingInstall
+ case "pending-upgrade":
+ return ListPendingUpgrade
+ case "pending-rollback":
+ return ListPendingRollback
+ }
+ return ListUnknown
+}
+
// ListAll is a convenience for enabling all list filters
// (note: it deliberately excludes ListUnknown).
const ListAll = ListDeployed | ListUninstalled | ListUninstalling | ListPendingInstall | ListPendingRollback | ListPendingUpgrade | ListSuperseded | ListFailed

// Sorter is a top-level sort order for release listings; the zero value
// (and any unrecognized value) falls back to ascending sort by name.
type Sorter uint

const (
	// ByNameDesc sorts by descending lexicographic order
	ByNameDesc Sorter = iota + 1
	// ByDateAsc sorts by ascending dates (oldest updated release first)
	ByDateAsc
	// ByDateDesc sorts by descending dates (latest updated release first)
	ByDateDesc
)
+
// List is the action for listing releases.
//
// It provides, for example, the implementation of 'helm list'.
// It returns no more than one revision of every release in one specific, or in
// all, namespaces.
// To list all the revisions of a specific release, see the History action.
type List struct {
	cfg *Configuration

	// All ignores the limit/offset
	All bool
	// AllNamespaces searches across namespaces
	AllNamespaces bool
	// Sort indicates the sort to use
	//
	// see pkg/releaseutil for several useful sorters
	Sort Sorter
	// ByDate overrides the default lexicographic sorting with date sorting
	// (see sort()).
	ByDate bool
	// SortReverse inverts the chosen sort order.
	SortReverse bool
	// StateMask accepts a bitmask of states for items to show.
	// The default is ListDeployed
	StateMask ListStates
	// Limit is the number of items to return per Run()
	Limit int
	// Offset is the starting index for the Run() call
	Offset int
	// Filter is a regular expression applied to release names in Run().
	Filter string
	// Short — not referenced in this file; presumably trims output in the
	// CLI layer. TODO(review): confirm against callers.
	Short bool
	// TimeFormat — not referenced in this file; presumably formats dates
	// in CLI output. TODO(review): confirm against callers.
	TimeFormat string
	// Individual status toggles folded into StateMask by SetStateMask.
	Uninstalled  bool
	Superseded   bool
	Uninstalling bool
	Deployed     bool
	Failed       bool
	Pending      bool
	// Selector is a Kubernetes label selector matched against release labels.
	Selector string
}
+
+// NewList constructs a new *List
+func NewList(cfg *Configuration) *List {
+ return &List{
+ StateMask: ListDeployed | ListFailed,
+ cfg: cfg,
+ }
+}
+
// Run executes the list command, returning a set of matches.
//
// Pipeline: fetch from storage (name-filtered) → keep only the latest
// revision per release → apply the state mask → apply the label selector →
// sort → paginate with Offset/Limit.
func (l *List) Run() ([]*release.Release, error) {
	if err := l.cfg.KubeClient.IsReachable(); err != nil {
		return nil, err
	}

	// Compile the name filter up front so an invalid regexp fails fast.
	var filter *regexp.Regexp
	if l.Filter != "" {
		var err error
		filter, err = regexp.Compile(l.Filter)
		if err != nil {
			return nil, err
		}
	}

	results, err := l.cfg.Releases.List(func(rel *release.Release) bool {
		// Skip anything that doesn't match the filter.
		if filter != nil && !filter.MatchString(rel.Name) {
			return false
		}

		return true
	})

	if err != nil {
		return nil, err
	}

	if results == nil {
		return results, nil
	}

	// by definition, superseded releases are never shown if
	// only the latest releases are returned. so if requested statemask
	// is _only_ ListSuperseded, skip the latest release filter
	if l.StateMask != ListSuperseded {
		results = filterLatestReleases(results)
	}

	// State mask application must occur after filtering to
	// latest releases, otherwise outdated entries can be returned
	results = l.filterStateMask(results)

	// Skip anything that doesn't match the selector
	selectorObj, err := labels.Parse(l.Selector)
	if err != nil {
		return nil, err
	}
	results = l.filterSelector(results, selectorObj)

	// Unfortunately, we have to sort before truncating, which can incur substantial overhead
	l.sort(results)

	// Guard on offset
	if l.Offset >= len(results) {
		return []*release.Release{}, nil
	}

	// Calculate the limit and offset, and then truncate results if necessary.
	limit := len(results)
	if l.Limit > 0 && l.Limit < limit {
		limit = l.Limit
	}
	last := l.Offset + limit
	// NOTE: the inner `l` deliberately shadows the receiver for this
	// length clamp.
	if l := len(results); l < last {
		last = l
	}
	results = results[l.Offset:last]

	return results, err
}
+
// sort is an in-place sort where order is based on the value of a.Sort
//
// NOTE: as a side effect this mutates l.Sort to the effective ordering
// resolved from the ByDate/SortReverse flags (date sorting takes
// precedence over name sorting).
func (l *List) sort(rels []*release.Release) {
	if l.SortReverse {
		l.Sort = ByNameDesc
	}

	if l.ByDate {
		l.Sort = ByDateDesc
		if l.SortReverse {
			l.Sort = ByDateAsc
		}
	}

	switch l.Sort {
	case ByDateDesc:
		releaseutil.SortByDate(rels)
	case ByDateAsc:
		releaseutil.Reverse(rels, releaseutil.SortByDate)
	case ByNameDesc:
		releaseutil.Reverse(rels, releaseutil.SortByName)
	default:
		// Ascending lexicographic order by name.
		releaseutil.SortByName(rels)
	}
}
+
+// filterLatestReleases returns a list scrubbed of old releases.
+func filterLatestReleases(releases []*release.Release) []*release.Release {
+ latestReleases := make(map[string]*release.Release)
+
+ for _, rls := range releases {
+ name, namespace := rls.Name, rls.Namespace
+ key := path.Join(namespace, name)
+ if latestRelease, exists := latestReleases[key]; exists && latestRelease.Version > rls.Version {
+ continue
+ }
+ latestReleases[key] = rls
+ }
+
+ var list = make([]*release.Release, 0, len(latestReleases))
+ for _, rls := range latestReleases {
+ list = append(list, rls)
+ }
+ return list
+}
+
+func (l *List) filterStateMask(releases []*release.Release) []*release.Release {
+ desiredStateReleases := make([]*release.Release, 0)
+
+ for _, rls := range releases {
+ currentStatus := l.StateMask.FromName(rls.Info.Status.String())
+ mask := l.StateMask & currentStatus
+ if mask == 0 {
+ continue
+ }
+ desiredStateReleases = append(desiredStateReleases, rls)
+ }
+
+ return desiredStateReleases
+}
+
+func (l *List) filterSelector(releases []*release.Release, selector labels.Selector) []*release.Release {
+ desiredStateReleases := make([]*release.Release, 0)
+
+ for _, rls := range releases {
+ if selector.Matches(labels.Set(rls.Labels)) {
+ desiredStateReleases = append(desiredStateReleases, rls)
+ }
+ }
+
+ return desiredStateReleases
+}
+
+// SetStateMask calculates the state mask based on parameters.
+func (l *List) SetStateMask() {
+ if l.All {
+ l.StateMask = ListAll
+ return
+ }
+
+ state := ListStates(0)
+ if l.Deployed {
+ state |= ListDeployed
+ }
+ if l.Uninstalled {
+ state |= ListUninstalled
+ }
+ if l.Uninstalling {
+ state |= ListUninstalling
+ }
+ if l.Pending {
+ state |= ListPendingInstall | ListPendingRollback | ListPendingUpgrade
+ }
+ if l.Failed {
+ state |= ListFailed
+ }
+ if l.Superseded {
+ state |= ListSuperseded
+ }
+
+ // Apply a default
+ if state == 0 {
+ state = ListDeployed | ListFailed
+ }
+
+ l.StateMask = state
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/package.go b/vendor/helm.sh/helm/v3/pkg/action/package.go
new file mode 100644
index 000000000..52920956f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/package.go
@@ -0,0 +1,182 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "syscall"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+ "golang.org/x/term"
+
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/provenance"
+)
+
// Package is the action for packaging a chart.
//
// It provides the implementation of 'helm package'.
type Package struct {
	Sign             bool   // if true, Run clear-signs the archive via Clearsign
	Key              string // name of the signing key within the keyring
	Keyring          string // path to the keyring holding the signing key
	PassphraseFile   string // file holding the key passphrase; "-" reads it from stdin
	Version          string // when non-empty, overrides the chart's metadata version
	AppVersion       string // when non-empty, overrides the chart's appVersion
	Destination      string // output directory; "." means the current working directory
	DependencyUpdate bool

	RepositoryConfig string
	RepositoryCache  string
}
+
+// NewPackage creates a new Package object with the given configuration.
+func NewPackage() *Package {
+ return &Package{}
+}
+
// Run executes 'helm package' against the given chart directory and returns
// the path to the packaged chart archive.
//
// vals is currently unused by this implementation; the chart is packaged with
// the values it already contains.
func (p *Package) Run(path string, vals map[string]interface{}) (string, error) {
	ch, err := loader.LoadDir(path)
	if err != nil {
		return "", err
	}

	// If version is set, modify the version.
	if p.Version != "" {
		ch.Metadata.Version = p.Version
	}

	// The (possibly overridden) version must be valid SemVer.
	if err := validateVersion(ch.Metadata.Version); err != nil {
		return "", err
	}

	if p.AppVersion != "" {
		ch.Metadata.AppVersion = p.AppVersion
	}

	// Fail early when declared dependencies are not satisfied.
	if reqs := ch.Metadata.Dependencies; reqs != nil {
		if err := CheckDependencies(ch, reqs); err != nil {
			return "", err
		}
	}

	var dest string
	if p.Destination == "." {
		// Save to the current working directory.
		dest, err = os.Getwd()
		if err != nil {
			return "", err
		}
	} else {
		// Otherwise save to set destination
		dest = p.Destination
	}

	name, err := chartutil.Save(ch, dest)
	if err != nil {
		return "", errors.Wrap(err, "failed to save")
	}

	// Optionally produce a <name>.prov provenance file next to the archive.
	if p.Sign {
		err = p.Clearsign(name)
	}

	return name, err
}
+
+// validateVersion Verify that version is a Version, and error out if it is not.
+func validateVersion(ver string) error {
+ if _, err := semver.NewVersion(ver); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Clearsign signs a chart
+func (p *Package) Clearsign(filename string) error {
+ // Load keyring
+ signer, err := provenance.NewFromKeyring(p.Keyring, p.Key)
+ if err != nil {
+ return err
+ }
+
+ passphraseFetcher := promptUser
+ if p.PassphraseFile != "" {
+ passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := signer.DecryptKey(passphraseFetcher); err != nil {
+ return err
+ }
+
+ sig, err := signer.ClearSign(filename)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename+".prov", []byte(sig), 0644)
+}
+
// promptUser implements provenance.PassphraseFetcher by prompting for the
// key passphrase on the controlling terminal (input is not echoed).
func promptUser(name string) ([]byte, error) {
	fmt.Printf("Password for key %q > ", name)
	// syscall.Stdin is not an int in all environments and needs to be coerced
	// into one there (e.g., Windows)
	pw, err := term.ReadPassword(int(syscall.Stdin))
	fmt.Println()
	return pw, err
}
+
+func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
+ file, err := openPassphraseFile(passphraseFile, stdin)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ reader := bufio.NewReader(file)
+ passphrase, _, err := reader.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ return func(name string) ([]byte, error) {
+ return passphrase, nil
+ }, nil
+}
+
+func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
+ if passphraseFile == "-" {
+ stat, err := stdin.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if (stat.Mode() & os.ModeNamedPipe) == 0 {
+ return nil, errors.New("specified reading passphrase from stdin, without input on stdin")
+ }
+ return stdin, nil
+ }
+ return os.Open(passphraseFile)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go
new file mode 100644
index 000000000..2f5127ea9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go
@@ -0,0 +1,170 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/cli"
+ "helm.sh/helm/v3/pkg/downloader"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/repo"
+)
+
// Pull is the action for checking a given release's information.
//
// It provides the implementation of 'helm pull'.
type Pull struct {
	ChartPathOptions

	Settings *cli.EnvSettings // TODO: refactor this out of pkg/action

	Devel       bool
	Untar       bool   // if true, expand the downloaded archive after verification
	VerifyLater bool   // fetch provenance data but defer verification (downloader.VerifyLater)
	UntarDir    string // directory to expand into; relative paths are joined onto DestDir
	DestDir     string // directory the chart archive is written to
	cfg         *Configuration
}
+
// PullOpt is a configuration option applied to a Pull by NewPullWithOpts.
type PullOpt func(*Pull)
+
+func WithConfig(cfg *Configuration) PullOpt {
+ return func(p *Pull) {
+ p.cfg = cfg
+ }
+}
+
// NewPull creates a new Pull object with no options applied.
func NewPull() *Pull {
	return NewPullWithOpts()
}
+
+// NewPullWithOpts creates a new pull, with configuration options.
+func NewPullWithOpts(opts ...PullOpt) *Pull {
+ p := &Pull{}
+ for _, fn := range opts {
+ fn(p)
+ }
+
+ return p
+}
+
// Run executes 'helm pull' against the given chart reference: it downloads
// the chart archive, optionally verifies its provenance, and optionally
// expands it. The returned string carries any human-readable output produced
// along the way.
func (p *Pull) Run(chartRef string) (string, error) {
	var out strings.Builder

	c := downloader.ChartDownloader{
		Out:     &out,
		Keyring: p.Keyring,
		Verify:  downloader.VerifyNever,
		Getters: getter.All(p.Settings),
		Options: []getter.Option{
			getter.WithBasicAuth(p.Username, p.Password),
			getter.WithPassCredentialsAll(p.PassCredentialsAll),
			getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile),
			getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSverify),
		},
		RepositoryConfig: p.Settings.RepositoryConfig,
		RepositoryCache:  p.Settings.RepositoryCache,
	}

	// OCI references require an explicit version, which is pulled as a tag.
	if registry.IsOCI(chartRef) {
		if p.Version == "" {
			return out.String(), errors.Errorf("--version flag is explicitly required for OCI registries")
		}

		c.Options = append(c.Options,
			getter.WithRegistryClient(p.cfg.RegistryClient),
			getter.WithTagName(p.Version))
	}

	if p.Verify {
		c.Verify = downloader.VerifyAlways
	} else if p.VerifyLater {
		c.Verify = downloader.VerifyLater
	}

	// If untar is set, we fetch to a tempdir, then untar and copy after
	// verification.
	dest := p.DestDir
	if p.Untar {
		var err error
		dest, err = ioutil.TempDir("", "helm-")
		if err != nil {
			return out.String(), errors.Wrap(err, "failed to untar")
		}
		defer os.RemoveAll(dest)
	}

	// An explicit repo URL means chartRef is a bare chart name; resolve it to
	// a concrete chart URL within that repository first.
	if p.RepoURL != "" {
		chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings))
		if err != nil {
			return out.String(), err
		}
		chartRef = chartURL
	}

	saved, v, err := c.DownloadTo(chartRef, p.Version, dest)
	if err != nil {
		return out.String(), err
	}

	// Report provenance details when verification was requested.
	if p.Verify {
		for name := range v.SignedBy.Identities {
			fmt.Fprintf(&out, "Signed by: %v\n", name)
		}
		fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", v.SignedBy.PrimaryKey.Fingerprint)
		fmt.Fprintf(&out, "Chart Hash Verified: %s\n", v.FileHash)
	}

	// After verification, untar the chart into the requested directory.
	if p.Untar {
		ud := p.UntarDir
		if !filepath.IsAbs(ud) {
			ud = filepath.Join(p.DestDir, ud)
		}
		// Let udCheck to check conflict file/dir without replacing ud when untarDir is the current directory(.).
		udCheck := ud
		if udCheck == "." {
			_, udCheck = filepath.Split(chartRef)
		} else {
			_, chartName := filepath.Split(chartRef)
			udCheck = filepath.Join(udCheck, chartName)
		}

		// Refuse to expand over an existing file or directory.
		if _, err := os.Stat(udCheck); err != nil {
			if err := os.MkdirAll(udCheck, 0755); err != nil {
				return out.String(), errors.Wrap(err, "failed to untar (mkdir)")
			}

		} else {
			return out.String(), errors.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck)
		}

		return out.String(), chartutil.ExpandFile(ud, saved)
	}
	return out.String(), nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/release_testing.go b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
new file mode 100644
index 000000000..ecaeaf59f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
@@ -0,0 +1,138 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/release"
+)
+
// ReleaseTesting is the action for testing a release.
//
// It provides the implementation of 'helm test'.
type ReleaseTesting struct {
	cfg     *Configuration
	Timeout time.Duration
	// Used for fetching logs from test pods
	Namespace string
	// Filters limits which test hooks run: the "name" key lists hooks to
	// include, "!name" lists hooks to exclude (see Run).
	Filters map[string][]string
}
+
+// NewReleaseTesting creates a new ReleaseTesting object with the given configuration.
+func NewReleaseTesting(cfg *Configuration) *ReleaseTesting {
+ return &ReleaseTesting{
+ cfg: cfg,
+ Filters: map[string][]string{},
+ }
+}
+
// Run executes 'helm test' against the given release: it filters the
// release's hooks by the configured name filters, runs the remaining test
// hooks, and persists the (re-merged) hook list back to release storage.
func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
	if err := r.cfg.KubeClient.IsReachable(); err != nil {
		return nil, err
	}

	if err := chartutil.ValidateReleaseName(name); err != nil {
		return nil, errors.Errorf("releaseTest: Release name is invalid: %s", name)
	}

	// finds the non-deleted release with the given name
	rel, err := r.cfg.Releases.Last(name)
	if err != nil {
		return rel, err
	}

	// Partition hooks into skipped/executing. The exclusion filter ("!name")
	// is applied first; the inclusion filter ("name") then re-partitions
	// whatever remained.
	skippedHooks := []*release.Hook{}
	executingHooks := []*release.Hook{}
	if len(r.Filters["!name"]) != 0 {
		for _, h := range rel.Hooks {
			if contains(r.Filters["!name"], h.Name) {
				skippedHooks = append(skippedHooks, h)
			} else {
				executingHooks = append(executingHooks, h)
			}
		}
		rel.Hooks = executingHooks
	}
	if len(r.Filters["name"]) != 0 {
		executingHooks = nil
		for _, h := range rel.Hooks {
			if contains(r.Filters["name"], h.Name) {
				executingHooks = append(executingHooks, h)
			} else {
				skippedHooks = append(skippedHooks, h)
			}
		}
		rel.Hooks = executingHooks
	}

	// Even on failure the skipped hooks are merged back so the stored release
	// keeps its complete hook list.
	if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
		rel.Hooks = append(skippedHooks, rel.Hooks...)
		r.cfg.Releases.Update(rel)
		return rel, err
	}

	rel.Hooks = append(skippedHooks, rel.Hooks...)
	return rel, r.cfg.Releases.Update(rel)
}
+
+// GetPodLogs will write the logs for all test pods in the given release into
+// the given writer. These can be immediately output to the user or captured for
+// other uses
+func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
+ client, err := r.cfg.KubernetesClientSet()
+ if err != nil {
+ return errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+ }
+
+ for _, h := range rel.Hooks {
+ for _, e := range h.Events {
+ if e == release.HookTest {
+ req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{})
+ logReader, err := req.Stream(context.Background())
+ if err != nil {
+ return errors.Wrapf(err, "unable to get pod logs for %s", h.Name)
+ }
+
+ fmt.Fprintf(out, "POD LOGS: %s\n", h.Name)
+ _, err = io.Copy(out, logReader)
+ fmt.Fprintln(out)
+ if err != nil {
+ return errors.Wrapf(err, "unable to write pod logs for %s", h.Name)
+ }
+ }
+ }
+ }
+ return nil
+}
+
// contains reports whether value occurs in arr.
func contains(arr []string, value string) bool {
	for i := range arr {
		if arr[i] == value {
			return true
		}
	}
	return false
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
new file mode 100644
index 000000000..63e83f3d9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "strings"
+
+ "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v3/pkg/releaseutil"
+)
+
+func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) {
+ for _, m := range manifests {
+ if m.Head.Metadata == nil || m.Head.Metadata.Annotations == nil || len(m.Head.Metadata.Annotations) == 0 {
+ remaining = append(remaining, m)
+ continue
+ }
+
+ resourcePolicyType, ok := m.Head.Metadata.Annotations[kube.ResourcePolicyAnno]
+ if !ok {
+ remaining = append(remaining, m)
+ continue
+ }
+
+ resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType))
+ if resourcePolicyType == kube.KeepPolicy {
+ keep = append(keep, m)
+ }
+
+ }
+ return keep, remaining
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/rollback.go b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
new file mode 100644
index 000000000..f3f958f3d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
@@ -0,0 +1,241 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/release"
+ helmtime "helm.sh/helm/v3/pkg/time"
+)
+
// Rollback is the action for rolling back to a given release.
//
// It provides the implementation of 'helm rollback'.
type Rollback struct {
	cfg *Configuration

	Version       int // revision to roll back to; 0 selects the revision before the current one
	Timeout       time.Duration
	Wait          bool
	WaitForJobs   bool
	DisableHooks  bool
	DryRun        bool
	Recreate      bool // will (if true) recreate pods after a rollback.
	Force         bool // will (if true) force resource upgrade through uninstall/recreate if needed
	CleanupOnFail bool
	MaxHistory    int // MaxHistory limits the maximum number of revisions saved per release
}
+
+// NewRollback creates a new Rollback object with the given configuration.
+func NewRollback(cfg *Configuration) *Rollback {
+ return &Rollback{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm rollback' against the given release.
+func (r *Rollback) Run(name string) error {
+ if err := r.cfg.KubeClient.IsReachable(); err != nil {
+ return err
+ }
+
+ r.cfg.Releases.MaxHistory = r.MaxHistory
+
+ r.cfg.Log("preparing rollback of %s", name)
+ currentRelease, targetRelease, err := r.prepareRollback(name)
+ if err != nil {
+ return err
+ }
+
+ if !r.DryRun {
+ r.cfg.Log("creating rolled back release for %s", name)
+ if err := r.cfg.Releases.Create(targetRelease); err != nil {
+ return err
+ }
+ }
+
+ r.cfg.Log("performing rollback of %s", name)
+ if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
+ return err
+ }
+
+ if !r.DryRun {
+ r.cfg.Log("updating status for rolled back release for %s", name)
+ if err := r.cfg.Releases.Update(targetRelease); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// prepareRollback finds the previous release and prepares a new release object with
// the previous release's configuration
func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) {
	if err := chartutil.ValidateReleaseName(name); err != nil {
		return nil, nil, errors.Errorf("prepareRollback: Release name is invalid: %s", name)
	}

	if r.Version < 0 {
		return nil, nil, errInvalidRevision
	}

	currentRelease, err := r.cfg.Releases.Last(name)
	if err != nil {
		return nil, nil, err
	}

	// Version 0 means "the revision just before the current one".
	previousVersion := r.Version
	if r.Version == 0 {
		previousVersion = currentRelease.Version - 1
	}

	r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion)

	previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
	if err != nil {
		return nil, nil, err
	}

	// Store a new release object with previous release's configuration
	targetRelease := &release.Release{
		Name:      name,
		Namespace: currentRelease.Namespace,
		Chart:     previousRelease.Chart,
		Config:    previousRelease.Config,
		Info: &release.Info{
			FirstDeployed: currentRelease.Info.FirstDeployed,
			LastDeployed:  helmtime.Now(),
			Status:        release.StatusPendingRollback,
			Notes:         previousRelease.Info.Notes,
			// Because we lose the reference to previous version elsewhere, we set the
			// message here, and only override it later if we experience failure.
			Description: fmt.Sprintf("Rollback to %d", previousVersion),
		},
		// The rollback is recorded as a brand-new revision on top of history.
		Version:  currentRelease.Version + 1,
		Manifest: previousRelease.Manifest,
		Hooks:    previousRelease.Hooks,
	}

	return currentRelease, targetRelease, nil
}
+
// performRollback applies targetRelease to the cluster: it builds both
// manifests, runs pre-rollback hooks, updates the resources, optionally
// recreates pods and waits for readiness, runs post-rollback hooks, and
// supersedes older deployed revisions. With DryRun set nothing is applied.
func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
	if r.DryRun {
		r.cfg.Log("dry run for %s", targetRelease.Name)
		return targetRelease, nil
	}

	current, err := r.cfg.KubeClient.Build(bytes.NewBufferString(currentRelease.Manifest), false)
	if err != nil {
		return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
	}
	target, err := r.cfg.KubeClient.Build(bytes.NewBufferString(targetRelease.Manifest), false)
	if err != nil {
		return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
	}

	// pre-rollback hooks
	if !r.DisableHooks {
		if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
			return targetRelease, err
		}
	} else {
		r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name)
	}

	results, err := r.cfg.KubeClient.Update(current, target, r.Force)

	if err != nil {
		// Record both revisions before (optionally) deleting whatever the
		// partial update managed to create.
		msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
		r.cfg.Log("warning: %s", msg)
		currentRelease.Info.Status = release.StatusSuperseded
		targetRelease.Info.Status = release.StatusFailed
		targetRelease.Info.Description = msg
		r.cfg.recordRelease(currentRelease)
		r.cfg.recordRelease(targetRelease)
		if r.CleanupOnFail {
			r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created))
			_, errs := r.cfg.KubeClient.Delete(results.Created)
			if errs != nil {
				var errorList []string
				for _, e := range errs {
					errorList = append(errorList, e.Error())
				}
				return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
			}
			r.cfg.Log("Resource cleanup complete")
		}
		return targetRelease, err
	}

	if r.Recreate {
		// NOTE: Because this is not critical for a release to succeed, we just
		// log if an error occurs and continue onward. If we ever introduce log
		// levels, we should make these error level logs so users are notified
		// that they'll need to go do the cleanup on their own
		if err := recreate(r.cfg, results.Updated); err != nil {
			r.cfg.Log(err.Error())
		}
	}

	if r.Wait {
		if r.WaitForJobs {
			if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil {
				targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
				r.cfg.recordRelease(currentRelease)
				r.cfg.recordRelease(targetRelease)
				return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
			}
		} else {
			if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil {
				targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
				r.cfg.recordRelease(currentRelease)
				r.cfg.recordRelease(targetRelease)
				return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
			}
		}
	}

	// post-rollback hooks
	if !r.DisableHooks {
		if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
			return targetRelease, err
		}
	}

	deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name)
	if err != nil && !strings.Contains(err.Error(), "has no deployed releases") {
		return nil, err
	}
	// Supersede all previous deployments, see issue #2941.
	for _, rel := range deployed {
		r.cfg.Log("superseding previous deployment %d", rel.Version)
		rel.Info.Status = release.StatusSuperseded
		r.cfg.recordRelease(rel)
	}

	targetRelease.Info.Status = release.StatusDeployed

	return targetRelease, nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/show.go b/vendor/helm.sh/helm/v3/pkg/action/show.go
new file mode 100644
index 000000000..1e3da3bdc
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/show.go
@@ -0,0 +1,144 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+ "k8s.io/cli-runtime/pkg/printers"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/chartutil"
+)
+
// ShowOutputFormat is the format of the output of `helm show`
type ShowOutputFormat string

const (
	// ShowAll is the format which shows all the information of a chart
	ShowAll ShowOutputFormat = "all"
	// ShowChart is the format which only shows the chart's definition
	ShowChart ShowOutputFormat = "chart"
	// ShowValues is the format which only shows the chart's values
	ShowValues ShowOutputFormat = "values"
	// ShowReadme is the format which only shows the chart's README
	ShowReadme ShowOutputFormat = "readme"
	// ShowCRDs is the format which only shows the chart's CRDs
	ShowCRDs ShowOutputFormat = "crds"
)

// readmeFileNames lists the file names recognized as a chart README; they are
// compared case-insensitively (see findReadme).
var readmeFileNames = []string{"readme.md", "readme.txt", "readme"}

// String returns the format name as a plain string.
func (o ShowOutputFormat) String() string {
	return string(o)
}
+
// Show is the action for checking a given release's information.
//
// It provides the implementation of 'helm show' and its respective subcommands.
type Show struct {
	ChartPathOptions
	Devel            bool
	OutputFormat     ShowOutputFormat // which section(s) of the chart Run emits
	JSONPathTemplate string           // optional JSONPath applied to the values output
	chart            *chart.Chart     // for testing
}
+
+// NewShow creates a new Show object with the given configuration.
+func NewShow(output ShowOutputFormat) *Show {
+ return &Show{
+ OutputFormat: output,
+ }
+}
+
// Run executes 'helm show' against the given chart path, returning the
// requested section(s) — chart metadata, values, README, and/or CRDs —
// rendered as a single string.
func (s *Show) Run(chartpath string) (string, error) {
	// s.chart may be pre-populated for testing; otherwise load it from disk.
	if s.chart == nil {
		chrt, err := loader.Load(chartpath)
		if err != nil {
			return "", err
		}
		s.chart = chrt
	}
	cf, err := yaml.Marshal(s.chart.Metadata)
	if err != nil {
		return "", err
	}

	var out strings.Builder
	if s.OutputFormat == ShowChart || s.OutputFormat == ShowAll {
		fmt.Fprintf(&out, "%s\n", cf)
	}

	if (s.OutputFormat == ShowValues || s.OutputFormat == ShowAll) && s.chart.Values != nil {
		if s.OutputFormat == ShowAll {
			fmt.Fprintln(&out, "---")
		}
		if s.JSONPathTemplate != "" {
			printer, err := printers.NewJSONPathPrinter(s.JSONPathTemplate)
			if err != nil {
				return "", errors.Wrapf(err, "error parsing jsonpath %s", s.JSONPathTemplate)
			}
			printer.Execute(&out, s.chart.Values)
		} else {
			// Emit the raw values file verbatim rather than re-marshaling the
			// parsed values.
			for _, f := range s.chart.Raw {
				if f.Name == chartutil.ValuesfileName {
					fmt.Fprintln(&out, string(f.Data))
				}
			}
		}
	}

	if s.OutputFormat == ShowReadme || s.OutputFormat == ShowAll {
		readme := findReadme(s.chart.Files)
		if readme != nil {
			if s.OutputFormat == ShowAll {
				fmt.Fprintln(&out, "---")
			}
			fmt.Fprintf(&out, "%s\n", readme.Data)
		}
	}

	if s.OutputFormat == ShowCRDs || s.OutputFormat == ShowAll {
		crds := s.chart.CRDObjects()
		if len(crds) > 0 {
			// Only add a document separator if the first CRD does not already
			// start with one.
			if s.OutputFormat == ShowAll && !bytes.HasPrefix(crds[0].File.Data, []byte("---")) {
				fmt.Fprintln(&out, "---")
			}
			for _, crd := range crds {
				fmt.Fprintf(&out, "%s\n", string(crd.File.Data))
			}
		}
	}
	return out.String(), nil
}
+
+func findReadme(files []*chart.File) (file *chart.File) {
+ for _, file := range files {
+ for _, n := range readmeFileNames {
+ if strings.EqualFold(file.Name, n) {
+ return file
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/status.go b/vendor/helm.sh/helm/v3/pkg/action/status.go
new file mode 100644
index 000000000..1c556e28d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/status.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "helm.sh/helm/v3/pkg/release"
+)
+
// Status is the action for checking the deployment status of releases.
//
// It provides the implementation of 'helm status'.
type Status struct {
	cfg *Configuration

	// Version selects which stored revision of the release to report on.
	Version int

	// If true, display description to output format,
	// only affect print type table.
	// TODO Helm 4: Remove this flag and output the description by default.
	ShowDescription bool
}
+
+// NewStatus creates a new Status object with the given configuration.
+func NewStatus(cfg *Configuration) *Status {
+ return &Status{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm status' against the given release.
+func (s *Status) Run(name string) (*release.Release, error) {
+ if err := s.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ return s.cfg.releaseContent(name, s.Version)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/uninstall.go b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
new file mode 100644
index 000000000..65993df4c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v3/pkg/releaseutil"
+ helmtime "helm.sh/helm/v3/pkg/time"
+)
+
// Uninstall is the action for uninstalling releases.
//
// It provides the implementation of 'helm uninstall'.
type Uninstall struct {
	cfg *Configuration

	DisableHooks bool // skip pre-delete and post-delete hooks
	DryRun       bool // only check that the release exists; delete nothing
	KeepHistory  bool // keep release records in storage instead of purging them
	Wait         bool // wait for deleted resources to actually disappear
	Timeout      time.Duration
	Description  string // overrides the default "Uninstallation complete" status message
}
+
+// NewUninstall creates a new Uninstall object with the given configuration.
+func NewUninstall(cfg *Configuration) *Uninstall {
+ return &Uninstall{
+ cfg: cfg,
+ }
+}
+
// Run uninstalls the given release: it runs the delete hooks, deletes the
// release's resources (honoring keep resource policies), and then either
// purges the release history or marks the final revision uninstalled.
func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) {
	if err := u.cfg.KubeClient.IsReachable(); err != nil {
		return nil, err
	}

	if u.DryRun {
		// In the dry run case, just see if the release exists
		r, err := u.cfg.releaseContent(name, 0)
		if err != nil {
			return &release.UninstallReleaseResponse{}, err
		}
		return &release.UninstallReleaseResponse{Release: r}, nil
	}

	if err := chartutil.ValidateReleaseName(name); err != nil {
		return nil, errors.Errorf("uninstall: Release name is invalid: %s", name)
	}

	rels, err := u.cfg.Releases.History(name)
	if err != nil {
		return nil, errors.Wrapf(err, "uninstall: Release not loaded: %s", name)
	}
	if len(rels) < 1 {
		return nil, errMissingRelease
	}

	// Operate on the most recent revision.
	releaseutil.SortByRevision(rels)
	rel := rels[len(rels)-1]

	// TODO: Are there any cases where we want to force a delete even if it's
	// already marked deleted?
	if rel.Info.Status == release.StatusUninstalled {
		if !u.KeepHistory {
			if err := u.purgeReleases(rels...); err != nil {
				return nil, errors.Wrap(err, "uninstall: Failed to purge the release")
			}
			return &release.UninstallReleaseResponse{Release: rel}, nil
		}
		return nil, errors.Errorf("the release named %q is already deleted", name)
	}

	u.cfg.Log("uninstall: Deleting %s", name)
	rel.Info.Status = release.StatusUninstalling
	rel.Info.Deleted = helmtime.Now()
	rel.Info.Description = "Deletion in progress (or silently failed)"
	res := &release.UninstallReleaseResponse{Release: rel}

	if !u.DisableHooks {
		if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
			return res, err
		}
	} else {
		u.cfg.Log("delete hooks disabled for %s", name)
	}

	// From here on out, the release is currently considered to be in StatusUninstalling
	// state.
	if err := u.cfg.Releases.Update(rel); err != nil {
		u.cfg.Log("uninstall: Failed to store updated release: %s", err)
	}

	deletedResources, kept, errs := u.deleteRelease(rel)

	// Surface any resources preserved by a keep resource policy to the caller.
	if kept != "" {
		kept = "These resources were kept due to the resource policy:\n" + kept
	}
	res.Info = kept

	if u.Wait {
		// WaitForDelete is only available on the extended kube interface.
		if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok {
			if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil {
				errs = append(errs, err)
			}
		}
	}

	if !u.DisableHooks {
		if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
			errs = append(errs, err)
		}
	}

	rel.Info.Status = release.StatusUninstalled
	if len(u.Description) > 0 {
		rel.Info.Description = u.Description
	} else {
		rel.Info.Description = "Uninstallation complete"
	}

	if !u.KeepHistory {
		u.cfg.Log("purge requested for %s", name)
		err := u.purgeReleases(rels...)
		if err != nil {
			errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
		}

		// Return the errors that occurred while deleting the release, if any
		if len(errs) > 0 {
			return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
		}

		return res, nil
	}

	if err := u.cfg.Releases.Update(rel); err != nil {
		u.cfg.Log("uninstall: Failed to store updated release: %s", err)
	}

	if len(errs) > 0 {
		return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
	}
	return res, nil
}
+
+func (u *Uninstall) purgeReleases(rels ...*release.Release) error {
+ for _, rel := range rels {
+ if _, err := u.cfg.Releases.Delete(rel.Name, rel.Version); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func joinErrors(errs []error) string {
+ es := make([]string, 0, len(errs))
+ for _, e := range errs {
+ es = append(es, e.Error())
+ }
+ return strings.Join(es, "; ")
+}
+
// deleteRelease deletes the release and returns list of delete resources and manifests that were kept in the deletion process
func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, string, []error) {
	var errs []error
	caps, err := u.cfg.getCapabilities()
	if err != nil {
		return nil, rel.Manifest, []error{errors.Wrap(err, "could not get apiVersions from Kubernetes")}
	}

	// Split the stored manifest into individual documents and order them for
	// uninstall.
	manifests := releaseutil.SplitManifests(rel.Manifest)
	_, files, err := releaseutil.SortManifests(manifests, caps.APIVersions, releaseutil.UninstallOrder)
	if err != nil {
		// We could instead just delete everything in no particular order.
		// FIXME: One way to delete at this point would be to try a label-based
		// deletion. The problem with this is that we could get a false positive
		// and delete something that was not legitimately part of this release.
		return nil, rel.Manifest, []error{errors.Wrap(err, "corrupted release record. You must manually delete the resources")}
	}

	// Resources annotated with the keep policy are reported, not deleted.
	filesToKeep, filesToDelete := filterManifestsToKeep(files)
	var kept string
	for _, f := range filesToKeep {
		kept += "[" + f.Head.Kind + "] " + f.Head.Metadata.Name + "\n"
	}

	// Re-join the remaining documents into a single multi-document YAML stream.
	var builder strings.Builder
	for _, file := range filesToDelete {
		builder.WriteString("\n---\n" + file.Content)
	}

	resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false)
	if err != nil {
		return nil, "", []error{errors.Wrap(err, "unable to build kubernetes objects for delete")}
	}
	if len(resources) > 0 {
		_, errs = u.cfg.KubeClient.Delete(resources)
	}
	return resources, kept, errs
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
new file mode 100644
index 000000000..27c1f01e7
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
@@ -0,0 +1,570 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v3/pkg/postrender"
+ "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v3/pkg/releaseutil"
+ "helm.sh/helm/v3/pkg/storage/driver"
+)
+
// Upgrade is the action for upgrading releases.
//
// It provides the implementation of 'helm upgrade'.
type Upgrade struct {
	cfg *Configuration

	ChartPathOptions

	// Install is a purely informative flag that indicates whether this upgrade was done in "install" mode.
	//
	// Applications may use this to determine whether this Upgrade operation was done as part of a
	// pure upgrade (Upgrade.Install == false) or as part of an install-or-upgrade operation
	// (Upgrade.Install == true).
	//
	// Setting this to `true` will NOT cause `Upgrade` to perform an install if the release does not exist.
	// That process must be handled by creating an Install action directly. See cmd/upgrade.go for an
	// example of how this flag is used.
	Install bool
	// Devel indicates that the operation is done in devel mode.
	Devel bool
	// Namespace is the namespace in which this operation should be performed.
	Namespace string
	// SkipCRDs skips installing CRDs when install flag is enabled during upgrade.
	SkipCRDs bool
	// Timeout is the timeout for this operation.
	Timeout time.Duration
	// Wait determines whether the wait operation should be performed after the upgrade is requested.
	Wait bool
	// WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
	WaitForJobs bool
	// DisableHooks disables hook processing if set to true.
	DisableHooks bool
	// DryRun controls whether the operation is prepared, but not executed.
	// If `true`, the upgrade is prepared but not performed.
	DryRun bool
	// Force will, if set to `true`, ignore certain warnings and perform the upgrade anyway.
	//
	// This should be used with caution.
	Force bool
	// ResetValues will reset the values to the chart's built-ins rather than merging with existing.
	ResetValues bool
	// ReuseValues will re-use the user's last supplied values.
	ReuseValues bool
	// Recreate will (if true) recreate pods after a rollback.
	Recreate bool
	// MaxHistory limits the maximum number of revisions saved per release.
	MaxHistory int
	// Atomic, if true, will roll back on failure.
	Atomic bool
	// CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
	CleanupOnFail bool
	// SubNotes determines whether sub-notes are rendered in the chart.
	SubNotes bool
	// Description is the description of this operation.
	Description string
	// PostRenderer is an optional post-renderer.
	//
	// If this is non-nil, then after templates are rendered, they will be sent to the
	// post renderer before sending to the Kubernetes API server.
	PostRenderer postrender.PostRenderer
	// DisableOpenAPIValidation controls whether OpenAPI validation is enforced.
	DisableOpenAPIValidation bool
	// DependencyUpdate indicates that missing chart dependencies should be fetched.
	DependencyUpdate bool
	// Lock guards against race conditions when the process receives a SIGTERM
	// while an upgrade (and possibly its rollback) is still in flight.
	Lock sync.Mutex
}
+
// resultMessage carries the outcome of an upgrade attempt — the resulting
// release and any error — from the worker goroutines back to performUpgrade.
type resultMessage struct {
	r *release.Release
	e error
}

// NewUpgrade creates a new Upgrade object with the given configuration.
func NewUpgrade(cfg *Configuration) *Upgrade {
	return &Upgrade{
		cfg: cfg,
	}
}
+
// Run executes the upgrade on the given release using a background context.
func (u *Upgrade) Run(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
	ctx := context.Background()
	return u.RunWithContext(ctx, name, chart, vals)
}

// RunWithContext executes the upgrade on the given release, honoring
// cancellation of the supplied context.
func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
	if err := u.cfg.KubeClient.IsReachable(); err != nil {
		return nil, err
	}

	// Make sure if Atomic is set, that wait is set as well. This makes it so
	// the user doesn't have to specify both
	u.Wait = u.Wait || u.Atomic

	if err := chartutil.ValidateReleaseName(name); err != nil {
		return nil, errors.Errorf("release name is invalid: %s", name)
	}
	u.cfg.Log("preparing upgrade for %s", name)
	currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
	if err != nil {
		return nil, err
	}

	u.cfg.Releases.MaxHistory = u.MaxHistory

	u.cfg.Log("performing update for %s", name)
	res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
	if err != nil {
		return res, err
	}

	// Persist the final (deployed/failed) status; skipped on dry runs since
	// nothing was actually changed in the cluster.
	if !u.DryRun {
		u.cfg.Log("updating status for upgraded release for %s", name)
		if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
			return res, err
		}
	}

	return res, nil
}
+
// prepareUpgrade builds an upgraded release for an upgrade operation.
// It returns the release being upgraded from (current) and the new,
// fully rendered release record (status pending-upgrade).
func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, error) {
	if chart == nil {
		return nil, nil, errMissingChart
	}

	// finds the last non-deleted release with the given name
	lastRelease, err := u.cfg.Releases.Last(name)
	if err != nil {
		// to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist
		if errors.Is(err, driver.ErrReleaseNotFound) {
			return nil, nil, driver.NewErrNoDeployedReleases(name)
		}
		return nil, nil, err
	}

	// Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock.
	if lastRelease.Info.Status.IsPending() {
		return nil, nil, errPending
	}

	var currentRelease *release.Release
	if lastRelease.Info.Status == release.StatusDeployed {
		// no need to retrieve the last deployed release from storage as the last release is deployed
		currentRelease = lastRelease
	} else {
		// finds the deployed release with the given name
		currentRelease, err = u.cfg.Releases.Deployed(name)
		if err != nil {
			// Fall back to the last (failed/superseded) release so an upgrade
			// can still proceed when no revision is currently deployed.
			if errors.Is(err, driver.ErrNoDeployedReleases) &&
				(lastRelease.Info.Status == release.StatusFailed || lastRelease.Info.Status == release.StatusSuperseded) {
				currentRelease = lastRelease
			} else {
				return nil, nil, err
			}
		}
	}

	// determine if values will be reused
	vals, err = u.reuseValues(chart, currentRelease, vals)
	if err != nil {
		return nil, nil, err
	}

	if err := chartutil.ProcessDependencies(chart, vals); err != nil {
		return nil, nil, err
	}

	// Increment revision count. This is passed to templates, and also stored on
	// the release object.
	revision := lastRelease.Version + 1

	options := chartutil.ReleaseOptions{
		Name:      name,
		Namespace: currentRelease.Namespace,
		Revision:  revision,
		IsUpgrade: true,
	}

	caps, err := u.cfg.getCapabilities()
	if err != nil {
		return nil, nil, err
	}
	valuesToRender, err := chartutil.ToRenderValues(chart, vals, options, caps)
	if err != nil {
		return nil, nil, err
	}

	// Render templates into hooks, the combined manifest, and release notes.
	hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, u.DryRun)
	if err != nil {
		return nil, nil, err
	}

	// Store an upgraded release.
	upgradedRelease := &release.Release{
		Name:      name,
		Namespace: currentRelease.Namespace,
		Chart:     chart,
		Config:    vals,
		Info: &release.Info{
			FirstDeployed: currentRelease.Info.FirstDeployed,
			LastDeployed:  Timestamper(),
			Status:        release.StatusPendingUpgrade,
			Description:   "Preparing upgrade", // This should be overwritten later.
		},
		Version:  revision,
		Manifest: manifestDoc.String(),
		Hooks:    hooks,
	}

	if len(notesTxt) > 0 {
		upgradedRelease.Info.Notes = notesTxt
	}
	err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation)
	return currentRelease, upgradedRelease, err
}
+
+func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release) (*release.Release, error) {
+ current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false)
+ if err != nil {
+ // Checking for removed Kubernetes API error so can provide a more informative error message to the user
+ // Ref: https://github.com/helm/helm/issues/7219
+ if strings.Contains(err.Error(), "unable to recognize \"\": no matches for kind") {
+ return upgradedRelease, errors.Wrap(err, "current release manifest contains removed kubernetes api(s) for this "+
+ "kubernetes version and it is therefore unable to build the kubernetes "+
+ "objects for performing the diff. error from kubernetes")
+ }
+ return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
+ }
+ target, err := u.cfg.KubeClient.Build(bytes.NewBufferString(upgradedRelease.Manifest), !u.DisableOpenAPIValidation)
+ if err != nil {
+ return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
+ }
+
+ // It is safe to use force only on target because these are resources currently rendered by the chart.
+ err = target.Visit(setMetadataVisitor(upgradedRelease.Name, upgradedRelease.Namespace, true))
+ if err != nil {
+ return upgradedRelease, err
+ }
+
+ // Do a basic diff using gvk + name to figure out what new resources are being created so we can validate they don't already exist
+ existingResources := make(map[string]bool)
+ for _, r := range current {
+ existingResources[objectKey(r)] = true
+ }
+
+ var toBeCreated kube.ResourceList
+ for _, r := range target {
+ if !existingResources[objectKey(r)] {
+ toBeCreated = append(toBeCreated, r)
+ }
+ }
+
+ toBeUpdated, err := existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
+ if err != nil {
+ return nil, errors.Wrap(err, "rendered manifests contain a resource that already exists. Unable to continue with update")
+ }
+
+ toBeUpdated.Visit(func(r *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+ current.Append(r)
+ return nil
+ })
+
+ if u.DryRun {
+ u.cfg.Log("dry run for %s", upgradedRelease.Name)
+ if len(u.Description) > 0 {
+ upgradedRelease.Info.Description = u.Description
+ } else {
+ upgradedRelease.Info.Description = "Dry run complete"
+ }
+ return upgradedRelease, nil
+ }
+
+ u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name)
+ if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
+ return nil, err
+ }
+ rChan := make(chan resultMessage)
+ ctxChan := make(chan resultMessage)
+ doneChan := make(chan interface{})
+ go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease)
+ go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease)
+ select {
+ case result := <-rChan:
+ doneChan <- true
+ return result.r, result.e
+ case result := <-ctxChan:
+ return result.r, result.e
+ }
+}
+
+// Function used to lock the Mutex, this is important for the case when the atomic flag is set.
+// In that case the upgrade will finish before the rollback is finished so it is necessary to wait for the rollback to finish.
+// The rollback will be trigger by the function failRelease
+func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Release, created kube.ResourceList, err error) {
+ u.Lock.Lock()
+ if err != nil {
+ rel, err = u.failRelease(rel, created, err)
+ }
+ c <- resultMessage{r: rel, e: err}
+ u.Lock.Unlock()
+}
+
+// Setup listener for SIGINT and SIGTERM
+func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c chan<- resultMessage, upgradedRelease *release.Release) {
+ go func() {
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+
+ // when the atomic flag is set the ongoing release finish first and doesn't give time for the rollback happens.
+ u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, err)
+ case <-done:
+ return
+ }
+ }()
+}
// releasingUpgrade performs the actual upgrade sequence — pre-upgrade
// hooks, the kube Update, optional pod recreation, optional waiting, and
// post-upgrade hooks — reporting its final outcome through c.
func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release) {
	// pre-upgrade hooks

	if !u.DisableHooks {
		if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
			u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
			return
		}
	} else {
		u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name)
	}

	results, err := u.cfg.KubeClient.Update(current, target, u.Force)
	if err != nil {
		// NOTE(review): if Update can return a nil results alongside an
		// error, results.Created below would panic — confirm the kube
		// client's contract.
		u.cfg.recordRelease(originalRelease)
		u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
		return
	}

	if u.Recreate {
		// NOTE: Because this is not critical for a release to succeed, we just
		// log if an error occurs and continue onward. If we ever introduce log
		// levels, we should make these error level logs so users are notified
		// that they'll need to go do the cleanup on their own
		if err := recreate(u.cfg, results.Updated); err != nil {
			u.cfg.Log(err.Error())
		}
	}

	// Optionally block until the new resources are ready (with or without
	// waiting for Jobs to complete).
	if u.Wait {
		if u.WaitForJobs {
			if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil {
				u.cfg.recordRelease(originalRelease)
				u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
				return
			}
		} else {
			if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil {
				u.cfg.recordRelease(originalRelease)
				u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
				return
			}
		}
	}

	// post-upgrade hooks
	if !u.DisableHooks {
		if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
			u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
			return
		}
	}

	// Supersede the old release and mark the new one deployed.
	originalRelease.Info.Status = release.StatusSuperseded
	u.cfg.recordRelease(originalRelease)

	upgradedRelease.Info.Status = release.StatusDeployed
	if len(u.Description) > 0 {
		upgradedRelease.Info.Description = u.Description
	} else {
		upgradedRelease.Info.Description = "Upgrade complete"
	}
	u.reportToPerformUpgrade(c, upgradedRelease, nil, nil)
}
+
// failRelease marks rel as failed, optionally deletes resources created by
// the failed update (CleanupOnFail), and — when Atomic is set — rolls back
// to the most recent successful revision. It returns the release together
// with an error describing the failure (and any cleanup/rollback problems).
func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
	msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
	u.cfg.Log("warning: %s", msg)

	rel.Info.Status = release.StatusFailed
	rel.Info.Description = msg
	u.cfg.recordRelease(rel)
	if u.CleanupOnFail && len(created) > 0 {
		u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created))
		_, errs := u.cfg.KubeClient.Delete(created)
		if errs != nil {
			// NOTE(review): this inline join duplicates the joinErrors helper
			// in this package; consider consolidating.
			var errorList []string
			for _, e := range errs {
				errorList = append(errorList, e.Error())
			}
			return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
		}
		u.cfg.Log("Resource cleanup complete")
	}
	if u.Atomic {
		u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release")

		// As a protection, get the last successful release before rollback.
		// If there are no successful releases, bail out
		hist := NewHistory(u.cfg)
		fullHistory, herr := hist.Run(rel.Name)
		if herr != nil {
			return rel, errors.Wrapf(herr, "an error occurred while finding last successful release. original upgrade error: %s", err)
		}

		// There isn't a way to tell if a previous release was successful, but
		// generally failed releases do not get superseded unless the next
		// release is successful, so this should be relatively safe
		filteredHistory := releaseutil.FilterFunc(func(r *release.Release) bool {
			return r.Info.Status == release.StatusSuperseded || r.Info.Status == release.StatusDeployed
		}).Filter(fullHistory)
		if len(filteredHistory) == 0 {
			return rel, errors.Wrap(err, "unable to find a previously successful release when attempting to rollback. original upgrade error")
		}

		// Sort newest-first so [0] is the latest successful revision.
		releaseutil.Reverse(filteredHistory, releaseutil.SortByRevision)

		// Roll back to that revision, mirroring the upgrade's own settings.
		rollin := NewRollback(u.cfg)
		rollin.Version = filteredHistory[0].Version
		rollin.Wait = true
		rollin.WaitForJobs = u.WaitForJobs
		rollin.DisableHooks = u.DisableHooks
		rollin.Recreate = u.Recreate
		rollin.Force = u.Force
		rollin.Timeout = u.Timeout
		if rollErr := rollin.Run(rel.Name); rollErr != nil {
			return rel, errors.Wrapf(rollErr, "an error occurred while rolling back the release. original upgrade error: %s", err)
		}
		return rel, errors.Wrapf(err, "release %s failed, and has been rolled back due to atomic being set", rel.Name)
	}

	return rel, err
}
+
// reuseValues copies values from the current release to a new release if the
// new release does not have any values.
//
// If the request already has values, or if there are no values in the current
// release, this does nothing.
//
// This is skipped if the u.ResetValues flag is set, in which case the
// request values are not altered.
func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
	if u.ResetValues {
		// If ResetValues is set, we completely ignore current.Config.
		u.cfg.Log("resetting values to the chart's original version")
		return newVals, nil
	}

	// If the ReuseValues flag is set, we always copy the old values over the new config's values.
	if u.ReuseValues {
		u.cfg.Log("reusing the old release's values")

		// We have to regenerate the old coalesced values:
		oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
		if err != nil {
			return nil, errors.Wrap(err, "failed to rebuild old values")
		}

		// New values win over the previous release's user-supplied config.
		newVals = chartutil.CoalesceTables(newVals, current.Config)

		// Replace the chart defaults with the old fully-coalesced values so
		// the previous effective configuration is carried forward.
		chart.Values = oldVals

		return newVals, nil
	}

	// No new values supplied: fall back to the previous release's config.
	if len(newVals) == 0 && len(current.Config) > 0 {
		u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version)
		newVals = current.Config
	}
	return newVals, nil
}
+
// validateManifest asks the kube client to parse the rendered manifest and,
// when openAPIValidation is true, validate it against the cluster's OpenAPI
// schema. Only the build error is of interest; the resources are discarded.
func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool) error {
	_, err := c.Build(bytes.NewReader(manifest), openAPIValidation)
	return err
}
+
// recreate captures all the logic for recreating pods for both upgrade and
// rollback. If we end up refactoring rollback to use upgrade, this can just be
// made an unexported method on the upgrade action.
func recreate(cfg *Configuration, resources kube.ResourceList) error {
	for _, res := range resources {
		versioned := kube.AsVersioned(res)
		selector, err := kube.SelectorsForObject(versioned)
		if err != nil {
			// If no selector is returned, it means this object is
			// definitely not a pod, so continue onward
			continue
		}

		client, err := cfg.KubernetesClientSet()
		if err != nil {
			return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
		}

		// Find the pods this workload object manages via its label selector.
		pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
			LabelSelector: selector.String(),
		})
		if err != nil {
			return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
		}

		// Restart pods
		for _, pod := range pods.Items {
			// Delete each pod to get it restarted with the changed spec.
			// The UID precondition avoids deleting a pod that was already
			// replaced between List and Delete.
			if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, *metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
				return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
			}
		}
	}
	return nil
}
+
// objectKey builds a unique identity string for a resource from its
// group/version, kind, namespace and name. It is used to diff the current
// and target resource sets in performUpgrade.
func objectKey(r *resource.Info) string {
	gvk := r.Object.GetObjectKind().GroupVersionKind()
	return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/validate.go b/vendor/helm.sh/helm/v3/pkg/action/validate.go
new file mode 100644
index 000000000..6e074f78b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/validate.go
@@ -0,0 +1,184 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v3/pkg/kube"
+)
+
// accessor provides generic read/write access to object metadata (labels
// and annotations) for arbitrary runtime.Objects.
var accessor = meta.NewAccessor()

const (
	// appManagedByLabel marks which tool manages a resource.
	appManagedByLabel = "app.kubernetes.io/managed-by"
	// appManagedByHelm is the managed-by value identifying Helm.
	appManagedByHelm = "Helm"
	// helmReleaseNameAnnotation records which release owns a resource.
	helmReleaseNameAnnotation = "meta.helm.sh/release-name"
	// helmReleaseNamespaceAnnotation records the owning release's namespace.
	helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace"
)
+
// existingResourceConflict checks each resource against the live cluster.
// Resources that do not exist yet are fine (they will be created). Resources
// that already exist are only acceptable when they carry Helm ownership
// metadata matching this release; those are returned as requiring an update
// (adoption). Any other existing resource is a conflict and yields an error.
func existingResourceConflict(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, error) {
	var requireUpdate kube.ResourceList

	err := resources.Visit(func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}

		// Probe the cluster for the object; NotFound means no conflict.
		helper := resource.NewHelper(info.Client, info.Mapping)
		existing, err := helper.Get(info.Namespace, info.Name)
		if err != nil {
			if apierrors.IsNotFound(err) {
				return nil
			}
			return errors.Wrap(err, "could not get information about the resource")
		}

		// Allow adoption of the resource if it is managed by Helm and is annotated with correct release name and namespace.
		if err := checkOwnership(existing, releaseName, releaseNamespace); err != nil {
			return fmt.Errorf("%s exists and cannot be imported into the current release: %s", resourceString(info), err)
		}

		requireUpdate.Append(info)
		return nil
	})

	return requireUpdate, err
}
+
// checkOwnership verifies that obj carries the Helm ownership metadata for
// the given release: the managed-by label set to "Helm" and the release
// name/namespace annotations matching releaseName/releaseNamespace. All
// violations are collected into a single wrapped error.
func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) error {
	lbls, err := accessor.Labels(obj)
	if err != nil {
		return err
	}
	annos, err := accessor.Annotations(obj)
	if err != nil {
		return err
	}

	var errs []error
	if err := requireValue(lbls, appManagedByLabel, appManagedByHelm); err != nil {
		errs = append(errs, fmt.Errorf("label validation error: %s", err))
	}
	if err := requireValue(annos, helmReleaseNameAnnotation, releaseName); err != nil {
		errs = append(errs, fmt.Errorf("annotation validation error: %s", err))
	}
	if err := requireValue(annos, helmReleaseNamespaceAnnotation, releaseNamespace); err != nil {
		errs = append(errs, fmt.Errorf("annotation validation error: %s", err))
	}

	// Chain every validation failure onto one "invalid ownership metadata"
	// error so callers see all problems at once.
	if len(errs) > 0 {
		err := errors.New("invalid ownership metadata")
		for _, e := range errs {
			err = fmt.Errorf("%w; %s", err, e)
		}
		return err
	}

	return nil
}
+
// requireValue checks that meta contains key k with exactly the value v.
// It returns nil when the requirement is satisfied and a descriptive error
// when the key is absent or holds a different value.
func requireValue(meta map[string]string, k, v string) error {
	switch actual, ok := meta[k]; {
	case !ok:
		return fmt.Errorf("missing key %q: must be set to %q", k, v)
	case actual != v:
		return fmt.Errorf("key %q must equal %q: current value is %q", k, v, actual)
	default:
		return nil
	}
}
+
// setMetadataVisitor adds release tracking metadata to all resources. If force is enabled, existing
// ownership metadata will be overwritten. Otherwise an error will be returned if any resource has an
// existing and conflicting value for the managed by label or Helm release/namespace annotations.
func setMetadataVisitor(releaseName, releaseNamespace string, force bool) resource.VisitorFunc {
	return func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}

		// Without force, only resources already owned by this release (or
		// unowned) may be claimed.
		if !force {
			if err := checkOwnership(info.Object, releaseName, releaseNamespace); err != nil {
				return fmt.Errorf("%s cannot be owned: %s", resourceString(info), err)
			}
		}

		// Stamp the managed-by label ...
		if err := mergeLabels(info.Object, map[string]string{
			appManagedByLabel: appManagedByHelm,
		}); err != nil {
			return fmt.Errorf(
				"%s labels could not be updated: %s",
				resourceString(info), err,
			)
		}

		// ... and the release name/namespace annotations.
		if err := mergeAnnotations(info.Object, map[string]string{
			helmReleaseNameAnnotation:      releaseName,
			helmReleaseNamespaceAnnotation: releaseNamespace,
		}); err != nil {
			return fmt.Errorf(
				"%s annotations could not be updated: %s",
				resourceString(info), err,
			)
		}

		return nil
	}
}
+
// resourceString renders a short human-readable identifier for a resource,
// e.g.: Deployment "web" in namespace "default".
func resourceString(info *resource.Info) string {
	_, k := info.Mapping.GroupVersionKind.ToAPIVersionAndKind()
	return fmt.Sprintf(
		"%s %q in namespace %q",
		k, info.Name, info.Namespace,
	)
}

// mergeLabels replaces obj's labels with the union of its current labels
// and the given ones; values in labels win on key collisions.
func mergeLabels(obj runtime.Object, labels map[string]string) error {
	current, err := accessor.Labels(obj)
	if err != nil {
		return err
	}
	return accessor.SetLabels(obj, mergeStrStrMaps(current, labels))
}

// mergeAnnotations replaces obj's annotations with the union of its current
// annotations and the given ones; values in annotations win on collisions.
func mergeAnnotations(obj runtime.Object, annotations map[string]string) error {
	current, err := accessor.Annotations(obj)
	if err != nil {
		return err
	}
	return accessor.SetAnnotations(obj, mergeStrStrMaps(current, annotations))
}
+
// mergeStrStrMaps combines two string maps into a freshly allocated map,
// always taking the value from desired when a key appears in both. Neither
// input map is modified; nil inputs are treated as empty.
func mergeStrStrMaps(current, desired map[string]string) map[string]string {
	merged := make(map[string]string, len(current)+len(desired))
	for _, src := range []map[string]string{current, desired} {
		for k, v := range src {
			merged[k] = v
		}
	}
	return merged
}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/verify.go b/vendor/helm.sh/helm/v3/pkg/action/verify.go
new file mode 100644
index 000000000..f36239496
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/verify.go
@@ -0,0 +1,59 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "strings"
+
+ "helm.sh/helm/v3/pkg/downloader"
+)
+
// Verify is the action for building a given chart's Verify tree.
//
// It provides the implementation of 'helm verify'.
type Verify struct {
	// Keyring is the path to the keyring used for provenance verification.
	Keyring string
	// Out collects the human-readable verification report produced by Run.
	Out string
}

// NewVerify creates a new Verify object with the given configuration.
func NewVerify() *Verify {
	return &Verify{}
}

// Run executes 'helm verify'.
func (v *Verify) Run(chartfile string) error {
	var out strings.Builder
	// Verify the chart archive against its provenance file using the keyring.
	p, err := downloader.VerifyChart(chartfile, v.Keyring)
	if err != nil {
		return err
	}

	for name := range p.SignedBy.Identities {
		fmt.Fprintf(&out, "Signed by: %v\n", name)
	}
	fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", p.SignedBy.PrimaryKey.Fingerprint)
	fmt.Fprintf(&out, "Chart Hash Verified: %s\n", p.FileHash)

	// TODO(mattfarina): The output is set as a property rather than returned
	// to maintain the Go API. In Helm v4 this function should return the out
	// and the property on the struct can be removed.
	v.Out = out.String()

	return nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/chart.go b/vendor/helm.sh/helm/v3/pkg/chart/chart.go
new file mode 100644
index 000000000..a3bed63a3
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/chart.go
@@ -0,0 +1,173 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
// APIVersionV1 is the API version number for version 1 (Helm 2-era charts).
const APIVersionV1 = "v1"

// APIVersionV2 is the API version number for version 2 (Helm 3 charts).
const APIVersionV2 = "v2"

// aliasNameFormat defines the characters that are legal in an alias name:
// letters, digits, underscore and dash.
var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
// Chart is a helm package that contains metadata, a default config, zero or more
// optionally parameterizable templates, and zero or more charts (dependencies).
type Chart struct {
	// Raw contains the raw contents of the files originally contained in the chart archive.
	//
	// This should not be used except in special cases like `helm show values`,
	// where we want to display the raw values, comments and all.
	Raw []*File `json:"-"`
	// Metadata is the contents of the Chartfile.
	Metadata *Metadata `json:"metadata"`
	// Lock is the contents of Chart.lock.
	Lock *Lock `json:"lock"`
	// Templates for this chart.
	Templates []*File `json:"templates"`
	// Values are default config for this chart.
	Values map[string]interface{} `json:"values"`
	// Schema is an optional JSON schema for imposing structure on Values
	Schema []byte `json:"schema"`
	// Files are miscellaneous files in a chart archive,
	// e.g. README, LICENSE, etc.
	Files []*File `json:"files"`

	// parent is the enclosing chart when this chart is a subchart (nil for
	// the root chart); dependencies are the subcharts this chart depends on.
	// Both are maintained by AddDependency/SetDependencies.
	parent       *Chart
	dependencies []*Chart
}
+
// CRD describes a CustomResourceDefinition file found in the crds/
// directory of a chart or one of its subcharts.
type CRD struct {
	// Name is the File.Name for the crd file
	Name string
	// Filename is the File obj Name including (sub-)chart.ChartFullPath
	Filename string
	// File is the File obj for the crd
	File *File
}
+
// SetDependencies replaces the chart dependencies.
func (ch *Chart) SetDependencies(charts ...*Chart) {
	ch.dependencies = nil
	ch.AddDependency(charts...)
}

// Name returns the name of the chart, or "" when no metadata is attached.
func (ch *Chart) Name() string {
	if ch.Metadata == nil {
		return ""
	}
	return ch.Metadata.Name
}

// AddDependency appends charts to this chart's dependencies, marking this
// chart as their parent (i.e. the added charts become subcharts).
func (ch *Chart) AddDependency(charts ...*Chart) {
	for i, x := range charts {
		charts[i].parent = ch
		ch.dependencies = append(ch.dependencies, x)
	}
}

// Root finds the root chart by walking up the parent chain.
func (ch *Chart) Root() *Chart {
	if ch.IsRoot() {
		return ch
	}
	return ch.Parent().Root()
}

// Dependencies are the charts that this chart depends on.
func (ch *Chart) Dependencies() []*Chart { return ch.dependencies }

// IsRoot determines if the chart is the root chart (it has no parent).
func (ch *Chart) IsRoot() bool { return ch.parent == nil }

// Parent returns a subchart's parent chart (nil for the root chart).
func (ch *Chart) Parent() *Chart { return ch.parent }
+
// ChartPath returns the full path to this chart in dot notation,
// e.g. "rootchart.subchart".
func (ch *Chart) ChartPath() string {
	if !ch.IsRoot() {
		return ch.Parent().ChartPath() + "." + ch.Name()
	}
	return ch.Name()
}

// ChartFullPath returns the full path to this chart in filesystem-style
// notation, e.g. "rootchart/charts/subchart".
func (ch *Chart) ChartFullPath() string {
	if !ch.IsRoot() {
		return ch.Parent().ChartFullPath() + "/charts/" + ch.Name()
	}
	return ch.Name()
}

// Validate validates the metadata.
// NOTE(review): this delegates to ch.Metadata.Validate() without a nil
// check — confirm callers always populate Metadata first.
func (ch *Chart) Validate() error {
	return ch.Metadata.Validate()
}

// AppVersion returns the appversion of the chart, or "" when no metadata
// is attached.
func (ch *Chart) AppVersion() string {
	if ch.Metadata == nil {
		return ""
	}
	return ch.Metadata.AppVersion
}
+
// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart.
// Results include CRDs from all dependencies, recursively.
// Deprecated: use CRDObjects()
func (ch *Chart) CRDs() []*File {
	files := []*File{}
	// Find all resources in the crds/ directory
	for _, f := range ch.Files {
		if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
			files = append(files, f)
		}
	}
	// Get CRDs from dependencies, too.
	for _, dep := range ch.Dependencies() {
		files = append(files, dep.CRDs()...)
	}
	return files
}

// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts
func (ch *Chart) CRDObjects() []CRD {
	crds := []CRD{}
	// Find all resources in the crds/ directory
	for _, f := range ch.Files {
		if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
			// Filename is prefixed with the owning (sub-)chart's full path so
			// the origin of each CRD stays identifiable.
			mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f}
			crds = append(crds, mycrd)
		}
	}
	// Get CRDs from dependencies, too.
	for _, dep := range ch.Dependencies() {
		crds = append(crds, dep.CRDObjects()...)
	}
	return crds
}
+
// hasManifestExtension reports whether fname looks like a Kubernetes
// manifest based on its file extension (.yaml, .yml or .json, compared
// case-insensitively).
func hasManifestExtension(fname string) bool {
	switch strings.ToLower(filepath.Ext(fname)) {
	case ".yaml", ".yml", ".json":
		return true
	default:
		return false
	}
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/dependency.go b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go
new file mode 100644
index 000000000..b2819f373
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go
@@ -0,0 +1,79 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import "time"
+
// Dependency describes a chart upon which another chart depends.
//
// Dependencies can be used to express developer intent, or to capture the state
// of a chart.
type Dependency struct {
	// Name is the name of the dependency.
	//
	// This must match the name in the dependency's Chart.yaml.
	Name string `json:"name"`
	// Version is the version (range) of this chart.
	//
	// A lock file will always produce a single version, while a dependency
	// may contain a semantic version range.
	Version string `json:"version,omitempty"`
	// The URL to the repository.
	//
	// Appending `index.yaml` to this string should result in a URL that can be
	// used to fetch the repository index.
	Repository string `json:"repository"`
	// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
	Condition string `json:"condition,omitempty"`
	// Tags can be used to group charts for enabling/disabling together
	Tags []string `json:"tags,omitempty"`
	// Enabled bool determines if chart should be loaded
	Enabled bool `json:"enabled,omitempty"`
	// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
	// string or pair of child/parent sublist items.
	ImportValues []interface{} `json:"import-values,omitempty"`
	// Alias is a usable alias to be used for the chart in place of Name.
	Alias string `json:"alias,omitempty"`
}
+
// Validate checks for common problems with the dependency datastructure in
// the chart. This check must be done at load time before the dependency's charts are
// loaded.
//
// As a side effect, Name, Version, Repository, Condition, and Tags are
// sanitized in place (whitespace normalized, non-printable characters
// removed). An error is returned only when a non-empty Alias contains
// characters outside the allowed alias name format.
func (d *Dependency) Validate() error {
	d.Name = sanitizeString(d.Name)
	d.Version = sanitizeString(d.Version)
	d.Repository = sanitizeString(d.Repository)
	d.Condition = sanitizeString(d.Condition)
	for i := range d.Tags {
		d.Tags[i] = sanitizeString(d.Tags[i])
	}
	// aliasNameFormat is a package-level regexp declared elsewhere in this package.
	if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) {
		return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name)
	}
	return nil
}
+
// Lock is a lock file for dependencies.
//
// It represents the state that the dependencies should be in. It is the
// in-memory form of a Chart.lock (or legacy requirements.lock) file.
type Lock struct {
	// Generated is the date the lock file was last generated.
	Generated time.Time `json:"generated"`
	// Digest is a hash of the dependencies in Chart.yaml.
	Digest string `json:"digest"`
	// Dependencies is the list of dependencies that this lock file has locked.
	Dependencies []*Dependency `json:"dependencies"`
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/errors.go b/vendor/helm.sh/helm/v3/pkg/chart/errors.go
new file mode 100644
index 000000000..2fad5f370
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/errors.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import "fmt"
+
// ValidationError represents a data validation error.
type ValidationError string

// Error implements the error interface, prefixing the message with
// "validation: ".
func (v ValidationError) Error() string {
	const prefix = "validation: "
	return prefix + string(v)
}

// ValidationErrorf takes a message and formatting options and creates a ValidationError
func ValidationErrorf(msg string, args ...interface{}) ValidationError {
	formatted := fmt.Sprintf(msg, args...)
	return ValidationError(formatted)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/file.go b/vendor/helm.sh/helm/v3/pkg/chart/file.go
new file mode 100644
index 000000000..9dd7c08d5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/file.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
// File represents a file as a name/value pair.
//
// By convention, name is a relative path within the scope of the chart's
// base directory.
type File struct {
	// Name is the path-like name of the template.
	Name string `json:"name"`
	// Data is the template as byte data (raw file contents, not parsed).
	Data []byte `json:"data"`
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go
new file mode 100644
index 000000000..8b38cb89f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go
@@ -0,0 +1,196 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
// drivePathPattern matches Windows drive-prefixed paths such as "c:/",
// which must never appear as file names inside a chart archive.
var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)

// FileLoader loads a chart from a file
type FileLoader string

// Load loads a chart from the archive file path held by the loader.
func (l FileLoader) Load() (*chart.Chart, error) {
	return LoadFile(string(l))
}
+
+// LoadFile loads from an archive file.
+func LoadFile(name string) (*chart.Chart, error) {
+ if fi, err := os.Stat(name); err != nil {
+ return nil, err
+ } else if fi.IsDir() {
+ return nil, errors.New("cannot load a directory")
+ }
+
+ raw, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ err = ensureArchive(name, raw)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := LoadArchive(raw)
+ if err != nil {
+ if err == gzip.ErrHeader {
+ return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err)
+ }
+ }
+ return c, err
+}
+
+// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive.
+//
+// Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence
+// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error
+// if we didn't check for this.
+func ensureArchive(name string, raw *os.File) error {
+ defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed.
+
+ // Check the file format to give us a chance to provide the user with more actionable feedback.
+ buffer := make([]byte, 512)
+ _, err := raw.Read(buffer)
+ if err != nil && err != io.EOF {
+ return fmt.Errorf("file '%s' cannot be read: %s", name, err)
+ }
+ if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" {
+ // TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide
+ // variety of content (Makefile, .zshrc) as valid YAML without errors.
+
+ // Wrong content type. Let's check if it's yaml and give an extra hint?
+ if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") {
+ return fmt.Errorf("file '%s' seems to be a YAML file, but expected a gzipped archive", name)
+ }
+ return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType)
+ }
+ return nil
+}
+
// LoadArchiveFiles reads in files out of an archive into memory. This function
// performs important path security checks and should always be used before
// expanding a tarball.
//
// Each regular file in the gzipped tarball becomes a BufferedFile whose Name
// is the slash-normalized path relative to the archive's single top-level
// directory. Entries that are directories or PAX extension headers are
// skipped. An error is returned for absolute paths, parent-directory escapes,
// Windows drive-prefixed paths, a Chart.yaml outside the base directory, or
// an archive containing no files at all.
func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
	unzipped, err := gzip.NewReader(in)
	if err != nil {
		return nil, err
	}
	defer unzipped.Close()

	files := []*BufferedFile{}
	tr := tar.NewReader(unzipped)
	for {
		b := bytes.NewBuffer(nil)
		hd, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		if hd.FileInfo().IsDir() {
			// Use this instead of hd.Typeflag because we don't have to do any
			// inference chasing.
			continue
		}

		switch hd.Typeflag {
		// We don't want to process these extension header files.
		case tar.TypeXGlobalHeader, tar.TypeXHeader:
			continue
		}

		// Archive could contain \ if generated on Windows
		delimiter := "/"
		if strings.ContainsRune(hd.Name, '\\') {
			delimiter = "\\"
		}

		// Strip the archive's top-level directory: parts[0] is the chart dir,
		// the rest is the in-chart path.
		parts := strings.Split(hd.Name, delimiter)
		n := strings.Join(parts[1:], delimiter)

		// Normalize the path to the / delimiter
		n = strings.ReplaceAll(n, delimiter, "/")

		if path.IsAbs(n) {
			return nil, errors.New("chart illegally contains absolute paths")
		}

		n = path.Clean(n)
		if n == "." {
			// In this case, the original path was relative when it should have been absolute.
			return nil, errors.Errorf("chart illegally contains content outside the base directory: %q", hd.Name)
		}
		if strings.HasPrefix(n, "..") {
			return nil, errors.New("chart illegally references parent directory")
		}

		// In some particularly arcane acts of path creativity, it is possible to intermix
		// UNIX and Windows style paths in such a way that you produce a result of the form
		// c:/foo even after all the built-in absolute path checks. So we explicitly check
		// for this condition.
		if drivePathPattern.MatchString(n) {
			return nil, errors.New("chart contains illegally named files")
		}

		// A Chart.yaml at the archive root (rather than inside the chart
		// directory) indicates a malformed package.
		if parts[0] == "Chart.yaml" {
			return nil, errors.New("chart yaml not in base directory")
		}

		if _, err := io.Copy(b, tr); err != nil {
			return nil, err
		}

		// Strip a UTF-8 byte-order mark so downstream YAML parsing is clean.
		data := bytes.TrimPrefix(b.Bytes(), utf8bom)

		files = append(files, &BufferedFile{Name: n, Data: data})
		b.Reset()
	}

	if len(files) == 0 {
		return nil, errors.New("no files in chart archive")
	}
	return files, nil
}
+
+// LoadArchive loads from a reader containing a compressed tar archive.
+func LoadArchive(in io.Reader) (*chart.Chart, error) {
+ files, err := LoadArchiveFiles(in)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go
new file mode 100644
index 000000000..bbe543870
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/internal/ignore"
+ "helm.sh/helm/v3/internal/sympath"
+ "helm.sh/helm/v3/pkg/chart"
+)
+
// utf8bom is the UTF-8 byte-order mark, trimmed from file contents so that
// downstream parsing is not tripped up by a leading BOM.
var utf8bom = []byte{0xEF, 0xBB, 0xBF}

// DirLoader loads a chart from a directory
type DirLoader string

// Load loads the chart from the directory path held by the loader.
func (l DirLoader) Load() (*chart.Chart, error) {
	return LoadDir(string(l))
}
+
// LoadDir loads from a directory.
//
// This loads charts only from directories. Files matching the chart's
// .helmignore rules (plus the default rules) are skipped, and each loaded
// file's name is recorded relative to the chart root with / separators.
func LoadDir(dir string) (*chart.Chart, error) {
	topdir, err := filepath.Abs(dir)
	if err != nil {
		return nil, err
	}

	// Just used for errors.
	c := &chart.Chart{}

	// Start from empty ignore rules and merge in .helmignore if present.
	rules := ignore.Empty()
	ifile := filepath.Join(topdir, ignore.HelmIgnore)
	if _, err := os.Stat(ifile); err == nil {
		r, err := ignore.ParseFile(ifile)
		if err != nil {
			return c, err
		}
		rules = r
	}
	rules.AddDefaults()

	files := []*BufferedFile{}
	// Trailing separator so TrimPrefix below yields chart-relative names.
	topdir += string(filepath.Separator)

	walk := func(name string, fi os.FileInfo, err error) error {
		n := strings.TrimPrefix(name, topdir)
		if n == "" {
			// No need to process top level. Avoid bug with helmignore .* matching
			// empty names. See issue 1779.
			return nil
		}

		// Normalize to / since it will also work on Windows
		n = filepath.ToSlash(n)

		// NOTE(review): err is checked only after n is computed; if the walk
		// reports an error, fi may be unreliable — confirm sympath.Walk's
		// contract before reordering.
		if err != nil {
			return err
		}
		if fi.IsDir() {
			// Directory-based ignore rules should involve skipping the entire
			// contents of that directory.
			if rules.Ignore(n, fi) {
				return filepath.SkipDir
			}
			return nil
		}

		// If a .helmignore file matches, skip this file.
		if rules.Ignore(n, fi) {
			return nil
		}

		// Irregular files include devices, sockets, and other uses of files that
		// are not regular files. In Go they have a file mode type bit set.
		// See https://golang.org/pkg/os/#FileMode for examples.
		if !fi.Mode().IsRegular() {
			return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name)
		}

		data, err := ioutil.ReadFile(name)
		if err != nil {
			return errors.Wrapf(err, "error reading %s", n)
		}

		// Strip a UTF-8 byte-order mark before buffering the contents.
		data = bytes.TrimPrefix(data, utf8bom)

		files = append(files, &BufferedFile{Name: n, Data: data})
		return nil
	}
	if err = sympath.Walk(topdir, walk); err != nil {
		return c, err
	}

	return LoadFiles(files)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go
new file mode 100644
index 000000000..7cc8878a8
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go
@@ -0,0 +1,200 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bytes"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
// ChartLoader loads a chart.
//
// It is implemented by DirLoader (directories) and FileLoader (archive
// files); use Loader to pick the right implementation for a path.
type ChartLoader interface {
	Load() (*chart.Chart, error)
}
+
+// Loader returns a new ChartLoader appropriate for the given chart name
+func Loader(name string) (ChartLoader, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return DirLoader(name), nil
+ }
+ return FileLoader(name), nil
+
+}
+
+// Load takes a string name, tries to resolve it to a file or directory, and then loads it.
+//
+// This is the preferred way to load a chart. It will discover the chart encoding
+// and hand off to the appropriate chart reader.
+//
+// If a .helmignore file is present, the directory loader will skip loading any files
+// matching it. But .helmignore is not evaluated when reading out of an archive.
+func Load(name string) (*chart.Chart, error) {
+ l, err := Loader(name)
+ if err != nil {
+ return nil, err
+ }
+ return l.Load()
+}
+
// BufferedFile represents an archive file buffered for later processing.
type BufferedFile struct {
	// Name is the file's path relative to the chart root, using / separators.
	Name string
	// Data is the raw file contents.
	Data []byte
}
+
// LoadFiles loads from in-memory files.
//
// It works in two passes: the first locates and parses Chart.yaml (so
// metadata exists regardless of file ordering), the second routes every
// other file into the right part of the chart (lock file, values, schema,
// templates, subcharts, or plain files). Subcharts found under charts/ are
// then loaded recursively and attached as dependencies.
func LoadFiles(files []*BufferedFile) (*chart.Chart, error) {
	c := new(chart.Chart)
	subcharts := make(map[string][]*BufferedFile)

	// do not rely on assumed ordering of files in the chart and crash
	// if Chart.yaml was not coming early enough to initialize metadata
	for _, f := range files {
		c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data})
		if f.Name == "Chart.yaml" {
			if c.Metadata == nil {
				c.Metadata = new(chart.Metadata)
			}
			if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
				return c, errors.Wrap(err, "cannot load Chart.yaml")
			}
			// NOTE(bacongobbler): while the chart specification says that APIVersion must be set,
			// Helm 2 accepted charts that did not provide an APIVersion in their chart metadata.
			// Because of that, if APIVersion is unset, we should assume we're loading a v1 chart.
			if c.Metadata.APIVersion == "" {
				c.Metadata.APIVersion = chart.APIVersionV1
			}
		}
	}
	// Second pass: route each remaining file to its place in the chart.
	for _, f := range files {
		switch {
		case f.Name == "Chart.yaml":
			// already processed
			continue
		case f.Name == "Chart.lock":
			c.Lock = new(chart.Lock)
			// NOTE(review): &c.Lock passes a **chart.Lock; the Chart.yaml case
			// above passes the pointer directly — confirm the double pointer is
			// intentional before normalizing.
			if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
				return c, errors.Wrap(err, "cannot load Chart.lock")
			}
		case f.Name == "values.yaml":
			c.Values = make(map[string]interface{})
			if err := yaml.Unmarshal(f.Data, &c.Values); err != nil {
				return c, errors.Wrap(err, "cannot load values.yaml")
			}
		case f.Name == "values.schema.json":
			c.Schema = f.Data

		// Deprecated: requirements.yaml is deprecated use Chart.yaml.
		// We will handle it for you because we are nice people
		case f.Name == "requirements.yaml":
			if c.Metadata == nil {
				c.Metadata = new(chart.Metadata)
			}
			if c.Metadata.APIVersion != chart.APIVersionV1 {
				log.Printf("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.")
			}
			if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
				return c, errors.Wrap(err, "cannot load requirements.yaml")
			}
			// v1 charts keep the legacy file visible in Files.
			if c.Metadata.APIVersion == chart.APIVersionV1 {
				c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
			}
		// Deprecated: requirements.lock is deprecated use Chart.lock.
		case f.Name == "requirements.lock":
			c.Lock = new(chart.Lock)
			if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
				return c, errors.Wrap(err, "cannot load requirements.lock")
			}
			if c.Metadata == nil {
				c.Metadata = new(chart.Metadata)
			}
			if c.Metadata.APIVersion == chart.APIVersionV1 {
				c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
			}

		case strings.HasPrefix(f.Name, "templates/"):
			c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data})
		case strings.HasPrefix(f.Name, "charts/"):
			// Provenance files under charts/ stay as plain files.
			if filepath.Ext(f.Name) == ".prov" {
				c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
				continue
			}

			// Group subchart files by the subchart's top-level name.
			fname := strings.TrimPrefix(f.Name, "charts/")
			cname := strings.SplitN(fname, "/", 2)[0]
			subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data})
		default:
			c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data})
		}
	}

	if c.Metadata == nil {
		return c, errors.New("Chart.yaml file is missing")
	}

	if err := c.Validate(); err != nil {
		return c, err
	}

	// Load each collected subchart (archived or unpacked) as a dependency.
	for n, files := range subcharts {
		var sc *chart.Chart
		var err error
		switch {
		case strings.IndexAny(n, "_.") == 0:
			// Names starting with _ or . are not charts; skip them.
			continue
		case filepath.Ext(n) == ".tgz":
			file := files[0]
			if file.Name != n {
				return c, errors.Errorf("error unpacking tar in %s: expected %s, got %s", c.Name(), n, file.Name)
			}
			// Untar the chart and add to c.Dependencies
			sc, err = LoadArchive(bytes.NewBuffer(file.Data))
		default:
			// We have to trim the prefix off of every file, and ignore any file
			// that is in charts/, but isn't actually a chart.
			buff := make([]*BufferedFile, 0, len(files))
			for _, f := range files {
				parts := strings.SplitN(f.Name, "/", 2)
				if len(parts) < 2 {
					continue
				}
				f.Name = parts[1]
				buff = append(buff, f)
			}
			sc, err = LoadFiles(buff)
		}

		if err != nil {
			return c, errors.Wrapf(err, "error unpacking %s in %s", n, c.Name())
		}
		c.AddDependency(sc)
	}

	return c, nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go
new file mode 100644
index 000000000..1925e45ac
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go
@@ -0,0 +1,160 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ "strings"
+ "unicode"
+
+ "github.com/Masterminds/semver/v3"
+)
+
// Maintainer describes a Chart maintainer.
type Maintainer struct {
	// Name is a user name or organization name
	Name string `json:"name,omitempty"`
	// Email is an optional email address to contact the named maintainer
	Email string `json:"email,omitempty"`
	// URL is an optional URL to an address for the named maintainer
	URL string `json:"url,omitempty"`
}
+
// Validate checks valid data and sanitizes string characters.
//
// All three fields are rewritten in place via sanitizeString; it currently
// never returns a non-nil error, but keeps the error return for interface
// parity with the other Validate methods in this package.
func (m *Maintainer) Validate() error {
	m.Name = sanitizeString(m.Name)
	m.Email = sanitizeString(m.Email)
	m.URL = sanitizeString(m.URL)
	return nil
}
+
// Metadata for a Chart file. This models the structure of a Chart.yaml file.
type Metadata struct {
	// The name of the chart. Required.
	Name string `json:"name,omitempty"`
	// The URL to a relevant project page, git repo, or contact person
	Home string `json:"home,omitempty"`
	// Source is the URL to the source code of this chart
	Sources []string `json:"sources,omitempty"`
	// A SemVer 2 conformant version string of the chart. Required.
	Version string `json:"version,omitempty"`
	// A one-sentence description of the chart
	Description string `json:"description,omitempty"`
	// A list of string keywords
	Keywords []string `json:"keywords,omitempty"`
	// A list of name and URL/email address combinations for the maintainer(s)
	Maintainers []*Maintainer `json:"maintainers,omitempty"`
	// The URL to an icon file.
	Icon string `json:"icon,omitempty"`
	// The API Version of this chart. Required.
	APIVersion string `json:"apiVersion,omitempty"`
	// The condition to check to enable chart
	Condition string `json:"condition,omitempty"`
	// The tags to check to enable chart
	Tags string `json:"tags,omitempty"`
	// The version of the application enclosed inside of this chart.
	AppVersion string `json:"appVersion,omitempty"`
	// Whether or not this chart is deprecated
	Deprecated bool `json:"deprecated,omitempty"`
	// Annotations are additional mappings uninterpreted by Helm,
	// made available for inspection by other applications.
	Annotations map[string]string `json:"annotations,omitempty"`
	// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
	KubeVersion string `json:"kubeVersion,omitempty"`
	// Dependencies are a list of dependencies for a chart.
	Dependencies []*Dependency `json:"dependencies,omitempty"`
	// Specifies the chart type: application or library (see isValidChartType).
	Type string `json:"type,omitempty"`
}
+
// Validate checks the metadata for known issues and sanitizes string
// characters.
//
// String fields are sanitized in place first, then the required fields
// (apiVersion, name, version), the version's SemVer validity, the chart
// type, and each maintainer and dependency are checked in order. The first
// problem found is returned as a ValidationError.
func (md *Metadata) Validate() error {
	// Guarding the nil receiver here lets Chart.Validate delegate without
	// checking for missing metadata itself.
	if md == nil {
		return ValidationError("chart.metadata is required")
	}

	md.Name = sanitizeString(md.Name)
	md.Description = sanitizeString(md.Description)
	md.Home = sanitizeString(md.Home)
	md.Icon = sanitizeString(md.Icon)
	md.Condition = sanitizeString(md.Condition)
	md.Tags = sanitizeString(md.Tags)
	md.AppVersion = sanitizeString(md.AppVersion)
	md.KubeVersion = sanitizeString(md.KubeVersion)
	for i := range md.Sources {
		md.Sources[i] = sanitizeString(md.Sources[i])
	}
	for i := range md.Keywords {
		md.Keywords[i] = sanitizeString(md.Keywords[i])
	}

	if md.APIVersion == "" {
		return ValidationError("chart.metadata.apiVersion is required")
	}
	if md.Name == "" {
		return ValidationError("chart.metadata.name is required")
	}
	if md.Version == "" {
		return ValidationError("chart.metadata.version is required")
	}
	if !isValidSemver(md.Version) {
		return ValidationErrorf("chart.metadata.version %q is invalid", md.Version)
	}
	if !isValidChartType(md.Type) {
		return ValidationError("chart.metadata.type must be application or library")
	}

	// Maintainer.Validate sanitizes maintainer fields as a side effect.
	for _, m := range md.Maintainers {
		if err := m.Validate(); err != nil {
			return err
		}
	}

	// Aliases need to be validated here to make sure that the alias name does
	// not contain any illegal characters.
	for _, dependency := range md.Dependencies {
		if err := dependency.Validate(); err != nil {
			return err
		}
	}
	return nil
}
+
// isValidChartType reports whether in is an allowed chart type: empty
// (unset), "application", or "library".
func isValidChartType(in string) bool {
	return in == "" || in == "application" || in == "library"
}
+
// isValidSemver reports whether v parses as a semantic version according to
// the Masterminds semver library (which tolerates e.g. a leading "v").
func isValidSemver(v string) bool {
	_, err := semver.NewVersion(v)
	return err == nil
}
+
// sanitizeString normalizes every whitespace rune to a single space and
// strips non-printable characters entirely.
func sanitizeString(str string) string {
	mapper := func(r rune) rune {
		switch {
		case unicode.IsSpace(r):
			return ' '
		case unicode.IsPrint(r):
			return r
		default:
			// Returning a negative rune drops the character.
			return -1
		}
	}
	return strings.Map(mapper, str)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
new file mode 100644
index 000000000..5f57e11a5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
@@ -0,0 +1,126 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/Masterminds/semver/v3"
+ "k8s.io/client-go/kubernetes/scheme"
+
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+ helmversion "helm.sh/helm/v3/internal/version"
+)
+
var (
	// The Kubernetes version can be set by LDFLAGS. In order to do that the value
	// must be a string.
	k8sVersionMajor = "1"
	k8sVersionMinor = "20"

	// DefaultVersionSet is the default version set, containing every
	// group/version registered with the client-go scheme plus the
	// apiextensions (CRD) groups — see allKnownVersions.
	DefaultVersionSet = allKnownVersions()

	// DefaultCapabilities is the default set of capabilities.
	DefaultCapabilities = &Capabilities{
		KubeVersion: KubeVersion{
			Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor),
			Major:   k8sVersionMajor,
			Minor:   k8sVersionMinor,
		},
		APIVersions: DefaultVersionSet,
		HelmVersion: helmversion.Get(),
	}
)
+
// Capabilities describes the capabilities of the Kubernetes cluster.
type Capabilities struct {
	// KubeVersion is the Kubernetes version.
	KubeVersion KubeVersion
	// APIVersions are supported Kubernetes API versions.
	APIVersions VersionSet
	// HelmVersion is the build information for this helm version
	HelmVersion helmversion.BuildInfo
}
+
+func (capabilities *Capabilities) Copy() *Capabilities {
+ return &Capabilities{
+ KubeVersion: capabilities.KubeVersion,
+ APIVersions: capabilities.APIVersions,
+ HelmVersion: capabilities.HelmVersion,
+ }
+}
+
// KubeVersion is the Kubernetes version.
type KubeVersion struct {
	Version string // Kubernetes version, e.g. "v1.20.0"
	Major   string // Kubernetes major version
	Minor   string // Kubernetes minor version
}

// String implements fmt.Stringer, returning the full version string.
func (kv *KubeVersion) String() string { return kv.Version }

// GitVersion returns the Kubernetes version string.
//
// Deprecated: use KubeVersion.Version.
func (kv *KubeVersion) GitVersion() string { return kv.Version }
+
// ParseKubeVersion parses a Kubernetes version from a string (with or
// without a leading "v", per semver.NewVersion) into a KubeVersion whose
// Version field is normalized to the "v<semver>" form.
func ParseKubeVersion(version string) (*KubeVersion, error) {
	sv, err := semver.NewVersion(version)
	if err != nil {
		return nil, err
	}
	return &KubeVersion{
		Version: "v" + sv.String(),
		Major:   strconv.FormatUint(sv.Major(), 10),
		Minor:   strconv.FormatUint(sv.Minor(), 10),
	}, nil
}
+
// VersionSet is a set of Kubernetes API versions.
type VersionSet []string

// Has returns true if the version string is in the set.
//
//	vs.Has("apps/v1")
func (v VersionSet) Has(apiVersion string) bool {
	for _, known := range v {
		if known == apiVersion {
			return true
		}
	}
	return false
}
+
+func allKnownVersions() VersionSet {
+ // We should register the built in extension APIs as well so CRDs are
+ // supported in the default version set. This has caused problems with `helm
+ // template` in the past, so let's be safe
+ apiextensionsv1beta1.AddToScheme(scheme.Scheme)
+ apiextensionsv1.AddToScheme(scheme.Scheme)
+
+ groups := scheme.Scheme.PrioritizedVersionsAllGroups()
+ vs := make(VersionSet, 0, len(groups))
+ for _, gv := range groups {
+ vs = append(vs, gv.String())
+ }
+ return vs
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go
new file mode 100644
index 000000000..808a902b1
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
+// LoadChartfile loads a Chart.yaml file into a *chart.Metadata.
+func LoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.Unmarshal(b, y)
+ return y, err
+}
+
+// SaveChartfile saves the given metadata as a Chart.yaml file at the given path.
+//
+// 'filename' should be the complete path and filename ('foo/Chart.yaml')
+func SaveChartfile(filename string, cf *chart.Metadata) error {
+ // Pull out the dependencies of a v1 Chart, since there's no way
+ // to tell the serializer to skip a field for just this use case
+ savedDependencies := cf.Dependencies
+ if cf.APIVersion == chart.APIVersionV1 {
+ cf.Dependencies = nil
+ }
+ out, err := yaml.Marshal(cf)
+ if cf.APIVersion == chart.APIVersionV1 {
+ cf.Dependencies = savedDependencies
+ }
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(filename, out, 0644)
+}
+
+// IsChartDir validate a chart directory.
+//
+// Checks for a valid Chart.yaml.
+func IsChartDir(dirName string) (bool, error) {
+ if fi, err := os.Stat(dirName); err != nil {
+ return false, err
+ } else if !fi.IsDir() {
+ return false, errors.Errorf("%q is not a directory", dirName)
+ }
+
+ chartYaml := filepath.Join(dirName, ChartfileName)
+ if _, err := os.Stat(chartYaml); os.IsNotExist(err) {
+ return false, errors.Errorf("no %s exists in directory %q", ChartfileName, dirName)
+ }
+
+ chartYamlContent, err := ioutil.ReadFile(chartYaml)
+ if err != nil {
+ return false, errors.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
+ }
+
+ chartContent := new(chart.Metadata)
+ if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil {
+ return false, err
+ }
+ if chartContent == nil {
+ return false, errors.Errorf("chart metadata (%s) missing", ChartfileName)
+ }
+ if chartContent.Name == "" {
+ return false, errors.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
+ }
+
+ return true, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go
new file mode 100644
index 000000000..b49a31b01
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go
@@ -0,0 +1,206 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "log"
+
+ "github.com/mitchellh/copystructure"
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
+// CoalesceValues coalesces all of the values in a chart (and its subcharts).
+//
+// Values are coalesced together using the following rules:
+//
+// - Values in a higher level chart always override values in a lower-level
+// dependency chart
+// - Scalar values and arrays are replaced, maps are merged
+// - A chart has access to all of the variables for it, as well as all of
+// the values destined for its dependencies.
+func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) {
+ v, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals, err
+ }
+
+ valsCopy := v.(map[string]interface{})
+ // if we have an empty map, make sure it is initialized
+ if valsCopy == nil {
+ valsCopy = make(map[string]interface{})
+ }
+ return coalesce(chrt, valsCopy)
+}
+
+// coalesce coalesces the dest values and the chart values, giving priority to the dest values.
+//
+// This is a helper function for CoalesceValues.
+func coalesce(ch *chart.Chart, dest map[string]interface{}) (map[string]interface{}, error) {
+ coalesceValues(ch, dest)
+ return coalesceDeps(ch, dest)
+}
+
+// coalesceDeps coalesces the dependencies of the given chart.
+func coalesceDeps(chrt *chart.Chart, dest map[string]interface{}) (map[string]interface{}, error) {
+ for _, subchart := range chrt.Dependencies() {
+ if c, ok := dest[subchart.Name()]; !ok {
+ // If dest doesn't already have the key, create it.
+ dest[subchart.Name()] = make(map[string]interface{})
+ } else if !istable(c) {
+ return dest, errors.Errorf("type mismatch on %s: %t", subchart.Name(), c)
+ }
+ if dv, ok := dest[subchart.Name()]; ok {
+ dvmap := dv.(map[string]interface{})
+
+ // Get globals out of dest and merge them into dvmap.
+ coalesceGlobals(dvmap, dest)
+
+ // Now coalesce the rest of the values.
+ var err error
+ dest[subchart.Name()], err = coalesce(subchart, dvmap)
+ if err != nil {
+ return dest, err
+ }
+ }
+ }
+ return dest, nil
+}
+
+// coalesceGlobals copies the globals out of src and merges them into dest.
+//
+// For convenience, returns dest.
+func coalesceGlobals(dest, src map[string]interface{}) {
+ var dg, sg map[string]interface{}
+
+ if destglob, ok := dest[GlobalKey]; !ok {
+ dg = make(map[string]interface{})
+ } else if dg, ok = destglob.(map[string]interface{}); !ok {
+ log.Printf("warning: skipping globals because destination %s is not a table.", GlobalKey)
+ return
+ }
+
+ if srcglob, ok := src[GlobalKey]; !ok {
+ sg = make(map[string]interface{})
+ } else if sg, ok = srcglob.(map[string]interface{}); !ok {
+ log.Printf("warning: skipping globals because source %s is not a table.", GlobalKey)
+ return
+ }
+
+ // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This
+ // reverses that decision. It may somehow be possible to introduce a loop
+ // here, but I haven't found a way. So for the time being, let's allow
+ // tables in globals.
+ for key, val := range sg {
+ if istable(val) {
+ vv := copyMap(val.(map[string]interface{}))
+ if destv, ok := dg[key]; !ok {
+ // Here there is no merge. We're just adding.
+ dg[key] = vv
+ } else {
+ if destvmap, ok := destv.(map[string]interface{}); !ok {
+ log.Printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key)
+ } else {
+ // Basically, we reverse order of coalesce here to merge
+ // top-down.
+ CoalesceTables(vv, destvmap)
+ dg[key] = vv
+ }
+ }
+ } else if dv, ok := dg[key]; ok && istable(dv) {
+ // It's not clear if this condition can actually ever trigger.
+ log.Printf("key %s is table. Skipping", key)
+ } else {
+ // TODO: Do we need to do any additional checking on the value?
+ dg[key] = val
+ }
+ }
+ dest[GlobalKey] = dg
+}
+
+func copyMap(src map[string]interface{}) map[string]interface{} {
+ m := make(map[string]interface{}, len(src))
+ for k, v := range src {
+ m[k] = v
+ }
+ return m
+}
+
+// coalesceValues builds up a values map for a particular chart.
+//
+// Values in v will override the values in the chart.
+func coalesceValues(c *chart.Chart, v map[string]interface{}) {
+ for key, val := range c.Values {
+ if value, ok := v[key]; ok {
+ if value == nil {
+ // When the YAML value is null, we remove the value's key.
+ // This allows Helm's various sources of values (value files or --set) to
+ // remove incompatible keys from any previous chart, file, or set values.
+ delete(v, key)
+ } else if dest, ok := value.(map[string]interface{}); ok {
+ // if v[key] is a table, merge nv's val table into v[key].
+ src, ok := val.(map[string]interface{})
+ if !ok {
+ // If the original value is nil, there is nothing to coalesce, so we don't print
+ // the warning
+ if val != nil {
+ log.Printf("warning: skipped value for %s: Not a table.", key)
+ }
+ } else {
+ // Because v has higher precedence than nv, dest values override src
+ // values.
+ CoalesceTables(dest, src)
+ }
+ }
+ } else {
+ // If the key is not in v, copy it from nv.
+ v[key] = val
+ }
+ }
+}
+
+// CoalesceTables merges a source map into a destination map.
+//
+// dest is considered authoritative.
+func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} {
+ // When --reuse-values is set but there are no modifications yet, return new values
+ if src == nil {
+ return dst
+ }
+ if dst == nil {
+ return src
+ }
+ // Because dest has higher precedence than src, dest values override src
+ // values.
+ for key, val := range src {
+ if dv, ok := dst[key]; ok && dv == nil {
+ delete(dst, key)
+ } else if !ok {
+ dst[key] = val
+ } else if istable(val) {
+ if istable(dv) {
+ CoalesceTables(dv.(map[string]interface{}), val.(map[string]interface{}))
+ } else {
+ log.Printf("warning: cannot overwrite table with non table for %s (%v)", key, val)
+ }
+ } else if istable(dv) && val != nil {
+ log.Printf("warning: destination for %s is a table. Ignoring non-table value %v", key, val)
+ }
+ }
+ return dst
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go
new file mode 100644
index 000000000..f4656c913
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go
@@ -0,0 +1,34 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import "github.com/Masterminds/semver/v3"
+
+// IsCompatibleRange compares a version to a constraint.
+// It returns true if the version matches the constraint, and false in all other cases.
+func IsCompatibleRange(constraint, ver string) bool {
+ sv, err := semver.NewVersion(ver)
+ if err != nil {
+ return false
+ }
+
+ c, err := semver.NewConstraint(constraint)
+ if err != nil {
+ return false
+ }
+ return c.Check(sv)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
new file mode 100644
index 000000000..ca79e7ab2
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
@@ -0,0 +1,687 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// chartName is a regular expression for testing the supplied name of a chart.
+// This regular expression is probably stricter than it needs to be. We can relax it
+// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be
+// problematic.
+var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$")
+
+const (
+ // ChartfileName is the default Chart file name.
+ ChartfileName = "Chart.yaml"
+ // ValuesfileName is the default values file name.
+ ValuesfileName = "values.yaml"
+ // SchemafileName is the default values schema file name.
+ SchemafileName = "values.schema.json"
+ // TemplatesDir is the relative directory name for templates.
+ TemplatesDir = "templates"
+ // ChartsDir is the relative directory name for charts dependencies.
+ ChartsDir = "charts"
+ // TemplatesTestsDir is the relative directory name for tests.
+ TemplatesTestsDir = TemplatesDir + sep + "tests"
+ // IgnorefileName is the name of the Helm ignore file.
+ IgnorefileName = ".helmignore"
+ // IngressFileName is the name of the example ingress file.
+ IngressFileName = TemplatesDir + sep + "ingress.yaml"
+ // DeploymentName is the name of the example deployment file.
+ DeploymentName = TemplatesDir + sep + "deployment.yaml"
+ // ServiceName is the name of the example service file.
+ ServiceName = TemplatesDir + sep + "service.yaml"
+ // ServiceAccountName is the name of the example serviceaccount file.
+ ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml"
+ // HorizontalPodAutoscalerName is the name of the example hpa file.
+ HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml"
+ // NotesName is the name of the example NOTES.txt file.
+ NotesName = TemplatesDir + sep + "NOTES.txt"
+ // HelpersName is the name of the example helpers file.
+ HelpersName = TemplatesDir + sep + "_helpers.tpl"
+ // TestConnectionName is the name of the example test file.
+ TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml"
+)
+
+// maxChartNameLength is lower than the limits we know of with certain file systems,
+// and with certain Kubernetes fields.
+const maxChartNameLength = 250
+
+const sep = string(filepath.Separator)
+
+const defaultChartfile = `apiVersion: v2
+name: %s
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+`
+
+const defaultValues = `# Default values for %s.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "<CHARTNAME>.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+ {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+ {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+ {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+ ingressClassName: {{ .Values.ingress.className }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+ pathType: {{ .pathType }}
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ $svcPort }}
+ {{- else }}
+ serviceName: {{ $fullName }}
+ servicePort: {{ $svcPort }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "<CHARTNAME>.fullname" . }}
+ labels:
+ {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+ {{- if not .Values.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "<CHARTNAME>.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "<CHARTNAME>.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "<CHARTNAME>.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "<CHARTNAME>.fullname" . }}
+ labels:
+ {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "<CHARTNAME>.selectorLabels" . | nindent 4 }}
+`
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "<CHARTNAME>.serviceAccountName" . }}
+ labels:
+ {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ include "<CHARTNAME>.fullname" . }}
+ labels:
+ {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include "<CHARTNAME>.fullname" . }}
+ minReplicas: {{ .Values.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+ {{- end }}
+ {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "<CHARTNAME>.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "<CHARTNAME>.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "<CHARTNAME>.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "<CHARTNAME>.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define "<CHARTNAME>.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "<CHARTNAME>.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "<CHARTNAME>.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "<CHARTNAME>.labels" -}}
+helm.sh/chart: {{ include "<CHARTNAME>.chart" . }}
+{{ include "<CHARTNAME>.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "<CHARTNAME>.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "<CHARTNAME>.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "<CHARTNAME>.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "<CHARTNAME>.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "<CHARTNAME>.fullname" . }}-test-connection"
+ labels:
+ {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: wget
+ image: busybox
+ command: ['wget']
+ args: ['{{ include "<CHARTNAME>.fullname" . }}:{{ .Values.service.port }}']
+ restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward
+// compatibility.
+var Stderr io.Writer = os.Stderr
+
+// CreateFrom creates a new chart, but scaffolds it from the src chart.
+func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
+ schart, err := loader.Load(src)
+ if err != nil {
+ return errors.Wrapf(err, "could not load %s", src)
+ }
+
+ schart.Metadata = chartfile
+
+ var updatedTemplates []*chart.File
+
+ for _, template := range schart.Templates {
+ newData := transform(string(template.Data), schart.Name())
+ updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData})
+ }
+
+ schart.Templates = updatedTemplates
+ b, err := yaml.Marshal(schart.Values)
+ if err != nil {
+ return errors.Wrap(err, "reading values file")
+ }
+
+ var m map[string]interface{}
+ if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
+ return errors.Wrap(err, "transforming values file")
+ }
+ schart.Values = m
+
+ // SaveDir looks for the file values.yaml when saving rather than the values
+ // key in order to preserve the comments in the YAML. The name placeholder
+ // needs to be replaced on that file.
+ for _, f := range schart.Raw {
+ if f.Name == ValuesfileName {
+ f.Data = transform(string(f.Data), schart.Name())
+ }
+ }
+
+ return SaveDir(schart, dest)
+}
+
+// Create creates a new chart in a directory.
+//
+// Inside of dir, this will create a directory based on the name of
+// chartfile.Name. It will then write the Chart.yaml into this directory and
+// create the (empty) appropriate directories.
+//
+// The returned string will point to the newly created directory. It will be
+// an absolute path, even if the provided base directory was relative.
+//
+// If dir does not exist, this will return an error.
+// If Chart.yaml or any directories cannot be created, this will return an
+// error. In such a case, this will attempt to clean up by removing the
+// new chart directory.
+func Create(name, dir string) (string, error) {
+
+ // Sanity-check the name of a chart so user doesn't create one that causes problems.
+ if err := validateChartName(name); err != nil {
+ return "", err
+ }
+
+ path, err := filepath.Abs(dir)
+ if err != nil {
+ return path, err
+ }
+
+ if fi, err := os.Stat(path); err != nil {
+ return path, err
+ } else if !fi.IsDir() {
+ return path, errors.Errorf("no such directory %s", path)
+ }
+
+ cdir := filepath.Join(path, name)
+ if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+ return cdir, errors.Errorf("file %s already exists and is not a directory", cdir)
+ }
+
+ files := []struct {
+ path string
+ content []byte
+ }{
+ {
+ // Chart.yaml
+ path: filepath.Join(cdir, ChartfileName),
+ content: []byte(fmt.Sprintf(defaultChartfile, name)),
+ },
+ {
+ // values.yaml
+ path: filepath.Join(cdir, ValuesfileName),
+ content: []byte(fmt.Sprintf(defaultValues, name)),
+ },
+ {
+ // .helmignore
+ path: filepath.Join(cdir, IgnorefileName),
+ content: []byte(defaultIgnore),
+ },
+ {
+ // ingress.yaml
+ path: filepath.Join(cdir, IngressFileName),
+ content: transform(defaultIngress, name),
+ },
+ {
+ // deployment.yaml
+ path: filepath.Join(cdir, DeploymentName),
+ content: transform(defaultDeployment, name),
+ },
+ {
+ // service.yaml
+ path: filepath.Join(cdir, ServiceName),
+ content: transform(defaultService, name),
+ },
+ {
+ // serviceaccount.yaml
+ path: filepath.Join(cdir, ServiceAccountName),
+ content: transform(defaultServiceAccount, name),
+ },
+ {
+ // hpa.yaml
+ path: filepath.Join(cdir, HorizontalPodAutoscalerName),
+ content: transform(defaultHorizontalPodAutoscaler, name),
+ },
+ {
+ // NOTES.txt
+ path: filepath.Join(cdir, NotesName),
+ content: transform(defaultNotes, name),
+ },
+ {
+ // _helpers.tpl
+ path: filepath.Join(cdir, HelpersName),
+ content: transform(defaultHelpers, name),
+ },
+ {
+ // test-connection.yaml
+ path: filepath.Join(cdir, TestConnectionName),
+ content: transform(defaultTestConnection, name),
+ },
+ }
+
+ for _, file := range files {
+ if _, err := os.Stat(file.path); err == nil {
+ // There is no handle to a preferred output stream here.
+ fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+ }
+ if err := writeFile(file.path, file.content); err != nil {
+ return cdir, err
+ }
+ }
+ // Need to add the ChartsDir explicitly as it does not contain any file OOTB
+ if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+ return cdir, err
+ }
+ return cdir, nil
+}
+
+// transform performs a string replacement of the specified source for
+// a given key with the replacement string
+func transform(src, replacement string) []byte {
+ return []byte(strings.ReplaceAll(src, "<CHARTNAME>", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+ if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+ return err
+ }
+ return ioutil.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+ if name == "" || len(name) > maxChartNameLength {
+ return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+ }
+ if !chartName.MatchString(name) {
+ return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+ }
+ return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
new file mode 100644
index 000000000..d2e7d6dc9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
@@ -0,0 +1,285 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "log"
+ "strings"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
// ProcessDependencies checks through this chart's dependencies, processing accordingly.
//
// It first prunes dependencies disabled via tags/conditions
// (processDependencyEnabled) and then merges import-values from the
// remaining children into the parent (processDependencyImportValues).
func ProcessDependencies(c *chart.Chart, v Values) error {
	if err := processDependencyEnabled(c, v, ""); err != nil {
		return err
	}
	return processDependencyImportValues(c)
}
+
// processDependencyConditions disables charts based on condition path value in values.
//
// Each dependency may declare a comma-separated list of condition paths.
// The first path that resolves to a bool in cvals sets r.Enabled and stops
// the search; non-bool values and lookup errors other than ErrNoValue only
// log warnings. cpath is the dotted values prefix of the current (sub)chart
// ("" at the root).
func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) {
	if reqs == nil {
		return
	}
	for _, r := range reqs {
		for _, c := range strings.Split(strings.TrimSpace(r.Condition), ",") {
			if len(c) > 0 {
				// retrieve value
				vv, err := cvals.PathValue(cpath + c)
				if err == nil {
					// if not bool, warn
					if bv, ok := vv.(bool); ok {
						r.Enabled = bv
						// first resolvable bool condition wins
						break
					} else {
						log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name)
					}
				} else if _, ok := err.(ErrNoValue); !ok {
					// this is a real error
					log.Printf("Warning: PathValue returned error %v", err)
				}
			}
		}
	}
}
+
+// processDependencyTags disables charts based on tags in values
+func processDependencyTags(reqs []*chart.Dependency, cvals Values) {
+ if reqs == nil {
+ return
+ }
+ vt, err := cvals.Table("tags")
+ if err != nil {
+ return
+ }
+ for _, r := range reqs {
+ var hasTrue, hasFalse bool
+ for _, k := range r.Tags {
+ if b, ok := vt[k]; ok {
+ // if not bool, warn
+ if bv, ok := b.(bool); ok {
+ if bv {
+ hasTrue = true
+ } else {
+ hasFalse = true
+ }
+ } else {
+ log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name)
+ }
+ }
+ }
+ if !hasTrue && hasFalse {
+ r.Enabled = false
+ } else if hasTrue || !hasTrue && !hasFalse {
+ r.Enabled = true
+ }
+ }
+}
+
// getAliasDependency returns a shallow copy of the first chart in charts
// whose name equals dep.Name and whose version satisfies dep.Version,
// renamed to dep.Alias when one is set. It returns nil when no compatible
// chart is found.
func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart {
	for _, c := range charts {
		if c == nil {
			continue
		}
		if c.Name() != dep.Name {
			continue
		}
		if !IsCompatibleRange(dep.Version, c.Metadata.Version) {
			continue
		}

		// Shallow-copy the chart and its metadata so that applying the
		// alias does not mutate the original chart.
		out := *c
		md := *c.Metadata
		out.Metadata = &md

		if dep.Alias != "" {
			md.Name = dep.Alias
		}
		return &out
	}
	return nil
}
+
+// processDependencyEnabled removes disabled charts from dependencies
+func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+
+ var chartDependencies []*chart.Chart
+ // If any dependency is not a part of Chart.yaml
+ // then this should be added to chartDependencies.
+ // However, if the dependency is already specified in Chart.yaml
+ // we should not add it, as it would be anyways processed from Chart.yaml
+
+Loop:
+ for _, existing := range c.Dependencies() {
+ for _, req := range c.Metadata.Dependencies {
+ if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) {
+ continue Loop
+ }
+ }
+ chartDependencies = append(chartDependencies, existing)
+ }
+
+ for _, req := range c.Metadata.Dependencies {
+ if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil {
+ chartDependencies = append(chartDependencies, chartDependency)
+ }
+ if req.Alias != "" {
+ req.Name = req.Alias
+ }
+ }
+ c.SetDependencies(chartDependencies...)
+
+ // set all to true
+ for _, lr := range c.Metadata.Dependencies {
+ lr.Enabled = true
+ }
+ cvals, err := CoalesceValues(c, v)
+ if err != nil {
+ return err
+ }
+ // flag dependencies as enabled/disabled
+ processDependencyTags(c.Metadata.Dependencies, cvals)
+ processDependencyConditions(c.Metadata.Dependencies, cvals, path)
+ // make a map of charts to remove
+ rm := map[string]struct{}{}
+ for _, r := range c.Metadata.Dependencies {
+ if !r.Enabled {
+ // remove disabled chart
+ rm[r.Name] = struct{}{}
+ }
+ }
+ // don't keep disabled charts in new slice
+ cd := []*chart.Chart{}
+ copy(cd, c.Dependencies()[:0])
+ for _, n := range c.Dependencies() {
+ if _, ok := rm[n.Metadata.Name]; !ok {
+ cd = append(cd, n)
+ }
+ }
+ // don't keep disabled charts in metadata
+ cdMetadata := []*chart.Dependency{}
+ copy(cdMetadata, c.Metadata.Dependencies[:0])
+ for _, n := range c.Metadata.Dependencies {
+ if _, ok := rm[n.Name]; !ok {
+ cdMetadata = append(cdMetadata, n)
+ }
+ }
+
+ // recursively call self to process sub dependencies
+ for _, t := range cd {
+ subpath := path + t.Metadata.Name + "."
+ if err := processDependencyEnabled(t, cvals, subpath); err != nil {
+ return err
+ }
+ }
+ // set the correct dependencies in metadata
+ c.Metadata.Dependencies = nil
+ c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...)
+ c.SetDependencies(cd...)
+
+ return nil
+}
+
// pathToMap creates a nested map given a YAML path in dot notation.
//
// "." denotes the root and returns data unchanged; any other path is split
// on dots and data is wrapped in one single-key map per segment.
func pathToMap(path string, data map[string]interface{}) map[string]interface{} {
	if path == "." {
		return data
	}
	return set(parsePath(path), data)
}
+
// set wraps data in nested single-key maps, one level per path element,
// with path[0] outermost. An empty path yields nil.
func set(path []string, data map[string]interface{}) map[string]interface{} {
	if len(path) == 0 {
		return nil
	}
	// Build the inner levels first, then wrap with the head segment.
	inner := interface{}(data)
	if rest := set(path[1:], data); rest != nil {
		inner = rest
	}
	return map[string]interface{}{path[0]: inner}
}
+
// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field.
//
// Each import-values entry is either a map with explicit "child"/"parent"
// table paths, or a bare string naming a key under the child's "exports"
// table (imported into the root). Resolved tables are coalesced into the
// parent's values, and every entry is normalized to the explicit map form.
func processImportValues(c *chart.Chart) error {
	if c.Metadata.Dependencies == nil {
		return nil
	}
	// combine chart values and empty config to get Values
	cvals, err := CoalesceValues(c, nil)
	if err != nil {
		return err
	}
	b := make(map[string]interface{})
	// import values from each dependency if specified in import-values
	for _, r := range c.Metadata.Dependencies {
		var outiv []interface{}
		for _, riv := range r.ImportValues {
			switch iv := riv.(type) {
			case map[string]interface{}:
				// Explicit child/parent form.
				// NOTE(review): these assertions panic if "child"/"parent"
				// are absent or not strings — confirm upstream guarantees.
				child := iv["child"].(string)
				parent := iv["parent"].(string)

				outiv = append(outiv, map[string]string{
					"child":  child,
					"parent": parent,
				})

				// get child table
				vv, err := cvals.Table(r.Name + "." + child)
				if err != nil {
					log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err)
					continue
				}
				// create value map from child to be merged into parent
				b = CoalesceTables(cvals, pathToMap(parent, vv.AsMap()))
			case string:
				// Shorthand form: import from the child's "exports" table
				// directly into the root of the parent values.
				child := "exports." + iv
				outiv = append(outiv, map[string]string{
					"child":  child,
					"parent": ".",
				})
				vm, err := cvals.Table(r.Name + "." + child)
				if err != nil {
					log.Printf("Warning: ImportValues missing table: %v", err)
					continue
				}
				b = CoalesceTables(b, vm.AsMap())
			}
		}
		// set our formatted import values
		r.ImportValues = outiv
	}

	// set the new values
	c.Values = CoalesceTables(b, cvals)

	return nil
}
+
// processDependencyImportValues imports specified chart values from child to parent.
//
// Children are processed depth-first so that values imported into a child
// are already in place when that child's parent is processed.
func processDependencyImportValues(c *chart.Chart) error {
	for _, d := range c.Dependencies() {
		// recurse
		if err := processDependencyImportValues(d); err != nil {
			return err
		}
	}
	return processImportValues(c)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go
new file mode 100644
index 000000000..8f06bcc9a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package chartutil contains tools for working with charts.
+
+Charts are described in the chart package (pkg/chart).
+This package provides utilities for serializing and deserializing charts.
+
+A chart can be represented on the file system in one of two ways:
+
+ - As a directory that contains a Chart.yaml file and other chart things.
+ - As a tarred gzipped file containing a directory that then contains a
+ Chart.yaml file.
+
+This package provides utilities for working with those file formats.
+
+The preferred way of loading a chart is using 'loader.Load':
+
+ chart, err := loader.Load(filename)
+
+This will attempt to discover whether the file at 'filename' is a directory or
+a chart archive. It will then load accordingly.
+
+For accepting raw compressed tar file data from an io.Reader, the
+'loader.LoadArchive()' will read in the data, uncompress it, and unpack it
+into a Chart.
+
+When creating charts in memory, use the 'helm.sh/helm/pkg/chart'
+package directly.
+*/
+package chartutil // import "helm.sh/helm/v3/pkg/chartutil"
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go
new file mode 100644
index 000000000..fcdcc27ea
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go
@@ -0,0 +1,35 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "fmt"
+)
+
// ErrNoTable indicates that a chart does not have a matching table.
type ErrNoTable struct {
	Key string
}

// Error implements the error interface.
func (e ErrNoTable) Error() string {
	return fmt.Sprintf("%q is not a table", e.Key)
}

// ErrNoValue indicates that Values does not contain a key with a value.
type ErrNoValue struct {
	Key string
}

// Error implements the error interface.
func (e ErrNoValue) Error() string {
	return fmt.Sprintf("%q is not a value", e.Key)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go
new file mode 100644
index 000000000..6ad09e417
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go
@@ -0,0 +1,91 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+)
+
// Expand uncompresses and extracts a chart into the specified directory.
//
// The chart's name is read from the archive's Chart.yaml and becomes the
// subdirectory of dir; SecureJoin confines both the chart directory and
// every extracted file path beneath dir, preventing path traversal.
func Expand(dir string, r io.Reader) error {
	files, err := loader.LoadArchiveFiles(r)
	if err != nil {
		return err
	}

	// Get the name of the chart
	var chartName string
	for _, file := range files {
		if file.Name == "Chart.yaml" {
			ch := &chart.Metadata{}
			if err := yaml.Unmarshal(file.Data, ch); err != nil {
				return errors.Wrap(err, "cannot load Chart.yaml")
			}
			chartName = ch.Name
		}
	}
	if chartName == "" {
		return errors.New("chart name not specified")
	}

	// Find the base directory
	chartdir, err := securejoin.SecureJoin(dir, chartName)
	if err != nil {
		return err
	}

	// Copy all files verbatim. We don't parse these files because parsing can remove
	// comments.
	for _, file := range files {
		outpath, err := securejoin.SecureJoin(chartdir, file.Name)
		if err != nil {
			return err
		}

		// Make sure the necessary subdirs get created.
		basedir := filepath.Dir(outpath)
		if err := os.MkdirAll(basedir, 0755); err != nil {
			return err
		}

		if err := ioutil.WriteFile(outpath, file.Data, 0644); err != nil {
			return err
		}
	}

	return nil
}
+
// ExpandFile expands the src file into the dest directory.
//
// It is a convenience wrapper around Expand that opens (and closes) the
// chart archive located at src.
func ExpandFile(dest, src string) error {
	h, err := os.Open(src)
	if err != nil {
		return err
	}
	defer h.Close()
	return Expand(dest, h)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go
new file mode 100644
index 000000000..753dc98c1
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go
@@ -0,0 +1,87 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/xeipuuv/gojsonschema"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
+// ValidateAgainstSchema checks that values does not violate the structure laid out in schema
+func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error {
+ var sb strings.Builder
+ if chrt.Schema != nil {
+ err := ValidateAgainstSingleSchema(values, chrt.Schema)
+ if err != nil {
+ sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
+ sb.WriteString(err.Error())
+ }
+ }
+
+ // For each dependency, recursively call this function with the coalesced values
+ for _, subchart := range chrt.Dependencies() {
+ subchartValues := values[subchart.Name()].(map[string]interface{})
+ if err := ValidateAgainstSchema(subchart, subchartValues); err != nil {
+ sb.WriteString(err.Error())
+ }
+ }
+
+ if sb.Len() > 0 {
+ return errors.New(sb.String())
+ }
+
+ return nil
+}
+
// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema.
//
// values is round-tripped YAML→JSON before being checked against the
// JSON-schema document in schemaJSON.
func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) error {
	valuesData, err := yaml.Marshal(values)
	if err != nil {
		return err
	}
	valuesJSON, err := yaml.YAMLToJSON(valuesData)
	if err != nil {
		return err
	}
	// An empty values document converts to JSON "null"; validate an empty
	// object instead so empty values can still satisfy a schema.
	if bytes.Equal(valuesJSON, []byte("null")) {
		valuesJSON = []byte("{}")
	}
	schemaLoader := gojsonschema.NewBytesLoader(schemaJSON)
	valuesLoader := gojsonschema.NewBytesLoader(valuesJSON)

	result, err := gojsonschema.Validate(schemaLoader, valuesLoader)
	if err != nil {
		return err
	}

	if !result.Valid() {
		// Fold every schema violation into one bulleted error message.
		var sb strings.Builder
		for _, desc := range result.Errors() {
			sb.WriteString(fmt.Sprintf("- %s\n", desc))
		}
		return errors.New(sb.String())
	}

	return nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/save.go b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go
new file mode 100644
index 000000000..2ce4eddaf
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go
@@ -0,0 +1,244 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
+var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=")
+
// SaveDir saves a chart as files in a directory.
//
// This takes the chart name, and creates a new subdirectory inside of the given dest
// directory, writing the chart's contents to that subdirectory. Dependencies
// are written as .tgz archives under charts/ rather than expanded.
func SaveDir(c *chart.Chart, dest string) error {
	// Create the chart directory
	outdir := filepath.Join(dest, c.Name())
	if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
		return errors.Errorf("file %s already exists and is not a directory", outdir)
	}
	if err := os.MkdirAll(outdir, 0755); err != nil {
		return err
	}

	// Save the chart file.
	if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil {
		return err
	}

	// Save values.yaml (copied verbatim from the raw chart files)
	for _, f := range c.Raw {
		if f.Name == ValuesfileName {
			vf := filepath.Join(outdir, ValuesfileName)
			if err := writeFile(vf, f.Data); err != nil {
				return err
			}
		}
	}

	// Save values.schema.json if it exists
	if c.Schema != nil {
		filename := filepath.Join(outdir, SchemafileName)
		if err := writeFile(filename, c.Schema); err != nil {
			return err
		}
	}

	// Save templates and files
	for _, o := range [][]*chart.File{c.Templates, c.Files} {
		for _, f := range o {
			n := filepath.Join(outdir, f.Name)
			if err := writeFile(n, f.Data); err != nil {
				return err
			}
		}
	}

	// Save dependencies
	base := filepath.Join(outdir, ChartsDir)
	for _, dep := range c.Dependencies() {
		// Here, we write each dependency as a tar file.
		if _, err := Save(dep, base); err != nil {
			return errors.Wrapf(err, "saving %s", dep.ChartFullPath())
		}
	}
	return nil
}
+
// Save creates an archived chart to the given directory.
//
// This takes an existing chart and a destination directory.
//
// If the directory is /foo, and the chart is named bar, with version 1.0.0, this
// will generate /foo/bar-1.0.0.tgz.
//
// This returns the absolute path to the chart archive file.
func Save(c *chart.Chart, outDir string) (string, error) {
	if err := c.Validate(); err != nil {
		return "", errors.Wrap(err, "chart validation")
	}

	filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version)
	filename = filepath.Join(outDir, filename)
	dir := filepath.Dir(filename)
	// Create outDir if it is missing; any other stat failure is fatal.
	if stat, err := os.Stat(dir); err != nil {
		if os.IsNotExist(err) {
			if err2 := os.MkdirAll(dir, 0755); err2 != nil {
				return "", err2
			}
		} else {
			return "", errors.Wrapf(err, "stat %s", dir)
		}
	} else if !stat.IsDir() {
		return "", errors.Errorf("is not a directory: %s", dir)
	}

	f, err := os.Create(filename)
	if err != nil {
		return "", err
	}

	// Wrap in gzip writer
	zipper := gzip.NewWriter(f)
	zipper.Header.Extra = headerBytes
	zipper.Header.Comment = "Helm"

	// Wrap in tar writer
	twriter := tar.NewWriter(zipper)
	rollback := false
	defer func() {
		// Close innermost-first so buffered tar/gzip data is flushed to the
		// file, then delete the partial archive if writing failed.
		twriter.Close()
		zipper.Close()
		f.Close()
		if rollback {
			os.Remove(filename)
		}
	}()

	if err := writeTarContents(twriter, c, ""); err != nil {
		rollback = true
		return filename, err
	}
	return filename, nil
}
+
// writeTarContents writes chart c — Chart.yaml, Chart.lock (v2 only),
// values.yaml, values.schema.json, templates and files — into out under
// prefix/<chart name>, recursing into dependencies under charts/.
func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error {
	base := filepath.Join(prefix, c.Name())

	// Pull out the dependencies of a v1 Chart, since there's no way
	// to tell the serializer to skip a field for just this use case
	savedDependencies := c.Metadata.Dependencies
	if c.Metadata.APIVersion == chart.APIVersionV1 {
		c.Metadata.Dependencies = nil
	}
	// Save Chart.yaml
	cdata, err := yaml.Marshal(c.Metadata)
	// Restore the dependencies before acting on the marshal error so the
	// chart is never left mutated.
	if c.Metadata.APIVersion == chart.APIVersionV1 {
		c.Metadata.Dependencies = savedDependencies
	}
	if err != nil {
		return err
	}
	if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata); err != nil {
		return err
	}

	// Save Chart.lock
	// TODO: remove the APIVersion check when APIVersionV1 is not used anymore
	if c.Metadata.APIVersion == chart.APIVersionV2 {
		if c.Lock != nil {
			ldata, err := yaml.Marshal(c.Lock)
			if err != nil {
				return err
			}
			if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata); err != nil {
				return err
			}
		}
	}

	// Save values.yaml
	for _, f := range c.Raw {
		if f.Name == ValuesfileName {
			if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data); err != nil {
				return err
			}
		}
	}

	// Save values.schema.json if it exists
	if c.Schema != nil {
		if !json.Valid(c.Schema) {
			return errors.New("Invalid JSON in " + SchemafileName)
		}
		if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil {
			return err
		}
	}

	// Save templates
	for _, f := range c.Templates {
		n := filepath.Join(base, f.Name)
		if err := writeToTar(out, n, f.Data); err != nil {
			return err
		}
	}

	// Save files
	for _, f := range c.Files {
		n := filepath.Join(base, f.Name)
		if err := writeToTar(out, n, f.Data); err != nil {
			return err
		}
	}

	// Save dependencies
	for _, dep := range c.Dependencies() {
		if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil {
			return err
		}
	}
	return nil
}
+
// writeToTar writes a single file entry to a tar archive with mode 0644 and
// the current time as its modification time. The entry name is normalized
// to forward slashes for portability of the archive.
func writeToTar(out *tar.Writer, name string, body []byte) error {
	// TODO: Do we need to create dummy parent directory names if none exist?
	hdr := tar.Header{
		Name:    filepath.ToSlash(name),
		Mode:    0644,
		Size:    int64(len(body)),
		ModTime: time.Now(),
	}
	if err := out.WriteHeader(&hdr); err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		return err
	}
	return nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go
new file mode 100644
index 000000000..d253731ec
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go
@@ -0,0 +1,107 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/pkg/errors"
+)
+
// validName is a regular expression for resource names.
//
// According to the Kubernetes help text, the regular expression it uses is:
//
//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
//
// This follows the above regular expression (but requires a full string match, not partial).
//
// The Kubernetes documentation is here, though it is not entirely correct:
// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)

var (
	// errMissingName indicates that a release (name) was not provided.
	errMissingName = errors.New("no name provided")

	// errInvalidName indicates that an invalid release name was provided.
	// The "53" in the message mirrors maxReleaseNameLen below.
	errInvalidName = errors.New(fmt.Sprintf(
		"invalid release name, must match regex %s and the length must not be longer than 53",
		validName.String()))

	// errInvalidKubernetesName indicates that the name does not meet the Kubernetes
	// restrictions on metadata names. The "253" mirrors maxMetadataNameLen below.
	errInvalidKubernetesName = errors.New(fmt.Sprintf(
		"invalid metadata name, must match regex %s and the length must not be longer than 253",
		validName.String()))
)

const (
	// maxReleaseNameLen is the maximum length Helm allows for a release name.
	// (The original comment named this "maxNameLen", which did not match the
	// identifier.)
	maxReleaseNameLen = 53
	// maxMetadataNameLen is the maximum length Kubernetes allows for any name.
	maxMetadataNameLen = 253
)
+
+// ValidateReleaseName performs checks for an entry for a Helm release name
+//
+// For Helm to allow a name, it must be below a certain character count (53) and also match
+// a regular expression.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+func ValidateReleaseName(name string) error {
+ // This case is preserved for backwards compatibility
+ if name == "" {
+ return errMissingName
+
+ }
+ if len(name) > maxReleaseNameLen || !validName.MatchString(name) {
+ return errInvalidName
+ }
+ return nil
+}
+
+// ValidateMetadataName validates the name field of a Kubernetes metadata object.
+//
+// Empty strings, strings longer than 253 chars, or strings that don't match the regexp
+// will fail.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// Deprecated: remove in Helm 4. Name validation now uses rules defined in
+// pkg/lint/rules.validateMetadataNameFunc()
+func ValidateMetadataName(name string) error {
+ if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) {
+ return errInvalidKubernetesName
+ }
+ return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/values.go b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
new file mode 100644
index 000000000..e1cdf4642
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
// GlobalKey is the name of the Values key that is used for storing global vars.
const GlobalKey = "global"

// Values represents a collection of chart values, keyed by YAML field name.
type Values map[string]interface{}
+
// YAML encodes the Values into a YAML string.
//
// On marshal failure the returned string is empty and the error is set.
func (v Values) YAML() (string, error) {
	b, err := yaml.Marshal(v)
	return string(b), err
}
+
// Table gets a table (YAML subsection) from a Values object.
//
// The table is returned as a Values.
//
// Compound table names may be specified with dots:
//
//	foo.bar
//
// The above will be evaluated as "The table bar inside the table
// foo".
//
// An ErrNoTable is returned if the table does not exist.
func (v Values) Table(name string) (Values, error) {
	table := v
	var err error

	// Descend one dotted segment at a time, stopping at the first segment
	// that does not resolve to a table.
	for _, n := range parsePath(name) {
		if table, err = tableLookup(table, n); err != nil {
			break
		}
	}
	return table, err
}
+
+// AsMap is a utility function for converting Values to a map[string]interface{}.
+//
+// It protects against nil map panics.
+func (v Values) AsMap() map[string]interface{} {
+ if v == nil || len(v) == 0 {
+ return map[string]interface{}{}
+ }
+ return v
+}
+
// Encode writes serialized Values information to the given io.Writer.
//
// The values are marshaled as YAML before being written.
func (v Values) Encode(w io.Writer) error {
	out, err := yaml.Marshal(v)
	if err != nil {
		return err
	}
	_, err = w.Write(out)
	return err
}
+
// tableLookup resolves a single (non-dotted) key in v and returns its value
// as a table. ErrNoTable is returned when the key is absent or its value is
// not a map.
func tableLookup(v Values, simple string) (Values, error) {
	v2, ok := v[simple]
	if !ok {
		return v, ErrNoTable{simple}
	}
	if vv, ok := v2.(map[string]interface{}); ok {
		return vv, nil
	}

	// This catches a case where a value is of type Values, but doesn't (for some
	// reason) match the map[string]interface{}. This has been observed in the
	// wild, and might be a result of a nil map of type Values.
	if vv, ok := v2.(Values); ok {
		return vv, nil
	}

	return Values{}, ErrNoTable{simple}
}
+
// ReadValues will parse YAML byte data into a Values.
//
// An empty document yields a non-nil empty Values rather than a nil map.
func ReadValues(data []byte) (vals Values, err error) {
	err = yaml.Unmarshal(data, &vals)
	if len(vals) == 0 {
		vals = Values{}
	}
	return vals, err
}
+
// ReadValuesFile will parse a YAML file into a map of values.
//
// On read failure a non-nil empty map is returned together with the error.
func ReadValuesFile(filename string) (Values, error) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return map[string]interface{}{}, err
	}
	return ReadValues(data)
}
+
// ReleaseOptions represents the additional release options needed
// for the composition of the final values struct
type ReleaseOptions struct {
	// Name is the release name.
	Name string
	// Namespace is the namespace the release targets.
	Namespace string
	// Revision is the release revision number.
	Revision int
	// IsUpgrade is true when rendering for an upgrade operation.
	IsUpgrade bool
	// IsInstall is true when rendering for an install operation.
	IsInstall bool
}
+
// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files
//
// This takes both ReleaseOptions and Capabilities to merge into the render values.
// A nil caps falls back to DefaultCapabilities. Coalesced values are
// validated against the chart's schema(s) before being attached as "Values".
func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) {
	if caps == nil {
		caps = DefaultCapabilities
	}
	top := map[string]interface{}{
		"Chart":        chrt.Metadata,
		"Capabilities": caps,
		"Release": map[string]interface{}{
			"Name":      options.Name,
			"Namespace": options.Namespace,
			"IsUpgrade": options.IsUpgrade,
			"IsInstall": options.IsInstall,
			"Revision":  options.Revision,
			"Service":   "Helm",
		},
	}

	vals, err := CoalesceValues(chrt, chrtVals)
	if err != nil {
		return top, err
	}

	if err := ValidateAgainstSchema(chrt, vals); err != nil {
		errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s"
		return top, fmt.Errorf(errFmt, err.Error())
	}

	top["Values"] = vals
	return top, nil
}
+
// istable is a special-purpose function reporting whether v matches the
// definition of a YAML table (a map with string keys).
func istable(v interface{}) bool {
	switch v.(type) {
	case map[string]interface{}:
		return true
	default:
		return false
	}
}
+
// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path.
// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods.
// Given the following YAML data the value at path "chapter.one.title" is "Loomings".
//
//	chapter:
//	  one:
//	    title: "Loomings"
//
// An empty path is an error; a path that cannot be resolved to a non-table
// leaf yields ErrNoValue.
func (v Values) PathValue(path string) (interface{}, error) {
	if path == "" {
		return nil, errors.New("YAML path cannot be empty")
	}
	return v.pathValue(parsePath(path))
}
+
// pathValue resolves an already-parsed path to its leaf value. Only
// non-table leaves are returned; an absent key or a key holding a table
// yields ErrNoValue.
func (v Values) pathValue(path []string) (interface{}, error) {
	if len(path) == 1 {
		// if exists must be root key not table
		if _, ok := v[path[0]]; ok && !istable(v[path[0]]) {
			return v[path[0]], nil
		}
		return nil, ErrNoValue{path[0]}
	}

	// Split off the final key; everything before it must resolve to a table.
	key, path := path[len(path)-1], path[:len(path)-1]
	// get our table for table path
	t, err := v.Table(joinPath(path...))
	if err != nil {
		return nil, ErrNoValue{key}
	}
	// check table for key and ensure value is not a table
	if k, ok := t[key]; ok && !istable(k) {
		return k, nil
	}
	return nil, ErrNoValue{key}
}
+
// parsePath splits a dotted YAML path such as "a.b.c" into its segments.
func parsePath(key string) []string {
	return strings.Split(key, ".")
}

// joinPath joins path segments back into a dotted YAML path.
func joinPath(path ...string) string {
	return strings.Join(path, ".")
}
diff --git a/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/vendor/helm.sh/helm/v3/pkg/cli/environment.go
new file mode 100644
index 000000000..ee60d981f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/cli/environment.go
@@ -0,0 +1,186 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package cli describes the operating environment for the Helm CLI.
+
+Helm's environment encapsulates all of the service dependencies Helm has.
+These dependencies are expressed as interfaces so that alternate implementations
+(mocks, etc.) can be easily generated.
+*/
+package cli
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/pflag"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+
+ "helm.sh/helm/v3/pkg/helmpath"
+)
+
// defaultMaxHistory sets the default maximum number of releases kept in release history (0 means unlimited).
+const defaultMaxHistory = 10
+
// EnvSettings describes all of the environment settings.
type EnvSettings struct {
	// namespace is the namespace scope, seeded from HELM_NAMESPACE and bound
	// to the --namespace/-n flag; resolve the effective value via Namespace().
	namespace string
	// config holds the Kubernetes CLI flags; New binds its fields to the
	// exported fields below so both share the same storage.
	config *genericclioptions.ConfigFlags

	// KubeConfig is the path to the kubeconfig file.
	KubeConfig string
	// KubeContext is the name of the kubeconfig context.
	KubeContext string
	// KubeToken is the bearer token used for authentication.
	KubeToken string
	// KubeAsUser is the username to impersonate for the operation.
	KubeAsUser string
	// KubeAsGroups are the groups to impersonate for the operation, parsed from a comma-delimited list.
	KubeAsGroups []string
	// KubeAPIServer is the Kubernetes API server endpoint for authentication.
	KubeAPIServer string
	// KubeCaFile is a custom certificate authority file.
	KubeCaFile string
	// Debug indicates whether or not Helm is running in Debug mode.
	Debug bool
	// RegistryConfig is the path to the registry config file.
	RegistryConfig string
	// RepositoryConfig is the path to the repositories file.
	RepositoryConfig string
	// RepositoryCache is the path to the repository cache directory.
	RepositoryCache string
	// PluginsDirectory is the path to the plugins directory.
	PluginsDirectory string
	// MaxHistory is the max release history maintained.
	MaxHistory int
}
+
+func New() *EnvSettings {
+ env := &EnvSettings{
+ namespace: os.Getenv("HELM_NAMESPACE"),
+ MaxHistory: envIntOr("HELM_MAX_HISTORY", defaultMaxHistory),
+ KubeContext: os.Getenv("HELM_KUBECONTEXT"),
+ KubeToken: os.Getenv("HELM_KUBETOKEN"),
+ KubeAsUser: os.Getenv("HELM_KUBEASUSER"),
+ KubeAsGroups: envCSV("HELM_KUBEASGROUPS"),
+ KubeAPIServer: os.Getenv("HELM_KUBEAPISERVER"),
+ KubeCaFile: os.Getenv("HELM_KUBECAFILE"),
+ PluginsDirectory: envOr("HELM_PLUGINS", helmpath.DataPath("plugins")),
+ RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry.json")),
+ RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")),
+ RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")),
+ }
+ env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG"))
+
+ // bind to kubernetes config flags
+ env.config = &genericclioptions.ConfigFlags{
+ Namespace: &env.namespace,
+ Context: &env.KubeContext,
+ BearerToken: &env.KubeToken,
+ APIServer: &env.KubeAPIServer,
+ CAFile: &env.KubeCaFile,
+ KubeConfig: &env.KubeConfig,
+ Impersonate: &env.KubeAsUser,
+ ImpersonateGroup: &env.KubeAsGroups,
+ }
+ return env
+}
+
// AddFlags binds flags to the given flagset. The flag storage is the
// EnvSettings fields themselves, so environment-derived values appear as the
// flags' defaults (kubeconfig is the exception: its default is always "").
func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) {
	fs.StringVarP(&s.namespace, "namespace", "n", s.namespace, "namespace scope for this request")
	fs.StringVar(&s.KubeConfig, "kubeconfig", "", "path to the kubeconfig file")
	fs.StringVar(&s.KubeContext, "kube-context", s.KubeContext, "name of the kubeconfig context to use")
	fs.StringVar(&s.KubeToken, "kube-token", s.KubeToken, "bearer token used for authentication")
	fs.StringVar(&s.KubeAsUser, "kube-as-user", s.KubeAsUser, "username to impersonate for the operation")
	fs.StringArrayVar(&s.KubeAsGroups, "kube-as-group", s.KubeAsGroups, "group to impersonate for the operation, this flag can be repeated to specify multiple groups.")
	fs.StringVar(&s.KubeAPIServer, "kube-apiserver", s.KubeAPIServer, "the address and the port for the Kubernetes API server")
	fs.StringVar(&s.KubeCaFile, "kube-ca-file", s.KubeCaFile, "the certificate authority file for the Kubernetes API server connection")
	fs.BoolVar(&s.Debug, "debug", s.Debug, "enable verbose output")
	fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file")
	fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs")
	fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the file containing cached repository indexes")
}
+
// envOr returns the value of environment variable name, or def when the
// variable is not set. A variable that is set but empty is returned as "".
func envOr(name, def string) string {
	v, ok := os.LookupEnv(name)
	if !ok {
		return def
	}
	return v
}

// envIntOr reads an integer from environment variable name, falling back to
// def when name is empty, the variable is unset, or its value is unparsable.
func envIntOr(name string, def int) int {
	if name == "" {
		return def
	}
	if n, err := strconv.Atoi(envOr(name, strconv.Itoa(def))); err == nil {
		return n
	}
	return def
}

// envCSV splits the named environment variable on commas, after trimming
// stray commas and spaces from both ends. An empty value yields nil.
func envCSV(name string) []string {
	raw := strings.Trim(os.Getenv(name), ", ")
	if raw == "" {
		return nil
	}
	return strings.Split(raw, ",")
}
+
+func (s *EnvSettings) EnvVars() map[string]string {
+ envvars := map[string]string{
+ "HELM_BIN": os.Args[0],
+ "HELM_CACHE_HOME": helmpath.CachePath(""),
+ "HELM_CONFIG_HOME": helmpath.ConfigPath(""),
+ "HELM_DATA_HOME": helmpath.DataPath(""),
+ "HELM_DEBUG": fmt.Sprint(s.Debug),
+ "HELM_PLUGINS": s.PluginsDirectory,
+ "HELM_REGISTRY_CONFIG": s.RegistryConfig,
+ "HELM_REPOSITORY_CACHE": s.RepositoryCache,
+ "HELM_REPOSITORY_CONFIG": s.RepositoryConfig,
+ "HELM_NAMESPACE": s.Namespace(),
+ "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory),
+
+ // broken, these are populated from helm flags and not kubeconfig.
+ "HELM_KUBECONTEXT": s.KubeContext,
+ "HELM_KUBETOKEN": s.KubeToken,
+ "HELM_KUBEASUSER": s.KubeAsUser,
+ "HELM_KUBEASGROUPS": strings.Join(s.KubeAsGroups, ","),
+ "HELM_KUBEAPISERVER": s.KubeAPIServer,
+ "HELM_KUBECAFILE": s.KubeCaFile,
+ }
+ if s.KubeConfig != "" {
+ envvars["KUBECONFIG"] = s.KubeConfig
+ }
+ return envvars
+}
+
+// Namespace gets the namespace from the configuration
+func (s *EnvSettings) Namespace() string {
+ if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil {
+ return ns
+ }
+ return "default"
+}
+
// RESTClientGetter gets the kubeconfig from EnvSettings: it exposes the
// genericclioptions.ConfigFlags bound in New, for use by Kubernetes clients.
func (s *EnvSettings) RESTClientGetter() genericclioptions.RESTClientGetter {
	return s.config
}
diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go
new file mode 100644
index 000000000..93afb1461
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go
@@ -0,0 +1,384 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/internal/fileutil"
+ "helm.sh/helm/v3/internal/urlutil"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/helmpath"
+ "helm.sh/helm/v3/pkg/provenance"
+ "helm.sh/helm/v3/pkg/repo"
+)
+
// VerificationStrategy describes a strategy for determining whether to verify a chart.
// The declaration order below is significant: callers compare strategies
// numerically (e.g. DownloadTo checks c.Verify > VerifyNever).
type VerificationStrategy int

const (
	// VerifyNever will skip all verification of a chart.
	VerifyNever VerificationStrategy = iota
	// VerifyIfPossible will attempt a verification, it will not error if verification
	// data is missing. But it will not stop processing if verification fails.
	VerifyIfPossible
	// VerifyAlways will always attempt a verification, and will fail if the
	// verification fails.
	VerifyAlways
	// VerifyLater will fetch verification data, but not do any verification.
	// This is to accommodate the case where another step of the process will
	// perform verification.
	VerifyLater
)

// ErrNoOwnerRepo indicates that a given chart URL can't be found in any repos.
var ErrNoOwnerRepo = errors.New("could not find a repo containing the given URL")
+
// ChartDownloader handles downloading a chart.
//
// It is capable of performing verifications on charts as well.
type ChartDownloader struct {
	// Out is the location to write warning and info messages.
	Out io.Writer
	// Verify indicates what verification strategy to use.
	Verify VerificationStrategy
	// Keyring is the keyring file used for verification.
	Keyring string
	// Getter collection for the operation
	Getters getter.Providers
	// Options provide parameters to be passed along to the Getter being initialized.
	Options []getter.Option
	// RegistryClient is the (experimental) OCI registry client.
	RegistryClient *registry.Client
	// RepositoryConfig is the path to the repositories file.
	RepositoryConfig string
	// RepositoryCache is the path to the repository cache directory.
	RepositoryCache string
}
+
// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file.
//
// If Verify is set to VerifyNever, the verification will be nil.
// If Verify is set to VerifyIfPossible, this will return a verification (or nil on failure), and print a warning on failure.
// If Verify is set to VerifyAlways, this will return a verification or an error if the verification fails.
// If Verify is set to VerifyLater, this will download the prov file (if it exists), but not verify it.
//
// For VerifyNever and VerifyIfPossible, the Verification may be empty.
//
// Returns a string path to the location where the file was downloaded and a verification
// (if provenance was verified), or an error if something bad happened.
func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) {
	u, err := c.ResolveChartVersion(ref, version)
	if err != nil {
		return "", nil, err
	}

	// Pick the getter (HTTP, OCI, plugin, ...) registered for this URL scheme.
	g, err := c.Getters.ByScheme(u.Scheme)
	if err != nil {
		return "", nil, err
	}

	data, err := g.Get(u.String(), c.Options...)
	if err != nil {
		return "", nil, err
	}

	name := filepath.Base(u.Path)
	if u.Scheme == registry.OCIScheme {
		// OCI refs carry no archive file name, so synthesize "<name>-<version>.tgz".
		name = fmt.Sprintf("%s-%s.tgz", name, version)
	}

	destfile := filepath.Join(dest, name)
	// Atomic write avoids leaving a truncated archive behind on failure.
	if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil {
		return destfile, nil, err
	}

	// If provenance is requested, verify it.
	ver := &provenance.Verification{}
	if c.Verify > VerifyNever {
		body, err := g.Get(u.String() + ".prov")
		if err != nil {
			if c.Verify == VerifyAlways {
				return destfile, ver, errors.Errorf("failed to fetch provenance %q", u.String()+".prov")
			}
			// Best-effort modes: warn and hand back the chart unverified.
			fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err)
			return destfile, ver, nil
		}
		provfile := destfile + ".prov"
		if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil {
			return destfile, nil, err
		}

		if c.Verify != VerifyLater {
			ver, err = VerifyChart(destfile, c.Keyring)
			if err != nil {
				// Fail always in this case, since it means the verification step
				// failed.
				return destfile, ver, err
			}
		}
	}
	return destfile, ver, nil
}
+
// ResolveChartVersion resolves a chart reference to a URL.
//
// It returns the URL and sets the ChartDownloader's Options that can fetch
// the URL using the appropriate Getter.
//
// A reference may be an HTTP URL, a 'reponame/chartname' reference, or a local path.
//
// A version is a SemVer string (1.2.3-beta.1+f334a6789).
//
//   - For fully qualified URLs, the version will be ignored (since URLs aren't versioned)
//   - For a chart reference
//     * If version is non-empty, this will return the URL for that version
//     * If version is empty, this will return the URL for the latest version
//     * If no version can be found, an error is returned
//
// Note that this method mutates c.Options as a side effect (adding URL,
// TLS, and auth options for the matched repository).
func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, error) {
	u, err := url.Parse(ref)
	if err != nil {
		return nil, errors.Errorf("invalid chart URL format: %s", ref)
	}

	rf, err := loadRepoConfig(c.RepositoryConfig)
	if err != nil {
		return u, err
	}

	if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 {
		// In this case, we have to find the parent repo that contains this chart
		// URL. And this is an unfortunate problem, as it requires actually going
		// through each repo cache file and finding a matching URL. But basically
		// we want to find the repo in case we have special SSL cert config
		// for that repo.

		rc, err := c.scanReposForURL(ref, rf)
		if err != nil {
			// If there is no special config, return the default HTTP client and
			// swallow the error.
			if err == ErrNoOwnerRepo {
				// Make sure to add the ref URL as the URL for the getter
				c.Options = append(c.Options, getter.WithURL(ref))
				return u, nil
			}
			return u, err
		}

		// If we get here, we don't need to go through the next phase of looking
		// up the URL. We have it already. So we just set the parameters and return.
		c.Options = append(
			c.Options,
			getter.WithURL(rc.URL),
		)
		if rc.CertFile != "" || rc.KeyFile != "" || rc.CAFile != "" {
			c.Options = append(c.Options, getter.WithTLSClientConfig(rc.CertFile, rc.KeyFile, rc.CAFile))
		}
		if rc.Username != "" && rc.Password != "" {
			c.Options = append(
				c.Options,
				getter.WithBasicAuth(rc.Username, rc.Password),
				getter.WithPassCredentialsAll(rc.PassCredentialsAll),
			)
		}
		return u, nil
	}

	// See if it's of the form: repo/path_to_chart
	p := strings.SplitN(u.Path, "/", 2)
	if len(p) < 2 {
		return u, errors.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u)
	}

	repoName := p[0]
	chartName := p[1]
	rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories)

	if err != nil {
		return u, err
	}

	// Now that we have the chart repository information we can use that URL
	// to set the URL for the getter.
	c.Options = append(c.Options, getter.WithURL(rc.URL))

	r, err := repo.NewChartRepository(rc, c.Getters)
	if err != nil {
		return u, err
	}

	// Carry the repository's TLS and auth configuration over to the getter.
	if r != nil && r.Config != nil {
		if r.Config.CertFile != "" || r.Config.KeyFile != "" || r.Config.CAFile != "" {
			c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile))
		}
		if r.Config.Username != "" && r.Config.Password != "" {
			c.Options = append(c.Options,
				getter.WithBasicAuth(r.Config.Username, r.Config.Password),
				getter.WithPassCredentialsAll(r.Config.PassCredentialsAll),
			)
		}
	}

	// Next, we need to load the index, and actually look up the chart.
	idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name))
	i, err := repo.LoadIndexFile(idxFile)
	if err != nil {
		return u, errors.Wrap(err, "no cached repo found. (try 'helm repo update')")
	}

	cv, err := i.Get(chartName, version)
	if err != nil {
		return u, errors.Wrapf(err, "chart %q matching %s not found in %s index. (try 'helm repo update')", chartName, version, r.Config.Name)
	}

	if len(cv.URLs) == 0 {
		return u, errors.Errorf("chart %q has no downloadable URLs", ref)
	}

	// TODO: Seems that picking first URL is not fully correct
	u, err = url.Parse(cv.URLs[0])
	if err != nil {
		return u, errors.Errorf("invalid chart URL format: %s", ref)
	}

	// If the URL is relative (no scheme), prepend the chart repo's base URL
	if !u.IsAbs() {
		repoURL, err := url.Parse(rc.URL)
		if err != nil {
			return repoURL, err
		}
		q := repoURL.Query()
		// We need a trailing slash for ResolveReference to work, but make sure there isn't already one
		repoURL.Path = strings.TrimSuffix(repoURL.Path, "/") + "/"
		u = repoURL.ResolveReference(u)
		u.RawQuery = q.Encode()
		// TODO add user-agent
		if _, err := getter.NewHTTPGetter(getter.WithURL(rc.URL)); err != nil {
			return repoURL, err
		}
		return u, err
	}

	// TODO add user-agent
	return u, nil
}
+
+// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart.
+//
+// It assumes that a chart archive file is accompanied by a provenance file whose
+// name is the archive file name plus the ".prov" extension.
+func VerifyChart(path, keyring string) (*provenance.Verification, error) {
+ // For now, error out if it's not a tar file.
+ switch fi, err := os.Stat(path); {
+ case err != nil:
+ return nil, err
+ case fi.IsDir():
+ return nil, errors.New("unpacked charts cannot be verified")
+ case !isTar(path):
+ return nil, errors.New("chart must be a tgz file")
+ }
+
+ provfile := path + ".prov"
+ if _, err := os.Stat(provfile); err != nil {
+ return nil, errors.Wrapf(err, "could not load provenance file %s", provfile)
+ }
+
+ sig, err := provenance.NewFromKeyring(keyring, "")
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to load keyring")
+ }
+ return sig.Verify(path, provfile)
+}
+
// isTar reports whether the file name carries a ".tgz" extension,
// compared case-insensitively.
//
// Only the extension is inspected here; a subsequent step untars the file
// and validates its binary format.
func isTar(filename string) bool {
	ext := filepath.Ext(filename)
	return strings.EqualFold(ext, ".tgz")
}
+
+func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Entry, error) {
+ for _, rc := range cfgs {
+ if rc.Name == name {
+ if rc.URL == "" {
+ return nil, errors.Errorf("no URL found for repository %s", name)
+ }
+ return rc, nil
+ }
+ }
+ return nil, errors.Errorf("repo %s not found", name)
+}
+
// scanReposForURL scans all repos to find which repo contains the given URL.
//
// This will attempt to find the given URL in all of the known repositories files.
//
// If the URL is found, this will return the repo entry that contained that URL.
//
// If all of the repos are checked, but the URL is not found, an ErrNoOwnerRepo
// error is returned.
//
// Other errors may be returned when repositories cannot be loaded or searched.
//
// Technically, the fact that a URL is not found in a repo is not a failure indication.
// Charts are not required to be included in an index before they are valid. So
// be mindful of this case.
//
// The same URL can technically exist in two or more repositories. This algorithm
// will return the first one it finds. Order is determined by the order of repositories
// in the repositories.yaml file.
func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry, error) {
	// FIXME: This is far from optimal. Larger installations and index files will
	// incur a performance hit for this type of scanning.
	for _, rc := range rf.Repositories {
		r, err := repo.NewChartRepository(rc, c.Getters)
		if err != nil {
			return nil, err
		}

		idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name))
		i, err := repo.LoadIndexFile(idxFile)
		if err != nil {
			return nil, errors.Wrap(err, "no cached repo found. (try 'helm repo update')")
		}

		// Walk every chart entry, every version, every download URL of this
		// repo's cached index, looking for an exact URL match.
		for _, entry := range i.Entries {
			for _, ver := range entry {
				for _, dl := range ver.URLs {
					if urlutil.Equal(u, dl) {
						return rc, nil
					}
				}
			}
		}
	}
	// This means that there is no repo file for the given URL.
	return nil, ErrNoOwnerRepo
}
+
+func loadRepoConfig(file string) (*repo.File, error) {
+ r, err := repo.LoadFile(file)
+ if err != nil && !os.IsNotExist(errors.Cause(err)) {
+ return nil, err
+ }
+ return r, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/doc.go b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go
new file mode 100644
index 000000000..9588a7dfe
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package downloader provides a library for downloading charts.
+
+This package contains various tools for downloading charts from repository
+servers, and then storing them in Helm-specific directory structures. This
+library contains many functions that depend on a specific
+filesystem layout.
+*/
+package downloader
diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go
new file mode 100644
index 000000000..b0ade6c90
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go
@@ -0,0 +1,903 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "crypto"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/internal/resolver"
+ "helm.sh/helm/v3/internal/third_party/dep/fs"
+ "helm.sh/helm/v3/internal/urlutil"
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/helmpath"
+ "helm.sh/helm/v3/pkg/repo"
+)
+
// ErrRepoNotFound indicates that chart repositories can't be found in local repo cache.
// Repos holds the names of the missing repositories.
type ErrRepoNotFound struct {
	Repos []string
}

// Error implements the error interface.
func (e ErrRepoNotFound) Error() string {
	missing := strings.Join(e.Repos, ", ")
	return "no repository definition for " + missing
}
+
// Manager handles the lifecycle of fetching, resolving, and storing dependencies.
type Manager struct {
	// Out is used to print warnings and notifications.
	Out io.Writer
	// ChartPath is the path to the unpacked base chart upon which this operates.
	ChartPath string
	// Verify indicates what verification strategy to use for downloaded charts.
	Verify VerificationStrategy
	// Debug is the global "--debug" flag
	Debug bool
	// Keyring is the key ring file.
	Keyring string
	// SkipUpdate indicates that the repository should not be updated first.
	SkipUpdate bool
	// Getter collection for the operation
	Getters []getter.Provider
	// RegistryClient is the (experimental) OCI registry client, passed to
	// downloads of oci:// dependencies.
	RegistryClient *registry.Client
	// RepositoryConfig is the path to the repositories file.
	RepositoryConfig string
	// RepositoryCache is the path to the repository cache directory.
	RepositoryCache string
}
+
// Build rebuilds a local charts directory from a lockfile.
//
// If the lockfile is not present, this will run a Manager.Update()
//
// If SkipUpdate is set, this will not update the repository.
func (m *Manager) Build() error {
	c, err := m.loadChartDir()
	if err != nil {
		return err
	}

	// If a lock file is found, run a build from that. Otherwise, just do
	// an update.
	lock := c.Lock
	if lock == nil {
		return m.Update()
	}

	// Gather the dependency declarations from the chart metadata.
	req := c.Metadata.Dependencies

	// If using apiVersion v1, calculate the hash before resolve repo names
	// because resolveRepoNames will change req if req uses repo alias
	// and Helm 2 calculate the digest from the original req
	// Fix for: https://github.com/helm/helm/issues/7619
	var v2Sum string
	if c.Metadata.APIVersion == chart.APIVersionV1 {
		v2Sum, err = resolver.HashV2Req(req)
		if err != nil {
			return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies")
		}
	}

	if _, err := m.resolveRepoNames(req); err != nil {
		return err
	}

	if sum, err := resolver.HashReq(req, lock.Dependencies); err != nil || sum != lock.Digest {
		// If lock digest differs and chart is apiVersion v1, it maybe because the lock was built
		// with Helm 2 and therefore should be checked with Helm v2 hash
		// Fix for: https://github.com/helm/helm/issues/7233
		if c.Metadata.APIVersion == chart.APIVersionV1 {
			log.Println("warning: a valid Helm v3 hash was not found. Checking against Helm v2 hash...")
			if v2Sum != lock.Digest {
				return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies")
			}
		} else {
			return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). Please update the dependencies")
		}
	}

	// Check that all of the repos we're dependent on actually exist.
	if err := m.hasAllRepos(lock.Dependencies); err != nil {
		return err
	}

	if !m.SkipUpdate {
		// For each repo in the file, update the cached copy of that repo
		if err := m.UpdateRepositories(); err != nil {
			return err
		}
	}

	// Now we need to fetch every package here into charts/
	return m.downloadAll(lock.Dependencies)
}
+
// Update updates a local charts directory.
//
// It first reads the Chart.yaml file, and then attempts to
// negotiate versions based on that. It will download the versions
// from remote chart repositories unless SkipUpdate is true.
func (m *Manager) Update() error {
	c, err := m.loadChartDir()
	if err != nil {
		return err
	}

	// If no dependencies are found, we consider this a successful
	// completion.
	req := c.Metadata.Dependencies
	if req == nil {
		return nil
	}

	// Get the names of the repositories the dependencies need that Helm is
	// configured to know about.
	repoNames, err := m.resolveRepoNames(req)
	if err != nil {
		return err
	}

	// For the repositories Helm is not configured to know about, ensure Helm
	// has some information about them and, when possible, the index files
	// locally.
	// TODO(mattfarina): Repositories should be explicitly added by end users
	// rather than automatic. In Helm v4 require users to add repositories. They
	// should have to add them in order to make sure they are aware of the
	// repositories and opt-in to any locations, for security.
	repoNames, err = m.ensureMissingRepos(repoNames, req)
	if err != nil {
		return err
	}

	// For each of the repositories Helm is configured to know about, update
	// the index information locally.
	if !m.SkipUpdate {
		if err := m.UpdateRepositories(); err != nil {
			return err
		}
	}

	// Now we need to find out which version of a chart best satisfies the
	// dependencies in the Chart.yaml
	lock, err := m.resolve(req, repoNames)
	if err != nil {
		return err
	}

	// Now we need to fetch every package here into charts/
	if err := m.downloadAll(lock.Dependencies); err != nil {
		return err
	}

	// downloadAll might overwrite dependency version, recalculate lock digest
	newDigest, err := resolver.HashReq(req, lock.Dependencies)
	if err != nil {
		return err
	}
	lock.Digest = newDigest

	// If the lock file hasn't changed, don't write a new one.
	oldLock := c.Lock
	if oldLock != nil && oldLock.Digest == lock.Digest {
		return nil
	}

	// Finally, we need to write the lockfile. The boolean flags apiVersion v1
	// charts, which get legacy handling in writeLock.
	return writeLock(m.ChartPath, lock, c.Metadata.APIVersion == chart.APIVersionV1)
}
+
+func (m *Manager) loadChartDir() (*chart.Chart, error) {
+ if fi, err := os.Stat(m.ChartPath); err != nil {
+ return nil, errors.Wrapf(err, "could not find %s", m.ChartPath)
+ } else if !fi.IsDir() {
+ return nil, errors.New("only unpacked charts can be updated")
+ }
+ return loader.LoadDir(m.ChartPath)
+}
+
+// resolve takes a list of dependencies and translates them into an exact version to download.
+//
+// This returns a lock file, which has all of the dependencies normalized to a specific version.
+func (m *Manager) resolve(req []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) {
+ res := resolver.New(m.ChartPath, m.RepositoryCache)
+ return res.Resolve(req, repoNames)
+}
+
// downloadAll takes a list of dependencies and downloads them into charts/
//
// It will delete versions of the chart that exist on disk and might cause
// a conflict.
func (m *Manager) downloadAll(deps []*chart.Dependency) error {
	repos, err := m.loadChartRepositories()
	if err != nil {
		return err
	}

	destPath := filepath.Join(m.ChartPath, "charts")
	tmpPath := filepath.Join(m.ChartPath, "tmpcharts")

	// Check that 'charts' is actually a directory. If it does not exist, create it.
	if fi, err := os.Stat(destPath); err == nil {
		if !fi.IsDir() {
			return errors.Errorf("%q is not a directory", destPath)
		}
	} else if os.IsNotExist(err) {
		if err := os.MkdirAll(destPath, 0755); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("unable to retrieve file info for '%s': %v", destPath, err)
	}

	// Prepare tmpPath: downloads land here first and are moved into destPath
	// only when every dependency succeeds.
	if err := os.MkdirAll(tmpPath, 0755); err != nil {
		return err
	}
	defer os.RemoveAll(tmpPath)

	fmt.Fprintf(m.Out, "Saving %d charts\n", len(deps))
	var saveError error
	// churls tracks already-downloaded chart URLs so duplicates are skipped.
	churls := make(map[string]struct{})
	for _, dep := range deps {
		// No repository means the chart is in charts directory
		if dep.Repository == "" {
			fmt.Fprintf(m.Out, "Dependency %s did not declare a repository. Assuming it exists in the charts directory\n", dep.Name)
			// NOTE: we are only validating the local dependency conforms to the constraints. No copying to tmpPath is necessary.
			chartPath := filepath.Join(destPath, dep.Name)
			ch, err := loader.LoadDir(chartPath)
			if err != nil {
				return fmt.Errorf("unable to load chart '%s': %v", chartPath, err)
			}

			constraint, err := semver.NewConstraint(dep.Version)
			if err != nil {
				return fmt.Errorf("dependency %s has an invalid version/constraint format: %s", dep.Name, err)
			}

			v, err := semver.NewVersion(ch.Metadata.Version)
			if err != nil {
				return fmt.Errorf("invalid version %s for dependency %s: %s", dep.Version, dep.Name, err)
			}

			if !constraint.Check(v) {
				saveError = fmt.Errorf("dependency %s at version %s does not satisfy the constraint %s", dep.Name, ch.Metadata.Version, dep.Version)
				break
			}
			continue
		}
		// file:// repositories are archived straight from the local directory.
		if strings.HasPrefix(dep.Repository, "file://") {
			if m.Debug {
				fmt.Fprintf(m.Out, "Archiving %s from repo %s\n", dep.Name, dep.Repository)
			}
			ver, err := tarFromLocalDir(m.ChartPath, dep.Name, dep.Repository, dep.Version, tmpPath)
			if err != nil {
				saveError = err
				break
			}
			dep.Version = ver
			continue
		}

		// Any failure to resolve/download a chart should fail:
		// https://github.com/helm/helm/issues/1439
		churl, username, password, insecureskiptlsverify, passcredentialsall, caFile, certFile, keyFile, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos)
		if err != nil {
			saveError = errors.Wrapf(err, "could not find %s", churl)
			break
		}

		if _, ok := churls[churl]; ok {
			fmt.Fprintf(m.Out, "Already downloaded %s from repo %s\n", dep.Name, dep.Repository)
			continue
		}

		fmt.Fprintf(m.Out, "Downloading %s from repo %s\n", dep.Name, dep.Repository)

		dl := ChartDownloader{
			Out:              m.Out,
			Verify:           m.Verify,
			Keyring:          m.Keyring,
			RepositoryConfig: m.RepositoryConfig,
			RepositoryCache:  m.RepositoryCache,
			Getters:          m.Getters,
			Options: []getter.Option{
				getter.WithBasicAuth(username, password),
				getter.WithPassCredentialsAll(passcredentialsall),
				getter.WithInsecureSkipVerifyTLS(insecureskiptlsverify),
				getter.WithTLSClientConfig(certFile, keyFile, caFile),
			},
		}

		version := ""
		if strings.HasPrefix(churl, "oci://") {
			if !resolver.FeatureGateOCI.IsEnabled() {
				return errors.Wrapf(resolver.FeatureGateOCI.Error(),
					"the repository %s is an OCI registry", churl)
			}

			// Split "oci://ref:tag" so the tag can be passed to the getter.
			churl, version, err = parseOCIRef(churl)
			if err != nil {
				return errors.Wrapf(err, "could not parse OCI reference")
			}
			dl.Options = append(dl.Options,
				getter.WithRegistryClient(m.RegistryClient),
				getter.WithTagName(version))
		}

		if _, _, err = dl.DownloadTo(churl, version, tmpPath); err != nil {
			saveError = errors.Wrapf(err, "could not download %s", churl)
			break
		}

		churls[churl] = struct{}{}
	}

	// TODO: this should probably be refactored to be a []error, so we can capture and provide more information rather than "last error wins".
	if saveError == nil {
		// now we can move all downloaded charts to destPath and delete outdated dependencies
		if err := m.safeMoveDeps(deps, tmpPath, destPath); err != nil {
			return err
		}
	} else {
		fmt.Fprintln(m.Out, "Save error occurred: ", saveError)
		return saveError
	}
	return nil
}
+
// ociRefTagRegexp splits an OCI chart reference of the form
// "oci://host[:port]/path:tag" into the base reference and the tag.
// Compiled once at package scope instead of on every call.
var ociRefTagRegexp = regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)

// parseOCIRef splits chartRef into the base OCI reference and its tag.
// It returns an error when chartRef does not carry a ":tag" suffix.
func parseOCIRef(chartRef string) (string, string, error) {
	caps := ociRefTagRegexp.FindStringSubmatch(chartRef)
	if len(caps) != 4 {
		return "", "", fmt.Errorf("improperly formatted oci chart reference: %s", chartRef)
	}
	// caps[1] is registry+repository (including any port); caps[3] is the tag.
	return caps[1], caps[3], nil
}
+
+// safeMoveDep moves all dependencies in the source and moves them into dest.
+//
+// It does this by first matching the file name to an expected pattern, then loading
+// the file to verify that it is a chart.
+//
+// Any charts in dest that do not exist in source are removed (barring local dependencies)
+//
+// Because it requires tar file introspection, it is more intensive than a basic move.
+//
+// This will only return errors that should stop processing entirely. Other errors
+// will emit log messages or be ignored.
+func (m *Manager) safeMoveDeps(deps []*chart.Dependency, source, dest string) error {
+ existsInSourceDirectory := map[string]bool{}
+ isLocalDependency := map[string]bool{}
+ sourceFiles, err := ioutil.ReadDir(source)
+ if err != nil {
+ return err
+ }
+ // attempt to read destFiles; fail fast if we can't
+ destFiles, err := ioutil.ReadDir(dest)
+ if err != nil {
+ return err
+ }
+
+ for _, dep := range deps {
+ if dep.Repository == "" {
+ isLocalDependency[dep.Name] = true
+ }
+ }
+
+ for _, file := range sourceFiles {
+ if file.IsDir() {
+ continue
+ }
+ filename := file.Name()
+ sourcefile := filepath.Join(source, filename)
+ destfile := filepath.Join(dest, filename)
+ existsInSourceDirectory[filename] = true
+ if _, err := loader.LoadFile(sourcefile); err != nil {
+ fmt.Fprintf(m.Out, "Could not verify %s for moving: %s (Skipping)", sourcefile, err)
+ continue
+ }
+ // NOTE: no need to delete the dest; os.Rename replaces it.
+ if err := fs.RenameWithFallback(sourcefile, destfile); err != nil {
+ fmt.Fprintf(m.Out, "Unable to move %s to charts dir %s (Skipping)", sourcefile, err)
+ continue
+ }
+ }
+
+ fmt.Fprintln(m.Out, "Deleting outdated charts")
+ // find all files that exist in dest that do not exist in source; delete them (outdated dependendencies)
+ for _, file := range destFiles {
+ if !file.IsDir() && !existsInSourceDirectory[file.Name()] {
+ fname := filepath.Join(dest, file.Name())
+ ch, err := loader.LoadFile(fname)
+ if err != nil {
+ fmt.Fprintf(m.Out, "Could not verify %s for deletion: %s (Skipping)\n", fname, err)
+ continue
+ }
+ // local dependency - skip
+ if isLocalDependency[ch.Name()] {
+ continue
+ }
+ if err := os.Remove(fname); err != nil {
+ fmt.Fprintf(m.Out, "Could not delete %s: %s (Skipping)", fname, err)
+ continue
+ }
+ }
+ }
+
+ return nil
+}
+
+// hasAllRepos ensures that all of the referenced deps are in the local repo cache.
+func (m *Manager) hasAllRepos(deps []*chart.Dependency) error {
+ rf, err := loadRepoConfig(m.RepositoryConfig)
+ if err != nil {
+ return err
+ }
+ repos := rf.Repositories
+
+ // Verify that all repositories referenced in the deps are actually known
+ // by Helm.
+ missing := []string{}
+Loop:
+ for _, dd := range deps {
+ // If repo is from local path or OCI, continue
+ if strings.HasPrefix(dd.Repository, "file://") || strings.HasPrefix(dd.Repository, "oci://") {
+ continue
+ }
+
+ if dd.Repository == "" {
+ continue
+ }
+ for _, repo := range repos {
+ if urlutil.Equal(repo.URL, strings.TrimSuffix(dd.Repository, "/")) {
+ continue Loop
+ }
+ }
+ missing = append(missing, dd.Repository)
+ }
+ if len(missing) > 0 {
+ return ErrRepoNotFound{missing}
+ }
+ return nil
+}
+
// ensureMissingRepos attempts to ensure the repository information for repos
// not managed by Helm is present. This takes in the repoNames Helm is configured
// to work with along with the chart dependencies. It will find the deps not
// in a known repo and attempt to ensure the data is present for steps like
// version resolution.
//
// It returns the (possibly extended) repoNames map; entries it adds are keyed
// by dependency name and point to a generated "helm-manager-<sha256>" repo name.
func (m *Manager) ensureMissingRepos(repoNames map[string]string, deps []*chart.Dependency) (map[string]string, error) {

	var ru []*repo.Entry

	for _, dd := range deps {

		// If the chart is in the local charts directory no repository needs
		// to be specified.
		if dd.Repository == "" {
			continue
		}

		// When the repoName for a dependency is known we can skip ensuring
		if _, ok := repoNames[dd.Name]; ok {
			continue
		}

		// The generated repository name, which will result in an index being
		// locally cached, has a name pattern of "helm-manager-" followed by a
		// sha256 of the repo name. This assumes end users will never create
		// repositories with these names pointing to other repositories. Using
		// this method of naming allows the existing repository pulling and
		// resolution code to do most of the work.
		rn, err := key(dd.Repository)
		if err != nil {
			return repoNames, err
		}
		rn = managerKeyPrefix + rn

		repoNames[dd.Name] = rn

		// Assuming the repository is generally available. For Helm managed
		// access controls the repository needs to be added through the user
		// managed system. This path will work for public charts, like those
		// supplied by Bitnami, but not for protected charts, like corp ones
		// behind a username and pass.
		ri := &repo.Entry{
			Name: rn,
			URL:  dd.Repository,
		}
		ru = append(ru, ri)
	}

	// Calls to UpdateRepositories (a public function) will only update
	// repositories configured by the user. Here we update repos found in
	// the dependencies that are not known to the user if update skipping
	// is not configured.
	if !m.SkipUpdate && len(ru) > 0 {
		fmt.Fprintln(m.Out, "Getting updates for unmanaged Helm repositories...")
		if err := m.parallelRepoUpdate(ru); err != nil {
			return repoNames, err
		}
	}

	return repoNames, nil
}
+
// resolveRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file
// and replaces aliased repository URLs into resolved URLs in dependencies.
//
// Mutates deps in place: "@name" / "alias:name" repositories are rewritten to
// the resolved repository URL. Returns an error listing any repositories that
// are neither configured, local, OCI, nor plain URLs.
func (m *Manager) resolveRepoNames(deps []*chart.Dependency) (map[string]string, error) {
	rf, err := loadRepoConfig(m.RepositoryConfig)
	if err != nil {
		if os.IsNotExist(err) {
			// No repository config yet: nothing to resolve against, not an error.
			return make(map[string]string), nil
		}
		return nil, err
	}
	repos := rf.Repositories

	reposMap := make(map[string]string)

	// Verify that all repositories referenced in the deps are actually known
	// by Helm.
	missing := []string{}
	for _, dd := range deps {
		// Don't map the repository, we don't need to download chart from charts directory
		// When OCI is used there is no Helm repository
		if dd.Repository == "" || strings.HasPrefix(dd.Repository, "oci://") {
			continue
		}
		// if dep chart is from local path, verify the path is valid
		if strings.HasPrefix(dd.Repository, "file://") {
			if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil {
				return nil, err
			}

			if m.Debug {
				fmt.Fprintf(m.Out, "Repository from local path: %s\n", dd.Repository)
			}
			reposMap[dd.Name] = dd.Repository
			continue
		}

		// NOTE(review): unreachable — oci:// repositories already hit the
		// 'continue' in the first check of this loop.
		if strings.HasPrefix(dd.Repository, "oci://") {
			reposMap[dd.Name] = dd.Repository
			continue
		}

		found := false

		for _, repo := range repos {
			if (strings.HasPrefix(dd.Repository, "@") && strings.TrimPrefix(dd.Repository, "@") == repo.Name) ||
				(strings.HasPrefix(dd.Repository, "alias:") && strings.TrimPrefix(dd.Repository, "alias:") == repo.Name) {
				found = true
				// Alias is resolved in place so later steps see the real URL.
				dd.Repository = repo.URL
				reposMap[dd.Name] = repo.Name
				break
			} else if urlutil.Equal(repo.URL, dd.Repository) {
				found = true
				reposMap[dd.Name] = repo.Name
				break
			}
		}
		if !found {
			repository := dd.Repository
			// Add if URL
			// NOTE(review): this entry is keyed by the repository URL itself,
			// not by dd.Name like the branches above — confirm callers expect that.
			_, err := url.ParseRequestURI(repository)
			if err == nil {
				reposMap[repository] = repository
				continue
			}
			missing = append(missing, repository)
		}
	}
	if len(missing) > 0 {
		errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", "))
		// It is common for people to try to enter "stable" as a repository instead of the actual URL.
		// For this case, let's give them a suggestion.
		containsNonURL := false
		for _, repo := range missing {
			if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") {
				containsNonURL = true
			}
		}
		if containsNonURL {
			errorMessage += `
Note that repositories must be URLs or aliases. For example, to refer to the "example"
repository, use "https://charts.example.com/" or "@example" instead of
"example". Don't forget to add the repo, too ('helm repo add').`
		}
		return nil, errors.New(errorMessage)
	}
	return reposMap, nil
}
+
+// UpdateRepositories updates all of the local repos to the latest.
+func (m *Manager) UpdateRepositories() error {
+ rf, err := loadRepoConfig(m.RepositoryConfig)
+ if err != nil {
+ return err
+ }
+ repos := rf.Repositories
+ if len(repos) > 0 {
+ fmt.Fprintln(m.Out, "Hang tight while we grab the latest from your chart repositories...")
+ // This prints warnings straight to out.
+ if err := m.parallelRepoUpdate(repos); err != nil {
+ return err
+ }
+ fmt.Fprintln(m.Out, "Update Complete. ⎈Happy Helming!⎈")
+ }
+ return nil
+}
+
+func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error {
+
+ var wg sync.WaitGroup
+ for _, c := range repos {
+ r, err := repo.NewChartRepository(c, m.Getters)
+ if err != nil {
+ return err
+ }
+ wg.Add(1)
+ go func(r *repo.ChartRepository) {
+ if _, err := r.DownloadIndexFile(); err != nil {
+ // For those dependencies that are not known to helm and using a
+ // generated key name we display the repo url.
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) {
+ fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository:\n\t%s\n", r.Config.URL, err)
+ } else {
+ fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", r.Config.Name, r.Config.URL, err)
+ }
+ } else {
+ // For those dependencies that are not known to helm and using a
+ // generated key name we display the repo url.
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) {
+ fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.URL)
+ } else {
+ fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.Name)
+ }
+ }
+ wg.Done()
+ }(r)
+ }
+ wg.Wait()
+
+ return nil
+}
+
// findChartURL searches the cache of repo data for a chart that has the name and the repoURL specified.
//
// 'name' is the name of the chart. Version is an exact semver, or an empty string. If empty, the
// newest version will be returned.
//
// repoURL is the repository to search
//
// If it finds a URL that is "relative", it will prepend the repoURL.
//
// The credential/TLS results are only populated when the chart is found in a
// configured repository; on the OCI fast path and the FindChartInRepoURL
// fallback they are returned as zero values.
func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, insecureskiptlsverify, passcredentialsall bool, caFile, certFile, keyFile string, err error) {
	// OCI references are synthesized directly; no index lookup is possible.
	if strings.HasPrefix(repoURL, "oci://") {
		return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, false, "", "", "", nil
	}

	for _, cr := range repos {

		if urlutil.Equal(repoURL, cr.Config.URL) {
			var entry repo.ChartVersions
			entry, err = findEntryByName(name, cr)
			if err != nil {
				// Bare return: named results carry the error back to the caller.
				return
			}
			var ve *repo.ChartVersion
			ve, err = findVersionedEntry(version, entry)
			if err != nil {
				return
			}
			// Resolve the (possibly relative) first download URL against the repo URL.
			url, err = normalizeURL(repoURL, ve.URLs[0])
			if err != nil {
				return
			}
			username = cr.Config.Username
			password = cr.Config.Password
			passcredentialsall = cr.Config.PassCredentialsAll
			insecureskiptlsverify = cr.Config.InsecureSkipTLSverify
			caFile = cr.Config.CAFile
			certFile = cr.Config.CertFile
			keyFile = cr.Config.KeyFile
			return
		}
	}
	// Fallback: the repo is not configured locally; query it directly.
	// Note: certFile/keyFile/caFile are still zero values here.
	url, err = repo.FindChartInRepoURL(repoURL, name, version, certFile, keyFile, caFile, m.Getters)
	if err == nil {
		return url, username, password, false, false, "", "", "", err
	}
	err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err)
	return url, username, password, false, false, "", "", "", err
}
+
+// findEntryByName finds an entry in the chart repository whose name matches the given name.
+//
+// It returns the ChartVersions for that entry.
+func findEntryByName(name string, cr *repo.ChartRepository) (repo.ChartVersions, error) {
+ for ename, entry := range cr.IndexFile.Entries {
+ if ename == name {
+ return entry, nil
+ }
+ }
+ return nil, errors.New("entry not found")
+}
+
+// findVersionedEntry takes a ChartVersions list and returns a single chart version that satisfies the version constraints.
+//
+// If version is empty, the first chart found is returned.
+func findVersionedEntry(version string, vers repo.ChartVersions) (*repo.ChartVersion, error) {
+ for _, verEntry := range vers {
+ if len(verEntry.URLs) == 0 {
+ // Not a legit entry.
+ continue
+ }
+
+ if version == "" || versionEquals(version, verEntry.Version) {
+ return verEntry, nil
+ }
+ }
+ return nil, errors.New("no matching version")
+}
+
+func versionEquals(v1, v2 string) bool {
+ sv1, err := semver.NewVersion(v1)
+ if err != nil {
+ // Fallback to string comparison.
+ return v1 == v2
+ }
+ sv2, err := semver.NewVersion(v2)
+ if err != nil {
+ return false
+ }
+ return sv1.Equal(sv2)
+}
+
// normalizeURL resolves urlOrPath against baseURL.
//
// Absolute URLs are returned unchanged; relative paths are joined onto the
// base URL's path. The (possibly unmodified) input is returned alongside any
// parse error so callers can still report the offending value.
func normalizeURL(baseURL, urlOrPath string) (string, error) {
	u, err := url.Parse(urlOrPath)
	if err != nil {
		return urlOrPath, err
	}
	if u.IsAbs() {
		return u.String(), nil
	}
	u2, err := url.Parse(baseURL)
	if err != nil {
		// FIX(idiom): stdlib %w wrapping replaces the third-party errors.Wrap;
		// the rendered message is byte-identical and the cause stays unwrappable.
		return urlOrPath, fmt.Errorf("base URL failed to parse: %w", err)
	}

	// Join both the raw (escaped) and decoded path forms; URL.String picks
	// RawPath only when it remains a valid encoding of Path.
	u2.RawPath = path.Join(u2.RawPath, urlOrPath)
	u2.Path = path.Join(u2.Path, urlOrPath)
	return u2.String(), nil
}
+
// loadChartRepositories reads the repositories.yaml, and then builds a map of
// ChartRepositories.
//
// The key is the local name (which is only present in the repositories.yaml).
// Fails if any configured repository's cached index file cannot be loaded.
func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, error) {
	indices := map[string]*repo.ChartRepository{}

	// Load repositories.yaml file
	rf, err := loadRepoConfig(m.RepositoryConfig)
	if err != nil {
		return indices, errors.Wrapf(err, "failed to load %s", m.RepositoryConfig)
	}

	for _, re := range rf.Repositories {
		lname := re.Name
		// The cached index lives under the repository cache dir, named after
		// the repository's local name.
		idxFile := filepath.Join(m.RepositoryCache, helmpath.CacheIndexFile(lname))
		index, err := repo.LoadIndexFile(idxFile)
		if err != nil {
			return indices, err
		}

		// TODO: use constructor
		cr := &repo.ChartRepository{
			Config:    re,
			IndexFile: index,
		}
		indices[lname] = cr
	}
	return indices, nil
}
+
+// writeLock writes a lockfile to disk
+func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error {
+ data, err := yaml.Marshal(lock)
+ if err != nil {
+ return err
+ }
+ lockfileName := "Chart.lock"
+ if legacyLockfile {
+ lockfileName = "requirements.lock"
+ }
+ dest := filepath.Join(chartpath, lockfileName)
+ return ioutil.WriteFile(dest, data, 0644)
+}
+
// tarFromLocalDir archives a dep chart from a local "file://" directory and
// saves the .tgz into destPath.
//
// The chart at the local path must satisfy the given semver version
// constraint. Returns the archived chart's concrete version on success.
func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, error) {
	if !strings.HasPrefix(repo, "file://") {
		return "", errors.Errorf("wrong format: chart %s repository %s", name, repo)
	}

	// Resolve the file:// URL relative to the parent chart's path.
	origPath, err := resolver.GetLocalPath(repo, chartpath)
	if err != nil {
		return "", err
	}

	ch, err := loader.LoadDir(origPath)
	if err != nil {
		return "", err
	}

	constraint, err := semver.NewConstraint(version)
	if err != nil {
		return "", errors.Wrapf(err, "dependency %s has an invalid version/constraint format", name)
	}

	// The chart's own declared version must parse and satisfy the constraint.
	v, err := semver.NewVersion(ch.Metadata.Version)
	if err != nil {
		return "", err
	}

	if constraint.Check(v) {
		_, err = chartutil.Save(ch, destPath)
		return ch.Metadata.Version, err
	}

	return "", errors.Errorf("can't get a valid version for dependency %s", name)
}
+
// The prefix to use for cache keys created by the manager for repo names
const managerKeyPrefix = "helm-manager-"

// key is used to turn a name, such as a repository url, into a filesystem
// safe name that is unique for querying. To accomplish this a unique hash of
// the string is used.
func key(name string) (string, error) {
	in := strings.NewReader(name)
	hash := crypto.SHA256.New()
	if _, err := io.Copy(hash, in); err != nil {
		// FIX: the error was previously discarded (`return "", nil`), which
		// silently produced an empty key on failure. Propagate it instead.
		// (hash.Hash.Write is documented never to fail, so this is defensive.)
		return "", err
	}
	return hex.EncodeToString(hash.Sum(nil)), nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/doc.go b/vendor/helm.sh/helm/v3/pkg/engine/doc.go
new file mode 100644
index 000000000..6ff875c46
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package engine implements the Go text template engine as needed for Helm.
+
+When Helm renders templates it does so with additional functions and different
+modes (e.g., strict, lint mode). This package handles the helm specific
+implementation.
+*/
+package engine // import "helm.sh/helm/v3/pkg/engine"
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go
new file mode 100644
index 000000000..00494f9d7
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go
@@ -0,0 +1,401 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "fmt"
+ "log"
+ "path"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "text/template"
+
+ "github.com/pkg/errors"
+ "k8s.io/client-go/rest"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chartutil"
+)
+
// Engine is an implementation of the Helm rendering engine for Go templates.
// Its zero value renders with default options (non-strict, no lint mode,
// no cluster connection).
type Engine struct {
	// If strict is enabled, template rendering will fail if a template references
	// a value that was not passed in.
	Strict bool
	// In LintMode, some 'required' template values may be missing, so don't fail
	LintMode bool
	// the rest config to connect to the kubernetes api; when set (and not
	// linting), the 'lookup' template function queries the live cluster
	config *rest.Config
}
+
+// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates.
+//
+// Render can be called repeatedly on the same engine.
+//
+// This will look in the chart's 'templates' data (e.g. the 'templates/' directory)
+// and attempt to render the templates there using the values passed in.
+//
+// Values are scoped to their templates. A dependency template will not have
+// access to the values set for its parent. If chart "foo" includes chart "bar",
+// "bar" will not have access to the values for "foo".
+//
+// Values should be prepared with something like `chartutils.ReadValues`.
+//
+// Values are passed through the templates according to scope. If the top layer
+// chart includes the chart foo, which includes the chart bar, the values map
+// will be examined for a table called "foo". If "foo" is found in vals,
+// that section of the values will be passed into the "foo" chart. And if that
+// section contains a value named "bar", that value will be passed on to the
+// bar chart during render time.
+func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {
+ tmap := allTemplates(chrt, values)
+ return e.render(tmap)
+}
+
+// Render takes a chart, optional values, and value overrides, and attempts to
+// render the Go templates using the default options.
+func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {
+ return new(Engine).Render(chrt, values)
+}
+
+// RenderWithClient takes a chart, optional values, and value overrides, and attempts to
+// render the Go templates using the default options. This engine is client aware and so can have template
+// functions that interact with the client
+func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) {
+ return Engine{
+ config: config,
+ }.Render(chrt, values)
+}
+
// renderable is an object that can be rendered.
type renderable struct {
	// tpl is the current template (raw template text, not a parsed template).
	tpl string
	// vals are the values to be supplied to the template.
	vals chartutil.Values
	// namespace prefix to the templates of the current chart,
	// e.g. "mychart/charts/subchart/templates"
	basePath string
}
+
// Delimiters used to smuggle 'required'/'fail' messages through the template
// engine so they can be recovered from the final error text.
const (
	warnStartDelim = "HELM_ERR_START"
	warnEndDelim   = "HELM_ERR_END"
)

// recursionMaxNums caps nested 'include' depth to guard against runaway recursion.
const recursionMaxNums = 1000

// warnRegex extracts the message wrapped between the warn delimiters
// (dot-all, so multi-line messages are captured).
var warnRegex = regexp.MustCompile(warnStartDelim + `((?s).*)` + warnEndDelim)

// warnWrap surrounds warn with the delimiters recognized by warnRegex.
func warnWrap(warn string) string {
	return warnStartDelim + warn + warnEndDelim
}
+
// initFunMap creates the Engine's FuncMap and adds context-specific functions.
//
// The closures registered here capture t (for 'include'), e (for lint-mode
// behavior), and referenceTpls (for 'tpl'), so this must be called after the
// root template t exists but before any template is parsed into it.
func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]renderable) {
	funcMap := funcMap()
	// Tracks in-flight 'include' depth per template name to detect runaway
	// recursion (bounded by recursionMaxNums).
	includedNames := make(map[string]int)

	// Add the 'include' function here so we can close over t.
	funcMap["include"] = func(name string, data interface{}) (string, error) {
		var buf strings.Builder
		if v, ok := includedNames[name]; ok {
			if v > recursionMaxNums {
				return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name)
			}
			includedNames[name]++
		} else {
			includedNames[name] = 1
		}
		err := t.ExecuteTemplate(&buf, name, data)
		// Decrement even on error so the counter stays balanced.
		includedNames[name]--
		return buf.String(), err
	}

	// Add the 'tpl' function here
	funcMap["tpl"] = func(tpl string, vals chartutil.Values) (string, error) {
		// The current template's identity is threaded through the values
		// (set in renderWithReferences) so the nested render inherits it.
		basePath, err := vals.PathValue("Template.BasePath")
		if err != nil {
			return "", errors.Wrapf(err, "cannot retrieve Template.Basepath from values inside tpl function: %s", tpl)
		}

		templateName, err := vals.PathValue("Template.Name")
		if err != nil {
			return "", errors.Wrapf(err, "cannot retrieve Template.Name from values inside tpl function: %s", tpl)
		}

		templates := map[string]renderable{
			templateName.(string): {
				tpl:      tpl,
				vals:     vals,
				basePath: basePath.(string),
			},
		}

		result, err := e.renderWithReferences(templates, referenceTpls)
		if err != nil {
			return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl)
		}
		return result[templateName.(string)], nil
	}

	// Add the `required` function here so we can use lintMode
	funcMap["required"] = func(warn string, val interface{}) (interface{}, error) {
		if val == nil {
			if e.LintMode {
				// Don't fail on missing required values when linting
				log.Printf("[INFO] Missing required value: %s", warn)
				return "", nil
			}
			// Wrapped in the warn delimiters so cleanupExecError can extract it.
			return val, errors.Errorf(warnWrap(warn))
		} else if _, ok := val.(string); ok {
			if val == "" {
				if e.LintMode {
					// Don't fail on missing required values when linting
					log.Printf("[INFO] Missing required value: %s", warn)
					return "", nil
				}
				return val, errors.Errorf(warnWrap(warn))
			}
		}
		return val, nil
	}

	// Override sprig fail function for linting and wrapping message
	funcMap["fail"] = func(msg string) (string, error) {
		if e.LintMode {
			// Don't fail when linting
			log.Printf("[INFO] Fail: %s", msg)
			return "", nil
		}
		return "", errors.New(warnWrap(msg))
	}

	// If we are not linting and have a cluster connection, provide a Kubernetes-backed
	// implementation.
	if !e.LintMode && e.config != nil {
		funcMap["lookup"] = NewLookupFunction(e.config)
	}

	t.Funcs(funcMap)
}
+
// render takes a map of templates/values and renders them.
// The same set doubles as the reference templates available to 'tpl'.
func (e Engine) render(tpls map[string]renderable) (map[string]string, error) {
	return e.renderWithReferences(tpls, tpls)
}
+
// renderWithReferences takes a map of templates/values to render, and a map of
// templates which can be referenced within them.
//
// Returns the rendered text per template path. Partials (base name starting
// with "_") are parsed but never appear in the result. Panics during template
// execution are converted to errors via the deferred recover.
func (e Engine) renderWithReferences(tpls, referenceTpls map[string]renderable) (rendered map[string]string, err error) {
	// Basically, what we do here is start with an empty parent template and then
	// build up a list of templates -- one for each file. Once all of the templates
	// have been parsed, we loop through again and execute every template.
	//
	// The idea with this process is to make it possible for more complex templates
	// to share common blocks, but to make the entire thing feel like a file-based
	// template engine.
	defer func() {
		if r := recover(); r != nil {
			// Named result err lets the recover path report the panic to callers.
			err = errors.Errorf("rendering template failed: %v", r)
		}
	}()
	t := template.New("gotpl")
	if e.Strict {
		t.Option("missingkey=error")
	} else {
		// Not that zero will attempt to add default values for types it knows,
		// but will still emit <no value> for others. We mitigate that later.
		t.Option("missingkey=zero")
	}

	e.initFunMap(t, referenceTpls)

	// We want to parse the templates in a predictable order. The order favors
	// higher-level (in file system) templates over deeply nested templates.
	keys := sortTemplates(tpls)
	referenceKeys := sortTemplates(referenceTpls)

	for _, filename := range keys {
		r := tpls[filename]
		if _, err := t.New(filename).Parse(r.tpl); err != nil {
			return map[string]string{}, cleanupParseError(filename, err)
		}
	}

	// Adding the reference templates to the template context
	// so they can be referenced in the tpl function
	for _, filename := range referenceKeys {
		if t.Lookup(filename) == nil {
			r := referenceTpls[filename]
			if _, err := t.New(filename).Parse(r.tpl); err != nil {
				return map[string]string{}, cleanupParseError(filename, err)
			}
		}
	}

	rendered = make(map[string]string, len(keys))
	for _, filename := range keys {
		// Don't render partials. We don't care out the direct output of partials.
		// They are only included from other templates.
		if strings.HasPrefix(path.Base(filename), "_") {
			continue
		}
		// At render time, add information about the template that is being rendered.
		// 'tpl' and error cleanup rely on these Template.Name/BasePath values.
		vals := tpls[filename].vals
		vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath}
		var buf strings.Builder
		if err := t.ExecuteTemplate(&buf, filename, vals); err != nil {
			return map[string]string{}, cleanupExecError(filename, err)
		}

		// Work around the issue where Go will emit "<no value>" even if Options(missing=zero)
		// is set. Since missing=error will never get here, we do not need to handle
		// the Strict case.
		rendered[filename] = strings.ReplaceAll(buf.String(), "<no value>", "")
	}

	return rendered, nil
}
+
// cleanupParseError rewrites a text/template parse error into a concise
// single-location message of the form "parse error at (file:line): msg".
func cleanupParseError(filename string, err error) error {
	tokens := strings.Split(err.Error(), ": ")
	if len(tokens) == 1 {
		// Not a standard "template: location: ..." error; report it verbatim.
		return fmt.Errorf("parse error in (%s): %s", filename, err)
	}
	// The first token is "template"
	// The second token is either "filename:lineno" or "filename:lineNo:columnNo"
	location := tokens[1]
	// The remaining tokens make up a stacktrace-like chain, ending with the relevant error
	errMsg := tokens[len(tokens)-1]
	// FIX(idiom): dropped the redundant string(location) conversion —
	// location is already a string.
	return fmt.Errorf("parse error at (%s): %s", location, errMsg)
}
+
+func cleanupExecError(filename string, err error) error {
+ if _, isExecError := err.(template.ExecError); !isExecError {
+ return err
+ }
+
+ tokens := strings.SplitN(err.Error(), ": ", 3)
+ if len(tokens) != 3 {
+ // This might happen if a non-templating error occurs
+ return fmt.Errorf("execution error in (%s): %s", filename, err)
+ }
+
+ // The first token is "template"
+ // The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+ location := tokens[1]
+
+ parts := warnRegex.FindStringSubmatch(tokens[2])
+ if len(parts) >= 2 {
+ return fmt.Errorf("execution error at (%s): %s", string(location), parts[1])
+ }
+
+ return err
+}
+
+func sortTemplates(tpls map[string]renderable) []string {
+ keys := make([]string, len(tpls))
+ i := 0
+ for key := range tpls {
+ keys[i] = key
+ i++
+ }
+ sort.Sort(sort.Reverse(byPathLen(keys)))
+ return keys
+}
+
// byPathLen orders strings by the number of path separators they contain,
// breaking ties lexicographically.
type byPathLen []string

func (p byPathLen) Len() int      { return len(p) }
func (p byPathLen) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p byPathLen) Less(i, j int) bool {
	a, b := p[i], p[j]
	if ca, cb := strings.Count(a, "/"), strings.Count(b, "/"); ca != cb {
		return ca < cb
	}
	return a < b
}
+
+// allTemplates returns all templates for a chart and its dependencies.
+//
+// As it goes, it also prepares the values in a scope-sensitive manner.
+func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {
+ templates := make(map[string]renderable)
+ recAllTpls(c, templates, vals)
+ return templates
+}
+
// recAllTpls recurses through the templates in a chart.
//
// As it recurses, it also sets the values to be appropriate for the template
// scope. The returned scope map is what a parent exposes to this chart under
// .Subcharts.
func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) map[string]interface{} {
	subCharts := make(map[string]interface{})
	// Embed Metadata and add IsRoot so templates can see both via .Chart.
	chartMetaData := struct {
		chart.Metadata
		IsRoot bool
	}{*c.Metadata, c.IsRoot()}

	next := map[string]interface{}{
		"Chart":        chartMetaData,
		"Files":        newFiles(c.Files),
		"Release":      vals["Release"],
		"Capabilities": vals["Capabilities"],
		"Values":       make(chartutil.Values),
		"Subcharts":    subCharts,
	}

	// If there is a {{.Values.ThisChart}} in the parent metadata,
	// copy that into the {{.Values}} for this template.
	if c.IsRoot() {
		next["Values"] = vals["Values"]
	} else if vs, err := vals.Table("Values." + c.Name()); err == nil {
		next["Values"] = vs
	}

	// Recurse into dependencies first so subchart scopes are registered.
	for _, child := range c.Dependencies() {
		subCharts[child.Name()] = recAllTpls(child, templates, next)
	}

	// Templates are keyed by their full path within the chart hierarchy.
	newParentID := c.ChartFullPath()
	for _, t := range c.Templates {
		if !isTemplateValid(c, t.Name) {
			continue
		}
		templates[path.Join(newParentID, t.Name)] = renderable{
			tpl:      string(t.Data),
			vals:     next,
			basePath: path.Join(newParentID, "templates"),
		}
	}

	return next
}
+
+// isTemplateValid returns true if the template is valid for the chart type
+func isTemplateValid(ch *chart.Chart, templateName string) bool {
+ if isLibraryChart(ch) {
+ return strings.HasPrefix(filepath.Base(templateName), "_")
+ }
+ return true
+}
+
// isLibraryChart returns true if the chart is a library chart
// (Chart.yaml "type" equals "library", compared case-insensitively).
func isLibraryChart(c *chart.Chart) bool {
	return strings.EqualFold(c.Metadata.Type, "library")
}
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/files.go b/vendor/helm.sh/helm/v3/pkg/engine/files.go
new file mode 100644
index 000000000..d7e62da5a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/files.go
@@ -0,0 +1,160 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "encoding/base64"
+ "path"
+ "strings"
+
+ "github.com/gobwas/glob"
+
+ "helm.sh/helm/v3/pkg/chart"
+)
+
+// files is a map of files in a chart that can be accessed from a template.
+type files map[string][]byte
+
+// NewFiles creates a new files from chart files.
+// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files.
+func newFiles(from []*chart.File) files {
+ files := make(map[string][]byte)
+ for _, f := range from {
+ files[f.Name] = f.Data
+ }
+ return files
+}
+
+// GetBytes gets a file by path.
+//
+// The returned data is raw. In a template context, this is identical to calling
+// {{index .Files $path}}.
+//
+// This is intended to be accessed from within a template, so a missed key returns
+// an empty []byte.
+func (f files) GetBytes(name string) []byte {
+ if v, ok := f[name]; ok {
+ return v
+ }
+ return []byte{}
+}
+
+// Get returns a string representation of the given file.
+//
+// Fetch the contents of a file as a string. It is designed to be called in a
+// template.
+//
+// {{.Files.Get "foo"}}
+func (f files) Get(name string) string {
+ return string(f.GetBytes(name))
+}
+
// Glob takes a glob pattern and returns another files object only containing
// matched files.
//
// This is designed to be called from a template.
//
// {{ range $name, $content := .Files.Glob("foo/**") }}
// {{ $name }}: |
// {{ .Files.Get($name) | indent 4 }}{{ end }}
func (f files) Glob(pattern string) files {
	g, err := glob.Compile(pattern, '/')
	if err != nil {
		// An invalid pattern silently falls back to matching everything;
		// templates never see the compile error.
		g, _ = glob.Compile("**")
	}

	nf := newFiles(nil)
	for name, contents := range f {
		if g.Match(name) {
			nf[name] = contents
		}
	}

	return nf
}
+
// AsConfig turns a Files group and flattens it to a YAML map suitable for
// including in the 'data' section of a Kubernetes ConfigMap definition.
// Duplicate keys will be overwritten, so be aware that your file names
// (regardless of path) should be unique: only the base name is kept.
//
// This is designed to be called from a template, and will return empty string
// (via toYAML function) if it cannot be serialized to YAML, or if the Files
// object is nil.
//
// The output will not be indented, so you will want to pipe this to the
// 'indent' template function.
//
// data:
// {{ .Files.Glob("config/**").AsConfig() | indent 4 }}
func (f files) AsConfig() string {
	if f == nil {
		return ""
	}

	m := make(map[string]string)

	// Explicitly convert to strings, and flatten paths to base file names.
	for k, v := range f {
		m[path.Base(k)] = string(v)
	}

	return toYAML(m)
}
+
+// AsSecrets returns the base64-encoded value of a Files object suitable for
+// including in the 'data' section of a Kubernetes Secret definition.
+// Duplicate keys will be overwritten, so be aware that your file names
+// (regardless of path) should be unique.
+//
+// This is designed to be called from a template, and will return empty string
+// (via toYAML function) if it cannot be serialized to YAML, or if the Files
+// object is nil.
+//
+// The output will not be indented, so you will want to pipe this to the
+// 'indent' template function.
+//
+// data:
+// {{ .Files.Glob("secrets/*").AsSecrets() }}
+func (f files) AsSecrets() string {
+ if f == nil {
+ return ""
+ }
+
+ m := make(map[string]string)
+
+ for k, v := range f {
+ m[path.Base(k)] = base64.StdEncoding.EncodeToString(v)
+ }
+
+ return toYAML(m)
+}
+
+// Lines returns each line of a named file (split by "\n") as a slice, so it can
+// be ranged over in your templates.
+//
+// This is designed to be called from a template.
+//
+// {{ range .Files.Lines "foo/bar.html" }}
+// {{ . }}{{ end }}
+func (f files) Lines(path string) []string {
+ if f == nil || f[path] == nil {
+ return []string{}
+ }
+
+ return strings.Split(string(f[path]), "\n")
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/funcs.go b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go
new file mode 100644
index 000000000..92b4c3383
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go
@@ -0,0 +1,177 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "bytes"
+ "encoding/json"
+ "strings"
+ "text/template"
+
+ "github.com/BurntSushi/toml"
+ "github.com/Masterminds/sprig/v3"
+ "sigs.k8s.io/yaml"
+)
+
// funcMap returns a mapping of all of the functions that Engine has.
//
// Because some functions are late-bound (e.g. contain context-sensitive
// data), the functions may not all perform identically outside of an Engine
// as they will inside of an Engine.
//
// Known late-bound functions:
//
//   - "include"
//   - "tpl"
//
// These are late-bound in Engine.Render(). The
// version included in the FuncMap is a placeholder.
func funcMap() template.FuncMap {
	f := sprig.TxtFuncMap()
	// Remove environment-reading functions so templates cannot inspect the
	// process environment of the renderer.
	delete(f, "env")
	delete(f, "expandenv")

	// Add some extra functionality
	extra := template.FuncMap{
		"toToml":        toTOML,
		"toYaml":        toYAML,
		"fromYaml":      fromYAML,
		"fromYamlArray": fromYAMLArray,
		"toJson":        toJSON,
		"fromJson":      fromJSON,
		"fromJsonArray": fromJSONArray,

		// This is a placeholder for the "include" function, which is
		// late-bound to a template. By declaring it here, we preserve the
		// integrity of the linter.
		"include":  func(string, interface{}) string { return "not implemented" },
		"tpl":      func(string, interface{}) interface{} { return "not implemented" },
		"required": func(string, interface{}) (interface{}, error) { return "not implemented", nil },
		// Provide a placeholder for the "lookup" function, which requires a kubernetes
		// connection; it is replaced with NewLookupFunction when a cluster is available.
		"lookup": func(string, string, string, string) (map[string]interface{}, error) {
			return map[string]interface{}{}, nil
		},
	}

	// Extras override any sprig function of the same name.
	for k, v := range extra {
		f[k] = v
	}

	return f
}
+
+// toYAML takes an interface, marshals it to yaml, and returns a string. It will
+// always return a string, even on marshal error (empty string).
+//
+// This is designed to be called from a template.
+func toYAML(v interface{}) string {
+ data, err := yaml.Marshal(v)
+ if err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ return strings.TrimSuffix(string(data), "\n")
+}
+
+// fromYAML converts a YAML document into a map[string]interface{}.
+//
+// This is not a general-purpose YAML parser, and will not parse all valid
+// YAML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string into
+// m["Error"] in the returned map.
+func fromYAML(str string) map[string]interface{} {
+ m := map[string]interface{}{}
+
+ if err := yaml.Unmarshal([]byte(str), &m); err != nil {
+ m["Error"] = err.Error()
+ }
+ return m
+}
+
+// fromYAMLArray converts a YAML array into a []interface{}.
+//
+// This is not a general-purpose YAML parser, and will not parse all valid
+// YAML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string as
+// the first and only item in the returned array.
+func fromYAMLArray(str string) []interface{} {
+ a := []interface{}{}
+
+ if err := yaml.Unmarshal([]byte(str), &a); err != nil {
+ a = []interface{}{err.Error()}
+ }
+ return a
+}
+
// toTOML takes an interface, marshals it to toml, and returns a string. It will
// always return a string, even on marshal error (empty string).
//
// This is designed to be called from a template.
func toTOML(v interface{}) string {
	b := bytes.NewBuffer(nil)
	e := toml.NewEncoder(b)
	err := e.Encode(v)
	if err != nil {
		// NOTE(review): unlike toYAML/toJSON, this returns the error text
		// rather than "" (the doc comment above overstates). Preserved as-is
		// because templates may depend on it.
		return err.Error()
	}
	return b.String()
}
+
// toJSON takes an interface, marshals it to json, and returns a string. It will
// always return a string, even on marshal error (empty string).
//
// This is designed to be called from a template.
func toJSON(v interface{}) string {
	out, err := json.Marshal(v)
	if err != nil {
		// Swallow errors inside of a template.
		return ""
	}
	return string(out)
}
+
// fromJSON converts a JSON document into a map[string]interface{}.
//
// This is not a general-purpose JSON parser, and will not parse all valid
// JSON documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string into
// m["Error"] in the returned map.
func fromJSON(str string) map[string]interface{} {
	m := map[string]interface{}{}

	err := json.Unmarshal([]byte(str), &m)
	if err != nil {
		m["Error"] = err.Error()
	}
	return m
}
+
// fromJSONArray converts a JSON array into a []interface{}.
//
// This is not a general-purpose JSON parser, and will not parse all valid
// JSON documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string as
// the first and only item in the returned array.
func fromJSONArray(str string) []interface{} {
	a := []interface{}{}

	err := json.Unmarshal([]byte(str), &a)
	if err != nil {
		return []interface{}{err.Error()}
	}
	return a
}
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go
new file mode 100644
index 000000000..d1bf1105a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "context"
+ "log"
+ "strings"
+
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/rest"
+)
+
// lookupFunc is the signature of the template "lookup" function.
type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error)

// NewLookupFunction returns a function for looking up objects in the cluster.
//
// If the resource does not exist, no error is raised.
//
// This function is considered deprecated, and will be renamed in Helm 4. It will no
// longer be a public function.
func NewLookupFunction(config *rest.Config) lookupFunc {
	return func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) {
		var client dynamic.ResourceInterface
		c, namespaced, err := getDynamicClientOnKind(apiversion, resource, config)
		if err != nil {
			return map[string]interface{}{}, err
		}
		// Cluster-scoped resources (or an empty namespace argument) use the
		// unscoped client.
		if namespaced && namespace != "" {
			client = c.Namespace(namespace)
		} else {
			client = c
		}
		if name != "" {
			// this will return a single object
			obj, err := client.Get(context.Background(), name, metav1.GetOptions{})
			if err != nil {
				if apierrors.IsNotFound(err) {
					// Just return an empty interface when the object was not found.
					// That way, users can use `if not (lookup ...)` in their templates.
					return map[string]interface{}{}, nil
				}
				return map[string]interface{}{}, err
			}
			return obj.UnstructuredContent(), nil
		}
		// this will return a list
		obj, err := client.List(context.Background(), metav1.ListOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
				// Just return an empty interface when the object was not found.
				// That way, users can use `if not (lookup ...)` in their templates.
				return map[string]interface{}{}, nil
			}
			return map[string]interface{}{}, err
		}
		return obj.UnstructuredContent(), nil
	}
}
+
// getDynamicClientOnKind returns a dynamic client for the resource backing the
// given apiVersion/kind, plus whether that resource is namespaced. The client
// can be further namespaced by the caller.
func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) {
	gvk := schema.FromAPIVersionAndKind(apiversion, kind)
	// Resolve the kind to its resource name (e.g. Pod -> pods) via discovery.
	apiRes, err := getAPIResourceForGVK(gvk, config)
	if err != nil {
		log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err)
		return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String())
	}
	gvr := schema.GroupVersionResource{
		Group:    apiRes.Group,
		Version:  apiRes.Version,
		Resource: apiRes.Name,
	}
	intf, err := dynamic.NewForConfig(config)
	if err != nil {
		log.Printf("[ERROR] unable to get dynamic client %s", err)
		return nil, false, err
	}
	res := intf.Resource(gvr)
	return res, apiRes.Namespaced, nil
}
+
// getAPIResourceForGVK looks up the APIResource for the given GroupVersionKind
// via the discovery API.
//
// NOTE(review): if no resource in the group/version matches the kind, this
// returns a zero-valued APIResource and a nil error; the caller will then
// build a GroupVersionResource with an empty Resource name. Preserved as-is.
func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (metav1.APIResource, error) {
	res := metav1.APIResource{}
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		log.Printf("[ERROR] unable to create discovery client %s", err)
		return res, err
	}
	resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
	if err != nil {
		log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err)
		return res, err
	}
	for _, resource := range resList.APIResources {
		// if a resource contains a "/" it's referencing a subresource. we don't support subresources for now.
		if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") {
			res = resource
			// Discovery omits group/version on list entries; fill them in.
			res.Group = gvk.Group
			res.Version = gvk.Version
			break
		}
	}
	return res, nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/gates/doc.go b/vendor/helm.sh/helm/v3/pkg/gates/doc.go
new file mode 100644
index 000000000..762fdb8c6
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/gates/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package gates provides a general tool for working with experimental feature gates.
+
+This provides convenience methods where the user can determine if certain experimental features are enabled.
+*/
+package gates
diff --git a/vendor/helm.sh/helm/v3/pkg/gates/gates.go b/vendor/helm.sh/helm/v3/pkg/gates/gates.go
new file mode 100644
index 000000000..69559219e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/gates/gates.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gates
+
+import (
+ "fmt"
+ "os"
+)
+
// Gate is the name of the feature gate. Its string value doubles as the
// environment variable that enables the feature.
type Gate string

// String returns the string representation of this feature gate.
func (g Gate) String() string {
	return string(g)
}

// IsEnabled determines whether a certain feature gate is enabled. A gate is
// enabled when its environment variable is set to any non-empty value.
func (g Gate) IsEnabled() bool {
	v := os.Getenv(string(g))
	return v != ""
}

// Error returns the error to surface when a disabled experimental feature
// is used.
func (g Gate) Error() error {
	return fmt.Errorf("this feature has been marked as experimental and is not enabled by default. Please set %s=1 in your environment to use this feature", g.String())
}
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/doc.go b/vendor/helm.sh/helm/v3/pkg/getter/doc.go
new file mode 100644
index 000000000..c53ef1ae0
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/getter/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package getter provides a generalize tool for fetching data by scheme.
+
+This provides a method by which the plugin system can load arbitrary protocol
+handlers based upon a URL scheme.
+*/
+package getter
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go
new file mode 100644
index 000000000..3a0567a87
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go
@@ -0,0 +1,184 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+ "helm.sh/helm/v3/pkg/cli"
+)
+
// options are generic parameters to be provided to the getter during instantiation.
//
// Getters may or may not ignore these parameters as they are passed in.
type options struct {
	url                   string
	certFile              string
	keyFile               string
	caFile                string
	unTar                 bool
	insecureSkipVerifyTLS bool
	username              string
	password              string
	passCredentialsAll    bool
	userAgent             string
	version               string
	registryClient        *registry.Client
	timeout               time.Duration
}

// Option allows specifying various settings configurable by the user for overriding the defaults
// used when performing Get operations with the Getter.
type Option func(*options)

// WithURL informs the getter the server name that will be used when fetching objects. Used in conjunction with
// WithTLSClientConfig to set the TLSClientConfig's server name.
func WithURL(url string) Option {
	return func(opts *options) {
		opts.url = url
	}
}

// WithBasicAuth sets the request's Authorization header to use the provided credentials
func WithBasicAuth(username, password string) Option {
	return func(opts *options) {
		opts.username = username
		opts.password = password
	}
}

// WithPassCredentialsAll, when pass is true, allows the basic-auth credentials
// to be sent to hosts other than the configured repository URL.
func WithPassCredentialsAll(pass bool) Option {
	return func(opts *options) {
		opts.passCredentialsAll = pass
	}
}

// WithUserAgent sets the request's User-Agent header to use the provided agent name.
func WithUserAgent(userAgent string) Option {
	return func(opts *options) {
		opts.userAgent = userAgent
	}
}

// WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked
func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option {
	return func(opts *options) {
		opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS
	}
}

// WithTLSClientConfig sets the client auth with the provided credentials.
func WithTLSClientConfig(certFile, keyFile, caFile string) Option {
	return func(opts *options) {
		opts.certFile = certFile
		opts.keyFile = keyFile
		opts.caFile = caFile
	}
}

// WithTimeout sets the timeout for requests
func WithTimeout(timeout time.Duration) Option {
	return func(opts *options) {
		opts.timeout = timeout
	}
}

// WithTagName sets the version (OCI tag) to fetch.
func WithTagName(tagname string) Option {
	return func(opts *options) {
		opts.version = tagname
	}
}

// WithRegistryClient sets the registry client used by the OCI getter.
func WithRegistryClient(client *registry.Client) Option {
	return func(opts *options) {
		opts.registryClient = client
	}
}

// WithUntar requests that downloaded archives be unpacked.
func WithUntar() Option {
	return func(opts *options) {
		opts.unTar = true
	}
}
+
// Getter is an interface to support GET to the specified URL.
type Getter interface {
	// Get file content by url string
	Get(url string, options ...Option) (*bytes.Buffer, error)
}

// Constructor is the function for every getter which creates a specific instance
// according to the configuration
type Constructor func(options ...Option) (Getter, error)

// Provider represents any getter and the schemes that it supports.
//
// For example, an HTTP provider may provide one getter that handles both
// 'http' and 'https' schemes.
type Provider struct {
	// Schemes lists the URL schemes this provider can fetch.
	Schemes []string
	// New constructs a Getter for one of those schemes.
	New Constructor
}

// Provides returns true if the given scheme is supported by this Provider.
func (p Provider) Provides(scheme string) bool {
	for _, i := range p.Schemes {
		if i == scheme {
			return true
		}
	}
	return false
}

// Providers is a collection of Provider objects.
type Providers []Provider

// ByScheme returns a Getter that handles the given scheme.
//
// If no provider handles this scheme, this will return an error.
// Note that the Getter is built with default options (no Option arguments).
func (p Providers) ByScheme(scheme string) (Getter, error) {
	for _, pp := range p {
		if pp.Provides(scheme) {
			return pp.New()
		}
	}
	return nil, errors.Errorf("scheme %q not supported", scheme)
}
+
// httpProvider serves the standard http/https schemes.
var httpProvider = Provider{
	Schemes: []string{"http", "https"},
	New:     NewHTTPGetter,
}

// ociProvider serves the experimental OCI registry scheme.
var ociProvider = Provider{
	Schemes: []string{registry.OCIScheme},
	New:     NewOCIGetter,
}

// All finds all of the registered getters as a list of Provider instances.
// Currently, the built-in getters and the discovered plugins with downloader
// notations are collected.
func All(settings *cli.EnvSettings) Providers {
	result := Providers{httpProvider, ociProvider}
	// Plugin discovery errors are deliberately ignored: a broken plugin
	// should not disable the built-in getters.
	pluginDownloaders, _ := collectPlugins(settings)
	result = append(result, pluginDownloaders...)
	return result
}
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go
new file mode 100644
index 000000000..454eb6eb6
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go
@@ -0,0 +1,145 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "crypto/tls"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/internal/tlsutil"
+ "helm.sh/helm/v3/internal/urlutil"
+ "helm.sh/helm/v3/internal/version"
+)
+
// HTTPGetter is the default HTTP(/S) backend handler
type HTTPGetter struct {
	// opts accumulates every Option applied across Get calls.
	opts options
}

// Get performs a Get from repo.Getter and returns the body.
// Options passed here are merged into (and persist on) the getter's options.
func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error) {
	for _, opt := range options {
		opt(&g.opts)
	}
	return g.get(href)
}
+
// get performs the actual HTTP fetch of href and returns the response body.
func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) {
	// Set a helm specific user agent so that a repo server and metrics can
	// separate helm calls from other tools interacting with repos.
	req, err := http.NewRequest(http.MethodGet, href, nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("User-Agent", version.GetUserAgent())
	// A user-supplied agent overrides the default helm agent.
	if g.opts.userAgent != "" {
		req.Header.Set("User-Agent", g.opts.userAgent)
	}

	// Before setting the basic auth credentials, make sure the URL associated
	// with the basic auth is the one being fetched.
	u1, err := url.Parse(g.opts.url)
	if err != nil {
		return nil, errors.Wrap(err, "Unable to parse getter URL")
	}
	u2, err := url.Parse(href)
	if err != nil {
		return nil, errors.Wrap(err, "Unable to parse URL getting from")
	}

	// Host on URL (returned from url.Parse) contains the port if present.
	// This check ensures credentials are not passed between different
	// services on different ports.
	if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) {
		if g.opts.username != "" && g.opts.password != "" {
			req.SetBasicAuth(g.opts.username, g.opts.password)
		}
	}

	client, err := g.httpClient()
	if err != nil {
		return nil, err
	}

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Anything other than 200 is treated as a failed fetch.
	if resp.StatusCode != http.StatusOK {
		return nil, errors.Errorf("failed to fetch %s : %s", href, resp.Status)
	}

	// Buffer the whole body in memory; repo indexes and charts are small.
	buf := bytes.NewBuffer(nil)
	_, err = io.Copy(buf, resp.Body)
	return buf, err
}
+
+// NewHTTPGetter constructs a valid http/https client as a Getter
+func NewHTTPGetter(options ...Option) (Getter, error) {
+ var client HTTPGetter
+
+ for _, opt := range options {
+ opt(&client.opts)
+ }
+
+ return &client, nil
+}
+
+func (g *HTTPGetter) httpClient() (*http.Client, error) {
+ transport := &http.Transport{
+ DisableCompression: true,
+ Proxy: http.ProxyFromEnvironment,
+ }
+ if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" {
+ tlsConf, err := tlsutil.NewClientTLS(g.opts.certFile, g.opts.keyFile, g.opts.caFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "can't create TLS config for client")
+ }
+ tlsConf.BuildNameToCertificate()
+
+ sni, err := urlutil.ExtractHostname(g.opts.url)
+ if err != nil {
+ return nil, err
+ }
+ tlsConf.ServerName = sni
+
+ transport.TLSClientConfig = tlsConf
+ }
+
+ if g.opts.insecureSkipVerifyTLS {
+ if transport.TLSClientConfig == nil {
+ transport.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+ } else {
+ transport.TLSClientConfig.InsecureSkipVerify = true
+ }
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ Timeout: g.opts.timeout,
+ }
+
+ return client, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go
new file mode 100644
index 000000000..45c92749c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go
@@ -0,0 +1,86 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "helm.sh/helm/v3/internal/experimental/registry"
+)
+
// OCIGetter is the backend handler for the OCI registry scheme.
type OCIGetter struct {
	// opts accumulates every Option applied across Get calls.
	opts options
}

// Get performs a Get from repo.Getter and returns the body.
// Options passed here are merged into (and persist on) the getter's options.
func (g *OCIGetter) Get(href string, options ...Option) (*bytes.Buffer, error) {
	for _, opt := range options {
		opt(&g.opts)
	}
	return g.get(href)
}
+
// get pulls a chart (or its provenance file) from an OCI registry reference.
func (g *OCIGetter) get(href string) (*bytes.Buffer, error) {
	client := g.opts.registryClient

	// Strip the "oci://" scheme to obtain the bare registry reference.
	ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme))

	var pullOpts []registry.PullOption
	// A ".prov" suffix requests the provenance file instead of the chart.
	requestingProv := strings.HasSuffix(ref, ".prov")
	if requestingProv {
		ref = strings.TrimSuffix(ref, ".prov")
		pullOpts = append(pullOpts,
			registry.PullOptWithChart(false),
			registry.PullOptWithProv(true))
	}

	// Append the requested version as the tag when one was configured.
	if version := g.opts.version; version != "" {
		ref = fmt.Sprintf("%s:%s", ref, version)
	}

	result, err := client.Pull(ref, pullOpts...)
	if err != nil {
		return nil, err
	}

	if requestingProv {
		return bytes.NewBuffer(result.Prov.Data), nil
	}
	return bytes.NewBuffer(result.Chart.Data), nil
}
+
// NewOCIGetter constructs a valid OCI registry client as a Getter.
// A default registry client is created; WithRegistryClient may override it.
func NewOCIGetter(ops ...Option) (Getter, error) {
	registryClient, err := registry.NewClient()
	if err != nil {
		return nil, err
	}

	client := OCIGetter{
		opts: options{
			registryClient: registryClient,
		},
	}

	for _, opt := range ops {
		opt(&client.opts)
	}

	return &client, nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go
new file mode 100644
index 000000000..0d13ade57
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go
@@ -0,0 +1,102 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/cli"
+ "helm.sh/helm/v3/pkg/plugin"
+)
+
+// collectPlugins scans for getter plugins.
+// This will load plugins according to the cli.
+func collectPlugins(settings *cli.EnvSettings) (Providers, error) {
+ plugins, err := plugin.FindPlugins(settings.PluginsDirectory)
+ if err != nil {
+ return nil, err
+ }
+ var result Providers
+ for _, plugin := range plugins {
+ for _, downloader := range plugin.Metadata.Downloaders {
+ result = append(result, Provider{
+ Schemes: downloader.Protocols,
+ New: NewPluginGetter(
+ downloader.Command,
+ settings,
+ plugin.Metadata.Name,
+ plugin.Dir,
+ ),
+ })
+ }
+ }
+ return result, nil
+}
+
// pluginGetter is a generic type to invoke custom downloaders,
// implemented in plugins.
type pluginGetter struct {
	command  string           // downloader command line from plugin.yaml
	settings *cli.EnvSettings // helm CLI environment passed to the plugin
	name     string           // plugin name
	base     string           // plugin directory; command paths resolve under it
	opts     options          // accumulated getter options
}
+
// Get runs downloader plugin command
//
// The plugin is invoked as: <command> <certFile> <keyFile> <caFile> <href>,
// and its stdout is returned as the downloaded content.
func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) {
	for _, opt := range options {
		opt(&p.opts)
	}
	commands := strings.Split(p.command, " ")
	argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href)
	prog := exec.Command(filepath.Join(p.base, commands[0]), argv...)
	// SetupPluginEnv must run before os.Environ() so the HELM_* variables it
	// sets are captured into the child's environment.
	plugin.SetupPluginEnv(p.settings, p.name, p.base)
	prog.Env = os.Environ()
	buf := bytes.NewBuffer(nil)
	prog.Stdout = buf
	prog.Stderr = os.Stderr
	if err := prog.Run(); err != nil {
		if eerr, ok := err.(*exec.ExitError); ok {
			os.Stderr.Write(eerr.Stderr)
			return nil, errors.Errorf("plugin %q exited with error", p.command)
		}
		return nil, err
	}
	return buf, nil
}
+
+// NewPluginGetter constructs a valid plugin getter
+func NewPluginGetter(command string, settings *cli.EnvSettings, name, base string) Constructor {
+ return func(options ...Option) (Getter, error) {
+ result := &pluginGetter{
+ command: command,
+ settings: settings,
+ name: name,
+ base: base,
+ }
+ for _, opt := range options {
+ opt(&result.opts)
+ }
+ return result, nil
+ }
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/home.go b/vendor/helm.sh/helm/v3/pkg/helmpath/home.go
new file mode 100644
index 000000000..bd43e8890
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/helmpath/home.go
@@ -0,0 +1,44 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package helmpath calculates filesystem paths to Helm's configuration, cache and data.
+package helmpath
+
+// lp is the process-wide helper used to build paths to Helm's configuration,
+// cache and data directories ("helm" is appended to the XDG base paths).
+const lp = lazypath("helm")
+
+// ConfigPath returns the path where Helm stores configuration,
+// with any elem components joined beneath it.
+func ConfigPath(elem ...string) string { return lp.configPath(elem...) }
+
+// CachePath returns the path where Helm stores cached objects,
+// with any elem components joined beneath it.
+func CachePath(elem ...string) string { return lp.cachePath(elem...) }
+
+// DataPath returns the path where Helm stores data,
+// with any elem components joined beneath it.
+func DataPath(elem ...string) string { return lp.dataPath(elem...) }
+
+// CacheIndexFile returns the file name of the index for the given named
+// repository ("<name>-index.yaml", or "index.yaml" when name is empty).
+//
+// NOTE(review): despite the package it returns a bare file name, not a full
+// path; callers are expected to join it with CachePath themselves.
+func CacheIndexFile(name string) string {
+ if name != "" {
+ name += "-"
+ }
+ return name + "index.yaml"
+}
+
+// CacheChartsFile returns the file name of the text file listing all the
+// charts within the given named repository ("<name>-charts.txt", or
+// "charts.txt" when name is empty). Like CacheIndexFile, it returns a bare
+// file name rather than a full path.
+func CacheChartsFile(name string) string {
+ if name != "" {
+ name += "-"
+ }
+ return name + "charts.txt"
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go
new file mode 100644
index 000000000..22d7bf0a1
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go
@@ -0,0 +1,72 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helmpath
+
+import (
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v3/pkg/helmpath/xdg"
+)
+
+const (
+ // CacheHomeEnvVar is the environment variable used by Helm
+ // for the cache directory. When no value is set a default is used.
+ CacheHomeEnvVar = "HELM_CACHE_HOME"
+
+ // ConfigHomeEnvVar is the environment variable used by Helm
+ // for the config directory. When no value is set a default is used.
+ ConfigHomeEnvVar = "HELM_CONFIG_HOME"
+
+ // DataHomeEnvVar is the environment variable used by Helm
+ // for the data directory. When no value is set a default is used.
+ DataHomeEnvVar = "HELM_DATA_HOME"
+)
+
+// lazypath is a lazily-resolved path builder for the XDG base directory
+// specification; its value is the application name appended to XDG bases.
+type lazypath string
+
+// path resolves a directory for the given Helm- and XDG-specific environment
+// variables, falling back to defaultFn, and joins elem beneath the result.
+//
+// Note the asymmetry: a Helm-specific variable is used verbatim as the base,
+// whereas an XDG variable (or the default) gets the application name
+// (string(l)) appended to it.
+func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string {
+
+ // There is an order to checking for a path.
+ // 1. See if a Helm specific environment variable has been set.
+ // 2. Check if an XDG environment variable is set
+ // 3. Fall back to a default
+ base := os.Getenv(helmEnvVar)
+ if base != "" {
+ return filepath.Join(base, filepath.Join(elem...))
+ }
+ base = os.Getenv(xdgEnvVar)
+ if base == "" {
+ base = defaultFn()
+ }
+ return filepath.Join(base, string(l), filepath.Join(elem...))
+}
+
+// cachePath defines the base directory relative to which user specific non-essential data files
+// should be stored (HELM_CACHE_HOME, then XDG_CACHE_HOME, then the
+// per-platform cacheHome default).
+func (l lazypath) cachePath(elem ...string) string {
+ return l.path(CacheHomeEnvVar, xdg.CacheHomeEnvVar, cacheHome, filepath.Join(elem...))
+}
+
+// configPath defines the base directory relative to which user specific configuration files should
+// be stored (HELM_CONFIG_HOME, then XDG_CONFIG_HOME, then the per-platform
+// configHome default).
+func (l lazypath) configPath(elem ...string) string {
+ return l.path(ConfigHomeEnvVar, xdg.ConfigHomeEnvVar, configHome, filepath.Join(elem...))
+}
+
+// dataPath defines the base directory relative to which user specific data files should be stored
+// (HELM_DATA_HOME, then XDG_DATA_HOME, then the per-platform dataHome default).
+func (l lazypath) dataPath(elem ...string) string {
+ return l.path(DataHomeEnvVar, xdg.DataHomeEnvVar, dataHome, filepath.Join(elem...))
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go
new file mode 100644
index 000000000..80477abab
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go
@@ -0,0 +1,35 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+// +build darwin
+
+package helmpath
+
+import (
+ "path/filepath"
+
+ "k8s.io/client-go/util/homedir"
+)
+
+// dataHome returns the macOS default data directory ($HOME/Library), used
+// when neither the Helm- nor XDG-specific environment variable is set.
+func dataHome() string {
+ return filepath.Join(homedir.HomeDir(), "Library")
+}
+
+// configHome returns the macOS default config directory
+// ($HOME/Library/Preferences).
+func configHome() string {
+ return filepath.Join(homedir.HomeDir(), "Library", "Preferences")
+}
+
+// cacheHome returns the macOS default cache directory
+// ($HOME/Library/Caches).
+func cacheHome() string {
+ return filepath.Join(homedir.HomeDir(), "Library", "Caches")
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go
new file mode 100644
index 000000000..a5afc1237
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go
@@ -0,0 +1,46 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package helmpath
+
+import (
+ "path/filepath"
+
+ "k8s.io/client-go/util/homedir"
+)
+
+// dataHome defines the base directory relative to which user specific data files should be stored.
+//
+// If $XDG_DATA_HOME is either not set or empty, a default equal to $HOME/.local/share is used.
+// (The environment-variable checks themselves happen in lazypath.path; this
+// function only supplies the final fallback.)
+func dataHome() string {
+ return filepath.Join(homedir.HomeDir(), ".local", "share")
+}
+
+// configHome defines the base directory relative to which user specific configuration files should
+// be stored.
+//
+// If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config is used.
+func configHome() string {
+ return filepath.Join(homedir.HomeDir(), ".config")
+}
+
+// cacheHome defines the base directory relative to which user specific non-essential data files
+// should be stored.
+//
+// If $XDG_CACHE_HOME is either not set or empty, a default equal to $HOME/.cache is used.
+func cacheHome() string {
+ return filepath.Join(homedir.HomeDir(), ".cache")
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go
new file mode 100644
index 000000000..057a3af14
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go
@@ -0,0 +1,24 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package helmpath
+
+import "os"
+
+// dataHome falls back to the same location as configHome (%APPDATA%) on Windows.
+func dataHome() string { return configHome() }
+
+// configHome returns %APPDATA% as the default Windows config directory.
+func configHome() string { return os.Getenv("APPDATA") }
+
+// cacheHome returns %TEMP% as the default Windows cache directory.
+func cacheHome() string { return os.Getenv("TEMP") }
diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go b/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go
new file mode 100644
index 000000000..eaa3e6864
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go
@@ -0,0 +1,34 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package xdg holds constants pertaining to XDG Base Directory Specification.
+//
+// The XDG Base Directory Specification https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+// specifies the environment variables that define user-specific base directories for various categories of files.
+package xdg
+
+const (
+ // CacheHomeEnvVar is the environment variable used by the
+ // XDG base directory specification for the cache directory.
+ CacheHomeEnvVar = "XDG_CACHE_HOME"
+
+ // ConfigHomeEnvVar is the environment variable used by the
+ // XDG base directory specification for the config directory.
+ ConfigHomeEnvVar = "XDG_CONFIG_HOME"
+
+ // DataHomeEnvVar is the environment variable used by the
+ // XDG base directory specification for the data directory.
+ DataHomeEnvVar = "XDG_DATA_HOME"
+)
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/client.go b/vendor/helm.sh/helm/v3/pkg/kube/client.go
new file mode 100644
index 000000000..cc38243ac
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/client.go
@@ -0,0 +1,663 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ jsonpatch "github.com/evanphx/json-patch"
+ "github.com/pkg/errors"
+ batch "k8s.io/api/batch/v1"
+ v1 "k8s.io/api/core/v1"
+ apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ cachetools "k8s.io/client-go/tools/cache"
+ watchtools "k8s.io/client-go/tools/watch"
+ cmdutil "k8s.io/kubectl/pkg/cmd/util"
+)
+
+// ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found.
+var ErrNoObjectsVisited = errors.New("no objects visited")
+
+var metadataAccessor = meta.NewAccessor()
+
+// ManagedFieldsManager is the name of the manager of Kubernetes managedFields
+// first introduced in Kubernetes 1.18
+var ManagedFieldsManager string
+
+// Client represents a client capable of communicating with the Kubernetes API.
+type Client struct {
+ // Factory produces the Kubernetes clients and resource builders used below.
+ Factory Factory
+ // Log is the logging callback; New installs a no-op logger by default.
+ Log func(string, ...interface{})
+ // Namespace allows to bypass the kubeconfig file for the choice of the namespace
+ Namespace string
+
+ // kubeClient is created lazily by getKubeClient and cached for reuse.
+ kubeClient *kubernetes.Clientset
+}
+
+// addToScheme guards the one-time registration of the CRD types below.
+var addToScheme sync.Once
+
+// New creates a new Client.
+//
+// A nil getter falls back to the standard kubeconfig-based config flags.
+func New(getter genericclioptions.RESTClientGetter) *Client {
+ if getter == nil {
+ getter = genericclioptions.NewConfigFlags(true)
+ }
+ // Add CRDs to the scheme. They are missing by default.
+ addToScheme.Do(func() {
+ if err := apiextv1.AddToScheme(scheme.Scheme); err != nil {
+ // This should never happen.
+ panic(err)
+ }
+ if err := apiextv1beta1.AddToScheme(scheme.Scheme); err != nil {
+ panic(err)
+ }
+ })
+ return &Client{
+ Factory: cmdutil.NewFactory(getter),
+ Log: nopLogger,
+ }
+}
+
+// nopLogger discards all log output; it is the Client's default Log.
+var nopLogger = func(_ string, _ ...interface{}) {}
+
+// getKubeClient get or create a new KubernetesClientSet, caching it on the
+// Client for subsequent calls.
+//
+// NOTE(review): the cached field is written without synchronization;
+// concurrent first use could race — confirm callers serialize access.
+func (c *Client) getKubeClient() (*kubernetes.Clientset, error) {
+ var err error
+ if c.kubeClient == nil {
+ c.kubeClient, err = c.Factory.KubernetesClientSet()
+ }
+
+ return c.kubeClient, err
+}
+
+// IsReachable tests connectivity to the cluster by building a client and
+// requesting the server version.
+func (c *Client) IsReachable() error {
+ client, err := c.getKubeClient()
+ if err == genericclioptions.ErrEmptyConfig {
+ // Replace the Kubernetes ErrEmptyConfig error with a friendly one;
+ // a workaround for Kubernetes API breaking changes.
+ return errors.New("Kubernetes cluster unreachable")
+ }
+ if err != nil {
+ return errors.Wrap(err, "Kubernetes cluster unreachable")
+ }
+ if _, err := client.ServerVersion(); err != nil {
+ return errors.Wrap(err, "Kubernetes cluster unreachable")
+ }
+ return nil
+}
+
+// Create creates Kubernetes resources specified in the resource list.
+// All resources are created concurrently (see perform); the first failure
+// aborts and is returned.
+func (c *Client) Create(resources ResourceList) (*Result, error) {
+ c.Log("creating %d resource(s)", len(resources))
+ if err := perform(resources, createResource); err != nil {
+ return nil, err
+ }
+ return &Result{Created: resources}, nil
+}
+
+// Wait waits up to the given timeout for the specified resources to be ready.
+// Paused deployments count as ready (PausedAsReady(true)); jobs are NOT
+// checked — use WaitWithJobs for that.
+func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
+ cs, err := c.getKubeClient()
+ if err != nil {
+ return err
+ }
+ checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
+ w := waiter{
+ c: checker,
+ log: c.Log,
+ timeout: timeout,
+ }
+ return w.waitForResources(resources)
+}
+
+// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
+func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
+ cs, err := c.getKubeClient()
+ if err != nil {
+ return err
+ }
+ checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
+ w := waiter{
+ c: checker,
+ log: c.Log,
+ timeout: timeout,
+ }
+ return w.waitForResources(resources)
+}
+
+// WaitForDelete wait up to the given timeout for the specified resources to be deleted.
+// No ready checker is needed here; the waiter only polls for disappearance.
+func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error {
+ w := waiter{
+ log: c.Log,
+ timeout: timeout,
+ }
+ return w.waitForDeletedResources(resources)
+}
+
+// namespace returns the effective namespace: the explicit Client.Namespace
+// if set, otherwise the kubeconfig's namespace, otherwise "default".
+func (c *Client) namespace() string {
+ if c.Namespace != "" {
+ return c.Namespace
+ }
+ if ns, _, err := c.Factory.ToRawKubeConfigLoader().Namespace(); err == nil {
+ return ns
+ }
+ return v1.NamespaceDefault
+}
+
+// newBuilder returns a new resource builder for structured api objects.
+func (c *Client) newBuilder() *resource.Builder {
+ return c.Factory.NewBuilder().
+ ContinueOnError().
+ NamespaceParam(c.namespace()).
+ DefaultNamespace().
+ Flatten()
+}
+
+// Build validates for Kubernetes objects and returns unstructured infos.
+// When validate is true the objects are schema-checked; validation errors
+// are scrubbed of kubectl-specific advice before being returned.
+func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) {
+ schema, err := c.Factory.Validator(validate)
+ if err != nil {
+ return nil, err
+ }
+ result, err := c.newBuilder().
+ Unstructured().
+ Schema(schema).
+ Stream(reader, "").
+ Do().Infos()
+ return result, scrubValidationError(err)
+}
+
+// Update takes the current list of objects and target list of objects and
+// creates resources that don't already exist, updates resources that have been
+// modified in the target configuration, and deletes resources from the current
+// configuration that are not present in the target configuration. If an error
+// occurs, a Result will still be returned with the error, containing all
+// resource updates, creations, and deletions that were attempted. These can be
+// used for cleanup or other logging purposes.
+func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) {
+ updateErrors := []string{}
+ res := &Result{}
+
+ c.Log("checking %d resources for changes", len(target))
+ err := target.Visit(func(info *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager())
+ if _, err := helper.Get(info.Namespace, info.Name); err != nil {
+ if !apierrors.IsNotFound(err) {
+ return errors.Wrap(err, "could not get information about the resource")
+ }
+
+ // Append the created resource to the results, even if something fails
+ res.Created = append(res.Created, info)
+
+ // Since the resource does not exist, create it.
+ if err := createResource(info); err != nil {
+ return errors.Wrap(err, "failed to create resource")
+ }
+
+ kind := info.Mapping.GroupVersionKind.Kind
+ c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace)
+ return nil
+ }
+
+ originalInfo := original.Get(info)
+ if originalInfo == nil {
+ kind := info.Mapping.GroupVersionKind.Kind
+ return errors.Errorf("no %s with the name %q found", kind, info.Name)
+ }
+
+ if err := updateResource(c, info, originalInfo.Object, force); err != nil {
+ c.Log("error updating the resource %q:\n\t %v", info.Name, err)
+ updateErrors = append(updateErrors, err.Error())
+ }
+ // Because we check for errors later, append the info regardless
+ res.Updated = append(res.Updated, info)
+
+ return nil
+ })
+
+ switch {
+ case err != nil:
+ return res, err
+ case len(updateErrors) != 0:
+ // Use errors.New, not Errorf: the joined string comes from arbitrary
+ // error messages and must not be interpreted as a printf format
+ // (a '%' in any message would otherwise be mangled; go vet flags
+ // non-constant format strings).
+ return res, errors.New(strings.Join(updateErrors, " && "))
+ }
+
+ // Anything present in original but absent from target gets deleted,
+ // unless the keep-resource policy annotation is set.
+ for _, info := range original.Difference(target) {
+ c.Log("Deleting %q in %s...", info.Name, info.Namespace)
+
+ if err := info.Get(); err != nil {
+ c.Log("Unable to get obj %q, err: %s", info.Name, err)
+ continue
+ }
+ annotations, err := metadataAccessor.Annotations(info.Object)
+ if err != nil {
+ c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
+ }
+ if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
+ c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
+ continue
+ }
+ if err := deleteResource(info); err != nil {
+ c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
+ continue
+ }
+ res.Deleted = append(res.Deleted, info)
+ }
+ return res, nil
+}
+
+// Delete deletes Kubernetes resources specified in the resources list. It will
+// attempt to delete all resources even if one or more fail and collect any
+// errors. All successfully deleted items will be returned in the `Deleted`
+// ResourceList that is part of the result.
+//
+// NOTE(review): when any error occurred, nil is returned instead of res, so
+// the partially-successful Deleted list is discarded along with it.
+func (c *Client) Delete(resources ResourceList) (*Result, []error) {
+ var errs []error
+ res := &Result{}
+ // perform runs the callback concurrently, so both errs and res are
+ // guarded by mtx.
+ mtx := sync.Mutex{}
+ err := perform(resources, func(info *resource.Info) error {
+ c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
+ if err := c.skipIfNotFound(deleteResource(info)); err != nil {
+ mtx.Lock()
+ defer mtx.Unlock()
+ // Collect the error and continue on
+ errs = append(errs, err)
+ } else {
+ mtx.Lock()
+ defer mtx.Unlock()
+ res.Deleted = append(res.Deleted, info)
+ }
+ return nil
+ })
+ if err != nil {
+ // Rewrite the message from "no objects visited" if that is what we got
+ // back
+ if err == ErrNoObjectsVisited {
+ err = errors.New("object not found, skipping delete")
+ }
+ errs = append(errs, err)
+ }
+ if errs != nil {
+ return nil, errs
+ }
+ return res, nil
+}
+
+// skipIfNotFound downgrades a Kubernetes NotFound error to a log line and
+// nil; any other error is passed through unchanged.
+func (c *Client) skipIfNotFound(err error) error {
+ if apierrors.IsNotFound(err) {
+ c.Log("%v", err)
+ return nil
+ }
+ return err
+}
+
+// watchTimeout adapts watchUntilReady to the per-resource callback shape
+// expected by perform, binding the timeout t.
+func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error {
+ return func(info *resource.Info) error {
+ return c.watchUntilReady(t, info)
+ }
+}
+
+// WatchUntilReady watches the resources given and waits until it is ready.
+//
+// This method is mainly for hook implementations. It watches for a resource to
+// hit a particular milestone. The milestone depends on the Kind.
+//
+// For most kinds, it checks to see if the resource is marked as Added or Modified
+// by the Kubernetes event stream. For some kinds, it does more:
+//
+// - Jobs: A job is marked "Ready" when it has successfully completed. This is
+//   ascertained by watching the Status fields in a job's output.
+// - Pods: A pod is marked "Ready" when it has successfully completed. This is
+//   ascertained by watching the status.phase field in a pod's output.
+//
+// Handling for other kinds will be added as necessary.
+func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
+ // For jobs, there's also the option to do poll c.Jobs(namespace).Get():
+ // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
+ return perform(resources, c.watchTimeout(timeout))
+}
+
+// perform runs fn against every resource in infos, fanning the work out via
+// batchPerform and collecting one result per resource from the errs channel.
+// An empty list yields ErrNoObjectsVisited.
+//
+// NOTE(review): it returns on the first non-nil error while remaining worker
+// goroutines are still blocked sending on the unbuffered errs channel, so
+// those goroutines leak. Presumably acceptable for short-lived CLI runs —
+// confirm before reusing in a long-lived process.
+func perform(infos ResourceList, fn func(*resource.Info) error) error {
+ if len(infos) == 0 {
+ return ErrNoObjectsVisited
+ }
+
+ errs := make(chan error)
+ go batchPerform(infos, fn, errs)
+
+ for range infos {
+ err := <-errs
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// getManagedFieldsManager returns the manager string. If one was set it will be returned.
+// Otherwise, one is calculated based on the name of the binary.
+func getManagedFieldsManager() string {
+
+ // When a manager is explicitly set use it
+ if ManagedFieldsManager != "" {
+ return ManagedFieldsManager
+ }
+
+ // When no manager is set and no calling application can be found it is unknown
+ if len(os.Args[0]) == 0 {
+ return "unknown"
+ }
+
+ // When there is an application that can be determined and no set manager
+ // use the base name. This is one of the ways Kubernetes libs handle figuring
+ // names out.
+ return filepath.Base(os.Args[0])
+}
+
+// batchPerform invokes fn concurrently for each resource, batching by kind:
+// when the kind changes it first waits for all goroutines of the previous
+// kind to finish. One result per resource is sent on errs; the consumer
+// (perform) is responsible for draining them.
+func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) {
+ var kind string
+ var wg sync.WaitGroup
+ for _, info := range infos {
+ currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind
+ if kind != currentKind {
+ // Kind boundary: wait for the previous batch before starting this one.
+ wg.Wait()
+ kind = currentKind
+ }
+ wg.Add(1)
+ go func(i *resource.Info) {
+ errs <- fn(i)
+ wg.Done()
+ }(info)
+ }
+}
+
+// createResource creates the object described by info on the server and
+// refreshes info with the server's returned state.
+func createResource(info *resource.Info) error {
+ obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object)
+ if err != nil {
+ return err
+ }
+ return info.Refresh(obj, true)
+}
+
+// deleteResource deletes the object described by info using background
+// cascade propagation (dependents are garbage-collected asynchronously).
+func deleteResource(info *resource.Info) error {
+ policy := metav1.DeletePropagationBackground
+ opts := &metav1.DeleteOptions{PropagationPolicy: &policy}
+ _, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts)
+ return err
+}
+
+// createPatch computes the patch that transforms current into target.Object,
+// returning the patch bytes and the patch type to submit: a generic JSON
+// merge patch for unstructured objects and CRDs, and a strategic three-way
+// merge patch (old, new, live) for everything else.
+func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.PatchType, error) {
+ oldData, err := json.Marshal(current)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing current configuration")
+ }
+ newData, err := json.Marshal(target.Object)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing target configuration")
+ }
+
+ // Fetch the current object for the three way merge
+ helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ currentObj, err := helper.Get(target.Namespace, target.Name)
+ if err != nil && !apierrors.IsNotFound(err) {
+ return nil, types.StrategicMergePatchType, errors.Wrapf(err, "unable to get data for current object %s/%s", target.Namespace, target.Name)
+ }
+
+ // Even if currentObj is nil (because it was not found), it will marshal just fine
+ currentData, err := json.Marshal(currentObj)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing live configuration")
+ }
+
+ // Get a versioned object
+ versionedObject := AsVersioned(target)
+
+ // Unstructured objects, such as CRDs, may not have an not registered error
+ // returned from ConvertToVersion. Anything that's unstructured should
+ // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported
+ // on objects like CRDs.
+ _, isUnstructured := versionedObject.(runtime.Unstructured)
+
+ // On newer K8s versions, CRDs aren't unstructured but has this dedicated type
+ _, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition)
+
+ if isUnstructured || isCRD {
+ // fall back to generic JSON merge patch
+ patch, err := jsonpatch.CreateMergePatch(oldData, newData)
+ return patch, types.MergePatchType, err
+ }
+
+ patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, errors.Wrap(err, "unable to create patch metadata from object")
+ }
+
+ patch, err := strategicpatch.CreateThreeWayMergePatch(oldData, newData, currentData, patchMeta, true)
+ return patch, types.StrategicMergePatchType, err
+}
+
+// updateResource reconciles target against currentObj on the server: with
+// force it replaces the object outright, otherwise it computes and submits a
+// patch (no-op when the patch is empty), then refreshes target with the
+// server's state.
+func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
+ var (
+ obj runtime.Object
+ helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ kind = target.Mapping.GroupVersionKind.Kind
+ )
+
+ // if --force is applied, attempt to replace the existing resource with the new object.
+ if force {
+ var err error
+ obj, err = helper.Replace(target.Namespace, target.Name, true, target.Object)
+ if err != nil {
+ return errors.Wrap(err, "failed to replace object")
+ }
+ c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind)
+ } else {
+ patch, patchType, err := createPatch(target, currentObj)
+ if err != nil {
+ return errors.Wrap(err, "failed to create patch")
+ }
+
+ if patch == nil || string(patch) == "{}" {
+ c.Log("Looks like there are no changes for %s %q", target.Mapping.GroupVersionKind.Kind, target.Name)
+ // This needs to happen to make sure that Helm has the latest info from the API
+ // Otherwise there will be no labels and other functions that use labels will panic
+ if err := target.Get(); err != nil {
+ return errors.Wrap(err, "failed to refresh resource information")
+ }
+ return nil
+ }
+ // send patch to server
+ obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
+ if err != nil {
+ return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
+ }
+ }
+
+ // NOTE(review): the Refresh error is deliberately(?) ignored here — the
+ // server-side update already succeeded; confirm that is intended.
+ target.Refresh(obj, true)
+ return nil
+}
+
+// watchUntilReady watches a single Job or Pod until it reaches a terminal
+// milestone (see WatchUntilReady); all other kinds return immediately.
+func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
+ kind := info.Mapping.GroupVersionKind.Kind
+ switch kind {
+ case "Job", "Pod":
+ default:
+ // Only Jobs and Pods have completion semantics we can watch for.
+ return nil
+ }
+
+ c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
+
+ // Use a selector on the name of the resource. This should be unique for the
+ // given version and kind
+ selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
+ if err != nil {
+ return err
+ }
+ lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
+
+ // What we watch for depends on the Kind.
+ // - For a Job, we watch for completion.
+ // - For all else, we watch until Ready.
+ // In the future, we might want to add some special logic for types
+ // like Ingress, Volume, etc.
+
+ ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+ defer cancel()
+ _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
+ // Make sure the incoming object is versioned as we use unstructured
+ // objects when we build manifests
+ obj := convertWithMapper(e.Object, info.Mapping)
+ switch e.Type {
+ case watch.Added, watch.Modified:
+ // For things like a secret or a config map, this is the best indicator
+ // we get. We care mostly about jobs, where what we want to see is
+ // the status go into a good state. For other types, like ReplicaSet
+ // we don't really do anything to support these as hooks.
+ c.Log("Add/Modify event for %s: %v", info.Name, e.Type)
+ switch kind {
+ case "Job":
+ return c.waitForJob(obj, info.Name)
+ case "Pod":
+ return c.waitForPodSuccess(obj, info.Name)
+ }
+ return true, nil
+ case watch.Deleted:
+ c.Log("Deleted event for %s", info.Name)
+ return true, nil
+ case watch.Error:
+ // Handle error and return with an error.
+ c.Log("Error event for %s", info.Name)
+ return true, errors.Errorf("failed to deploy %s", info.Name)
+ default:
+ return false, nil
+ }
+ })
+ return err
+}
+
+// waitForJob is a helper that waits for a job to complete.
+//
+// This operates on an event returned from a watcher. It returns (true, nil)
+// on completion, (true, err) on failure or type mismatch, and (false, nil)
+// to keep watching.
+func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
+ o, ok := obj.(*batch.Job)
+ if !ok {
+ return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
+ }
+
+ // NOTE(review): the loop variable c shadows the receiver inside the loop
+ // body; harmless here, but worth renaming if this block is ever touched.
+ for _, c := range o.Status.Conditions {
+ if c.Type == batch.JobComplete && c.Status == "True" {
+ return true, nil
+ } else if c.Type == batch.JobFailed && c.Status == "True" {
+ return true, errors.Errorf("job failed: %s", c.Reason)
+ }
+ }
+
+ c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded)
+ return false, nil
+}
+
+// waitForPodSuccess is a helper that waits for a pod to complete.
+//
+// This operates on an event returned from a watcher. Succeeded ends the
+// watch successfully, Failed ends it with an error, and Pending/Running
+// keep watching.
+func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
+ o, ok := obj.(*v1.Pod)
+ if !ok {
+ return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
+ }
+
+ switch o.Status.Phase {
+ case v1.PodSucceeded:
+ c.Log("Pod %s succeeded", o.Name)
+ return true, nil
+ case v1.PodFailed:
+ return true, errors.Errorf("pod %s failed", o.Name)
+ case v1.PodPending:
+ c.Log("Pod %s pending", o.Name)
+ case v1.PodRunning:
+ c.Log("Pod %s running", o.Name)
+ }
+
+ return false, nil
+}
+
+// scrubValidationError removes kubectl info from the message.
+// It strips the "--validate=false" advice (a kubectl flag Helm does not
+// expose) from validation errors; other errors pass through unchanged.
+func scrubValidationError(err error) error {
+ if err == nil {
+ return nil
+ }
+ const stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false"
+
+ if strings.Contains(err.Error(), stopValidateMessage) {
+ return errors.New(strings.ReplaceAll(err.Error(), "; "+stopValidateMessage, ""))
+ }
+ return err
+}
+
+// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
+// and returns said phase (PodSucceeded or PodFailed qualify).
+// If the watch expires before the pod completes, PodUnknown is returned.
+func (c *Client) WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) {
+ client, err := c.getKubeClient()
+ if err != nil {
+ return v1.PodUnknown, err
+ }
+ to := int64(timeout)
+ watcher, err := client.CoreV1().Pods(c.namespace()).Watch(context.Background(), metav1.ListOptions{
+ FieldSelector: fmt.Sprintf("metadata.name=%s", name),
+ TimeoutSeconds: &to,
+ })
+ // Bug fix: the Watch error was previously ignored; a failed watch left
+ // watcher nil and the ResultChan call below panicked.
+ if err != nil {
+ return v1.PodUnknown, err
+ }
+
+ for event := range watcher.ResultChan() {
+ p, ok := event.Object.(*v1.Pod)
+ if !ok {
+ return v1.PodUnknown, fmt.Errorf("%s not a pod", name)
+ }
+ switch p.Status.Phase {
+ case v1.PodFailed:
+ return v1.PodFailed, nil
+ case v1.PodSucceeded:
+ return v1.PodSucceeded, nil
+ }
+ }
+
+ return v1.PodUnknown, err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/config.go b/vendor/helm.sh/helm/v3/pkg/kube/config.go
new file mode 100644
index 000000000..e00c9acb1
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/config.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import "k8s.io/cli-runtime/pkg/genericclioptions"
+
// GetConfig returns a Kubernetes client config (genericclioptions.ConfigFlags)
// pre-populated with the given kubeconfig path, context, and namespace.
//
// Deprecated: retained for backward compatibility only.
func GetConfig(kubeconfig, context, namespace string) *genericclioptions.ConfigFlags {
	cf := genericclioptions.NewConfigFlags(true)
	cf.Namespace = &namespace
	cf.Context = &context
	cf.KubeConfig = &kubeconfig
	return cf
}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/converter.go b/vendor/helm.sh/helm/v3/pkg/kube/converter.go
new file mode 100644
index 000000000..3bf0e358c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/converter.go
@@ -0,0 +1,69 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "sync"
+
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
// k8sNativeScheme caches the Kubernetes-native-only scheme built lazily by
// kubernetesNativeScheme; k8sNativeSchemeOnce guards its one-time construction.
var k8sNativeScheme *runtime.Scheme
var k8sNativeSchemeOnce sync.Once
+
// AsVersioned converts the given info into a runtime.Object with the correct
// group and version set. Conversion is delegated to convertWithMapper using
// the info's RESTMapping (which may be nil).
func AsVersioned(info *resource.Info) runtime.Object {
	return convertWithMapper(info.Object, info.Mapping)
}
+
+// convertWithMapper converts the given object with the optional provided
+// RESTMapping. If no mapping is provided, the default schema versioner is used
+func convertWithMapper(obj runtime.Object, mapping *meta.RESTMapping) runtime.Object {
+ s := kubernetesNativeScheme()
+ var gv = runtime.GroupVersioner(schema.GroupVersions(s.PrioritizedVersionsAllGroups()))
+ if mapping != nil {
+ gv = mapping.GroupVersionKind.GroupVersion()
+ }
+ if obj, err := runtime.ObjectConvertor(s).ConvertToVersion(obj, gv); err == nil {
+ return obj
+ }
+ return obj
+}
+
// kubernetesNativeScheme returns a clean *runtime.Scheme with _only_ Kubernetes
// native resources added to it. This is required to break free of custom resources
// that may have been added to scheme.Scheme due to Helm being used as a package in
// combination with e.g. a versioned kube client. If we would not do this, the client
// may attempt to perform e.g. a 3-way-merge strategy patch for custom resources.
func kubernetesNativeScheme() *runtime.Scheme {
	k8sNativeSchemeOnce.Do(func() {
		k8sNativeScheme = runtime.NewScheme()
		// NOTE(review): the AddToScheme error returns are ignored here;
		// registration of built-in types is not expected to fail.
		scheme.AddToScheme(k8sNativeScheme)
		// API extensions are not in the above scheme set,
		// and must thus be added separately.
		apiextensionsv1beta1.AddToScheme(k8sNativeScheme)
		apiextensionsv1.AddToScheme(k8sNativeScheme)
	})
	return k8sNativeScheme
}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/factory.go b/vendor/helm.sh/helm/v3/pkg/kube/factory.go
new file mode 100644
index 000000000..f47f9d9f6
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/factory.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/kubectl/pkg/validation"
+)
+
// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
// of resources and different API sets.
type Factory interface {
	// ToRawKubeConfigLoader returns the kubeconfig loader as-is.
	ToRawKubeConfigLoader() clientcmd.ClientConfig
	// KubernetesClientSet gives you back an external clientset.
	KubernetesClientSet() (*kubernetes.Clientset, error)
	// NewBuilder returns an object that assists in loading objects from both disk and the server
	// and which implements the common patterns for CLI interactions with generic resources.
	NewBuilder() *resource.Builder
	// Validator returns a schema that can validate objects stored on disk.
	Validator(validate bool) (validation.Schema, error)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go
new file mode 100644
index 000000000..0fc953116
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go
@@ -0,0 +1,117 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fake implements various fake KubeClients for use in testing
+package fake
+
+import (
+ "io"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v3/pkg/kube"
+)
+
// FailingKubeClient implements KubeClient for testing purposes. It also has
// additional errors you can set to fail different functions, otherwise it
// delegates all its calls to `PrintingKubeClient`
type FailingKubeClient struct {
	PrintingKubeClient
	CreateError error // returned by Create when non-nil
	WaitError   error // returned by Wait, WaitWithJobs and WaitForDelete when non-nil
	DeleteError error // returned by Delete when non-nil
	WatchUntilReadyError error // returned by WatchUntilReady when non-nil
	UpdateError error // returned by Update when non-nil
	BuildError  error // returned by Build when non-nil
	// BuildUnstructuredError is not referenced by any method in this file;
	// presumably kept for API compatibility — TODO confirm.
	BuildUnstructuredError           error
	WaitAndGetCompletedPodPhaseError error // returned by WaitAndGetCompletedPodPhase when non-nil
	WaitDuration time.Duration // how long Wait sleeps before checking WaitError
}

// Create returns the configured error if set or prints
func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
	if f.CreateError != nil {
		return nil, f.CreateError
	}
	return f.PrintingKubeClient.Create(resources)
}

// Wait sleeps for f.WaitDuration, then returns the configured error if set or prints.
func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error {
	time.Sleep(f.WaitDuration)
	if f.WaitError != nil {
		return f.WaitError
	}
	return f.PrintingKubeClient.Wait(resources, d)
}

// WaitWithJobs returns the configured error if set or prints.
// It shares WaitError with Wait and WaitForDelete rather than having its own field.
func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
	if f.WaitError != nil {
		return f.WaitError
	}
	return f.PrintingKubeClient.WaitWithJobs(resources, d)
}

// WaitForDelete returns the configured error if set or prints.
// It shares WaitError with Wait and WaitWithJobs.
func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
	if f.WaitError != nil {
		return f.WaitError
	}
	return f.PrintingKubeClient.WaitForDelete(resources, d)
}

// Delete returns the configured error if set or prints
func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) {
	if f.DeleteError != nil {
		return nil, []error{f.DeleteError}
	}
	return f.PrintingKubeClient.Delete(resources)
}

// WatchUntilReady returns the configured error if set or prints
func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
	if f.WatchUntilReadyError != nil {
		return f.WatchUntilReadyError
	}
	return f.PrintingKubeClient.WatchUntilReady(resources, d)
}

// Update returns the configured error if set or prints
func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) {
	if f.UpdateError != nil {
		return &kube.Result{}, f.UpdateError
	}
	return f.PrintingKubeClient.Update(r, modified, ignoreMe)
}

// Build returns the configured error if set or prints.
// Note: the incoming validate flag is discarded; delegation always passes false.
func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) {
	if f.BuildError != nil {
		return []*resource.Info{}, f.BuildError
	}
	return f.PrintingKubeClient.Build(r, false)
}

// WaitAndGetCompletedPodPhase returns the configured error if set or prints
func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) {
	if f.WaitAndGetCompletedPodPhaseError != nil {
		return v1.PodSucceeded, f.WaitAndGetCompletedPodPhaseError
	}
	return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d)
}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go
new file mode 100644
index 000000000..1e8cf0066
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go
@@ -0,0 +1,110 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "io"
+ "strings"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v3/pkg/kube"
+)
+
// PrintingKubeClient implements KubeClient, but simply prints the reader to
// the given output.
type PrintingKubeClient struct {
	Out io.Writer // destination for all printed resource lists
}

// IsReachable checks if the cluster is reachable; the printing client always
// reports success.
func (p *PrintingKubeClient) IsReachable() error {
	return nil
}

// Create prints the values of what would be created with a real KubeClient.
func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
	_, err := io.Copy(p.Out, bufferize(resources))
	if err != nil {
		return nil, err
	}
	return &kube.Result{Created: resources}, nil
}

// Wait implements KubeClient Wait: it only prints the resources it was asked
// to wait on and ignores the timeout.
func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error {
	_, err := io.Copy(p.Out, bufferize(resources))
	return err
}

// WaitWithJobs implements KubeClient WaitWithJobs by printing the resources.
func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
	_, err := io.Copy(p.Out, bufferize(resources))
	return err
}

// WaitForDelete implements InterfaceExt WaitForDelete by printing the resources.
func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
	_, err := io.Copy(p.Out, bufferize(resources))
	return err
}

// Delete implements KubeClient delete.
//
// It only prints out the content to be deleted.
func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) {
	_, err := io.Copy(p.Out, bufferize(resources))
	if err != nil {
		return nil, []error{err}
	}
	return &kube.Result{Deleted: resources}, nil
}

// WatchUntilReady implements KubeClient WatchUntilReady.
func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
	_, err := io.Copy(p.Out, bufferize(resources))
	return err
}

// Update implements KubeClient Update; only the modified list is printed and
// reported as updated.
func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) {
	_, err := io.Copy(p.Out, bufferize(modified))
	if err != nil {
		return nil, err
	}
	// TODO: This doesn't completely mock out have some that get created,
	// updated, and deleted. I don't think these are used in any unit tests, but
	// we may want to refactor a way to handle future tests
	return &kube.Result{Updated: modified}, nil
}

// Build implements KubeClient Build; it always returns an empty resource
// list without reading the input.
func (p *PrintingKubeClient) Build(_ io.Reader, _ bool) (kube.ResourceList, error) {
	return []*resource.Info{}, nil
}

// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase;
// it always reports PodSucceeded immediately.
func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) {
	return v1.PodSucceeded, nil
}

// bufferize renders each resource's Info.String() on its own line and
// returns a reader over the combined result.
func bufferize(resources kube.ResourceList) io.Reader {
	var builder strings.Builder
	for _, info := range resources {
		builder.WriteString(info.String() + "\n")
	}
	return strings.NewReader(builder.String())
}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/interface.go b/vendor/helm.sh/helm/v3/pkg/kube/interface.go
new file mode 100644
index 000000000..299e34e95
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/interface.go
@@ -0,0 +1,82 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "io"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+)
+
// Interface represents a client capable of communicating with the Kubernetes API.
//
// A KubernetesClient must be concurrency safe.
type Interface interface {
	// Create creates one or more resources.
	Create(resources ResourceList) (*Result, error)

	// Wait waits up to the given timeout for the specified resources to be ready.
	Wait(resources ResourceList, timeout time.Duration) error

	// WaitWithJobs waits up to the given timeout for the specified resources to be ready, including jobs.
	WaitWithJobs(resources ResourceList, timeout time.Duration) error

	// Delete destroys one or more resources.
	Delete(resources ResourceList) (*Result, []error)

	// WatchUntilReady watches the resources given and waits until it is ready.
	//
	// This method is mainly for hook implementations. It watches for a resource to
	// hit a particular milestone. The milestone depends on the Kind.
	//
	// For Jobs, "ready" means the Job ran to completion (exited without error).
	// For Pods, "ready" means the Pod phase is marked "succeeded".
	// For all other kinds, it means the kind was created or modified without
	// error.
	WatchUntilReady(resources ResourceList, timeout time.Duration) error

	// Update updates one or more resources or creates the resource
	// if it doesn't exist.
	Update(original, target ResourceList, force bool) (*Result, error)

	// Build creates a resource list from a Reader.
	//
	// Reader must contain a YAML stream (one or more YAML documents separated
	// by "\n---\n")
	//
	// Validates against OpenAPI schema if validate is true.
	Build(reader io.Reader, validate bool) (ResourceList, error)

	// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
	// and returns said phase (PodSucceeded or PodFailed qualify).
	WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error)

	// IsReachable checks whether the client is able to connect to the cluster.
	IsReachable() error
}

// InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers.
//
// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface.
type InterfaceExt interface {
	// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
	WaitForDelete(resources ResourceList, timeout time.Duration) error
}

// Compile-time assertions that Client satisfies both interfaces.
var _ Interface = (*Client)(nil)
var _ InterfaceExt = (*Client)(nil)
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/vendor/helm.sh/helm/v3/pkg/kube/ready.go
new file mode 100644
index 000000000..106c0be51
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/ready.go
@@ -0,0 +1,397 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "context"
+
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+
+ deploymentutil "helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util"
+)
+
// ReadyCheckerOption is a function that configures a ReadyChecker.
type ReadyCheckerOption func(*ReadyChecker)

// PausedAsReady returns a ReadyCheckerOption that configures a ReadyChecker
// to consider paused resources to be ready. For example a Deployment
// with spec.paused equal to true would be considered ready.
// When the option is not applied, the field keeps its zero value (false).
func PausedAsReady(pausedAsReady bool) ReadyCheckerOption {
	return func(c *ReadyChecker) {
		c.pausedAsReady = pausedAsReady
	}
}

// CheckJobs returns a ReadyCheckerOption that configures a ReadyChecker
// to consider readiness of Job resources.
// When the option is not applied, the field keeps its zero value (false).
func CheckJobs(checkJobs bool) ReadyCheckerOption {
	return func(c *ReadyChecker) {
		c.checkJobs = checkJobs
	}
}
+
// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
// be used to override defaults.
func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker {
	c := ReadyChecker{
		client: cl,
		log:    log,
	}
	// Fall back to a no-op logger so methods never need a nil check.
	if c.log == nil {
		c.log = nopLogger
	}
	for _, opt := range opts {
		opt(&c)
	}
	return c
}

// ReadyChecker is a type that can check core Kubernetes types for readiness.
type ReadyChecker struct {
	client kubernetes.Interface // API client used to fetch live object state
	log    func(string, ...interface{}) // never nil after NewReadyChecker; defaults to nopLogger
	checkJobs     bool // when true, IsReady also verifies Job completion
	pausedAsReady bool // when true, paused Deployments count as ready
}
+
+// IsReady checks if v is ready. It supports checking readiness for pods,
+// deployments, persistent volume claims, services, daemon sets, custom
+// resource definitions, stateful sets, replication controllers, and replica
+// sets. All other resource kinds are always considered ready.
+//
+// IsReady will fetch the latest state of the object from the server prior to
+// performing readiness checks, and it will return any error encountered.
func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) {
	var (
		// This defaults to true, otherwise we get to a point where
		// things will always return false unless one of the objects
		// that manages pods has been hit
		ok  = true
		err error
	)
	// Each case below re-fetches the object from the API server so readiness
	// is evaluated against live cluster state, not the cached info object.
	switch value := AsVersioned(v).(type) {
	case *corev1.Pod:
		pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
		if err != nil || !c.isPodReady(pod) {
			return false, err
		}
	case *batchv1.Job:
		// Jobs are only checked when the checker was built with CheckJobs(true).
		if c.checkJobs {
			job, err := c.client.BatchV1().Jobs(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
			if err != nil || !c.jobReady(job) {
				return false, err
			}
		}
	case *appsv1.Deployment, *appsv1beta1.Deployment, *appsv1beta2.Deployment, *extensionsv1beta1.Deployment:
		currentDeployment, err := c.client.AppsV1().Deployments(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// If paused deployment will never be ready
		if currentDeployment.Spec.Paused {
			return c.pausedAsReady, nil
		}
		// Find RS associated with deployment
		newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, c.client.AppsV1())
		if err != nil || newReplicaSet == nil {
			return false, err
		}
		if !c.deploymentReady(newReplicaSet, currentDeployment) {
			return false, nil
		}
	case *corev1.PersistentVolumeClaim:
		claim, err := c.client.CoreV1().PersistentVolumeClaims(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if !c.volumeReady(claim) {
			return false, nil
		}
	case *corev1.Service:
		svc, err := c.client.CoreV1().Services(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if !c.serviceReady(svc) {
			return false, nil
		}
	case *extensionsv1beta1.DaemonSet, *appsv1.DaemonSet, *appsv1beta2.DaemonSet:
		ds, err := c.client.AppsV1().DaemonSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if !c.daemonSetReady(ds) {
			return false, nil
		}
	case *apiextv1beta1.CustomResourceDefinition:
		// CRDs are refreshed through the info itself and converted via the
		// global scheme rather than fetched with a typed client.
		if err := v.Get(); err != nil {
			return false, err
		}
		crd := &apiextv1beta1.CustomResourceDefinition{}
		if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil {
			return false, err
		}
		if !c.crdBetaReady(*crd) {
			return false, nil
		}
	case *apiextv1.CustomResourceDefinition:
		if err := v.Get(); err != nil {
			return false, err
		}
		crd := &apiextv1.CustomResourceDefinition{}
		if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil {
			return false, err
		}
		if !c.crdReady(*crd) {
			return false, nil
		}
	case *appsv1.StatefulSet, *appsv1beta1.StatefulSet, *appsv1beta2.StatefulSet:
		sts, err := c.client.AppsV1().StatefulSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if !c.statefulSetReady(sts) {
			return false, nil
		}
	case *corev1.ReplicationController, *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet:
		// Kinds that only manage pods: ready when all selected pods are ready.
		ok, err = c.podsReadyForObject(ctx, v.Namespace, value)
	}
	if !ok || err != nil {
		return false, err
	}
	return true, nil
}
+
+func (c *ReadyChecker) podsReadyForObject(ctx context.Context, namespace string, obj runtime.Object) (bool, error) {
+ pods, err := c.podsforObject(ctx, namespace, obj)
+ if err != nil {
+ return false, err
+ }
+ for _, pod := range pods {
+ if !c.isPodReady(&pod) {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+func (c *ReadyChecker) podsforObject(ctx context.Context, namespace string, obj runtime.Object) ([]corev1.Pod, error) {
+ selector, err := SelectorsForObject(obj)
+ if err != nil {
+ return nil, err
+ }
+ list, err := getPods(ctx, c.client, namespace, selector.String())
+ return list, err
+}
+
+// isPodReady returns true if a pod is ready; false otherwise.
+func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
+ for _, c := range pod.Status.Conditions {
+ if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+ return true
+ }
+ }
+ c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
+ return false
+}
+
+func (c *ReadyChecker) jobReady(job *batchv1.Job) bool {
+ if job.Status.Failed > *job.Spec.BackoffLimit {
+ c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName())
+ return false
+ }
+ if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
+ c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName())
+ return false
+ }
+ return true
+}
+
+func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
+ // ExternalName Services are external to cluster so helm shouldn't be checking to see if they're 'ready' (i.e. have an IP Set)
+ if s.Spec.Type == corev1.ServiceTypeExternalName {
+ return true
+ }
+
+ // Ensure that the service cluster IP is not empty
+ if s.Spec.ClusterIP == "" {
+ c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName())
+ return false
+ }
+
+ // This checks if the service has a LoadBalancer and that balancer has an Ingress defined
+ if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
+ // do not wait when at least 1 external IP is set
+ if len(s.Spec.ExternalIPs) > 0 {
+ c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs)
+ return true
+ }
+
+ if s.Status.LoadBalancer.Ingress == nil {
+ c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName())
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
+ if v.Status.Phase != corev1.ClaimBound {
+ c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName())
+ return false
+ }
+ return true
+}
+
+func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool {
+ expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
+ if !(rs.Status.ReadyReplicas >= expectedReady) {
+ c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
+ return false
+ }
+ return true
+}
+
+func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
+ // If the update strategy is not a rolling update, there will be nothing to wait for
+ if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
+ return true
+ }
+
+ // Make sure all the updated pods have been scheduled
+ if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
+ c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
+ return false
+ }
+ maxUnavailable, err := intstr.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
+ if err != nil {
+ // If for some reason the value is invalid, set max unavailable to the
+ // number of desired replicas. This is the same behavior as the
+ // `MaxUnavailable` function in deploymentutil
+ maxUnavailable = int(ds.Status.DesiredNumberScheduled)
+ }
+
+ expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
+ if !(int(ds.Status.NumberReady) >= expectedReady) {
+ c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady)
+ return false
+ }
+ return true
+}
+
+// Because the v1 extensions API is not available on all supported k8s versions
+// yet and because Go doesn't support generics, we need to have a duplicate
+// function to support the v1beta1 types
+func (c *ReadyChecker) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) bool {
+ for _, cond := range crd.Status.Conditions {
+ switch cond.Type {
+ case apiextv1beta1.Established:
+ if cond.Status == apiextv1beta1.ConditionTrue {
+ return true
+ }
+ case apiextv1beta1.NamesAccepted:
+ if cond.Status == apiextv1beta1.ConditionFalse {
+ // This indicates a naming conflict, but it's probably not the
+ // job of this function to fail because of that. Instead,
+ // we treat it as a success, since the process should be able to
+ // continue.
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
+ for _, cond := range crd.Status.Conditions {
+ switch cond.Type {
+ case apiextv1.Established:
+ if cond.Status == apiextv1.ConditionTrue {
+ return true
+ }
+ case apiextv1.NamesAccepted:
+ if cond.Status == apiextv1.ConditionFalse {
+ // This indicates a naming conflict, but it's probably not the
+ // job of this function to fail because of that. Instead,
+ // we treat it as a success, since the process should be able to
+ // continue.
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
+ // If the update strategy is not a rolling update, there will be nothing to wait for
+ if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
+ return true
+ }
+
+ // Dereference all the pointers because StatefulSets like them
+ var partition int
+ // 1 is the default for replicas if not set
+ var replicas = 1
+ // For some reason, even if the update strategy is a rolling update, the
+ // actual rollingUpdate field can be nil. If it is, we can safely assume
+ // there is no partition value
+ if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
+ partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition)
+ }
+ if sts.Spec.Replicas != nil {
+ replicas = int(*sts.Spec.Replicas)
+ }
+
+ // Because an update strategy can use partitioning, we need to calculate the
+ // number of updated replicas we should have. For example, if the replicas
+ // is set to 3 and the partition is 2, we'd expect only one pod to be
+ // updated
+ expectedReplicas := replicas - partition
+
+ // Make sure all the updated pods have been scheduled
+ if int(sts.Status.UpdatedReplicas) != expectedReplicas {
+ c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
+ return false
+ }
+
+ if int(sts.Status.ReadyReplicas) != replicas {
+ c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+ return false
+ }
+ return true
+}
+
+func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) {
+ list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+ LabelSelector: selector,
+ })
+ return list.Items, err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource.go b/vendor/helm.sh/helm/v3/pkg/kube/resource.go
new file mode 100644
index 000000000..ee8f83a25
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/resource.go
@@ -0,0 +1,85 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import "k8s.io/cli-runtime/pkg/resource"
+
// ResourceList provides convenience methods for comparing collections of
// resource.Info objects (set-style operations: Get, Contains, Difference,
// Intersect) and for visiting them.
type ResourceList []*resource.Info
+
// Append adds val to the list in place. A pointer receiver is required
// because append may reallocate the backing array.
func (r *ResourceList) Append(val *resource.Info) {
	*r = append(*r, val)
}
+
+// Visit implements resource.Visitor.
+func (r ResourceList) Visit(fn resource.VisitorFunc) error {
+ for _, i := range r {
+ if err := fn(i, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Filter returns a new Result with Infos that satisfy the predicate fn.
+func (r ResourceList) Filter(fn func(*resource.Info) bool) ResourceList {
+ var result ResourceList
+ for _, i := range r {
+ if fn(i) {
+ result.Append(i)
+ }
+ }
+ return result
+}
+
+// Get returns the Info from the result that matches the name and kind.
+func (r ResourceList) Get(info *resource.Info) *resource.Info {
+ for _, i := range r {
+ if isMatchingInfo(i, info) {
+ return i
+ }
+ }
+ return nil
+}
+
+// Contains checks to see if an object exists.
+func (r ResourceList) Contains(info *resource.Info) bool {
+ for _, i := range r {
+ if isMatchingInfo(i, info) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference will return a new Result with objects not contained in rs.
+func (r ResourceList) Difference(rs ResourceList) ResourceList {
+ return r.Filter(func(info *resource.Info) bool {
+ return !rs.Contains(info)
+ })
+}
+
// Intersect will return a new Result with objects contained in both Results.
// Membership is decided by rs.Contains, i.e. name/namespace/kind equality.
func (r ResourceList) Intersect(rs ResourceList) ResourceList {
	return r.Filter(rs.Contains)
}
+
+// isMatchingInfo returns true if infos match on Name and GroupVersionKind.
+func isMatchingInfo(a, b *resource.Info) bool {
+ return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go
new file mode 100644
index 000000000..5f391eb50
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go
@@ -0,0 +1,26 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
// ResourcePolicyAnno is the annotation name for a resource policy.
const ResourcePolicyAnno = "helm.sh/resource-policy"

// KeepPolicy is the resource policy type for keep.
//
// This resource policy type allows resources to skip being deleted
// during an uninstallRelease action.
const KeepPolicy = "keep"
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/result.go b/vendor/helm.sh/helm/v3/pkg/kube/result.go
new file mode 100644
index 000000000..c3e171c2e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/result.go
@@ -0,0 +1,28 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
// Result contains the information of created, updated, and deleted resources
// for various kube API calls along with helper methods for using those
// resources.
type Result struct {
	// Created holds resources that did not previously exist and were created.
	Created ResourceList
	// Updated holds resources that already existed and were modified.
	Updated ResourceList
	// Deleted holds resources that were removed.
	Deleted ResourceList
}

// If needed, we can add methods to the Result type for things like diffing
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/wait.go b/vendor/helm.sh/helm/v3/pkg/kube/wait.go
new file mode 100644
index 000000000..8928d6745
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/wait.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/pkg/errors"
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
// waiter repeatedly polls resources, via the embedded ReadyChecker, until
// they reach the desired state or the timeout elapses.
type waiter struct {
	// c reports whether an individual resource is ready.
	c ReadyChecker
	// timeout bounds the total time spent polling.
	timeout time.Duration
	// log receives printf-style progress messages.
	log func(string, ...interface{})
}
+
+// waitForResources polls to get the current status of all pods, PVCs, Services and
+// Jobs(optional) until all are ready or a timeout is reached
+func (w *waiter) waitForResources(created ResourceList) error {
+ w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout)
+
+ ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
+ defer cancel()
+
+ return wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
+ for _, v := range created {
+ ready, err := w.c.IsReady(ctx, v)
+ if !ready || err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+ }, ctx.Done())
+}
+
+// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
+func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
+ w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout)
+
+ ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
+ defer cancel()
+
+ return wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
+ for _, v := range deleted {
+ err := v.Get()
+ if err == nil || !apierrors.IsNotFound(err) {
+ return false, err
+ }
+ }
+ return true, nil
+ }, ctx.Done())
+}
+
+// SelectorsForObject returns the pod label selector for a given object
+//
+// Modified version of https://github.com/kubernetes/kubernetes/blob/v1.14.1/pkg/kubectl/polymorphichelpers/helpers.go#L84
+func SelectorsForObject(object runtime.Object) (selector labels.Selector, err error) {
+ switch t := object.(type) {
+ case *extensionsv1beta1.ReplicaSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1.ReplicaSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.ReplicaSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *corev1.ReplicationController:
+ selector = labels.SelectorFromSet(t.Spec.Selector)
+ case *appsv1.StatefulSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta1.StatefulSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.StatefulSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *extensionsv1beta1.DaemonSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1.DaemonSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.DaemonSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *extensionsv1beta1.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta1.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *batchv1.Job:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *corev1.Service:
+ if t.Spec.Selector == nil || len(t.Spec.Selector) == 0 {
+ return nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name)
+ }
+ selector = labels.SelectorFromSet(t.Spec.Selector)
+
+ default:
+ return nil, fmt.Errorf("selector for %T not implemented", object)
+ }
+
+ return selector, errors.Wrap(err, "invalid label selector")
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/lint.go b/vendor/helm.sh/helm/v3/pkg/lint/lint.go
new file mode 100644
index 000000000..67e76bd3d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/lint.go
@@ -0,0 +1,37 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint // import "helm.sh/helm/v3/pkg/lint"
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v3/pkg/lint/rules"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
+// All runs all of the available linters on the given base directory.
+func All(basedir string, values map[string]interface{}, namespace string, strict bool) support.Linter {
+ // Using abs path to get directory context
+ chartDir, _ := filepath.Abs(basedir)
+
+ linter := support.Linter{ChartDir: chartDir}
+ rules.Chartfile(&linter)
+ rules.ValuesWithOverrides(&linter, values)
+ rules.Templates(&linter, values, namespace, strict)
+ rules.Dependencies(&linter)
+ return linter
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go
new file mode 100644
index 000000000..b49f2cec0
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go
@@ -0,0 +1,210 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v3/pkg/lint/rules"
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/asaskevich/govalidator"
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
// Chartfile runs a set of linter rules related to the Chart.yaml file.
// Each rule is reported through linter.RunLinterRule at the severity given;
// the function returns early only when Chart.yaml itself cannot be parsed.
func Chartfile(linter *support.Linter) {
	chartFileName := "Chart.yaml"
	chartPath := filepath.Join(linter.ChartDir, chartFileName)

	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath))

	chartFile, err := chartutil.LoadChartfile(chartPath)
	validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err))

	// Guard clause. Following linter rules require a parsable ChartFile
	if !validChartFile {
		return
	}

	// type check for Chart.yaml . ignoring error as any parse
	// errors would already be caught in the above load function
	chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath)

	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile))

	// Chart metadata: API version, version semantics, maintainers, sources,
	// icon, type, and dependencies, each as its own lint message.
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile))

	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile))
	// Missing icon is informational only, not an error.
	linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile))
	linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile))
}
+
// validateChartVersionType checks that the raw "version" value in Chart.yaml
// was written as a YAML string (e.g. "1.2.3"), not a number.
func validateChartVersionType(data map[string]interface{}) error {
	return isStringValue(data, "version")
}

// validateChartAppVersionType checks that the raw "appVersion" value in
// Chart.yaml was written as a YAML string, not a number.
func validateChartAppVersionType(data map[string]interface{}) error {
	return isStringValue(data, "appVersion")
}
+
// isStringValue returns an error when data[key] exists but is not a string.
// A missing key is not an error; presence is checked by other rules.
func isStringValue(data map[string]interface{}, key string) error {
	value, ok := data[key]
	if !ok {
		return nil
	}
	// A type assertion is more direct than formatting with %T and comparing
	// the resulting string, and avoids an allocation on the happy path.
	if _, isString := value.(string); !isString {
		return fmt.Errorf("%s should be of type string but it's of type %T", key, value)
	}
	return nil
}
+
// validateChartYamlNotDirectory verifies that chartPath does not refer to a
// directory. A stat failure (e.g. missing file) is deliberately not an error
// here; parse failures are reported by other rules.
func validateChartYamlNotDirectory(chartPath string) error {
	fi, err := os.Stat(chartPath)
	if err != nil {
		return nil
	}
	if fi.IsDir() {
		return errors.New("should be a file, not a directory")
	}
	return nil
}
+
// validateChartYamlFormat converts a Chart.yaml load error into a lint
// message; a nil input means the file parsed cleanly.
func validateChartYamlFormat(chartFileError error) error {
	if chartFileError != nil {
		// fmt.Errorf (stdlib) replaces errors.Errorf from pkg/errors;
		// the rendered message is identical.
		return fmt.Errorf("unable to parse YAML\n\t%s", chartFileError.Error())
	}
	return nil
}
+
// validateChartName checks that Chart.yaml declares a non-empty name.
func validateChartName(cf *chart.Metadata) error {
	if cf.Name == "" {
		return errors.New("name is required")
	}
	return nil
}
+
+func validateChartAPIVersion(cf *chart.Metadata) error {
+ if cf.APIVersion == "" {
+ return errors.New("apiVersion is required. The value must be either \"v1\" or \"v2\"")
+ }
+
+ if cf.APIVersion != chart.APIVersionV1 && cf.APIVersion != chart.APIVersionV2 {
+ return fmt.Errorf("apiVersion '%s' is not valid. The value must be either \"v1\" or \"v2\"", cf.APIVersion)
+ }
+
+ return nil
+}
+
+func validateChartVersion(cf *chart.Metadata) error {
+ if cf.Version == "" {
+ return errors.New("version is required")
+ }
+
+ version, err := semver.NewVersion(cf.Version)
+
+ if err != nil {
+ return errors.Errorf("version '%s' is not a valid SemVer", cf.Version)
+ }
+
+ c, err := semver.NewConstraint(">0.0.0-0")
+ if err != nil {
+ return err
+ }
+ valid, msg := c.Validate(version)
+
+ if !valid && len(msg) > 0 {
+ return errors.Errorf("version %v", msg[0])
+ }
+
+ return nil
+}
+
+func validateChartMaintainer(cf *chart.Metadata) error {
+ for _, maintainer := range cf.Maintainers {
+ if maintainer.Name == "" {
+ return errors.New("each maintainer requires a name")
+ } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) {
+ return errors.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
+ } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) {
+ return errors.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
+ }
+ }
+ return nil
+}
+
+func validateChartSources(cf *chart.Metadata) error {
+ for _, source := range cf.Sources {
+ if source == "" || !govalidator.IsRequestURL(source) {
+ return errors.Errorf("invalid source URL '%s'", source)
+ }
+ }
+ return nil
+}
+
// validateChartIconPresence reports (at Info severity, per the caller) when
// no icon is declared; an icon is recommended but not required.
func validateChartIconPresence(cf *chart.Metadata) error {
	if cf.Icon == "" {
		return errors.New("icon is recommended")
	}
	return nil
}

// validateChartIconURL checks that a declared icon is a well-formed
// request URL; an empty icon is handled by validateChartIconPresence.
func validateChartIconURL(cf *chart.Metadata) error {
	if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) {
		return errors.Errorf("invalid icon URL '%s'", cf.Icon)
	}
	return nil
}
+
// validateChartDependencies rejects a "dependencies" section in Chart.yaml
// for apiVersion v1 charts; inline dependencies are a v2 feature.
func validateChartDependencies(cf *chart.Metadata) error {
	if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV2 {
		return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2)
	}
	return nil
}

// validateChartType rejects the "type" field for apiVersion v1 charts;
// chart types (application/library) are a v2 feature.
func validateChartType(cf *chart.Metadata) error {
	if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV2 {
		return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2)
	}
	return nil
}
+
+// loadChartFileForTypeCheck loads the Chart.yaml
+// in a generic form of a map[string]interface{}, so that the type
+// of the values can be checked
+func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := make(map[string]interface{})
+ err = yaml.Unmarshal(b, &y)
+ return y, err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go
new file mode 100644
index 000000000..abecd1feb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go
@@ -0,0 +1,82 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v3/pkg/lint/rules"
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
// Dependencies runs lints against a chart's dependencies
//
// It checks both directions: every dependency declared in Chart.yaml must be
// present in charts/ (error), and every chart in charts/ should be declared
// in Chart.yaml (warning).
//
// See https://github.com/helm/helm/issues/7910
func Dependencies(linter *support.Linter) {
	c, err := loader.LoadDir(linter.ChartDir)
	// A chart that fails to load cannot be checked further.
	if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) {
		return
	}

	linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c))
	linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c))
}
+
// validateChartFormat converts a chart load error into a lint message;
// a nil input means the chart loaded cleanly.
func validateChartFormat(chartError error) error {
	if chartError != nil {
		// fmt.Errorf (stdlib) replaces errors.Errorf from pkg/errors;
		// the rendered message is identical.
		return fmt.Errorf("unable to load chart\n\t%s", chartError)
	}
	return nil
}
+
+func validateDependencyInChartsDir(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Dependencies() {
+ dependencies[dep.Metadata.Name] = struct{}{}
+ }
+ for _, dep := range c.Metadata.Dependencies {
+ if _, ok := dependencies[dep.Name]; !ok {
+ missing = append(missing, dep.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependencyInMetadata(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Metadata.Dependencies {
+ dependencies[dep.Name] = struct{}{}
+ }
+ for _, dep := range c.Dependencies() {
+ if _, ok := dependencies[dep.Metadata.Name]; !ok {
+ missing = append(missing, dep.Metadata.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go
new file mode 100644
index 000000000..ce19b91d5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v3/pkg/lint/rules"
+
+import (
+ "fmt"
+ "strconv"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/endpoints/deprecation"
+ kscheme "k8s.io/client-go/kubernetes/scheme"
+)
+
var (
	// This should be set in the Makefile based on the version of client-go being imported.
	// These constants will be overwritten with LDFLAGS. The version components must be
	// strings in order for LDFLAGS to set them.
	k8sVersionMajor = "1"
	k8sVersionMinor = "20"
)
+
// deprecatedAPIError indicates that an API is deprecated in Kubernetes.
type deprecatedAPIError struct {
	// Deprecated is the "<apiVersion> <kind>" pair that triggered the error.
	Deprecated string
	// Message is the upstream deprecation warning text.
	Message string
}

// Error implements the error interface by returning the deprecation message.
func (e deprecatedAPIError) Error() string {
	// Return the field directly; the intermediate local in the original
	// added nothing.
	return e.Message
}
+
+func validateNoDeprecations(resource *K8sYamlStruct) error {
+ // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation
+ if resource.APIVersion == "" {
+ return nil
+ }
+ if resource.Kind == "" {
+ return nil
+ }
+
+ runtimeObject, err := resourceToRuntimeObject(resource)
+ if err != nil {
+ // do not error for non-kubernetes resources
+ if runtime.IsNotRegisteredError(err) {
+ return nil
+ }
+ return err
+ }
+ maj, err := strconv.Atoi(k8sVersionMajor)
+ if err != nil {
+ return err
+ }
+ min, err := strconv.Atoi(k8sVersionMinor)
+ if err != nil {
+ return err
+ }
+
+ if !deprecation.IsDeprecated(runtimeObject, maj, min) {
+ return nil
+ }
+ gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind)
+ return deprecatedAPIError{
+ Deprecated: gvk,
+ Message: deprecation.WarningMessage(runtimeObject),
+ }
+}
+
+func resourceToRuntimeObject(resource *K8sYamlStruct) (runtime.Object, error) {
+ scheme := runtime.NewScheme()
+ kscheme.AddToScheme(scheme)
+
+ gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind)
+ out, err := scheme.New(gvk)
+ if err != nil {
+ return nil, err
+ }
+ out.GetObjectKind().SetGroupVersionKind(gvk)
+ return out, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
new file mode 100644
index 000000000..b4bfe33e2
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
@@ -0,0 +1,333 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "k8s.io/apimachinery/pkg/api/validation"
+ apipath "k8s.io/apimachinery/pkg/api/validation/path"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/engine"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
var (
	// crdHookSearch matches the removed Helm v2 "crd-install" hook
	// annotation, with or without quotes around the annotation key.
	crdHookSearch = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`)
	// releaseTimeSearch matches uses of .Release.Time, which was removed
	// in Helm v3.
	releaseTimeSearch = regexp.MustCompile(`\.Release\.Time`)
)
+
+// Templates lints the templates in the Linter.
+func Templates(linter *support.Linter, values map[string]interface{}, namespace string, strict bool) {
+ fpath := "templates/"
+ templatesPath := filepath.Join(linter.ChartDir, fpath)
+
+ templatesDirExist := linter.RunLinterRule(support.WarningSev, fpath, validateTemplatesDir(templatesPath))
+
+ // Templates directory is optional for now
+ if !templatesDirExist {
+ return
+ }
+
+ // Load chart and parse templates
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ options := chartutil.ReleaseOptions{
+ Name: "test-release",
+ Namespace: namespace,
+ }
+
+ cvals, err := chartutil.CoalesceValues(chart, values)
+ if err != nil {
+ return
+ }
+ valuesToRender, err := chartutil.ToRenderValues(chart, cvals, options, nil)
+ if err != nil {
+ linter.RunLinterRule(support.ErrorSev, fpath, err)
+ return
+ }
+ var e engine.Engine
+ e.LintMode = true
+ renderedContentMap, err := e.Render(chart, valuesToRender)
+
+ renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !renderOk {
+ return
+ }
+
+ /* Iterate over all the templates to check:
+ - It is a .yaml file
+ - All the values in the template file is defined
+ - {{}} include | quote
+ - Generated content is a valid Yaml file
+ - Metadata.Namespace is not set
+ */
+ for _, template := range chart.Templates {
+ fileName, data := template.Name, template.Data
+ fpath = fileName
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
+ // These are v3 specific checks to make sure and warn people if their
+ // chart is not compatible with v3
+ linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data))
+
+ // We only apply the following lint rules to yaml files
+ if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" {
+ continue
+ }
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463
+ // Check that all the templates have a matching value
+ // linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate))
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037
+ // linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate)))
+
+ renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)]
+ if strings.TrimSpace(renderedContent) != "" {
+ linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent))
+
+ decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096)
+
+ // Lint all resources if the file contains multiple documents separated by ---
+ for {
+ // Even though K8sYamlStruct only defines a few fields, an error in any other
+ // key will be raised as well
+ var yamlStruct *K8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if err == io.EOF {
+ break
+ }
+
+ // If YAML linting fails, we sill progress. So we don't capture the returned state
+ // on this linter run.
+ linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err))
+
+ if yamlStruct != nil {
+ // NOTE: set to warnings to allow users to support out-of-date kubernetes
+ // Refs https://github.com/helm/helm/issues/8596
+ linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct))
+ linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct))
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent))
+ }
+ }
+ }
+ }
+}
+
// validateTopIndentLevel checks that the content does not start with an indent level > 0.
//
// This error can occur when a template accidentally inserts space. It can cause
// unpredictable errors depending on whether the text is normalized before being passed
// into the YAML parser. So we trap it here.
//
// See https://github.com/helm/helm/issues/8467
func validateTopIndentLevel(content string) error {
	scanner := bufio.NewScanner(bytes.NewBufferString(content))
	// Only the first non-blank line matters; everything after it is the
	// YAML parser's problem.
	for scanner.Scan() {
		line := scanner.Text()
		if strings.TrimSpace(line) == "" {
			continue
		}
		indented := strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t")
		if indented {
			return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line)
		}
		return nil
	}
	// All lines were blank (or the content was empty): report any scan error.
	return scanner.Err()
}
+
// validateTemplatesDir verifies that templatesPath exists and is a directory.
func validateTemplatesDir(templatesPath string) error {
	fi, err := os.Stat(templatesPath)
	if err != nil {
		return errors.New("directory not found")
	}
	if !fi.IsDir() {
		return errors.New("not a directory")
	}
	return nil
}
+
// validateAllowedExtension checks that the template file's extension is one
// of .yaml, .yml, .tpl, or .txt.
func validateAllowedExtension(fileName string) error {
	ext := filepath.Ext(fileName)

	// A switch over the four constant extensions is clearer than looping
	// over a slice; behavior is identical.
	switch ext {
	case ".yaml", ".yml", ".tpl", ".txt":
		return nil
	}
	// fmt.Errorf (stdlib) replaces errors.Errorf from pkg/errors; the
	// rendered message is identical.
	return fmt.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
}
+
// validateYamlContent wraps a YAML decode error for reporting. errors.Wrap
// returns nil when err is nil, so a clean decode produces no lint message —
// do not replace this with an unconditional fmt.Errorf.
func validateYamlContent(err error) error {
	return errors.Wrap(err, "unable to parse YAML")
}
+
+// validateMetadataName uses the correct validation function for the object
+// Kind, or if not set, defaults to the standard definition of a subdomain in
+// DNS (RFC 1123), used by most resources.
+func validateMetadataName(obj *K8sYamlStruct) error {
+ fn := validateMetadataNameFunc(obj)
+ allErrs := field.ErrorList{}
+ for _, msg := range fn(obj.Metadata.Name, false) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg))
+ }
+ if len(allErrs) > 0 {
+ return errors.Wrapf(allErrs.ToAggregate(), "object name does not conform to Kubernetes naming requirements: %q", obj.Metadata.Name)
+ }
+ return nil
+}
+
// validateMetadataNameFunc will return a name validation function for the
// object kind, if defined below.
//
// Rules should match those set in the various api validations:
// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274
// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39
// ...
//
// Implementing here to avoid importing k/k.
//
// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object
// kinds that don't have special requirements, so is the most likely to work if
// new kinds are added.
//
// NOTE: the switch matches on the lowercased Kind, so "Deployment" and
// "deployment" are treated alike.
func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc {
	switch strings.ToLower(obj.Kind) {
	case "pod", "node", "secret", "endpoints", "resourcequota", // core
		"controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
		"autoscaler", // autoscaler
		"cronjob", "job", // batch
		"lease", // coordination
		"endpointslice", // discovery
		"networkpolicy", "ingress", // networking
		"podsecuritypolicy", // policy
		"priorityclass", // scheduling
		"podpreset", // settings
		"storageclass", "volumeattachment", "csinode": // storage
		return validation.NameIsDNSSubdomain
	case "service":
		// Service names must additionally start with a letter (DNS-1035).
		return validation.NameIsDNS1035Label
	case "namespace":
		return validation.ValidateNamespaceName
	case "serviceaccount":
		return validation.ValidateServiceAccountName
	case "certificatesigningrequest":
		// No validation.
		// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140
		return func(name string, prefix bool) []string { return nil }
	case "role", "clusterrole", "rolebinding", "clusterrolebinding":
		// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34
		return func(name string, prefix bool) []string {
			return apipath.IsValidPathSegmentName(name)
		}
	default:
		return validation.NameIsDNSSubdomain
	}
}
+
+func validateNoCRDHooks(manifest []byte) error {
+ if crdHookSearch.Match(manifest) {
+ return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist the crds/ directory at the top level of the chart")
+ }
+ return nil
+}
+
+func validateNoReleaseTime(manifest []byte) error {
+ if releaseTimeSearch.Match(manifest) {
+ return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates")
+ }
+ return nil
+}
+
+// validateMatchSelector ensures that template specs have a selector declared.
+// See https://github.com/helm/helm/issues/1990
+func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error {
+ switch yamlStruct.Kind {
+ case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
+ // verify that matchLabels or matchExpressions is present
+ if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) {
+ return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
+ }
+ }
+ return nil
+}
+func validateListAnnotations(yamlStruct *K8sYamlStruct, manifest string) error {
+ if yamlStruct.Kind == "List" {
+ m := struct {
+ Items []struct {
+ Metadata struct {
+ Annotations map[string]string
+ }
+ }
+ }{}
+
+ if err := yaml.Unmarshal([]byte(manifest), &m); err != nil {
+ return validateYamlContent(err)
+ }
+
+ for _, i := range m.Items {
+ if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok {
+ return errors.New("Annotation 'helm.sh/resource-policy' within List objects are ignored")
+ }
+ }
+ }
+ return nil
+}
+
// K8sYamlStruct stubs a Kubernetes YAML file.
//
// Only the fields the lint rules need are declared; unknown keys in the
// decoded document are tolerated by the YAML decoder.
//
// DEPRECATED: In Helm 4, this will be made a private type, as it is for use only within
// the rules package.
type K8sYamlStruct struct {
	APIVersion string `json:"apiVersion"`
	Kind       string
	Metadata   k8sYamlMetadata
}

// k8sYamlMetadata carries the subset of object metadata used by the lint
// rules. Field names match YAML keys case-insensitively during decode.
type k8sYamlMetadata struct {
	Namespace string
	Name      string
}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go
new file mode 100644
index 000000000..79a294326
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go
@@ -0,0 +1,87 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
+// Values lints a chart's values.yaml file.
+//
+// This function is deprecated and will be removed in Helm 4.
+func Values(linter *support.Linter) {
+ ValuesWithOverrides(linter, map[string]interface{}{})
+}
+
+// ValuesWithOverrides tests the values.yaml file.
+//
+// If a schema is present in the chart, values are tested against that. Otherwise,
+// they are only tested for well-formedness.
+//
+// If additional values are supplied, they are coalesced into the values in values.yaml.
+func ValuesWithOverrides(linter *support.Linter, values map[string]interface{}) {
+ file := "values.yaml"
+ vf := filepath.Join(linter.ChartDir, file)
+ fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf))
+
+ if !fileExists {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, values))
+}
+
+// validateValuesFileExistence checks that a values file is present at the
+// given path. The underlying stat error is intentionally not surfaced: the
+// rule reports only presence or absence.
+func validateValuesFileExistence(valuesPath string) error {
+	if _, err := os.Stat(valuesPath); err != nil {
+		// errors.New rather than Errorf: the message has no format verbs.
+		return errors.New("file does not exist")
+	}
+	return nil
+}
+
+// validateValuesFile parses the values file, overlays any override values,
+// and — when a sidecar JSON schema exists — validates the result against it.
+func validateValuesFile(valuesPath string, overrides map[string]interface{}) error {
+	values, err := chartutil.ReadValuesFile(valuesPath)
+	if err != nil {
+		return errors.Wrap(err, "unable to parse YAML")
+	}
+
+	// Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top
+	// level values against the top-level expectations. Subchart values are not linted.
+	// We could change that. For now, though, we retain that strategy, and thus can
+	// coalesce tables (like reuse-values does) instead of doing the full chart
+	// CoalesceValues
+	coalescedValues := chartutil.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
+	coalescedValues = chartutil.CoalesceTables(coalescedValues, values)
+
+	// The schema lives beside the values file: values.yaml -> values.schema.json.
+	ext := filepath.Ext(valuesPath)
+	schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json"
+	schema, err := ioutil.ReadFile(schemaPath)
+	// The length check deliberately precedes the error check: a missing (or
+	// empty) schema file yields no data, which means "nothing to validate
+	// against", so the rule passes. Only a read failure that still produced
+	// data is reported below.
+	if len(schema) == 0 {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+	return chartutil.ValidateAgainstSingleSchema(coalescedValues, schema)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go
new file mode 100644
index 000000000..b9a9d0918
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package support contains tools for linting charts.
+
+Linting is the process of testing charts for errors or warnings regarding
+formatting, compilation, or standards compliance.
+*/
+package support // import "helm.sh/helm/v3/pkg/lint/support"
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/support/message.go b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go
new file mode 100644
index 000000000..5efbc7a61
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import "fmt"
+
+// Severity indicates the severity of a Message.
+const (
+	// UnknownSev indicates that the severity of the error is unknown, and should not stop processing.
+	UnknownSev = iota
+	// InfoSev indicates information, for example missing values.yaml file
+	InfoSev
+	// WarningSev indicates that something does not meet code standards, but will likely function.
+	WarningSev
+	// ErrorSev indicates that something will not likely function.
+	ErrorSev
+)
+
+// sev matches the *Sev states.
+// Each label's index must equal the value of the corresponding constant above.
+var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"}
+
+// Linter encapsulates a linting run of a particular chart.
+type Linter struct {
+	// Messages accumulates every failing rule recorded by RunLinterRule.
+	Messages []Message
+	// The highest severity of all the failing lint rules
+	HighestSeverity int
+	// ChartDir is the directory of the chart under lint.
+	ChartDir string
+}
+
+// Message describes an error encountered while linting.
+type Message struct {
+	// Severity is one of the *Sev constants
+	Severity int
+	// Path is the chart-relative file the message refers to.
+	Path string
+	Err  error
+}
+
+// Error implements the error interface, rendering the severity label, the
+// path, and the wrapped error.
+func (m Message) Error() string {
+	label := sev[m.Severity]
+	return fmt.Sprintf("[%s] %s: %s", label, m.Path, m.Err.Error())
+}
+
+// NewMessage creates a new Message struct
+func NewMessage(severity int, path string, err error) Message {
+	m := Message{
+		Severity: severity,
+		Path:     path,
+		Err:      err,
+	}
+	return m
+}
+
+// RunLinterRule returns true if the validation passed
+func (l *Linter) RunLinterRule(severity int, path string, err error) bool {
+	if severity < 0 || severity >= len(sev) {
+		// An out-of-range severity counts as a failed rule, but no message
+		// is recorded for it.
+		return false
+	}
+	if err == nil {
+		return true
+	}
+	l.Messages = append(l.Messages, NewMessage(severity, path, err))
+	if severity > l.HighestSeverity {
+		l.HighestSeverity = severity
+	}
+	return false
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go b/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go
new file mode 100644
index 000000000..e3481515f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v3/pkg/plugin"
+
+// Types of hooks
+const (
+	// Install is executed after the plugin is added.
+	Install = "install"
+	// Delete is executed after the plugin is removed.
+	Delete = "delete"
+	// Update is executed after the plugin is updated.
+	Update = "update"
+)
+
+// Hooks is a map of events to commands.
+// Keys are the event names declared above; values are the commands to run.
+type Hooks map[string]string
diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
new file mode 100644
index 000000000..1399b7116
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
@@ -0,0 +1,282 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v3/pkg/plugin"
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/cli"
+)
+
+// PluginFileName is the name of the descriptor file each plugin directory must contain.
+const PluginFileName = "plugin.yaml"
+
+// Downloaders represents the plugins capability if it can retrieve
+// charts from special sources
+type Downloaders struct {
+	// Protocols are the list of schemes from the charts URL.
+	Protocols []string `json:"protocols"`
+	// Command is the executable path with which the plugin performs
+	// the actual download for the corresponding Protocols
+	Command string `json:"command"`
+}
+
+// PlatformCommand represents a command for a particular operating system and architecture
+type PlatformCommand struct {
+	OperatingSystem string `json:"os"`
+	Architecture    string `json:"arch"`
+	Command         string `json:"command"`
+}
+
+// Metadata describes a plugin.
+//
+// This is the plugin equivalent of a chart.Metadata.
+type Metadata struct {
+	// Name is the name of the plugin
+	Name string `json:"name"`
+
+	// Version is a SemVer 2 version of the plugin.
+	Version string `json:"version"`
+
+	// Usage is the single-line usage text shown in help
+	Usage string `json:"usage"`
+
+	// Description is a long description shown in places like `helm help`
+	Description string `json:"description"`
+
+	// Command is the command, as a single string.
+	//
+	// The command will be passed through environment expansion, so env vars can
+	// be present in this command. Unless IgnoreFlags is set, this will
+	// also merge the flags passed from Helm.
+	//
+	// Note that command is not executed in a shell. To do so, we suggest
+	// pointing the command to a shell script.
+	//
+	// The following rules will apply to processing commands:
+	// - If platformCommand is present, it will be searched first
+	// - If both OS and Arch match the current platform, search will stop and the command will be executed
+	// - If OS matches and there is no more specific match, the command will be executed
+	// - If no OS/Arch match is found, the default command will be executed
+	// - If no command is present and no matches are found in platformCommand, Helm will exit with an error
+	PlatformCommand []PlatformCommand `json:"platformCommand"`
+	Command         string            `json:"command"`
+
+	// IgnoreFlags ignores any flags passed in from Helm
+	//
+	// For example, if the plugin is invoked as `helm --debug myplugin`, if this
+	// is false, `--debug` will be appended to `--command`. If this is true,
+	// the `--debug` flag will be discarded.
+	IgnoreFlags bool `json:"ignoreFlags"`
+
+	// Hooks are commands that will run on events.
+	// NOTE(review): this field carries no JSON tag, so it (un)marshals under
+	// the default field name — confirm this matches existing plugin.yaml files.
+	Hooks Hooks
+
+	// Downloaders field is used if the plugin supply downloader mechanism
+	// for special protocols.
+	Downloaders []Downloaders `json:"downloaders"`
+
+	// UseTunnelDeprecated indicates that this command needs a tunnel.
+	// Setting this will cause a number of side effects, such as the
+	// automatic setting of HELM_HOST.
+	// DEPRECATED and unused, but retained for backwards compatibility with Helm 2 plugins. Remove in Helm 4
+	UseTunnelDeprecated bool `json:"useTunnel,omitempty"`
+}
+
+// Plugin represents a plugin.
+type Plugin struct {
+	// Metadata is a parsed representation of a plugin.yaml
+	Metadata *Metadata
+	// Dir is the string path to the directory that holds the plugin.
+	Dir string
+}
+
+// The following rules will apply to processing the Plugin.PlatformCommand.Command:
+// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution
+// - If OS matches and there is no more specific match, the command will be prepared for execution
+// - If no OS/Arch match is found, return nil
+func getPlatformCommand(cmds []PlatformCommand) []string {
+	var osOnlyMatch []string
+	for _, c := range cmds {
+		if !strings.EqualFold(c.OperatingSystem, runtime.GOOS) {
+			continue
+		}
+		parts := strings.Split(os.ExpandEnv(c.Command), " ")
+		if strings.EqualFold(c.Architecture, runtime.GOARCH) {
+			// Exact OS+arch match wins immediately.
+			return parts
+		}
+		// Remember the most recent OS-only match as the fallback.
+		osOnlyMatch = parts
+	}
+	return osOnlyMatch
+}
+
+// PrepareCommand takes a Plugin.PlatformCommand.Command, a Plugin.Command and will applying the following processing:
+// - If platformCommand is present, it will be searched first
+// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution
+// - If OS matches and there is no more specific match, the command will be prepared for execution
+// - If no OS/Arch match is found, the default command will be prepared for execution
+// - If no command is present and no matches are found in platformCommand, will exit with an error
+//
+// It merges extraArgs into any arguments supplied in the plugin. It
+// returns the name of the command and an args array.
+//
+// The result is suitable to pass to exec.Command.
+func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) {
+	var parts []string
+	if len(p.Metadata.PlatformCommand) > 0 {
+		parts = getPlatformCommand(p.Metadata.PlatformCommand)
+	}
+	if parts == nil {
+		// Either no platform commands were declared, or none matched;
+		// fall back to the plain command string.
+		parts = strings.Split(os.ExpandEnv(p.Metadata.Command), " ")
+	}
+	if len(parts) == 0 || parts[0] == "" {
+		return "", nil, fmt.Errorf("no plugin command is applicable")
+	}
+
+	baseArgs := []string{}
+	if len(parts) > 1 {
+		baseArgs = parts[1:]
+	}
+	if !p.Metadata.IgnoreFlags {
+		baseArgs = append(baseArgs, extraArgs...)
+	}
+	return parts[0], baseArgs, nil
+}
+
+// validPluginName is a regular expression that validates plugin names.
+//
+// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, _ and -.
+var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$")
+
+// validatePluginData validates a plugin's YAML data.
+func validatePluginData(plug *Plugin, filepath string) error {
+	name := plug.Metadata.Name
+	if !validPluginName.MatchString(name) {
+		return fmt.Errorf("invalid plugin name at %q", filepath)
+	}
+	plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
+	// Further fields (SemVer version, executable, ...) could also be
+	// validated here should we so choose.
+	return nil
+}
+
+// sanitizeString normalize spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+	mapper := func(r rune) rune {
+		switch {
+		case unicode.IsSpace(r):
+			return ' '
+		case unicode.IsPrint(r):
+			return r
+		default:
+			// Dropping the rune entirely.
+			return -1
+		}
+	}
+	return strings.Map(mapper, str)
+}
+
+// detectDuplicates returns an error if two loaded plugins claim the same name.
+func detectDuplicates(plugs []*Plugin) error {
+	seen := make(map[string]string, len(plugs))
+	for _, p := range plugs {
+		name := p.Metadata.Name
+		if prev, dup := seen[name]; dup {
+			return fmt.Errorf(
+				"two plugins claim the name %q at %q and %q",
+				name,
+				prev,
+				p.Dir,
+			)
+		}
+		seen[name] = p.Dir
+	}
+	return nil
+}
+
+// LoadDir loads a plugin from the given directory.
+func LoadDir(dirname string) (*Plugin, error) {
+	pluginfile := filepath.Join(dirname, PluginFileName)
+	data, rerr := ioutil.ReadFile(pluginfile)
+	if rerr != nil {
+		return nil, errors.Wrapf(rerr, "failed to read plugin at %q", pluginfile)
+	}
+	plug := &Plugin{Dir: dirname}
+	if uerr := yaml.UnmarshalStrict(data, &plug.Metadata); uerr != nil {
+		return nil, errors.Wrapf(uerr, "failed to load plugin at %q", pluginfile)
+	}
+	// The partially-validated plugin is returned alongside any validation error.
+	return plug, validatePluginData(plug, pluginfile)
+}
+
+// LoadAll loads all plugins found beneath the base directory.
+//
+// This scans only one directory level.
+func LoadAll(basedir string) ([]*Plugin, error) {
+	plugins := []*Plugin{}
+	// We want basedir/*/plugin.yaml
+	scanpath := filepath.Join(basedir, "*", PluginFileName)
+	matches, err := filepath.Glob(scanpath)
+	if err != nil {
+		return plugins, errors.Wrapf(err, "failed to find plugins in %q", scanpath)
+	}
+	if len(matches) == 0 {
+		return plugins, nil
+	}
+	for _, pluginfile := range matches {
+		p, err := LoadDir(filepath.Dir(pluginfile))
+		if err != nil {
+			return plugins, err
+		}
+		plugins = append(plugins, p)
+	}
+	return plugins, detectDuplicates(plugins)
+}
+
+// FindPlugins returns a list of YAML files that describe plugins.
+func FindPlugins(plugdirs string) ([]*Plugin, error) {
+	found := []*Plugin{}
+	// Let's get all UNIXy and allow path separators
+	for _, dir := range filepath.SplitList(plugdirs) {
+		plugins, err := LoadAll(dir)
+		if err != nil {
+			return plugins, err
+		}
+		found = append(found, plugins...)
+	}
+	return found, nil
+}
+
+// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because
+// the plugin subsystem itself needs access to the environment variables
+// created here.
+func SetupPluginEnv(settings *cli.EnvSettings, name, base string) {
+	env := settings.EnvVars()
+	// Expose the invoked plugin's identity and location to the subprocess.
+	env["HELM_PLUGIN_NAME"] = name
+	env["HELM_PLUGIN_DIR"] = base
+	// Every Helm CLI env var, plus the two above, is pushed into the process
+	// environment so the plugin (and this process) can read them.
+	for key, val := range env {
+		os.Setenv(key, val)
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/exec.go b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go
new file mode 100644
index 000000000..1de70b024
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go
@@ -0,0 +1,108 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrender
+
+import (
+ "bytes"
+ "io"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+// execRender is a PostRenderer that pipes rendered manifests through an
+// external executable.
+type execRender struct {
+	// binaryPath is the fully-qualified path of the executable to run.
+	binaryPath string
+}
+
+// NewExec returns a PostRenderer implementation that calls the provided binary.
+// It returns an error if the binary cannot be found. If the path does not
+// contain any separators, it will search in $PATH, otherwise it will resolve
+// any relative paths to a fully qualified path
+func NewExec(binaryPath string) (PostRenderer, error) {
+	fullPath, err := getFullPath(binaryPath)
+	if err != nil {
+		return nil, err
+	}
+	renderer := &execRender{binaryPath: fullPath}
+	return renderer, nil
+}
+
+// Run the configured binary for the post render
+func (p *execRender) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+	cmd := exec.Command(p.binaryPath)
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	var postRendered = &bytes.Buffer{}
+	var stderr = &bytes.Buffer{}
+	cmd.Stdout = postRendered
+	cmd.Stderr = stderr
+
+	// Feed the manifests to the child's stdin from a separate goroutine so the
+	// write cannot block the collection of the child's output above.
+	// NOTE(review): the io.Copy error is silently dropped here — presumably a
+	// write failure is expected to surface via cmd.Run's exit status; confirm.
+	go func() {
+		defer stdin.Close()
+		io.Copy(stdin, renderedManifests)
+	}()
+	err = cmd.Run()
+	if err != nil {
+		return nil, errors.Wrapf(err, "error while running command %s. error output:\n%s", p.binaryPath, stderr.String())
+	}
+
+	return postRendered, nil
+}
+
+// getFullPath returns the full filepath to the binary to execute. If the path
+// does not contain any separators, it will search in $PATH, otherwise it will
+// resolve any relative paths to a fully qualified path
+func getFullPath(binaryPath string) (string, error) {
+	// NOTE(thomastaylor312): I am leaving this code commented out here. During
+	// the implementation of post-render, it was brought up that if we are
+	// relying on plugins, we should actually use the plugin system so it can
+	// properly handle multiple OSs. This will be a feature add in the future,
+	// so I left this code for reference. It can be deleted or reused once the
+	// feature is implemented
+
+	// Manually check the plugin dir first
+	// if !strings.Contains(binaryPath, string(filepath.Separator)) {
+	// 	// First check the plugin dir
+	// 	pluginDir := helmpath.DataPath("plugins") // Default location
+	// 	// If location for plugins is explicitly set, check there
+	// 	if v, ok := os.LookupEnv("HELM_PLUGINS"); ok {
+	// 		pluginDir = v
+	// 	}
+	// 	// The plugins variable can actually contain multiple paths, so loop through those
+	// 	for _, p := range filepath.SplitList(pluginDir) {
+	// 		_, err := os.Stat(filepath.Join(p, binaryPath))
+	// 		if err != nil && !os.IsNotExist(err) {
+	// 			return "", err
+	// 		} else if err == nil {
+	// 			binaryPath = filepath.Join(p, binaryPath)
+	// 			break
+	// 		}
+	// 	}
+	// }
+
+	// Now check for the binary using the given path or check if it exists in
+	// the path and is executable
+	checkedPath, err := exec.LookPath(binaryPath)
+	if err != nil {
+		return "", errors.Wrapf(err, "unable to find binary at %s", binaryPath)
+	}
+
+	// LookPath may return a relative result; make it absolute for exec.
+	return filepath.Abs(checkedPath)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go
new file mode 100644
index 000000000..3af384290
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package postrender contains an interface that can be implemented for custom
+// post-renderers and an exec implementation that can be used for arbitrary
+// binaries and scripts
+package postrender
+
+import "bytes"
+
+// PostRenderer is implemented by anything that can transform Helm-rendered
+// manifests before they are used.
+type PostRenderer interface {
+	// Run expects a single buffer filled with Helm rendered manifests. It
+	// expects the modified results to be returned on a separate buffer or an
+	// error if there was an issue or failure while running the post render step
+	Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/provenance/doc.go b/vendor/helm.sh/helm/v3/pkg/provenance/doc.go
new file mode 100644
index 000000000..3d2d0ea97
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/provenance/doc.go
@@ -0,0 +1,37 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package provenance provides tools for establishing the authenticity of a chart.
+
+In Helm, provenance is established via several factors. The primary factor is the
+cryptographic signature of a chart. Chart authors may sign charts, which in turn
+provide the necessary metadata to ensure the integrity of the chart file, the
+Chart.yaml, and the referenced Docker images.
+
+A provenance file is clear-signed. This provides cryptographic verification that
+a particular block of information (Chart.yaml, archive file, images) have not
+been tampered with or altered. To learn more, read the GnuPG documentation on
+clear signatures:
+https://www.gnupg.org/gph/en/manual/x135.html
+
+The cryptography used by Helm should be compatible with OpenGPG. For example,
+you should be able to verify a signature by importing the desired public key
+and using `gpg --verify`, `keybase pgp verify`, or similar:
+
+ $ gpg --verify some.sig
+ gpg: Signature made Mon Jul 25 17:23:44 2016 MDT using RSA key ID 1FC18762
+ gpg: Good signature from "Helm Testing (This key should only be used for testing. DO NOT TRUST.) <helm-testing@helm.sh>" [ultimate]
+*/
+package provenance // import "helm.sh/helm/v3/pkg/provenance"
diff --git a/vendor/helm.sh/helm/v3/pkg/provenance/sign.go b/vendor/helm.sh/helm/v3/pkg/provenance/sign.go
new file mode 100644
index 000000000..5d16779f1
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/provenance/sign.go
@@ -0,0 +1,409 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provenance
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/clearsign"
+ "golang.org/x/crypto/openpgp/packet"
+ "sigs.k8s.io/yaml"
+
+ hapi "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// defaultPGPConfig pins clear-signing to SHA-512 digests.
+var defaultPGPConfig = packet.Config{
+	DefaultHash: crypto.SHA512,
+}
+
+// SumCollection represents a collection of file and image checksums.
+//
+// Files are of the form:
+// FILENAME: "sha256:SUM"
+// Images are of the form:
+// "IMAGE:TAG": "sha256:SUM"
+// Docker optionally supports sha512, and if this is the case, the hash marker
+// will be 'sha512' instead of 'sha256'.
+type SumCollection struct {
+	Files  map[string]string `json:"files"`
+	Images map[string]string `json:"images,omitempty"`
+}
+
+// Verification contains information about a verification operation.
+type Verification struct {
+	// SignedBy contains the entity that signed a chart.
+	SignedBy *openpgp.Entity
+	// FileHash is the hash, prepended with the scheme, for the file that was verified.
+	FileHash string
+	// FileName is the name of the file that FileHash verifies.
+	FileName string
+}
+
+// Signatory signs things.
+//
+// Signatories can be constructed from a PGP private key file using NewFromFiles
+// or they can be constructed manually by setting the Entity to a valid
+// PGP entity.
+//
+// The same Signatory can be used to sign or validate multiple charts.
+type Signatory struct {
+	// The signatory for this instance of Helm. This is used for signing.
+	Entity *openpgp.Entity
+	// The keyring for this instance of Helm. This is used for verification.
+	KeyRing openpgp.EntityList
+}
+
+// NewFromFiles constructs a new Signatory from the PGP key in the given filename.
+//
+// This will emit an error if it cannot find a valid GPG keyfile (entity) at the
+// given location.
+//
+// Note that the keyfile may have just a public key, just a private key, or
+// both. The Signatory methods may have different requirements of the keys. For
+// example, ClearSign must have a valid `openpgp.Entity.PrivateKey` before it
+// can sign something.
+func NewFromFiles(keyfile, keyringfile string) (*Signatory, error) {
+	entity, err := loadKey(keyfile)
+	if err != nil {
+		return nil, err
+	}
+	keyring, err := loadKeyRing(keyringfile)
+	if err != nil {
+		return nil, err
+	}
+	s := &Signatory{
+		Entity:  entity,
+		KeyRing: keyring,
+	}
+	return s, nil
+}
+
+// NewFromKeyring reads a keyring file and creates a Signatory.
+//
+// If id is not the empty string, this will also try to find an Entity in the
+// keyring whose name matches, and set that as the signing entity. It will return
+// an error if the id is not empty and also not found.
+func NewFromKeyring(keyringfile, id string) (*Signatory, error) {
+	ring, err := loadKeyRing(keyringfile)
+	if err != nil {
+		return nil, err
+	}
+
+	s := &Signatory{KeyRing: ring}
+
+	// If the ID is empty, we can return now.
+	if id == "" {
+		return s, nil
+	}
+
+	// We're gonna go all GnuPG on this and look for a string that _contains_. If
+	// two or more keys contain the string and none are a direct match, we error
+	// out.
+	var candidate *openpgp.Entity
+	vague := false
+	for _, e := range ring {
+		for n := range e.Identities {
+			// An exact identity match always wins and ends the search.
+			if n == id {
+				s.Entity = e
+				return s, nil
+			}
+			// A substring match is only a candidate; a second candidate
+			// makes the id ambiguous ("vague").
+			if strings.Contains(n, id) {
+				if candidate != nil {
+					vague = true
+				}
+				candidate = e
+			}
+		}
+	}
+	if vague {
+		return s, errors.Errorf("more than one key contain the id %q", id)
+	}
+
+	// NOTE: candidate may still be nil here, in which case the caller receives
+	// a Signatory with a nil Entity.
+	s.Entity = candidate
+	return s, nil
+}
+
+// PassphraseFetcher returns a passphrase for decrypting keys.
+//
+// This is used as a callback to read a passphrase from some other location. The
+// given name is the Name field on the key, typically of the form:
+//
+//	USER_NAME (COMMENT) <EMAIL>
+type PassphraseFetcher func(name string) ([]byte, error)
+
+// DecryptKey decrypts a private key in the Signatory.
+//
+// If the key is not encrypted, this will return without error.
+//
+// If the key does not exist, this will return an error.
+//
+// If the key exists, but cannot be unlocked with the passphrase returned by
+// the PassphraseFetcher, this will return an error.
+//
+// If the key is successfully unlocked, it will return nil.
+func (s *Signatory) DecryptKey(fn PassphraseFetcher) error {
+	if s.Entity == nil {
+		return errors.New("private key not found")
+	}
+	if s.Entity.PrivateKey == nil {
+		return errors.New("provided key is not a private key. Try providing a keyring with secret keys")
+	}
+
+	// Nothing else to do if key is not encrypted.
+	if !s.Entity.PrivateKey.Encrypted {
+		return nil
+	}
+
+	// Pick the first non-empty identity name to present to the fetcher.
+	fname := "Unknown"
+	for name := range s.Entity.Identities {
+		if name != "" {
+			fname = name
+			break
+		}
+	}
+
+	passphrase, err := fn(fname)
+	if err != nil {
+		return err
+	}
+	return s.Entity.PrivateKey.Decrypt(passphrase)
+}
+
+// ClearSign signs a chart with the given key.
+//
+// This takes the path to a chart archive file and a key, and it returns a clear signature.
+//
+// The Signatory must have a valid Entity.PrivateKey for this to work. If it does
+// not, an error will be returned.
+func (s *Signatory) ClearSign(chartpath string) (string, error) {
+	if s.Entity == nil {
+		return "", errors.New("private key not found")
+	} else if s.Entity.PrivateKey == nil {
+		return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys")
+	}
+
+	if fi, err := os.Stat(chartpath); err != nil {
+		return "", err
+	} else if fi.IsDir() {
+		return "", errors.New("cannot sign a directory")
+	}
+
+	out := bytes.NewBuffer(nil)
+
+	b, err := messageBlock(chartpath)
+	if err != nil {
+		// BUG FIX: this previously returned ("", nil), swallowing the error
+		// and reporting success with an empty signature.
+		return "", err
+	}
+
+	// Sign the buffer
+	w, err := clearsign.Encode(out, s.Entity.PrivateKey, &defaultPGPConfig)
+	if err != nil {
+		return "", err
+	}
+	_, err = io.Copy(w, b)
+	// Close must run before out is read: it flushes the trailing signature.
+	w.Close()
+	return out.String(), err
+}
+
+// Verify checks a signature and verifies that it is legit for a chart.
+//
+// On success it returns a Verification describing the signer and the verified
+// file; on failure the returned Verification is only partially populated.
+func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) {
+	ver := &Verification{}
+	// Both the chart archive and the signature must be regular files.
+	for _, fname := range []string{chartpath, sigpath} {
+		if fi, err := os.Stat(fname); err != nil {
+			return ver, err
+		} else if fi.IsDir() {
+			return ver, errors.Errorf("%s cannot be a directory", fname)
+		}
+	}
+
+	// First verify the signature
+	sig, err := s.decodeSignature(sigpath)
+	if err != nil {
+		return ver, errors.Wrap(err, "failed to decode signature")
+	}
+
+	by, err := s.verifySignature(sig)
+	if err != nil {
+		return ver, err
+	}
+	ver.SignedBy = by
+
+	// Second, verify the hash of the tarball.
+	sum, err := DigestFile(chartpath)
+	if err != nil {
+		return ver, err
+	}
+	_, sums, err := parseMessageBlock(sig.Plaintext)
+	if err != nil {
+		return ver, err
+	}
+
+	// The provenance file records sums keyed by the archive's base name.
+	sum = "sha256:" + sum
+	basename := filepath.Base(chartpath)
+	if sha, ok := sums.Files[basename]; !ok {
+		return ver, errors.Errorf("provenance does not contain a SHA for a file named %q", basename)
+	} else if sha != sum {
+		return ver, errors.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum)
+	}
+	ver.FileHash = sum
+	ver.FileName = basename
+
+	// TODO: when image signing is added, verify that here.
+
+	return ver, nil
+}
+
+// decodeSignature reads filename and extracts its clearsign signature block.
+func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	block, _ := clearsign.Decode(data)
+	if block == nil {
+		// The file contained no clearsign signature block.
+		return nil, errors.New("signature block not found")
+	}
+	return block, nil
+}
+
+// verifySignature verifies that the given block is validly signed, and returns the signer.
+func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) {
+	signed := bytes.NewBuffer(block.Bytes)
+	armored := block.ArmoredSignature.Body
+	return openpgp.CheckDetachedSignature(s.KeyRing, signed, armored)
+}
+
+// messageBlock builds the cleartext body of a provenance file for the chart
+// archive at chartpath: the chart's metadata as YAML, a document-end marker
+// ("..."), then a SumCollection holding the archive's sha256 checksum.
+func messageBlock(chartpath string) (*bytes.Buffer, error) {
+	var b *bytes.Buffer
+	// Checksum the archive
+	chash, err := DigestFile(chartpath)
+	if err != nil {
+		return b, err
+	}
+
+	base := filepath.Base(chartpath)
+	sums := &SumCollection{
+		Files: map[string]string{
+			base: "sha256:" + chash,
+		},
+	}
+
+	// Load the archive into memory.
+	chart, err := loader.LoadFile(chartpath)
+	if err != nil {
+		return b, err
+	}
+
+	// Buffer a hash + checksums YAML file
+	data, err := yaml.Marshal(chart.Metadata)
+	if err != nil {
+		return b, err
+	}
+
+	// FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP
+	// clearsign block. So we use ...\n, which is the YAML document end marker.
+	// http://yaml.org/spec/1.2/spec.html#id2800168
+	b = bytes.NewBuffer(data)
+	b.WriteString("\n...\n")
+
+	data, err = yaml.Marshal(sums)
+	if err != nil {
+		return b, err
+	}
+	b.Write(data)
+
+	return b, nil
+}
+
+// parseMessageBlock splits a provenance message into its chart metadata and
+// checksum sections, which are separated by the YAML document-end marker
+// ("...").
+func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) {
+	parts := bytes.Split(data, []byte("\n...\n"))
+	if len(parts) < 2 {
+		return nil, nil, errors.New("message block must have at least two parts")
+	}
+
+	md := &hapi.Metadata{}
+	sc := &SumCollection{}
+	if err := yaml.Unmarshal(parts[0], md); err != nil {
+		return md, sc, err
+	}
+	return md, sc, yaml.Unmarshal(parts[1], sc)
+}
+
+// loadKey loads a GPG key found at a particular path.
+func loadKey(keypath string) (*openpgp.Entity, error) {
+	f, err := os.Open(keypath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return openpgp.ReadEntity(packet.NewReader(f))
+}
+
+// loadKeyRing loads an entire GPG keyring found at a particular path.
+func loadKeyRing(ringpath string) (openpgp.EntityList, error) {
+	f, err := os.Open(ringpath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return openpgp.ReadKeyRing(f)
+}
+
+// DigestFile calculates a SHA256 hash (like Docker) for a given file.
+//
+// It takes the path to the archive file, and returns a string representation of
+// the SHA256 sum.
+//
+// The intended use of this function is to generate a sum of a chart TGZ file.
+func DigestFile(filename string) (string, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	return Digest(f)
+}
+
+// Digest hashes a reader and returns a SHA256 digest.
+//
+// Helm uses SHA256 as its default hash for all non-cryptographic applications.
+func Digest(in io.Reader) (string, error) {
+	hash := crypto.SHA256.New()
+	if _, err := io.Copy(hash, in); err != nil {
+		// BUG FIX: this previously returned ("", nil), reporting success with
+		// an empty digest when the read failed.
+		return "", err
+	}
+	return hex.EncodeToString(hash.Sum(nil)), nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/hook.go b/vendor/helm.sh/helm/v3/pkg/release/hook.go
new file mode 100644
index 000000000..cb9955582
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/hook.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+ "helm.sh/helm/v3/pkg/time"
+)
+
// HookEvent specifies the point in a release's lifecycle at which a hook fires.
type HookEvent string

// Hook event types, matching the values accepted in the "helm.sh/hook" annotation.
const (
	HookPreInstall   HookEvent = "pre-install"
	HookPostInstall  HookEvent = "post-install"
	HookPreDelete    HookEvent = "pre-delete"
	HookPostDelete   HookEvent = "post-delete"
	HookPreUpgrade   HookEvent = "pre-upgrade"
	HookPostUpgrade  HookEvent = "post-upgrade"
	HookPreRollback  HookEvent = "pre-rollback"
	HookPostRollback HookEvent = "post-rollback"
	HookTest         HookEvent = "test"
)

// String returns the event name as it appears in hook annotations.
func (x HookEvent) String() string { return string(x) }

// HookDeletePolicy specifies the hook delete policy
type HookDeletePolicy string

// Hook delete policy types, matching the values accepted in the
// "helm.sh/hook-delete-policy" annotation.
const (
	HookSucceeded          HookDeletePolicy = "hook-succeeded"
	HookFailed             HookDeletePolicy = "hook-failed"
	HookBeforeHookCreation HookDeletePolicy = "before-hook-creation"
)

// String returns the delete policy name as it appears in hook annotations.
func (x HookDeletePolicy) String() string { return string(x) }

// HookAnnotation is the annotation key that marks a manifest as a hook.
const HookAnnotation = "helm.sh/hook"

// HookWeightAnnotation is the annotation key for a hook weight
const HookWeightAnnotation = "helm.sh/hook-weight"

// HookDeleteAnnotation is the annotation key for the delete policy for a hook
const HookDeleteAnnotation = "helm.sh/hook-delete-policy"
+
// Hook defines a hook object.
type Hook struct {
	// Name is the resource name taken from the hook manifest's metadata.
	Name string `json:"name,omitempty"`
	// Kind is the Kubernetes kind.
	Kind string `json:"kind,omitempty"`
	// Path is the chart-relative path to the template.
	Path string `json:"path,omitempty"`
	// Manifest is the manifest contents.
	Manifest string `json:"manifest,omitempty"`
	// Events are the events that this hook fires on.
	Events []HookEvent `json:"events,omitempty"`
	// LastRun indicates the date/time this was last run.
	LastRun HookExecution `json:"last_run,omitempty"`
	// Weight indicates the sort order for execution among similar Hook type
	Weight int `json:"weight,omitempty"`
	// DeletePolicies are the policies that indicate when to delete the hook
	DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"`
}

// A HookExecution records the result for the last execution of a hook for a given release.
type HookExecution struct {
	// StartedAt indicates the date/time this hook was started
	StartedAt time.Time `json:"started_at,omitempty"`
	// CompletedAt indicates the date/time this hook was completed.
	CompletedAt time.Time `json:"completed_at,omitempty"`
	// Phase indicates whether the hook completed successfully
	Phase HookPhase `json:"phase"`
}

// A HookPhase indicates the state of a hook execution
type HookPhase string

const (
	// HookPhaseUnknown indicates that a hook is in an unknown state
	HookPhaseUnknown HookPhase = "Unknown"
	// HookPhaseRunning indicates that a hook is currently executing
	HookPhaseRunning HookPhase = "Running"
	// HookPhaseSucceeded indicates that hook execution succeeded
	HookPhaseSucceeded HookPhase = "Succeeded"
	// HookPhaseFailed indicates that hook execution failed
	HookPhaseFailed HookPhase = "Failed"
)

// String converts a hook phase to a printable string
func (x HookPhase) String() string { return string(x) }
diff --git a/vendor/helm.sh/helm/v3/pkg/release/info.go b/vendor/helm.sh/helm/v3/pkg/release/info.go
new file mode 100644
index 000000000..0cb2bab64
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/info.go
@@ -0,0 +1,36 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+ "helm.sh/helm/v3/pkg/time"
+)
+
// Info describes release information.
type Info struct {
	// FirstDeployed is when the release was first deployed.
	FirstDeployed time.Time `json:"first_deployed,omitempty"`
	// LastDeployed is when the release was last deployed.
	LastDeployed time.Time `json:"last_deployed,omitempty"`
	// Deleted tracks when this object was deleted.
	// Note: unlike the other timestamps, this field is always serialized
	// (no omitempty), so a live release encodes a zero time here.
	Deleted time.Time `json:"deleted"`
	// Description is human-friendly "log entry" about this release.
	Description string `json:"description,omitempty"`
	// Status is the current state of the release
	Status Status `json:"status,omitempty"`
	// Notes contains the rendered templates/NOTES.txt if available.
	Notes string `json:"notes,omitempty"`
}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/mock.go b/vendor/helm.sh/helm/v3/pkg/release/mock.go
new file mode 100644
index 000000000..a28e1dc16
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/mock.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+ "fmt"
+ "math/rand"
+
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/time"
+)
+
// MockHookTemplate is the hook template used for all mock release objects.
// It carries a pre-install hook annotation so it is classified as a hook.
var MockHookTemplate = `apiVersion: v1
kind: Job
metadata:
  annotations:
    "helm.sh/hook": pre-install
`

// MockManifest is the manifest used for all mock release objects.
var MockManifest = `apiVersion: v1
kind: Secret
metadata:
  name: fixture
`

// MockReleaseOptions allows for user-configurable options on mock release objects.
// Zero-valued fields are replaced with defaults by Mock.
type MockReleaseOptions struct {
	Name      string
	Version   int
	Chart     *chart.Chart
	Status    Status
	Namespace string
}
+
+// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing.
+func Mock(opts *MockReleaseOptions) *Release {
+ date := time.Unix(242085845, 0).UTC()
+
+ name := opts.Name
+ if name == "" {
+ name = "testrelease-" + fmt.Sprint(rand.Intn(100))
+ }
+
+ version := 1
+ if opts.Version != 0 {
+ version = opts.Version
+ }
+
+ namespace := opts.Namespace
+ if namespace == "" {
+ namespace = "default"
+ }
+
+ ch := opts.Chart
+ if opts.Chart == nil {
+ ch = &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "foo",
+ Version: "0.1.0-beta.1",
+ AppVersion: "1.0",
+ },
+ Templates: []*chart.File{
+ {Name: "templates/foo.tpl", Data: []byte(MockManifest)},
+ },
+ }
+ }
+
+ scode := StatusDeployed
+ if len(opts.Status) > 0 {
+ scode = opts.Status
+ }
+
+ info := &Info{
+ FirstDeployed: date,
+ LastDeployed: date,
+ Status: scode,
+ Description: "Release mock",
+ Notes: "Some mock release notes!",
+ }
+
+ return &Release{
+ Name: name,
+ Info: info,
+ Chart: ch,
+ Config: map[string]interface{}{"name": "value"},
+ Version: version,
+ Namespace: namespace,
+ Hooks: []*Hook{
+ {
+ Name: "pre-install-hook",
+ Kind: "Job",
+ Path: "pre-install-hook.yaml",
+ Manifest: MockHookTemplate,
+ LastRun: HookExecution{},
+ Events: []HookEvent{HookPreInstall},
+ },
+ },
+ Manifest: MockManifest,
+ }
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/release.go b/vendor/helm.sh/helm/v3/pkg/release/release.go
new file mode 100644
index 000000000..b90612873
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/release.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import "helm.sh/helm/v3/pkg/chart"
+
// Release describes a deployment of a chart, together with the chart
// and the variables used to deploy that chart.
type Release struct {
	// Name is the name of the release
	Name string `json:"name,omitempty"`
	// Info provides information about a release
	Info *Info `json:"info,omitempty"`
	// Chart is the chart that was released.
	Chart *chart.Chart `json:"chart,omitempty"`
	// Config is the set of extra Values added to the chart.
	// These values override the default values inside of the chart.
	Config map[string]interface{} `json:"config,omitempty"`
	// Manifest is the string representation of the rendered template.
	Manifest string `json:"manifest,omitempty"`
	// Hooks are all of the hooks declared for this release.
	Hooks []*Hook `json:"hooks,omitempty"`
	// Version is an int which represents the revision of the release.
	Version int `json:"version,omitempty"`
	// Namespace is the kubernetes namespace of the release.
	Namespace string `json:"namespace,omitempty"`
	// Labels of the release.
	// Disabled encoding into Json cause labels are stored in storage driver metadata field.
	Labels map[string]string `json:"-"`
}

// SetStatus is a helper for setting the status and description on a release.
// r.Info must be non-nil; a nil Info causes a nil-pointer dereference.
func (r *Release) SetStatus(status Status, msg string) {
	r.Info.Status = status
	r.Info.Description = msg
}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/responses.go b/vendor/helm.sh/helm/v3/pkg/release/responses.go
new file mode 100644
index 000000000..7ee1fc2ee
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/responses.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
// UninstallReleaseResponse represents a successful response to an uninstall request.
type UninstallReleaseResponse struct {
	// Release is the release that was marked deleted.
	Release *Release `json:"release,omitempty"`
	// Info is a human-readable uninstall message.
	Info string `json:"info,omitempty"`
}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/status.go b/vendor/helm.sh/helm/v3/pkg/release/status.go
new file mode 100644
index 000000000..e0e3ed62a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/status.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
// Status is the status of a release
type Status string

// Describe the status of a release
// NOTE: Make sure to update cmd/helm/status.go when adding or modifying any of these statuses.
const (
	// StatusUnknown indicates that a release is in an uncertain state.
	StatusUnknown Status = "unknown"
	// StatusDeployed indicates that the release has been pushed to Kubernetes.
	StatusDeployed Status = "deployed"
	// StatusUninstalled indicates that a release has been uninstalled from Kubernetes.
	StatusUninstalled Status = "uninstalled"
	// StatusSuperseded indicates that this release object is outdated and a newer one exists.
	StatusSuperseded Status = "superseded"
	// StatusFailed indicates that the release was not successfully deployed.
	StatusFailed Status = "failed"
	// StatusUninstalling indicates that an uninstall operation is underway.
	StatusUninstalling Status = "uninstalling"
	// StatusPendingInstall indicates that an install operation is underway.
	StatusPendingInstall Status = "pending-install"
	// StatusPendingUpgrade indicates that an upgrade operation is underway.
	StatusPendingUpgrade Status = "pending-upgrade"
	// StatusPendingRollback indicates that a rollback operation is underway.
	StatusPendingRollback Status = "pending-rollback"
)

// String returns the status as a plain string.
func (x Status) String() string { return string(x) }
+
+// IsPending determines if this status is a state or a transition.
+func (x Status) IsPending() bool {
+ return x == StatusPendingInstall || x == StatusPendingUpgrade || x == StatusPendingRollback
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go
new file mode 100644
index 000000000..dbd0df8e2
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil"
+
+import rspb "helm.sh/helm/v3/pkg/release"
+
+// FilterFunc returns true if the release object satisfies
+// the predicate of the underlying filter func.
+type FilterFunc func(*rspb.Release) bool
+
+// Check applies the FilterFunc to the release object.
+func (fn FilterFunc) Check(rls *rspb.Release) bool {
+ if rls == nil {
+ return false
+ }
+ return fn(rls)
+}
+
+// Filter applies the filter(s) to the list of provided releases
+// returning the list that satisfies the filtering predicate.
+func (fn FilterFunc) Filter(rels []*rspb.Release) (rets []*rspb.Release) {
+ for _, rel := range rels {
+ if fn.Check(rel) {
+ rets = append(rets, rel)
+ }
+ }
+ return
+}
+
+// Any returns a FilterFunc that filters a list of releases
+// determined by the predicate 'f0 || f1 || ... || fn'.
+func Any(filters ...FilterFunc) FilterFunc {
+ return func(rls *rspb.Release) bool {
+ for _, filter := range filters {
+ if filter(rls) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// All returns a FilterFunc that filters a list of releases
+// determined by the predicate 'f0 && f1 && ... && fn'.
+func All(filters ...FilterFunc) FilterFunc {
+ return func(rls *rspb.Release) bool {
+ for _, filter := range filters {
+ if !filter(rls) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// StatusFilter filters a set of releases by status code.
+func StatusFilter(status rspb.Status) FilterFunc {
+ return FilterFunc(func(rls *rspb.Release) bool {
+ if rls == nil {
+ return true
+ }
+ return rls.Info.Status == status
+ })
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go
new file mode 100644
index 000000000..a340dfc29
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go
@@ -0,0 +1,156 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil
+
+import (
+ "sort"
+
+ "helm.sh/helm/v3/pkg/release"
+)
+
// KindSortOrder is an ordering of Kinds.
type KindSortOrder []string

// InstallOrder is the order in which manifests should be installed (by Kind).
//
// Those occurring earlier in the list get installed before those occurring later in the list.
// Kinds not present in the list sort after all listed kinds (see lessByKind).
var InstallOrder KindSortOrder = []string{
	"Namespace",
	"NetworkPolicy",
	"ResourceQuota",
	"LimitRange",
	"PodSecurityPolicy",
	"PodDisruptionBudget",
	"ServiceAccount",
	"Secret",
	"SecretList",
	"ConfigMap",
	"StorageClass",
	"PersistentVolume",
	"PersistentVolumeClaim",
	"CustomResourceDefinition",
	"ClusterRole",
	"ClusterRoleList",
	"ClusterRoleBinding",
	"ClusterRoleBindingList",
	"Role",
	"RoleList",
	"RoleBinding",
	"RoleBindingList",
	"Service",
	"DaemonSet",
	"Pod",
	"ReplicationController",
	"ReplicaSet",
	"Deployment",
	"HorizontalPodAutoscaler",
	"StatefulSet",
	"Job",
	"CronJob",
	"Ingress",
	"APIService",
}

// UninstallOrder is the order in which manifests should be uninstalled (by Kind).
//
// Those occurring earlier in the list get uninstalled before those occurring later in the list.
// This is the reverse of InstallOrder.
var UninstallOrder KindSortOrder = []string{
	"APIService",
	"Ingress",
	"Service",
	"CronJob",
	"Job",
	"StatefulSet",
	"HorizontalPodAutoscaler",
	"Deployment",
	"ReplicaSet",
	"ReplicationController",
	"Pod",
	"DaemonSet",
	"RoleBindingList",
	"RoleBinding",
	"RoleList",
	"Role",
	"ClusterRoleBindingList",
	"ClusterRoleBinding",
	"ClusterRoleList",
	"ClusterRole",
	"CustomResourceDefinition",
	"PersistentVolumeClaim",
	"PersistentVolume",
	"StorageClass",
	"ConfigMap",
	"SecretList",
	"Secret",
	"ServiceAccount",
	"PodDisruptionBudget",
	"PodSecurityPolicy",
	"LimitRange",
	"ResourceQuota",
	"NetworkPolicy",
	"Namespace",
}
+
+// sort manifests by kind.
+//
+// Results are sorted by 'ordering', keeping order of items with equal kind/priority
+func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest {
+ sort.SliceStable(manifests, func(i, j int) bool {
+ return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering)
+ })
+
+ return manifests
+}
+
+// sort hooks by kind, using an out-of-place sort to preserve the input parameters.
+//
+// Results are sorted by 'ordering', keeping order of items with equal kind/priority
+func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook {
+ h := hooks
+ sort.SliceStable(h, func(i, j int) bool {
+ return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering)
+ })
+
+ return h
+}
+
+func lessByKind(a interface{}, b interface{}, kindA string, kindB string, o KindSortOrder) bool {
+ ordering := make(map[string]int, len(o))
+ for v, k := range o {
+ ordering[k] = v
+ }
+
+ first, aok := ordering[kindA]
+ second, bok := ordering[kindB]
+
+ if !aok && !bok {
+ // if both are unknown then sort alphabetically by kind, keep original order if same kind
+ if kindA != kindB {
+ return kindA < kindB
+ }
+ return first < second
+ }
+ // unknown kind is last
+ if !aok {
+ return false
+ }
+ if !bok {
+ return true
+ }
+ // sort different kinds, keep original order if same priority
+ return first < second
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go
new file mode 100644
index 000000000..0b04a4599
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
// SimpleHead defines the structure of the head of a manifest file: just
// enough of a Kubernetes object to read its kind and annotations.
type SimpleHead struct {
	Version string `json:"apiVersion"`
	Kind    string `json:"kind,omitempty"`
	Metadata *struct {
		Name        string            `json:"name"`
		Annotations map[string]string `json:"annotations"`
	} `json:"metadata,omitempty"`
}

// sep matches YAML document separators: a "---" at the start of input or
// following a newline (with optional surrounding whitespace).
var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*")
+
+// SplitManifests takes a string of manifest and returns a map contains individual manifests
+func SplitManifests(bigFile string) map[string]string {
+ // Basically, we're quickly splitting a stream of YAML documents into an
+ // array of YAML docs. The file name is just a place holder, but should be
+ // integer-sortable so that manifests get output in the same order as the
+ // input (see `BySplitManifestsOrder`).
+ tpl := "manifest-%d"
+ res := map[string]string{}
+ // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly.
+ bigFileTmp := strings.TrimSpace(bigFile)
+ docs := sep.Split(bigFileTmp, -1)
+ var count int
+ for _, d := range docs {
+ if d == "" {
+ continue
+ }
+
+ d = strings.TrimSpace(d)
+ res[fmt.Sprintf(tpl, count)] = d
+ count = count + 1
+ }
+ return res
+}
+
+// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests`
+type BySplitManifestsOrder []string
+
+func (a BySplitManifestsOrder) Len() int { return len(a) }
+func (a BySplitManifestsOrder) Less(i, j int) bool {
+ // Split `manifest-%d`
+ anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0)
+ bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0)
+ return anum < bnum
+}
+func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
new file mode 100644
index 000000000..e83414500
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
@@ -0,0 +1,233 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil
+
+import (
+ "log"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/release"
+)
+
// Manifest represents a manifest file, which has a name and some content.
type Manifest struct {
	// Name is the path of the source file this manifest came from.
	Name    string
	// Content is the raw YAML of this single manifest document.
	Content string
	// Head is the parsed apiVersion/kind/metadata head of the document.
	Head    *SimpleHead
}

// manifestFile represents a file that contains one or more manifests.
type manifestFile struct {
	// entries maps "manifest-%d" keys to individual YAML documents.
	entries map[string]string
	// path is the chart-relative path of the file.
	path    string
	// apis is the set of API versions available in the target cluster.
	apis    chartutil.VersionSet
}

// result is an intermediate structure used during sorting: manifests are
// bucketed into hooks and generic (non-hook) manifests.
type result struct {
	hooks   []*release.Hook
	generic []Manifest
}

// TODO: Refactor this out. It's here because naming conventions were not followed through.
// So fix the Test hook names and then remove this.
var events = map[string]release.HookEvent{
	release.HookPreInstall.String():   release.HookPreInstall,
	release.HookPostInstall.String():  release.HookPostInstall,
	release.HookPreDelete.String():    release.HookPreDelete,
	release.HookPostDelete.String():   release.HookPostDelete,
	release.HookPreUpgrade.String():   release.HookPreUpgrade,
	release.HookPostUpgrade.String():  release.HookPostUpgrade,
	release.HookPreRollback.String():  release.HookPreRollback,
	release.HookPostRollback.String(): release.HookPostRollback,
	release.HookTest.String():         release.HookTest,
	// Support test-success for backward compatibility with Helm 2 tests
	"test-success": release.HookTest,
}
+
+// SortManifests takes a map of filename/YAML contents, splits the file
+// by manifest entries, and sorts the entries into hook types.
+//
+// The resulting hooks struct will be populated with all of the generated hooks.
+// Any file that does not declare one of the hook types will be placed in the
+// 'generic' bucket.
+//
+// Files that do not parse into the expected format are simply placed into a map and
+// returned.
+func SortManifests(files map[string]string, apis chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
+ result := &result{}
+
+ var sortedFilePaths []string
+ for filePath := range files {
+ sortedFilePaths = append(sortedFilePaths, filePath)
+ }
+ sort.Strings(sortedFilePaths)
+
+ for _, filePath := range sortedFilePaths {
+ content := files[filePath]
+
+ // Skip partials. We could return these as a separate map, but there doesn't
+ // seem to be any need for that at this time.
+ if strings.HasPrefix(path.Base(filePath), "_") {
+ continue
+ }
+ // Skip empty files and log this.
+ if strings.TrimSpace(content) == "" {
+ continue
+ }
+
+ manifestFile := &manifestFile{
+ entries: SplitManifests(content),
+ path: filePath,
+ apis: apis,
+ }
+
+ if err := manifestFile.sort(result); err != nil {
+ return result.hooks, result.generic, err
+ }
+ }
+
+ return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil
+}
+
// sort takes a manifestFile object which may contain multiple resource definition
// entries and sorts each entry by hook types, and saves the resulting hooks and
// generic manifests (or non-hooks) to the result struct.
//
// To determine hook type, it looks for a YAML structure like this:
//
//	kind: SomeKind
//	apiVersion: v1
//	metadata:
//		annotations:
//			helm.sh/hook: pre-install
//
// To determine the policy to delete the hook, it looks for a YAML structure like this:
//
//	kind: SomeKind
//	apiVersion: v1
//	metadata:
//		annotations:
//			helm.sh/hook-delete-policy: hook-succeeded
//
// An entry with an unrecognized hook type is logged and dropped entirely (it
// is placed in neither bucket). A YAML parse error aborts the whole file.
func (file *manifestFile) sort(result *result) error {
	// Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys)
	var sortedEntryKeys []string
	for entryKey := range file.entries {
		sortedEntryKeys = append(sortedEntryKeys, entryKey)
	}
	sort.Sort(BySplitManifestsOrder(sortedEntryKeys))

	for _, entryKey := range sortedEntryKeys {
		m := file.entries[entryKey]

		// Only the head (apiVersion/kind/metadata) is parsed here.
		var entry SimpleHead
		if err := yaml.Unmarshal([]byte(m), &entry); err != nil {
			return errors.Wrapf(err, "YAML parse error on %s", file.path)
		}

		// No annotations at all: definitely not a hook.
		if !hasAnyAnnotation(entry) {
			result.generic = append(result.generic, Manifest{
				Name:    file.path,
				Content: m,
				Head:    &entry,
			})
			continue
		}

		// Annotations present but no hook annotation: still a generic manifest.
		hookTypes, ok := entry.Metadata.Annotations[release.HookAnnotation]
		if !ok {
			result.generic = append(result.generic, Manifest{
				Name:    file.path,
				Content: m,
				Head:    &entry,
			})
			continue
		}

		hw := calculateHookWeight(entry)

		h := &release.Hook{
			Name:           entry.Metadata.Name,
			Kind:           entry.Kind,
			Path:           file.path,
			Manifest:       m,
			Events:         []release.HookEvent{},
			Weight:         hw,
			DeletePolicies: []release.HookDeletePolicy{},
		}

		// The hook annotation may list several comma-separated events; one
		// unknown event invalidates the whole hook.
		isUnknownHook := false
		for _, hookType := range strings.Split(hookTypes, ",") {
			hookType = strings.ToLower(strings.TrimSpace(hookType))
			e, ok := events[hookType]
			if !ok {
				isUnknownHook = true
				break
			}
			h.Events = append(h.Events, e)
		}

		if isUnknownHook {
			log.Printf("info: skipping unknown hook: %q", hookTypes)
			continue
		}

		result.hooks = append(result.hooks, h)

		// Collect delete policies from the hook-delete-policy annotation.
		operateAnnotationValues(entry, release.HookDeleteAnnotation, func(value string) {
			h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value))
		})
	}

	return nil
}
+
+// hasAnyAnnotation returns true if the given entry has any annotations at all.
+func hasAnyAnnotation(entry SimpleHead) bool {
+ return entry.Metadata != nil &&
+ entry.Metadata.Annotations != nil &&
+ len(entry.Metadata.Annotations) != 0
+}
+
+// calculateHookWeight finds the weight in the hook weight annotation.
+//
+// If no weight is found, the assigned weight is 0
+func calculateHookWeight(entry SimpleHead) int {
+ hws := entry.Metadata.Annotations[release.HookWeightAnnotation]
+ hw, err := strconv.Atoi(hws)
+ if err != nil {
+ hw = 0
+ }
+ return hw
+}
+
+// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation
+func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) {
+ if dps, ok := entry.Metadata.Annotations[annotation]; ok {
+ for _, dp := range strings.Split(dps, ",") {
+ dp = strings.ToLower(strings.TrimSpace(dp))
+ operate(dp)
+ }
+ }
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go
new file mode 100644
index 000000000..1a8aa78a6
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil"
+
+import (
+ "sort"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// list adapts a slice of releases to sort.Interface; Less is supplied by the
// embedding wrapper types below.
type list []*rspb.Release

func (s list) Len() int      { return len(s) }
func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ByName sorts releases by name
type ByName struct{ list }

// Less compares two releases lexicographically by name.
func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name }

// ByDate sorts releases by date
type ByDate struct{ list }

// Less compares two releases by last-deployed time (second precision).
func (s ByDate) Less(i, j int) bool {
	ti := s.list[i].Info.LastDeployed.Unix()
	tj := s.list[j].Info.LastDeployed.Unix()
	return ti < tj
}

// ByRevision sorts releases by revision number
type ByRevision struct{ list }

// Less compares two releases by revision (Version) number.
func (s ByRevision) Less(i, j int) bool {
	return s.list[i].Version < s.list[j].Version
}
+
+// Reverse reverses the list of releases sorted by the sort func.
+func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) {
+ sortFn(list)
+ for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+ list[i], list[j] = list[j], list[i]
+ }
+}
+
// SortByName sorts the list of releases in place
// in lexicographical order by name.
func SortByName(list []*rspb.Release) {
	sort.Sort(ByName{list})
}

// SortByDate sorts the list of releases in place by a
// release's last deployed time (in seconds).
func SortByDate(list []*rspb.Release) {
	sort.Sort(ByDate{list})
}

// SortByRevision sorts the list of releases in place by a
// release's revision number (release.Version).
func SortByRevision(list []*rspb.Release) {
	sort.Sort(ByRevision{list})
}
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
new file mode 100644
index 000000000..956997cc9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
@@ -0,0 +1,309 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo // import "helm.sh/helm/v3/pkg/repo"
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/helmpath"
+ "helm.sh/helm/v3/pkg/provenance"
+)
+
+// Entry represents a collection of parameters for chart repository
+type Entry struct {
+	Name                  string `json:"name"`
+	URL                   string `json:"url"`
+	Username              string `json:"username"`
+	Password              string `json:"password"`
+	CertFile              string `json:"certFile"`
+	KeyFile               string `json:"keyFile"`
+	CAFile                string `json:"caFile"`
+	InsecureSkipTLSverify bool   `json:"insecure_skip_tls_verify"`
+	PassCredentialsAll    bool   `json:"pass_credentials_all"`
+}
+
+// ChartRepository represents a chart repository
+type ChartRepository struct {
+	Config     *Entry
+	ChartPaths []string
+	IndexFile  *IndexFile
+	Client     getter.Getter
+	CachePath  string
+}
+
+// NewChartRepository constructs ChartRepository from an Entry, selecting the
+// getter implementation by the URL scheme. It returns an error when the URL
+// does not parse or when no getter is registered for the scheme.
+func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) {
+	u, err := url.Parse(cfg.URL)
+	if err != nil {
+		return nil, errors.Errorf("invalid chart URL format: %s", cfg.URL)
+	}
+
+	client, err := getters.ByScheme(u.Scheme)
+	if err != nil {
+		return nil, errors.Errorf("could not find protocol handler for: %s", u.Scheme)
+	}
+
+	return &ChartRepository{
+		Config:    cfg,
+		IndexFile: NewIndexFile(),
+		Client:    client,
+		CachePath: helmpath.CachePath("repository"),
+	}, nil
+}
+
+// Load loads a directory of charts as if it were a repository.
+//
+// It requires the presence of an index.yaml file in the directory. Any file
+// whose name contains "-index.yaml" replaces r.IndexFile, and every *.tgz
+// file found is appended to r.ChartPaths.
+//
+// Deprecated: remove in Helm 4.
+func (r *ChartRepository) Load() error {
+	dirInfo, err := os.Stat(r.Config.Name)
+	if err != nil {
+		return err
+	}
+	if !dirInfo.IsDir() {
+		return errors.Errorf("%q is not a directory", r.Config.Name)
+	}
+
+	// FIXME: Why are we recursively walking directories?
+	// FIXME: Why are we not reading the repositories.yaml to figure out
+	// what repos to use?
+	return filepath.Walk(r.Config.Name, func(path string, f os.FileInfo, err error) error {
+		// Propagate walk errors. When err is non-nil, f may be nil and
+		// must not be dereferenced.
+		if err != nil {
+			return err
+		}
+		if !f.IsDir() {
+			if strings.Contains(f.Name(), "-index.yaml") {
+				i, err := LoadIndexFile(path)
+				if err != nil {
+					return err
+				}
+				r.IndexFile = i
+			} else if strings.HasSuffix(f.Name(), ".tgz") {
+				r.ChartPaths = append(r.ChartPaths, path)
+			}
+		}
+		return nil
+	})
+}
+
+// DownloadIndexFile fetches the index from a repository, validates it, writes
+// a per-repo chart-name list and the raw index into the cache directory, and
+// returns the path of the cached index file.
+func (r *ChartRepository) DownloadIndexFile() (string, error) {
+	parsedURL, err := url.Parse(r.Config.URL)
+	if err != nil {
+		return "", err
+	}
+	// Append "index.yaml" to both the raw and the decoded path so the
+	// final URL is correct whichever form the getter uses.
+	parsedURL.RawPath = path.Join(parsedURL.RawPath, "index.yaml")
+	parsedURL.Path = path.Join(parsedURL.Path, "index.yaml")
+
+	indexURL := parsedURL.String()
+	// TODO add user-agent
+	resp, err := r.Client.Get(indexURL,
+		getter.WithURL(r.Config.URL),
+		getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify),
+		getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile),
+		getter.WithBasicAuth(r.Config.Username, r.Config.Password),
+		getter.WithPassCredentialsAll(r.Config.PassCredentialsAll),
+	)
+	if err != nil {
+		return "", err
+	}
+
+	index, err := ioutil.ReadAll(resp)
+	if err != nil {
+		return "", err
+	}
+
+	// Parse to validate the payload before caching it.
+	indexFile, err := loadIndex(index, r.Config.URL)
+	if err != nil {
+		return "", err
+	}
+
+	// Create the chart list file in the cache directory
+	var charts strings.Builder
+	for name := range indexFile.Entries {
+		fmt.Fprintln(&charts, name)
+	}
+	chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))
+	// NOTE(review): errors from MkdirAll/WriteFile are deliberately ignored
+	// here — the charts list is a best-effort cache; confirm this is intended.
+	os.MkdirAll(filepath.Dir(chartsFile), 0755)
+	ioutil.WriteFile(chartsFile, []byte(charts.String()), 0644)
+
+	// Create the index file in the cache directory
+	fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name))
+	os.MkdirAll(filepath.Dir(fname), 0755)
+	return fname, ioutil.WriteFile(fname, index, 0644)
+}
+
+// Index generates an index for the chart repository and writes an index.yaml file.
+func (r *ChartRepository) Index() error {
+	err := r.generateIndex()
+	if err != nil {
+		return err
+	}
+	return r.saveIndexFile()
+}
+
+// saveIndexFile marshals r.IndexFile to YAML and writes it to
+// <repo dir>/index.yaml (the repo directory is r.Config.Name here).
+func (r *ChartRepository) saveIndexFile() error {
+	index, err := yaml.Marshal(r.IndexFile)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(filepath.Join(r.Config.Name, indexPath), index, 0644)
+}
+
+// generateIndex loads every chart in r.ChartPaths, digests it, and adds any
+// name/version not already present to r.IndexFile, then sorts the entries.
+func (r *ChartRepository) generateIndex() error {
+	for _, path := range r.ChartPaths {
+		ch, err := loader.Load(path)
+		if err != nil {
+			return err
+		}
+
+		digest, err := provenance.DigestFile(path)
+		if err != nil {
+			return err
+		}
+
+		if !r.IndexFile.Has(ch.Name(), ch.Metadata.Version) {
+			if err := r.IndexFile.MustAdd(ch.Metadata, path, r.Config.URL, digest); err != nil {
+				return errors.Wrapf(err, "failed adding to %s to index", path)
+			}
+		}
+		// TODO: If a chart exists, but has a different Digest, should we error?
+	}
+	r.IndexFile.SortEntries()
+	return nil
+}
+
+// FindChartInRepoURL finds chart in chart repository pointed by repoURL
+// without adding repo to repositories. It is a convenience wrapper that
+// supplies empty credentials.
+func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) {
+	return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters)
+}
+
+// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL
+// without adding repo to repositories, like FindChartInRepoURL,
+// but it also receives credentials for the chart repository.
+func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) {
+	return FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, getters)
+}
+
+// FindChartInAuthAndTLSRepoURL finds chart in chart repository pointed by repoURL
+// without adding repo to repositories, like FindChartInRepoURL,
+// but it also receives credentials and TLS verify flag for the chart repository.
+// TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL.
+func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) {
+	// Forward the caller's TLS-skip flag; it was previously hard-coded to
+	// false, silently re-enabling certificate verification.
+	return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, insecureSkipTLSverify, false, getters)
+}
+
+// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL
+// without adding repo to repositories, like FindChartInRepoURL,
+// but it also receives credentials, TLS verify flag, and if credentials should
+// be passed on to other domains.
+// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL.
+func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) {
+
+	// Download and write the index file to a temporary location
+	// A random, URL-safe name is used so the cached index file does not
+	// collide with a configured repository.
+	// NOTE(review): the error from crypto/rand.Read is ignored here.
+	buf := make([]byte, 20)
+	rand.Read(buf)
+	name := strings.ReplaceAll(base64.StdEncoding.EncodeToString(buf), "/", "-")
+
+	c := Entry{
+		URL:                   repoURL,
+		Username:              username,
+		Password:              password,
+		PassCredentialsAll:    passCredentialsAll,
+		CertFile:              certFile,
+		KeyFile:               keyFile,
+		CAFile:                caFile,
+		Name:                  name,
+		InsecureSkipTLSverify: insecureSkipTLSverify,
+	}
+	r, err := NewChartRepository(&c, getters)
+	if err != nil {
+		return "", err
+	}
+	idx, err := r.DownloadIndexFile()
+	if err != nil {
+		return "", errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", repoURL)
+	}
+
+	// Read the index file for the repository to get chart information and return chart URL
+	repoIndex, err := LoadIndexFile(idx)
+	if err != nil {
+		return "", err
+	}
+
+	errMsg := fmt.Sprintf("chart %q", chartName)
+	if chartVersion != "" {
+		errMsg = fmt.Sprintf("%s version %q", errMsg, chartVersion)
+	}
+	cv, err := repoIndex.Get(chartName, chartVersion)
+	if err != nil {
+		return "", errors.Errorf("%s not found in %s repository", errMsg, repoURL)
+	}
+
+	if len(cv.URLs) == 0 {
+		return "", errors.Errorf("%s has no downloadable URLs", errMsg)
+	}
+
+	chartURL := cv.URLs[0]
+
+	// Chart URLs in an index may be relative to the repo; make them absolute.
+	absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to make chart URL absolute")
+	}
+
+	return absoluteChartURL, nil
+}
+
+// ResolveReferenceURL resolves refURL relative to baseURL.
+// If refURL is absolute, it simply returns refURL.
+func ResolveReferenceURL(baseURL, refURL string) (string, error) {
+	// ResolveReference requires the base to end in a slash; normalize to
+	// exactly one trailing slash before parsing.
+	base, err := url.Parse(strings.TrimSuffix(baseURL, "/") + "/")
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to parse %s as URL", baseURL)
+	}
+
+	ref, err := url.Parse(refURL)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to parse %s as URL", refURL)
+	}
+
+	return base.ResolveReference(ref).String(), nil
+}
+
+// String returns the JSON encoding of the entry. It panics if marshaling
+// fails, which cannot happen for this plain-field struct.
+// NOTE(review): the output includes the Username/Password fields — avoid
+// logging it verbatim.
+func (e *Entry) String() string {
+	buf, err := json.Marshal(e)
+	if err != nil {
+		log.Panic(err)
+	}
+	return string(buf)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/doc.go b/vendor/helm.sh/helm/v3/pkg/repo/doc.go
new file mode 100644
index 000000000..05650100b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/repo/doc.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package repo implements the Helm Chart Repository.
+
+A chart repository is an HTTP server that provides information on charts. A local
+repository cache is an on-disk representation of a chart repository.
+
+There are two important file formats for chart repositories.
+
+The first is the 'index.yaml' format, which is expressed like this:
+
+ apiVersion: v1
+ entries:
+ frobnitz:
+ - created: 2016-09-29T12:14:34.830161306-06:00
+ description: This is a frobnitz.
+ digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4
+ home: http://example.com
+ keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+ maintainers:
+ - email: helm@example.com
+ name: The Helm Team
+ - email: nobody@example.com
+ name: Someone Else
+ name: frobnitz
+ urls:
+ - http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz
+ version: 1.2.3
+ sprocket:
+ - created: 2016-09-29T12:14:34.830507606-06:00
+ description: This is a sprocket"
+ digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6
+ home: http://example.com
+ keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+ maintainers:
+ - email: helm@example.com
+ name: The Helm Team
+ - email: nobody@example.com
+ name: Someone Else
+ name: sprocket
+ urls:
+ - http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz
+ version: 1.2.0
+ generated: 2016-09-29T12:14:34.829721375-06:00
+
+An index.yaml file contains the necessary descriptive information about what
+charts are available in a repository, and how to get them.
+
+The second file format is the repositories.yaml file format. This file is for
+facilitating local cached copies of one or more chart repositories.
+
+The format of a repositories.yaml file is:
+
+ apiVersion: v1
+ generated: TIMESTAMP
+ repositories:
+ - name: stable
+ url: http://example.com/charts
+ cache: stable-index.yaml
+ - name: incubator
+ url: http://example.com/incubator
+ cache: incubator-index.yaml
+
+This file maps three bits of information about a repository:
+
+ - The name the user uses to refer to it
+ - The fully qualified URL to the repository (index.yaml will be appended)
+ - The name of the local cachefile
+
+The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not
+backwards compatible with those earlier versions.
+*/
+package repo
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/index.go b/vendor/helm.sh/helm/v3/pkg/repo/index.go
new file mode 100644
index 000000000..1b65ac497
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/repo/index.go
@@ -0,0 +1,356 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "bytes"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v3/internal/fileutil"
+ "helm.sh/helm/v3/internal/urlutil"
+ "helm.sh/helm/v3/pkg/chart"
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/provenance"
+)
+
+// indexPath is the canonical file name of a repository index within a
+// repository directory.
+var indexPath = "index.yaml"
+
+// APIVersionV1 is the v1 API version for index and repository files.
+const APIVersionV1 = "v1"
+
+var (
+	// ErrNoAPIVersion indicates that an API version was not specified.
+	ErrNoAPIVersion = errors.New("no API version specified")
+	// ErrNoChartVersion indicates that a chart with the given version is not found.
+	ErrNoChartVersion = errors.New("no chart version found")
+	// ErrNoChartName indicates that a chart with the given name is not found.
+	ErrNoChartName = errors.New("no chart name found")
+	// ErrEmptyIndexYaml indicates that the content of index.yaml is empty.
+	ErrEmptyIndexYaml = errors.New("empty index.yaml file")
+)
+
+// ChartVersions is a list of versioned chart references.
+// Implements a sorter on Version.
+type ChartVersions []*ChartVersion
+
+// Len returns the length.
+func (c ChartVersions) Len() int { return len(c) }
+
+// Swap swaps the position of two items in the versions slice.
+func (c ChartVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+
+// Less returns true if the version of entry a is less than the version of entry b.
+func (c ChartVersions) Less(a, b int) bool {
+	// Failed parse pushes to the back.
+	i, err := semver.NewVersion(c[a].Version)
+	if err != nil {
+		return true
+	}
+	j, err := semver.NewVersion(c[b].Version)
+	if err != nil {
+		return false
+	}
+	return i.LessThan(j)
+}
+
+// IndexFile represents the index file in a chart repository
+type IndexFile struct {
+	// This is used ONLY for validation against chartmuseum's index files and is discarded after validation.
+	ServerInfo map[string]interface{} `json:"serverInfo,omitempty"`
+	APIVersion string                 `json:"apiVersion"`
+	Generated  time.Time              `json:"generated"`
+	Entries    map[string]ChartVersions `json:"entries"`
+	PublicKeys []string               `json:"publicKeys,omitempty"`
+
+	// Annotations are additional mappings uninterpreted by Helm. They are made available for
+	// other applications to add information to the index file.
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// NewIndexFile initializes an index with the current timestamp, the v1 API
+// version, and empty entry/key collections.
+func NewIndexFile() *IndexFile {
+	return &IndexFile{
+		APIVersion: APIVersionV1,
+		Generated:  time.Now(),
+		Entries:    map[string]ChartVersions{},
+		PublicKeys: []string{},
+	}
+}
+
+// LoadIndexFile takes a file at the given path and returns an IndexFile object
+func LoadIndexFile(path string) (*IndexFile, error) {
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	i, err := loadIndex(b, path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading %s", path)
+	}
+	return i, nil
+}
+
+// MustAdd adds a file to the index
+// This can leave the index in an unsorted state
+func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string) error {
+	// Default missing apiVersion to v1 before validating the metadata.
+	if md.APIVersion == "" {
+		md.APIVersion = chart.APIVersionV1
+	}
+	if err := md.Validate(); err != nil {
+		return errors.Wrapf(err, "validate failed for %s", filename)
+	}
+
+	u := filename
+	if baseURL != "" {
+		_, file := filepath.Split(filename)
+		var err error
+		u, err = urlutil.URLJoin(baseURL, file)
+		if err != nil {
+			// Fall back to a plain path join when URL joining fails.
+			u = path.Join(baseURL, file)
+		}
+	}
+	cr := &ChartVersion{
+		URLs:     []string{u},
+		Metadata: md,
+		Digest:   digest,
+		Created:  time.Now(),
+	}
+	ee := i.Entries[md.Name]
+	i.Entries[md.Name] = append(ee, cr)
+	return nil
+}
+
+// Add adds a file to the index and logs an error.
+//
+// Deprecated: Use index.MustAdd instead.
+func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) {
+	if err := i.MustAdd(md, filename, baseURL, digest); err != nil {
+		log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", md.Name, md.Version, filename, err)
+	}
+}
+
+// Has returns true if the index has an entry for a chart with the given name and exact version.
+func (i IndexFile) Has(name, version string) bool {
+	_, err := i.Get(name, version)
+	return err == nil
+}
+
+// SortEntries sorts the entries by version in descending order.
+//
+// In canonical form, the individual version records should be sorted so that
+// the most recent release for every version is in the 0th slot in the
+// Entries.ChartVersions array. That way, tooling can predict the newest
+// version without needing to parse SemVers.
+func (i IndexFile) SortEntries() {
+	for _, versions := range i.Entries {
+		sort.Sort(sort.Reverse(versions))
+	}
+}
+
+// Get returns the ChartVersion for the given name.
+//
+// If version is empty, this will return the chart with the latest stable version,
+// prerelease versions will be skipped.
+//
+// Resolution order: an exact string match on the version wins; otherwise the
+// first entry (in current slice order) whose parsed version satisfies the
+// constraint is returned.
+func (i IndexFile) Get(name, version string) (*ChartVersion, error) {
+	vs, ok := i.Entries[name]
+	if !ok {
+		return nil, ErrNoChartName
+	}
+	if len(vs) == 0 {
+		return nil, ErrNoChartVersion
+	}
+
+	var constraint *semver.Constraints
+	if version == "" {
+		// "*" matches any stable version (prereleases are excluded by semver).
+		constraint, _ = semver.NewConstraint("*")
+	} else {
+		var err error
+		constraint, err = semver.NewConstraint(version)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// when customer input exact version, check whether have exact match one first
+	if len(version) != 0 {
+		for _, ver := range vs {
+			if version == ver.Version {
+				return ver, nil
+			}
+		}
+	}
+
+	for _, ver := range vs {
+		test, err := semver.NewVersion(ver.Version)
+		if err != nil {
+			// Unparseable versions cannot match a constraint; skip them.
+			continue
+		}
+
+		if constraint.Check(test) {
+			return ver, nil
+		}
+	}
+	return nil, errors.Errorf("no chart version found for %s-%s", name, version)
+}
+
+// WriteFile writes an index file to the given destination path.
+//
+// The mode on the file is set to 'mode'. The write is atomic (temp file +
+// rename) via fileutil.AtomicWriteFile.
+func (i IndexFile) WriteFile(dest string, mode os.FileMode) error {
+	b, err := yaml.Marshal(i)
+	if err != nil {
+		return err
+	}
+	return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode)
+}
+
+// Merge merges the given index file into this index.
+//
+// This merges by name and version.
+//
+// If one of the entries in the given index does _not_ already exist, it is added.
+// In all other cases, the existing record is preserved.
+//
+// This can leave the index in an unsorted state
+func (i *IndexFile) Merge(f *IndexFile) {
+	for _, cvs := range f.Entries {
+		for _, cv := range cvs {
+			if !i.Has(cv.Name, cv.Version) {
+				e := i.Entries[cv.Name]
+				i.Entries[cv.Name] = append(e, cv)
+			}
+		}
+	}
+}
+
+// ChartVersion represents a chart entry in the IndexFile
+type ChartVersion struct {
+	*chart.Metadata
+	URLs    []string  `json:"urls"`
+	Created time.Time `json:"created,omitempty"`
+	Removed bool      `json:"removed,omitempty"`
+	Digest  string    `json:"digest,omitempty"`
+
+	// ChecksumDeprecated is deprecated in Helm 3, and therefore ignored. Helm 3 replaced
+	// this with Digest. However, with a strict YAML parser enabled, a field must be
+	// present on the struct for backwards compatibility.
+	ChecksumDeprecated string `json:"checksum,omitempty"`
+
+	// EngineDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict
+	// YAML parser enabled, this field must be present.
+	EngineDeprecated string `json:"engine,omitempty"`
+
+	// TillerVersionDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict
+	// YAML parser enabled, this field must be present.
+	TillerVersionDeprecated string `json:"tillerVersion,omitempty"`
+
+	// URLDeprecated is deprecated in Helm 3, superseded by URLs. It is ignored. However,
+	// with a strict YAML parser enabled, this must be present on the struct.
+	URLDeprecated string `json:"url,omitempty"`
+}
+
+// IndexDirectory reads a (flat) directory and generates an index.
+//
+// It indexes only charts that have been packaged (*.tgz).
+//
+// The index returned will be in an unsorted state
+func IndexDirectory(dir, baseURL string) (*IndexFile, error) {
+	archives, err := filepath.Glob(filepath.Join(dir, "*.tgz"))
+	if err != nil {
+		return nil, err
+	}
+	// Also pick up archives exactly one directory level down.
+	moreArchives, err := filepath.Glob(filepath.Join(dir, "**/*.tgz"))
+	if err != nil {
+		return nil, err
+	}
+	archives = append(archives, moreArchives...)
+
+	index := NewIndexFile()
+	for _, arch := range archives {
+		fname, err := filepath.Rel(dir, arch)
+		if err != nil {
+			return index, err
+		}
+
+		var parentDir string
+		parentDir, fname = filepath.Split(fname)
+		// filepath.Split appends an extra slash to the end of parentDir. We want to strip that out.
+		parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator))
+		parentURL, err := urlutil.URLJoin(baseURL, parentDir)
+		if err != nil {
+			// Fall back to a plain path join when URL joining fails.
+			parentURL = path.Join(baseURL, parentDir)
+		}
+
+		c, err := loader.Load(arch)
+		if err != nil {
+			// Assume this is not a chart.
+			continue
+		}
+		hash, err := provenance.DigestFile(arch)
+		if err != nil {
+			return index, err
+		}
+		if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil {
+			return index, errors.Wrapf(err, "failed adding to %s to index", fname)
+		}
+	}
+	return index, nil
+}
+
+// loadIndex loads an index file and does minimal validity checking.
+//
+// The source parameter is only used for logging.
+// Invalid (nil or unvalidatable) chart entries are dropped with a log line.
+// This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails.
+func loadIndex(data []byte, source string) (*IndexFile, error) {
+	i := &IndexFile{}
+
+	if len(data) == 0 {
+		return i, ErrEmptyIndexYaml
+	}
+
+	if err := yaml.UnmarshalStrict(data, i); err != nil {
+		return i, err
+	}
+
+	// Walk each version list backwards so in-place removal by index is safe.
+	for name, cvs := range i.Entries {
+		for idx := len(cvs) - 1; idx >= 0; idx-- {
+			if cvs[idx] == nil {
+				// A null YAML entry would otherwise panic on the field
+				// accesses below.
+				log.Printf("skipping loading invalid entry for chart %q from %s: empty entry", name, source)
+				cvs = append(cvs[:idx], cvs[idx+1:]...)
+				continue
+			}
+			if cvs[idx].APIVersion == "" {
+				cvs[idx].APIVersion = chart.APIVersionV1
+			}
+			if err := cvs[idx].Validate(); err != nil {
+				log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err)
+				cvs = append(cvs[:idx], cvs[idx+1:]...)
+			}
+		}
+		// append only shrinks the local slice header; store it back so the
+		// removals are visible in the map (otherwise the stale, longer slice
+		// with a duplicated tail is kept).
+		i.Entries[name] = cvs
+	}
+	i.SortEntries()
+	if i.APIVersion == "" {
+		return i, ErrNoAPIVersion
+	}
+	return i, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/repo.go b/vendor/helm.sh/helm/v3/pkg/repo/repo.go
new file mode 100644
index 000000000..6f1e90dad
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/repo/repo.go
@@ -0,0 +1,123 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo // import "helm.sh/helm/v3/pkg/repo"
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+)
+
+// File represents the repositories.yaml file
+type File struct {
+	APIVersion   string    `json:"apiVersion"`
+	Generated    time.Time `json:"generated"`
+	Repositories []*Entry  `json:"repositories"`
+}
+
+// NewFile generates an empty repositories file.
+//
+// Generated and APIVersion are automatically set.
+func NewFile() *File {
+	return &File{
+		APIVersion:   APIVersionV1,
+		Generated:    time.Now(),
+		Repositories: []*Entry{},
+	}
+}
+
+// LoadFile takes a file at the given path and returns a File object
+func LoadFile(path string) (*File, error) {
+	r := new(File)
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		return r, errors.Wrapf(err, "couldn't load repositories file (%s)", path)
+	}
+
+	err = yaml.Unmarshal(b, r)
+	return r, err
+}
+
+// Add adds one or more repo entries to a repo file.
+func (r *File) Add(re ...*Entry) {
+	r.Repositories = append(r.Repositories, re...)
+}
+
+// Update attempts to replace one or more repo entries in a repo file. If an
+// entry with the same name doesn't exist in the repo file it will add it.
+func (r *File) Update(re ...*Entry) {
+	for _, target := range re {
+		r.update(target)
+	}
+}
+
+// update replaces the first entry whose Name matches e, or appends e when no
+// entry matches.
+func (r *File) update(e *Entry) {
+	for j, repo := range r.Repositories {
+		if repo.Name == e.Name {
+			r.Repositories[j] = e
+			return
+		}
+	}
+	r.Add(e)
+}
+
+// Has returns true if the given name is already a repository name.
+func (r *File) Has(name string) bool {
+	entry := r.Get(name)
+	return entry != nil
+}
+
+// Get returns an entry with the given name if it exists, otherwise returns nil
+func (r *File) Get(name string) *Entry {
+	for _, entry := range r.Repositories {
+		if entry.Name == name {
+			return entry
+		}
+	}
+	return nil
+}
+
+// Remove removes the entry from the list of repositories.
+// It reports whether an entry with the given name was present; all entries
+// matching the name are dropped.
+func (r *File) Remove(name string) bool {
+	kept := []*Entry{}
+	removed := false
+	for _, entry := range r.Repositories {
+		if entry.Name == name {
+			removed = true
+		} else {
+			kept = append(kept, entry)
+		}
+	}
+	r.Repositories = kept
+	return removed
+}
+
+// WriteFile writes a repositories file to the given path with the given
+// permissions, creating any missing parent directories (mode 0755) first.
+func (r *File) WriteFile(path string, perm os.FileMode) error {
+	data, err := yaml.Marshal(r)
+	if err != nil {
+		return err
+	}
+	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(path, data, perm)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
new file mode 100644
index 000000000..94c278875
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
@@ -0,0 +1,257 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+ "context"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kblabels "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/validation"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
+// Compile-time check that ConfigMaps satisfies the Driver interface.
+var _ Driver = (*ConfigMaps)(nil)
+
+// ConfigMapsDriverName is the string name of the driver.
+const ConfigMapsDriverName = "ConfigMap"
+
+// ConfigMaps is a wrapper around an implementation of a kubernetes
+// ConfigMapsInterface.
+type ConfigMaps struct {
+	impl corev1.ConfigMapInterface
+	Log  func(string, ...interface{})
+}
+
+// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
+// the kubernetes ConfigMapsInterface. Logging defaults to a no-op; assign
+// Log to enable it.
+func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
+	return &ConfigMaps{
+		impl: impl,
+		Log:  func(_ string, _ ...interface{}) {},
+	}
+}
+
+// Name returns the name of the driver.
+func (cfgmaps *ConfigMaps) Name() string {
+	return ConfigMapsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
+	// fetch the configmap holding the release named by key
+	obj, err := cfgmaps.impl.Get(context.Background(), key, metav1.GetOptions{})
+	if err != nil {
+		// Map the k8s not-found error to the driver's sentinel.
+		if apierrors.IsNotFound(err) {
+			return nil, ErrReleaseNotFound
+		}
+
+		cfgmaps.Log("get: failed to get %q: %s", key, err)
+		return nil, err
+	}
+	// found the configmap, decode the base64 data string
+	r, err := decodeRelease(obj.Data["release"])
+	if err != nil {
+		cfgmaps.Log("get: failed to decode data %q: %s", key, err)
+		return nil, err
+	}
+	// return the release object
+	return r, nil
+}
+
+// List fetches all releases and returns the list releases such
+// that filter(release) == true. An error is returned if the
+// configmap fails to retrieve the releases.
+func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	// Only configmaps owned by helm hold releases.
+	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+	opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+	list, err := cfgmaps.impl.List(context.Background(), opts)
+	if err != nil {
+		cfgmaps.Log("list: failed to list: %s", err)
+		return nil, err
+	}
+
+	var results []*rspb.Release
+
+	// iterate over the configmaps object list
+	// and decode each release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(item.Data["release"])
+		if err != nil {
+			// Undecodable entries are skipped, not fatal.
+			cfgmaps.Log("list: failed to decode release: %v: %s", item, err)
+			continue
+		}
+
+		rls.Labels = item.ObjectMeta.Labels
+
+		if filter(rls) {
+			results = append(results, rls)
+		}
+	}
+	return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the configmap fails to retrieve the releases,
+// or ErrReleaseNotFound when no configmap matches the selector.
+func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, error) {
+	ls := kblabels.Set{}
+	for k, v := range labels {
+		// Reject values that are not valid k8s label values up front.
+		if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+			return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+		}
+		ls[k] = v
+	}
+
+	opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()}
+
+	list, err := cfgmaps.impl.List(context.Background(), opts)
+	if err != nil {
+		cfgmaps.Log("query: failed to query with labels: %s", err)
+		return nil, err
+	}
+
+	if len(list.Items) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	var results []*rspb.Release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(item.Data["release"])
+		if err != nil {
+			cfgmaps.Log("query: failed to decode release: %s", err)
+			continue
+		}
+		results = append(results, rls)
+	}
+	return results, nil
+}
+
+// Create creates a new ConfigMap holding the release. If the
+// ConfigMap already exists, ErrReleaseExists is returned.
+func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
+	// set labels for configmaps object meta data
+	var lbs labels
+
+	lbs.init()
+	lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix())))
+
+	// create a new configmap to hold the release
+	obj, err := newConfigMapsObject(key, rls, lbs)
+	if err != nil {
+		cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err)
+		return err
+	}
+	// push the configmap object out into the kubiverse
+	if _, err := cfgmaps.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
+		// Map the k8s already-exists error to the driver's sentinel.
+		if apierrors.IsAlreadyExists(err) {
+			return ErrReleaseExists
+		}
+
+		cfgmaps.Log("create: failed to create: %s", err)
+		return err
+	}
+	return nil
+}
+
+// Update updates the ConfigMap holding the release. If not found
+// the ConfigMap is created to hold the release.
+func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
+	// set labels for configmaps object meta data
+	var lbs labels
+
+	lbs.init()
+	lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix())))
+
+	// create a new configmap object to hold the release
+	obj, err := newConfigMapsObject(key, rls, lbs)
+	if err != nil {
+		cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err)
+		return err
+	}
+	// push the configmap object out into the kubiverse
+	_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
+	if err != nil {
+		cfgmaps.Log("update: failed to update: %s", err)
+		return err
+	}
+	return nil
+}
+
+// Delete deletes the ConfigMap holding the release named by key.
+// The deleted release is returned so callers can report what was removed.
+func (cfgmaps *ConfigMaps) Delete(key string) (rls *rspb.Release, err error) {
+	// fetch the release to check existence
+	if rls, err = cfgmaps.Get(key); err != nil {
+		return nil, err
+	}
+	// delete the release
+	if err = cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil {
+		return rls, err
+	}
+	return rls, nil
+}
+
+// newConfigMapsObject constructs a kubernetes ConfigMap object
+// to store a release. Each configmap data entry is the base64
+// encoded gzipped string of a release.
+//
+// The following labels are used within each configmap:
+//
+//    "modifiedAt"     - timestamp indicating when this configmap was last modified. (set in Update)
+//    "createdAt"      - timestamp indicating when this configmap was created. (set in Create)
+//    "version"        - version of the release.
+//    "status"         - status of the release (see pkg/release/status.go for variants)
+//    "owner"          - owner of the configmap, currently "helm".
+//    "name"           - name of the release.
+//
+func newConfigMapsObject(key string, rls *rspb.Release, lbs labels) (*v1.ConfigMap, error) {
+	const owner = "helm"
+
+	// encode the release
+	s, err := encodeRelease(rls)
+	if err != nil {
+		return nil, err
+	}
+
+	// NOTE(review): this assumes labels.init (defined elsewhere in this
+	// package) allocates through the pointer receiver so a nil map becomes
+	// usable — confirm against labels.go.
+	if lbs == nil {
+		lbs.init()
+	}
+
+	// apply labels
+	lbs.set("name", rls.Name)
+	lbs.set("owner", owner)
+	lbs.set("status", rls.Info.Status.String())
+	lbs.set("version", strconv.Itoa(rls.Version))
+
+	// create and return configmap object
+	return &v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   key,
+			Labels: lbs.toMap(),
+		},
+		Data: map[string]string{"release": s},
+	}, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go
new file mode 100644
index 000000000..9c01f3766
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// Sentinel errors shared by every storage driver implementation.
var (
	// ErrReleaseNotFound indicates that a release is not found.
	ErrReleaseNotFound = errors.New("release: not found")
	// ErrReleaseExists indicates that a release already exists.
	ErrReleaseExists = errors.New("release: already exists")
	// ErrInvalidKey indicates that a release key could not be parsed.
	ErrInvalidKey = errors.New("release: invalid key")
	// ErrNoDeployedReleases indicates that there are no releases with the given key in the deployed state
	ErrNoDeployedReleases = errors.New("has no deployed releases")
)

// StorageDriverError records an error together with the name of the
// release that triggered it.
type StorageDriverError struct {
	ReleaseName string
	Err         error
}

// Error renders the release name (quoted) followed by the underlying error text.
func (e *StorageDriverError) Error() string {
	msg := e.Err.Error()
	return fmt.Sprintf("%q %s", e.ReleaseName, msg)
}

// Unwrap exposes the wrapped error for errors.Is / errors.As chains.
func (e *StorageDriverError) Unwrap() error {
	return e.Err
}

// NewErrNoDeployedReleases wraps ErrNoDeployedReleases with the given release name.
func NewErrNoDeployedReleases(releaseName string) error {
	return &StorageDriverError{ReleaseName: releaseName, Err: ErrNoDeployedReleases}
}
+
// Creator is the interface that wraps the Create method.
//
// Create stores the release or returns ErrReleaseExists
// if an identical release already exists.
//
// Implemented by the Memory, Secrets and SQL drivers in this package
// (see their `var _ Driver` assertions).
type Creator interface {
	Create(key string, rls *rspb.Release) error
}
+
// Updator is the interface that wraps the Update method.
//
// Update updates an existing release or returns
// ErrReleaseNotFound if the release does not exist.
//
// NOTE: the non-standard "Updator" spelling is part of the exported API.
type Updator interface {
	Update(key string, rls *rspb.Release) error
}
+
// Deletor is the interface that wraps the Delete method.
//
// Delete deletes the release named by key or returns
// ErrReleaseNotFound if the release does not exist.
// The deleted release is returned to the caller.
type Deletor interface {
	Delete(key string) (*rspb.Release, error)
}
+
// Queryor is the interface that wraps the Get and List methods.
//
// Get returns the release named by key or returns ErrReleaseNotFound
// if the release does not exist.
//
// List returns the set of all releases that satisfy the filter predicate.
//
// Query returns the set of all releases that match the provided label set
// (implementations in this package return ErrReleaseNotFound on no match).
type Queryor interface {
	Get(key string) (*rspb.Release, error)
	List(filter func(*rspb.Release) bool) ([]*rspb.Release, error)
	Query(labels map[string]string) ([]*rspb.Release, error)
}
+
// Driver is the interface composed of Creator, Updator, Deletor, and Queryor
// interfaces. It defines the behavior for storing, updating, deleting,
// and retrieving Helm releases from some underlying storage mechanism,
// e.g. memory, configmaps.
type Driver interface {
	Creator
	Updator
	Deletor
	Queryor
	// Name identifies the concrete driver, e.g. "Memory", "Secret", "SQL".
	Name() string
}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go
new file mode 100644
index 000000000..eb7118fe5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
// labels is a map of key value pairs to be included as metadata in a configmap object.
type labels map[string]string

// init replaces the receiver with a fresh empty map; the pointer receiver
// makes this safe to call on a nil labels value.
func (lbs *labels) init() {
	*lbs = make(labels)
}

// get returns the value stored under key, or "" when absent.
func (lbs labels) get(key string) string {
	return lbs[key]
}

// set stores val under key.
func (lbs labels) set(key, val string) {
	lbs[key] = val
}

// keys lists the label names in no particular order (nil for an empty map).
func (lbs labels) keys() []string {
	var ls []string
	for key := range lbs {
		ls = append(ls, key)
	}
	return ls
}

// match reports whether every key/value pair of set is also present in lbs.
func (lbs labels) match(set labels) bool {
	for key, want := range set {
		if lbs[key] != want {
			return false
		}
	}
	return true
}

// toMap exposes the labels as a plain map[string]string.
func (lbs labels) toMap() map[string]string {
	return lbs
}

// fromMap copies every entry of kvs into lbs.
func (lbs *labels) fromMap(kvs map[string]string) {
	for k, v := range kvs {
		lbs.set(k, v)
	}
}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go
new file mode 100644
index 000000000..91378f588
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go
@@ -0,0 +1,240 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "strconv"
+ "strings"
+ "sync"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// Compile-time check that *Memory satisfies the Driver interface.
var _ Driver = (*Memory)(nil)

const (
	// MemoryDriverName is the string name of this driver.
	MemoryDriverName = "Memory"

	// defaultNamespace is substituted when a release carries an empty namespace.
	defaultNamespace = "default"
)

// A map of release names to list of release records
type memReleases map[string]records

// Memory is the in-memory storage driver implementation.
// The embedded RWMutex guards namespace and cache; the struct must not be copied.
type Memory struct {
	sync.RWMutex
	// namespace scopes reads; "" means all namespaces (see SetNamespace).
	namespace string
	// A map of namespaces to releases
	cache map[string]memReleases
}
+
+// NewMemory initializes a new memory driver.
+func NewMemory() *Memory {
+ return &Memory{cache: map[string]memReleases{}, namespace: "default"}
+}
+
+// SetNamespace sets a specific namespace in which releases will be accessed.
+// An empty string indicates all namespaces (for the list operation)
+func (mem *Memory) SetNamespace(ns string) {
+ mem.namespace = ns
+}
+
+// Name returns the name of the driver.
+func (mem *Memory) Name() string {
+ return MemoryDriverName
+}
+
// Get returns the release named by key or returns ErrReleaseNotFound.
// Keys have the form "sh.helm.release.v1.<name>.v<version>"; the prefix
// is optional. ErrInvalidKey is returned when the remainder does not
// split into exactly a name and an integer version.
func (mem *Memory) Get(key string) (*rspb.Release, error) {
	defer unlock(mem.rlock())

	keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.")
	switch elems := strings.Split(keyWithoutPrefix, ".v"); len(elems) {
	case 2:
		name, ver := elems[0], elems[1]
		// the version component must be an integer
		if _, err := strconv.Atoi(ver); err != nil {
			return nil, ErrInvalidKey
		}
		// look only in the currently selected namespace; a nil/missing
		// namespace map simply yields ok == false
		if recs, ok := mem.cache[mem.namespace][name]; ok {
			if r := recs.Get(key); r != nil {
				return r.rls, nil
			}
		}
		return nil, ErrReleaseNotFound
	default:
		// a release name containing ".v" (or a missing version) cannot be
		// split unambiguously, so the key is rejected as invalid
		return nil, ErrInvalidKey
	}
}
+
// List returns the list of all releases such that filter(release) == true.
// When mem.namespace is non-empty only that namespace is visited: the loop
// body runs once with namespace overridden and then breaks.
func (mem *Memory) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
	defer unlock(mem.rlock())

	var ls []*rspb.Release
	for namespace := range mem.cache {
		if mem.namespace != "" {
			// Should only list releases of this namespace
			namespace = mem.namespace
		}
		for _, recs := range mem.cache[namespace] {
			recs.Iter(func(_ int, rec *record) bool {
				if filter(rec.rls) {
					ls = append(ls, rec.rls)
				}
				return true
			})
		}
		if mem.namespace != "" {
			// Should only list releases of this namespace
			break
		}
	}
	return ls, nil
}
+
// Query returns the set of releases that match the provided set of labels.
// It returns ErrReleaseNotFound when nothing matches. As in List, a
// non-empty mem.namespace restricts the scan to a single namespace.
func (mem *Memory) Query(keyvals map[string]string) ([]*rspb.Release, error) {
	defer unlock(mem.rlock())

	var lbs labels

	lbs.init()
	lbs.fromMap(keyvals)

	var ls []*rspb.Release
	for namespace := range mem.cache {
		if mem.namespace != "" {
			// Should only query releases of this namespace
			namespace = mem.namespace
		}
		for _, recs := range mem.cache[namespace] {
			recs.Iter(func(_ int, rec *record) bool {
				// A query for a release name that doesn't exist (has been deleted)
				// can cause rec to be nil.
				if rec == nil {
					return false
				}
				if rec.lbs.match(lbs) {
					ls = append(ls, rec.rls)
				}
				return true
			})
		}
		if mem.namespace != "" {
			// Should only query releases of this namespace
			break
		}
	}

	if len(ls) == 0 {
		return nil, ErrReleaseNotFound
	}

	return ls, nil
}
+
// Create creates a new release or returns ErrReleaseExists.
// NOTE: as a side effect the driver's current namespace is switched to the
// release's namespace via SetNamespace.
func (mem *Memory) Create(key string, rls *rspb.Release) error {
	defer unlock(mem.wlock())

	// For backwards compatibility, we protect against an unset namespace
	namespace := rls.Namespace
	if namespace == "" {
		namespace = defaultNamespace
	}
	mem.SetNamespace(namespace)

	if _, ok := mem.cache[namespace]; !ok {
		mem.cache[namespace] = memReleases{}
	}

	if recs, ok := mem.cache[namespace][rls.Name]; ok {
		if err := recs.Add(newRecord(key, rls)); err != nil {
			return err
		}
		// Add may grow the slice, so store the (possibly new) header back.
		mem.cache[namespace][rls.Name] = recs
		return nil
	}
	// first record for this release name
	mem.cache[namespace][rls.Name] = records{newRecord(key, rls)}
	return nil
}
+
// Update updates a release or returns ErrReleaseNotFound.
// NOTE: like Create, this switches the driver's current namespace to the
// release's namespace via SetNamespace.
func (mem *Memory) Update(key string, rls *rspb.Release) error {
	defer unlock(mem.wlock())

	// For backwards compatibility, we protect against an unset namespace
	namespace := rls.Namespace
	if namespace == "" {
		namespace = defaultNamespace
	}
	mem.SetNamespace(namespace)

	if _, ok := mem.cache[namespace]; ok {
		// only an existing key is replaced; Replace swaps the record in place
		if rs, ok := mem.cache[namespace][rls.Name]; ok && rs.Exists(key) {
			rs.Replace(key, newRecord(key, rls))
			return nil
		}
	}
	return ErrReleaseNotFound
}
+
// Delete deletes a release or returns ErrReleaseNotFound.
// The key is validated the same way as in Get (optional
// "sh.helm.release.v1." prefix, then "<name>.v<version>").
func (mem *Memory) Delete(key string) (*rspb.Release, error) {
	defer unlock(mem.wlock())

	keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.")
	elems := strings.Split(keyWithoutPrefix, ".v")

	if len(elems) != 2 {
		return nil, ErrInvalidKey
	}

	name, ver := elems[0], elems[1]
	// the version component must be an integer
	if _, err := strconv.Atoi(ver); err != nil {
		return nil, ErrInvalidKey
	}
	if _, ok := mem.cache[mem.namespace]; ok {
		if recs, ok := mem.cache[mem.namespace][name]; ok {
			if r := recs.Remove(key); r != nil {
				// recs.Remove changes the slice reference, so we have to re-assign it.
				mem.cache[mem.namespace][name] = recs
				return r.rls, nil
			}
		}
	}
	return nil, ErrReleaseNotFound
}
+
+// wlock locks mem for writing
+func (mem *Memory) wlock() func() {
+ mem.Lock()
+ return func() { mem.Unlock() }
+}
+
+// rlock locks mem for reading
+func (mem *Memory) rlock() func() {
+ mem.RLock()
+ return func() { mem.RUnlock() }
+}
+
// unlock invokes fn, the closure returned by mem.rlock or mem.wlock.
// Example: `defer unlock(mem.rlock())` takes the read lock at the point of
// the defer statement and releases it when the surrounding function returns.
func unlock(fn func()) { fn() }
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go
new file mode 100644
index 000000000..9df173384
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+ "sort"
+ "strconv"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// records holds a list of in-memory release records,
// kept sorted by release version in ascending order (see Add).
type records []*record

// sort.Interface implementation ordering records by release version.
func (rs records) Len() int { return len(rs) }
func (rs records) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
func (rs records) Less(i, j int) bool { return rs[i].rls.Version < rs[j].rls.Version }

// Add inserts r and re-sorts the list. It returns ErrReleaseExists when a
// record with the same key is already present; a nil record is silently ignored.
func (rs *records) Add(r *record) error {
	if r == nil {
		return nil
	}

	if rs.Exists(r.key) {
		return ErrReleaseExists
	}

	*rs = append(*rs, r)
	sort.Sort(*rs)

	return nil
}

// Get returns the record stored under key, or nil when absent.
func (rs records) Get(key string) *record {
	if i, ok := rs.Index(key); ok {
		return rs[i]
	}
	return nil
}

// Iter calls fn(index, record) for each record until fn returns false.
// It iterates over a copy of the slice header, so fn may safely mutate rs
// (e.g. remove records) while iterating.
func (rs *records) Iter(fn func(int, *record) bool) {
	cp := make([]*record, len(*rs))
	copy(cp, *rs)

	for i, r := range cp {
		if !fn(i, r) {
			return
		}
	}
}

// Index performs a linear scan and returns the position of key and whether it was found.
func (rs *records) Index(key string) (int, bool) {
	for i, r := range *rs {
		if r.key == key {
			return i, true
		}
	}
	return -1, false
}

// Exists reports whether a record with the given key is present.
func (rs records) Exists(key string) bool {
	_, ok := rs.Index(key)
	return ok
}

// Remove deletes and returns the record stored under key (nil when absent).
// Callers must re-assign the slice afterwards because the length changes.
func (rs *records) Remove(key string) (r *record) {
	if i, ok := rs.Index(key); ok {
		return rs.removeAt(i)
	}
	return nil
}

// Replace swaps the record stored under key for rec and returns the old
// record, or nil when key is absent (in which case rec is NOT inserted).
func (rs *records) Replace(key string, rec *record) *record {
	if i, ok := rs.Index(key); ok {
		old := (*rs)[i]
		(*rs)[i] = rec
		return old
	}
	return nil
}

// removeAt deletes the record at index and returns it. The slot is nil-ed
// before the shift so the tail element does not keep the pointer alive.
func (rs *records) removeAt(index int) *record {
	r := (*rs)[index]
	(*rs)[index] = nil
	copy((*rs)[index:], (*rs)[index+1:])
	*rs = (*rs)[:len(*rs)-1]
	return r
}
+
// record is the data structure used to cache releases
// for the in-memory storage driver
type record struct {
	// key is the storage key, e.g. "sh.helm.release.v1.<name>.v<version>".
	key string
	// lbs holds the synthesized "name"/"owner"/"status"/"version" labels (see newRecord).
	lbs labels
	// rls is the release itself, stored by reference — not deep-copied.
	rls *rspb.Release
}
+
+// newRecord creates a new in-memory release record
+func newRecord(key string, rls *rspb.Release) *record {
+ var lbs labels
+
+ lbs.init()
+ lbs.set("name", rls.Name)
+ lbs.set("owner", "helm")
+ lbs.set("status", rls.Info.Status.String())
+ lbs.set("version", strconv.Itoa(rls.Version))
+
+ // return &record{key: key, lbs: lbs, rls: proto.Clone(rls).(*rspb.Release)}
+ return &record{key: key, lbs: lbs, rls: rls}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
new file mode 100644
index 000000000..2e8530d0c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
@@ -0,0 +1,250 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+ "context"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kblabels "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/validation"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// Compile-time check that *Secrets satisfies the Driver interface.
var _ Driver = (*Secrets)(nil)

// SecretsDriverName is the string name of the driver.
const SecretsDriverName = "Secret"

// Secrets is a wrapper around an implementation of a kubernetes
// SecretsInterface.
type Secrets struct {
	// impl performs the actual Secret CRUD calls against the cluster.
	impl corev1.SecretInterface
	// Log receives printf-style diagnostics; NewSecrets installs a no-op default.
	Log func(string, ...interface{})
}
+
+// NewSecrets initializes a new Secrets wrapping an implementation of
+// the kubernetes SecretsInterface.
+func NewSecrets(impl corev1.SecretInterface) *Secrets {
+ return &Secrets{
+ impl: impl,
+ Log: func(_ string, _ ...interface{}) {},
+ }
+}
+
+// Name returns the name of the driver.
+func (secrets *Secrets) Name() string {
+ return SecretsDriverName
+}
+
// Get fetches the release named by key. The corresponding release is returned
// or error if not found. A kubernetes NotFound error is translated into
// ErrReleaseNotFound; any other API error is wrapped and returned.
func (secrets *Secrets) Get(key string) (*rspb.Release, error) {
	// fetch the secret holding the release named by key
	obj, err := secrets.impl.Get(context.Background(), key, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, ErrReleaseNotFound
		}
		return nil, errors.Wrapf(err, "get: failed to get %q", key)
	}
	// found the secret, decode the base64 data string
	// (errors.Wrapf returns nil when err is nil, so the success path is unchanged)
	r, err := decodeRelease(string(obj.Data["release"]))
	return r, errors.Wrapf(err, "get: failed to decode data %q", key)
}
+
// List fetches all releases and returns the list releases such
// that filter(release) == true. An error is returned if the
// secret fails to retrieve the releases.
// Only secrets labeled owner=helm are considered; entries that fail to
// decode are logged and skipped rather than aborting the listing.
func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
	opts := metav1.ListOptions{LabelSelector: lsel.String()}

	list, err := secrets.impl.List(context.Background(), opts)
	if err != nil {
		return nil, errors.Wrap(err, "list: failed to list")
	}

	var results []*rspb.Release

	// iterate over the secrets object list
	// and decode each release
	for _, item := range list.Items {
		rls, err := decodeRelease(string(item.Data["release"]))
		if err != nil {
			secrets.Log("list: failed to decode release: %v: %s", item, err)
			continue
		}

		// expose the secret's labels on the decoded release
		rls.Labels = item.ObjectMeta.Labels

		if filter(rls) {
			results = append(results, rls)
		}
	}
	return results, nil
}
+
// Query fetches all releases that match the provided map of labels.
// An error is returned if the secret fails to retrieve the releases.
// Label values are validated up front; an empty result set yields
// ErrReleaseNotFound. Entries that fail to decode are logged and skipped.
func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) {
	ls := kblabels.Set{}
	for k, v := range labels {
		if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
			return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
		}
		ls[k] = v
	}

	opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()}

	list, err := secrets.impl.List(context.Background(), opts)
	if err != nil {
		return nil, errors.Wrap(err, "query: failed to query with labels")
	}

	if len(list.Items) == 0 {
		return nil, ErrReleaseNotFound
	}

	var results []*rspb.Release
	for _, item := range list.Items {
		rls, err := decodeRelease(string(item.Data["release"]))
		if err != nil {
			secrets.Log("query: failed to decode release: %s", err)
			continue
		}
		results = append(results, rls)
	}
	return results, nil
}
+
// Create creates a new Secret holding the release. If the
// Secret already exists, ErrReleaseExists is returned.
// A "createdAt" unix-timestamp label is stamped on the new Secret.
func (secrets *Secrets) Create(key string, rls *rspb.Release) error {
	// set labels for secrets object meta data
	var lbs labels

	lbs.init()
	lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix())))

	// create a new secret to hold the release
	obj, err := newSecretsObject(key, rls, lbs)
	if err != nil {
		return errors.Wrapf(err, "create: failed to encode release %q", rls.Name)
	}
	// push the secret object out into the kubiverse
	if _, err := secrets.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
		// translate the kubernetes conflict into the driver sentinel
		if apierrors.IsAlreadyExists(err) {
			return ErrReleaseExists
		}

		return errors.Wrap(err, "create: failed to create")
	}
	return nil
}
+
// Update updates the Secret holding the release, stamping a "modifiedAt"
// unix-timestamp label.
// NOTE(review): the underlying SecretInterface.Update does not create the
// Secret when it is missing; any API error is wrapped and returned.
func (secrets *Secrets) Update(key string, rls *rspb.Release) error {
	// set labels for secrets object meta data
	var lbs labels

	lbs.init()
	lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix())))

	// create a new secret object to hold the release
	obj, err := newSecretsObject(key, rls, lbs)
	if err != nil {
		return errors.Wrapf(err, "update: failed to encode release %q", rls.Name)
	}
	// push the secret object out into the kubiverse
	// (errors.Wrap returns nil when err is nil, so the success path is unchanged)
	_, err = secrets.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
	return errors.Wrap(err, "update: failed to update")
}
+
+// Delete deletes the Secret holding the release named by key.
+func (secrets *Secrets) Delete(key string) (rls *rspb.Release, err error) {
+ // fetch the release to check existence
+ if rls, err = secrets.Get(key); err != nil {
+ return nil, err
+ }
+ // delete the release
+ err = secrets.impl.Delete(context.Background(), key, metav1.DeleteOptions{})
+ return rls, err
+}
+
// newSecretsObject constructs a kubernetes Secret object
// to store a release. Each secret data entry is the base64
// encoded gzipped string of a release.
//
// The following labels are used within each secret:
//
// "modifiedAt" - timestamp indicating when this secret was last modified. (set in Update)
// "createdAt" - timestamp indicating when this secret was created. (set in Create)
// "version" - version of the release.
// "status" - status of the release (see pkg/release/status.go for variants)
// "owner" - owner of the secret, currently "helm".
// "name" - name of the release.
//
func newSecretsObject(key string, rls *rspb.Release, lbs labels) (*v1.Secret, error) {
	const owner = "helm"

	// encode the release
	s, err := encodeRelease(rls)
	if err != nil {
		return nil, err
	}

	// labels.init has a pointer receiver, so calling it on the (addressable)
	// nil parameter replaces lbs with an empty map; the set calls below
	// therefore never write to a nil map.
	if lbs == nil {
		lbs.init()
	}

	// apply labels
	lbs.set("name", rls.Name)
	lbs.set("owner", owner)
	lbs.set("status", rls.Info.Status.String())
	lbs.set("version", strconv.Itoa(rls.Version))

	// create and return secret object.
	// Helm 3 introduced setting the 'Type' field
	// in the Kubernetes storage object.
	// Helm defines the field content as follows:
	// <helm_domain>/<helm_object>.v<helm_object_version>
	// Type field for Helm 3: helm.sh/release.v1
	// Note: Version starts at 'v1' for Helm 3 and
	// should be incremented if the release object
	// metadata is modified.
	// This would potentially be a breaking change
	// and should only happen between major versions.
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   key,
			Labels: lbs.toMap(),
		},
		Type: "helm.sh/release.v1",
		Data: map[string][]byte{"release": []byte(s)},
	}, nil
}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
new file mode 100644
index 000000000..c8a6ae04f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
@@ -0,0 +1,496 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+ "fmt"
+ "sort"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+ migrate "github.com/rubenv/sql-migrate"
+
+ sq "github.com/Masterminds/squirrel"
+
+ // Import pq for postgres dialect
+ _ "github.com/lib/pq"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// Compile-time check that *SQL satisfies the Driver interface.
var _ Driver = (*SQL)(nil)

// labelMap is the set of release "labels" accepted as filters by Query;
// each entry corresponds to a dedicated column of the releases table.
var labelMap = map[string]struct{}{
	"modifiedAt": {},
	"createdAt":  {},
	"version":    {},
	"status":     {},
	"owner":      {},
	"name":       {},
}

// postgreSQLDialect is the sqlx / sql-migrate dialect used by this driver.
const postgreSQLDialect = "postgres"

// SQLDriverName is the string name of this driver.
const SQLDriverName = "SQL"

// sqlReleaseTableName is the (versioned) name of the releases table.
const sqlReleaseTableName = "releases_v1"

// Column names of the releases table; see ensureDBSetup for the schema.
const (
	sqlReleaseTableKeyColumn        = "key"
	sqlReleaseTableTypeColumn       = "type"
	sqlReleaseTableBodyColumn       = "body"
	sqlReleaseTableNameColumn       = "name"
	sqlReleaseTableNamespaceColumn  = "namespace"
	sqlReleaseTableVersionColumn    = "version"
	sqlReleaseTableStatusColumn     = "status"
	sqlReleaseTableOwnerColumn      = "owner"
	sqlReleaseTableCreatedAtColumn  = "createdAt"
	sqlReleaseTableModifiedAtColumn = "modifiedAt"
)

// Default values written by Create for the owner and type columns.
const (
	sqlReleaseDefaultOwner = "helm"
	sqlReleaseDefaultType  = "helm.sh/release.v1"
)

// SQL is the sql storage driver implementation.
type SQL struct {
	db *sqlx.DB
	// namespace scopes reads; "" means all namespaces.
	namespace        string
	statementBuilder sq.StatementBuilderType

	// Log receives printf-style diagnostics.
	Log func(string, ...interface{})
}
+
+// Name returns the name of the driver.
+func (s *SQL) Name() string {
+ return SQLDriverName
+}
+
// ensureDBSetup runs the idempotent "init" migration: it creates the
// releases table (primary key = key+namespace), its secondary indexes,
// a PUBLIC grant, and enables row level security. sql-migrate tracks
// applied migrations, so re-running on an initialized database is a no-op.
func (s *SQL) ensureDBSetup() error {
	// Populate the database with the relations we need if they don't exist yet
	migrations := &migrate.MemoryMigrationSource{
		Migrations: []*migrate.Migration{
			{
				Id: "init",
				Up: []string{
					fmt.Sprintf(`
						CREATE TABLE %s (
							%s VARCHAR(67),
							%s VARCHAR(64) NOT NULL,
							%s TEXT NOT NULL,
							%s VARCHAR(64) NOT NULL,
							%s VARCHAR(64) NOT NULL,
							%s INTEGER NOT NULL,
							%s TEXT NOT NULL,
							%s TEXT NOT NULL,
							%s INTEGER NOT NULL,
							%s INTEGER NOT NULL DEFAULT 0,
							PRIMARY KEY(%s, %s)
						);
						CREATE INDEX ON %s (%s, %s);
						CREATE INDEX ON %s (%s);
						CREATE INDEX ON %s (%s);
						CREATE INDEX ON %s (%s);
						CREATE INDEX ON %s (%s);
						CREATE INDEX ON %s (%s);

						GRANT ALL ON %s TO PUBLIC;

						ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
					`,
						sqlReleaseTableName,
						sqlReleaseTableKeyColumn,
						sqlReleaseTableTypeColumn,
						sqlReleaseTableBodyColumn,
						sqlReleaseTableNameColumn,
						sqlReleaseTableNamespaceColumn,
						sqlReleaseTableVersionColumn,
						sqlReleaseTableStatusColumn,
						sqlReleaseTableOwnerColumn,
						sqlReleaseTableCreatedAtColumn,
						sqlReleaseTableModifiedAtColumn,
						sqlReleaseTableKeyColumn,
						sqlReleaseTableNamespaceColumn,
						sqlReleaseTableName,
						sqlReleaseTableKeyColumn,
						sqlReleaseTableNamespaceColumn,
						sqlReleaseTableName,
						sqlReleaseTableVersionColumn,
						sqlReleaseTableName,
						sqlReleaseTableStatusColumn,
						sqlReleaseTableName,
						sqlReleaseTableOwnerColumn,
						sqlReleaseTableName,
						sqlReleaseTableCreatedAtColumn,
						sqlReleaseTableName,
						sqlReleaseTableModifiedAtColumn,
						sqlReleaseTableName,
						sqlReleaseTableName,
					),
				},
				Down: []string{
					fmt.Sprintf(`
						DROP TABLE %s;
					`, sqlReleaseTableName),
				},
			},
		},
	}

	_, err := migrate.Exec(s.db.DB, postgreSQLDialect, migrations, migrate.Up)
	return err
}
+
// SQLReleaseWrapper describes how Helm releases are stored in an SQL database.
// The db struct tags match the column-name constants above (sqlx mapping).
type SQLReleaseWrapper struct {
	// The primary key, made of {release-name}.{release-version}
	Key string `db:"key"`

	// See https://github.com/helm/helm/blob/c9fe3d118caec699eb2565df9838673af379ce12/pkg/storage/driver/secrets.go#L231
	Type string `db:"type"`

	// The rspb.Release body, as a base64-encoded string
	Body string `db:"body"`

	// Release "labels" that can be used as filters in the storage.Query(labels map[string]string)
	// we implemented. Note that allowing Helm users to filter against new dimensions will require a
	// new migration to be added, and the Create and/or update functions to be updated accordingly.
	Name       string `db:"name"`
	Namespace  string `db:"namespace"`
	Version    int    `db:"version"`
	Status     string `db:"status"`
	Owner      string `db:"owner"`
	CreatedAt  int    `db:"createdAt"`
	ModifiedAt int    `db:"modifiedAt"`
}
+
// NewSQL initializes a new sql driver. It connects to the postgres database
// behind connectionString, ensures the schema exists (see ensureDBSetup),
// and scopes the driver to namespace.
// NOTE(review): on migration failure the opened db handle is not closed —
// confirm whether that leak is acceptable for the callers.
func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) {
	db, err := sqlx.Connect(postgreSQLDialect, connectionString)
	if err != nil {
		return nil, err
	}

	driver := &SQL{
		db:               db,
		Log:              logger,
		statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
	}

	if err := driver.ensureDBSetup(); err != nil {
		return nil, err
	}

	// namespace is assigned after the migration; ensureDBSetup does not read it
	driver.namespace = namespace

	return driver, nil
}
+
// Get returns the release named by key.
// Any SQL error — including an empty result — is reported to the caller
// as ErrReleaseNotFound (the underlying error is only logged).
func (s *SQL) Get(key string) (*rspb.Release, error) {
	var record SQLReleaseWrapper

	qb := s.statementBuilder.
		Select(sqlReleaseTableBodyColumn).
		From(sqlReleaseTableName).
		Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
		Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})

	query, args, err := qb.ToSql()
	if err != nil {
		s.Log("failed to build query: %v", err)
		return nil, err
	}

	// Get will return an error if the result is empty
	if err := s.db.Get(&record, query, args...); err != nil {
		s.Log("got SQL error when getting release %s: %v", key, err)
		return nil, ErrReleaseNotFound
	}

	release, err := decodeRelease(record.Body)
	if err != nil {
		s.Log("get: failed to decode data %q: %v", key, err)
		return nil, err
	}

	return release, nil
}
+
// List returns the list of all releases such that filter(release) == true.
// Only rows with owner=helm are considered; rows that fail to decode are
// logged and skipped rather than aborting the listing.
func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
	sb := s.statementBuilder.
		Select(sqlReleaseTableBodyColumn).
		From(sqlReleaseTableName).
		Where(sq.Eq{sqlReleaseTableOwnerColumn: sqlReleaseDefaultOwner})

	// If a namespace was specified, we only list releases from that namespace
	if s.namespace != "" {
		sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
	}

	query, args, err := sb.ToSql()
	if err != nil {
		s.Log("failed to build query: %v", err)
		return nil, err
	}

	var records = []SQLReleaseWrapper{}
	if err := s.db.Select(&records, query, args...); err != nil {
		s.Log("list: failed to list: %v", err)
		return nil, err
	}

	var releases []*rspb.Release
	for _, record := range records {
		release, err := decodeRelease(record.Body)
		if err != nil {
			s.Log("list: failed to decode release: %v: %v", record, err)
			continue
		}
		if filter(release) {
			releases = append(releases, release)
		}
	}

	return releases, nil
}
+
// Query returns the set of releases that match the provided set of labels.
// Keys are validated against labelMap (each allowed key maps to a table
// column); an unknown key is an error. Keys are sorted first so the
// generated SQL is deterministic. ErrReleaseNotFound is returned both when
// no rows match and when every matching row fails to decode.
func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) {
	sb := s.statementBuilder.
		Select(sqlReleaseTableBodyColumn).
		From(sqlReleaseTableName)

	keys := make([]string, 0, len(labels))
	for key := range labels {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		if _, ok := labelMap[key]; ok {
			sb = sb.Where(sq.Eq{key: labels[key]})
		} else {
			s.Log("unknown label %s", key)
			return nil, fmt.Errorf("unknown label %s", key)
		}
	}

	// If a namespace was specified, we only list releases from that namespace
	if s.namespace != "" {
		sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
	}

	// Build our query
	query, args, err := sb.ToSql()
	if err != nil {
		s.Log("failed to build query: %v", err)
		return nil, err
	}

	var records = []SQLReleaseWrapper{}
	if err := s.db.Select(&records, query, args...); err != nil {
		s.Log("list: failed to query with labels: %v", err)
		return nil, err
	}

	if len(records) == 0 {
		return nil, ErrReleaseNotFound
	}

	var releases []*rspb.Release
	for _, record := range records {
		release, err := decodeRelease(record.Body)
		if err != nil {
			s.Log("list: failed to decode release: %v: %v", record, err)
			continue
		}
		releases = append(releases, release)
	}

	// second emptiness check: reached when rows existed but none decoded
	if len(releases) == 0 {
		return nil, ErrReleaseNotFound
	}

	return releases, nil
}
+
// Create creates a new release. The insert runs inside a transaction; when
// it fails, the transaction is rolled back and a probe query distinguishes
// a duplicate key (mapped to ErrReleaseExists) from other failures.
// NOTE: as a side effect the driver's namespace is switched to the
// release's namespace.
func (s *SQL) Create(key string, rls *rspb.Release) error {
	// For backwards compatibility, protect against an unset namespace
	namespace := rls.Namespace
	if namespace == "" {
		namespace = defaultNamespace
	}
	s.namespace = namespace

	body, err := encodeRelease(rls)
	if err != nil {
		s.Log("failed to encode release: %v", err)
		return err
	}

	transaction, err := s.db.Beginx()
	if err != nil {
		s.Log("failed to start SQL transaction: %v", err)
		return fmt.Errorf("error beginning transaction: %v", err)
	}

	insertQuery, args, err := s.statementBuilder.
		Insert(sqlReleaseTableName).
		Columns(
			sqlReleaseTableKeyColumn,
			sqlReleaseTableTypeColumn,
			sqlReleaseTableBodyColumn,
			sqlReleaseTableNameColumn,
			sqlReleaseTableNamespaceColumn,
			sqlReleaseTableVersionColumn,
			sqlReleaseTableStatusColumn,
			sqlReleaseTableOwnerColumn,
			sqlReleaseTableCreatedAtColumn,
		).
		Values(
			key,
			sqlReleaseDefaultType,
			body,
			rls.Name,
			namespace,
			int(rls.Version),
			rls.Info.Status.String(),
			sqlReleaseDefaultOwner,
			int(time.Now().Unix()),
		).ToSql()
	if err != nil {
		s.Log("failed to build insert query: %v", err)
		return err
	}

	if _, err := transaction.Exec(insertQuery, args...); err != nil {
		// roll back on function exit; the probe below runs on the same
		// (doomed) transaction and only inspects existing state
		defer transaction.Rollback()

		selectQuery, args, buildErr := s.statementBuilder.
			Select(sqlReleaseTableKeyColumn).
			From(sqlReleaseTableName).
			Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
			Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
			ToSql()
		if buildErr != nil {
			s.Log("failed to build select query: %v", buildErr)
			return err
		}

		// if the key already exists, report the duplicate rather than the raw error
		var record SQLReleaseWrapper
		if err := transaction.Get(&record, selectQuery, args...); err == nil {
			s.Log("release %s already exists", key)
			return ErrReleaseExists
		}

		s.Log("failed to store release %s in SQL database: %v", key, err)
		return err
	}
	// NOTE(review): the deferred Commit discards its error; a commit
	// failure would be invisible to the caller — confirm intended.
	defer transaction.Commit()

	return nil
}
+
+// Update updates a release.
+func (s *SQL) Update(key string, rls *rspb.Release) error {
+ namespace := rls.Namespace
+ if namespace == "" {
+ namespace = defaultNamespace
+ }
+ s.namespace = namespace
+
+ body, err := encodeRelease(rls)
+ if err != nil {
+ s.Log("failed to encode release: %v", err)
+ return err
+ }
+
+ query, args, err := s.statementBuilder.
+ Update(sqlReleaseTableName).
+ Set(sqlReleaseTableBodyColumn, body).
+ Set(sqlReleaseTableNameColumn, rls.Name).
+ Set(sqlReleaseTableVersionColumn, int(rls.Version)).
+ Set(sqlReleaseTableStatusColumn, rls.Info.Status.String()).
+ Set(sqlReleaseTableOwnerColumn, sqlReleaseDefaultOwner).
+ Set(sqlReleaseTableModifiedAtColumn, int(time.Now().Unix())).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: namespace}).
+ ToSql()
+
+ if err != nil {
+ s.Log("failed to build update query: %v", err)
+ return err
+ }
+
+ if _, err := s.db.Exec(query, args...); err != nil {
+ s.Log("failed to update release %s in SQL database: %v", key, err)
+ return err
+ }
+
+ return nil
+}
+
+// Delete deletes a release or returns ErrReleaseNotFound.
+func (s *SQL) Delete(key string) (*rspb.Release, error) {
+ transaction, err := s.db.Beginx()
+ if err != nil {
+ s.Log("failed to start SQL transaction: %v", err)
+ return nil, fmt.Errorf("error beginning transaction: %v", err)
+ }
+
+ selectQuery, args, err := s.statementBuilder.
+ Select(sqlReleaseTableBodyColumn).
+ From(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+ ToSql()
+ if err != nil {
+ s.Log("failed to build select query: %v", err)
+ return nil, err
+ }
+
+ var record SQLReleaseWrapper
+ err = transaction.Get(&record, selectQuery, args...)
+ if err != nil {
+ s.Log("release %s not found: %v", key, err)
+ return nil, ErrReleaseNotFound
+ }
+
+ release, err := decodeRelease(record.Body)
+ if err != nil {
+ s.Log("failed to decode release %s: %v", key, err)
+ transaction.Rollback()
+ return nil, err
+ }
+ defer transaction.Commit()
+
+ deleteQuery, args, err := s.statementBuilder.
+ Delete(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+ ToSql()
+ if err != nil {
+ s.Log("failed to build select query: %v", err)
+ return nil, err
+ }
+
+ _, err = transaction.Exec(deleteQuery, args...)
+ return release, err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go
new file mode 100644
index 000000000..e5b846163
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go
@@ -0,0 +1,85 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "encoding/json"
+ "io/ioutil"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+)
+
// b64 is the encoding used to wrap the (possibly gzipped) release JSON.
var b64 = base64.StdEncoding

// magicGzip is the three-byte gzip magic header used to detect whether a
// stored payload was compressed before being base64-encoded.
var magicGzip = []byte{0x1f, 0x8b, 0x08}
+
+// encodeRelease encodes a release returning a base64 encoded
+// gzipped string representation, or error.
+func encodeRelease(rls *rspb.Release) (string, error) {
+ b, err := json.Marshal(rls)
+ if err != nil {
+ return "", err
+ }
+ var buf bytes.Buffer
+ w, err := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+ if err != nil {
+ return "", err
+ }
+ if _, err = w.Write(b); err != nil {
+ return "", err
+ }
+ w.Close()
+
+ return b64.EncodeToString(buf.Bytes()), nil
+}
+
+// decodeRelease decodes the bytes of data into a release
+// type. Data must contain a base64 encoded gzipped string of a
+// valid release, otherwise an error is returned.
+func decodeRelease(data string) (*rspb.Release, error) {
+ // base64 decode string
+ b, err := b64.DecodeString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // For backwards compatibility with releases that were stored before
+ // compression was introduced we skip decompression if the
+ // gzip magic header is not found
+ if bytes.Equal(b[0:3], magicGzip) {
+ r, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ b2, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ b = b2
+ }
+
+ var rls rspb.Release
+ // unmarshal release object bytes
+ if err := json.Unmarshal(b, &rls); err != nil {
+ return nil, err
+ }
+ return &rls, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/storage.go b/vendor/helm.sh/helm/v3/pkg/storage/storage.go
new file mode 100644
index 000000000..370fec4b4
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/storage.go
@@ -0,0 +1,266 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage // import "helm.sh/helm/v3/pkg/storage"
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ rspb "helm.sh/helm/v3/pkg/release"
+ relutil "helm.sh/helm/v3/pkg/releaseutil"
+ "helm.sh/helm/v3/pkg/storage/driver"
+)
+
// HelmStorageType is the type field of the Kubernetes storage object which stores the Helm release
// version. It is modified slightly replacing the '/': sh.helm/release.v1
// Note: The version 'v1' is incremented if the release object metadata is
// modified between major releases.
// This constant is used as a prefix for the Kubernetes storage object name
// (see makeKey below).
const HelmStorageType = "sh.helm.release.v1"
+
// Storage represents a storage engine for a Release.
type Storage struct {
	// Driver is the backing release store; all reads and writes delegate
	// to it after the (name, version) pair is turned into a storage key.
	driver.Driver

	// MaxHistory specifies the maximum number of historical releases that will
	// be retained, including the most recent release. Values of 0 or less are
	// ignored (meaning no limits are imposed).
	MaxHistory int

	// Log receives printf-style diagnostic messages from the storage layer.
	Log func(string, ...interface{})
}
+
+// Get retrieves the release from storage. An error is returned
+// if the storage driver failed to fetch the release, or the
+// release identified by the key, version pair does not exist.
+func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
+ s.Log("getting release %q", makeKey(name, version))
+ return s.Driver.Get(makeKey(name, version))
+}
+
+// Create creates a new storage entry holding the release. An
+// error is returned if the storage driver fails to store the
+// release, or a release with an identical key already exists.
+func (s *Storage) Create(rls *rspb.Release) error {
+ s.Log("creating release %q", makeKey(rls.Name, rls.Version))
+ if s.MaxHistory > 0 {
+ // Want to make space for one more release.
+ if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
+ !errors.Is(err, driver.ErrReleaseNotFound) {
+ return err
+ }
+ }
+ return s.Driver.Create(makeKey(rls.Name, rls.Version), rls)
+}
+
// Update updates the release in storage. An error is returned if the
// storage backend fails to update the release or if the release
// does not exist. The record is addressed by the (name, version) key.
func (s *Storage) Update(rls *rspb.Release) error {
	s.Log("updating release %q", makeKey(rls.Name, rls.Version))
	return s.Driver.Update(makeKey(rls.Name, rls.Version), rls)
}

// Delete deletes the release from storage. An error is returned if
// the storage backend fails to delete the release or if the release
// does not exist. The deleted release is returned on success.
func (s *Storage) Delete(name string, version int) (*rspb.Release, error) {
	s.Log("deleting release %q", makeKey(name, version))
	return s.Driver.Delete(makeKey(name, version))
}
+
// ListReleases returns all releases from storage. An error is returned if the
// storage backend fails to retrieve the releases.
func (s *Storage) ListReleases() ([]*rspb.Release, error) {
	s.Log("listing all releases in storage")
	// The always-true filter makes the driver return every record.
	return s.Driver.List(func(_ *rspb.Release) bool { return true })
}

// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
// if the storage backend fails to retrieve the releases.
// Filtering happens client-side over the full release list.
func (s *Storage) ListUninstalled() ([]*rspb.Release, error) {
	s.Log("listing uninstalled releases in storage")
	return s.Driver.List(func(rls *rspb.Release) bool {
		return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls)
	})
}

// ListDeployed returns all releases with Status == DEPLOYED. An error is returned
// if the storage backend fails to retrieve the releases.
// Filtering happens client-side over the full release list.
func (s *Storage) ListDeployed() ([]*rspb.Release, error) {
	s.Log("listing all deployed releases in storage")
	return s.Driver.List(func(rls *rspb.Release) bool {
		return relutil.StatusFilter(rspb.StatusDeployed).Check(rls)
	})
}
+
// Deployed returns the last deployed release with the provided release name, or
// returns ErrReleaseNotFound if not found.
func (s *Storage) Deployed(name string) (*rspb.Release, error) {
	ls, err := s.DeployedAll(name)
	if err != nil {
		return nil, err
	}

	if len(ls) == 0 {
		return nil, driver.NewErrNoDeployedReleases(name)
	}

	// If executed concurrently, Helm's database gets corrupted
	// and multiple releases are DEPLOYED. Take the latest.
	relutil.Reverse(ls, relutil.SortByRevision)

	return ls[0], nil
}
+
// DeployedAll returns all deployed releases with the provided name, or
// returns ErrReleaseNotFound if not found.
func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
	s.Log("getting deployed releases from %q history", name)

	ls, err := s.Driver.Query(map[string]string{
		"name":   name,
		"owner":  "helm",
		"status": "deployed",
	})
	if err == nil {
		return ls, nil
	}
	// NOTE(review): the driver error is matched on the "not found"
	// substring because different drivers return differently-typed errors
	// here — confirm against the driver implementations before changing.
	if strings.Contains(err.Error(), "not found") {
		return nil, driver.NewErrNoDeployedReleases(name)
	}
	return nil, err
}
+
+// History returns the revision history for the release with the provided name, or
+// returns ErrReleaseNotFound if no such release name exists.
+func (s *Storage) History(name string) ([]*rspb.Release, error) {
+ s.Log("getting release history for %q", name)
+
+ return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
+}
+
// removeLeastRecent removes items from history until the length number of releases
// does not exceed max.
//
// We allow max to be set explicitly so that calling functions can "make space"
// for the new records they are going to write.
func (s *Storage) removeLeastRecent(name string, max int) error {
	if max < 0 {
		return nil
	}
	h, err := s.History(name)
	if err != nil {
		return err
	}
	if len(h) <= max {
		return nil
	}

	// We want oldest to newest
	relutil.SortByRevision(h)

	// NOTE(review): when no release is currently deployed, Deployed returns
	// an error, which aborts pruning (and the caller's Create) — confirm
	// this is the intended behavior.
	lastDeployed, err := s.Deployed(name)
	if err != nil {
		return err
	}

	// Collect oldest-first deletions, always sparing the last deployed
	// release, until only max records would remain.
	var toDelete []*rspb.Release
	for _, rel := range h {
		// once we have enough releases to delete to reach the max, stop
		if len(h)-len(toDelete) == max {
			break
		}
		if lastDeployed != nil {
			if rel.Version != lastDeployed.Version {
				toDelete = append(toDelete, rel)
			}
		} else {
			toDelete = append(toDelete, rel)
		}
	}

	// Delete as many as possible. In the case of API throughput limitations,
	// multiple invocations of this function will eventually delete them all.
	errs := []error{}
	for _, rel := range toDelete {
		err = s.deleteReleaseVersion(name, rel.Version)
		if err != nil {
			errs = append(errs, err)
		}
	}

	s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
	switch c := len(errs); c {
	case 0:
		return nil
	case 1:
		return errs[0]
	default:
		return errors.Errorf("encountered %d deletion errors. First is: %s", c, errs[0])
	}
}
+
+func (s *Storage) deleteReleaseVersion(name string, version int) error {
+ key := makeKey(name, version)
+ _, err := s.Delete(name, version)
+ if err != nil {
+ s.Log("error pruning %s from release history: %s", key, err)
+ return err
+ }
+ return nil
+}
+
+// Last fetches the last revision of the named release.
+func (s *Storage) Last(name string) (*rspb.Release, error) {
+ s.Log("getting last revision of %q", name)
+ h, err := s.History(name)
+ if err != nil {
+ return nil, err
+ }
+ if len(h) == 0 {
+ return nil, errors.Errorf("no revision for release %q", name)
+ }
+
+ relutil.Reverse(h, relutil.SortByRevision)
+ return h[0], nil
+}
+
+// makeKey concatenates the Kubernetes storage object type, a release name and version
+// into a string with format:```<helm_storage_type>.<release_name>.v<release_version>```.
+// The storage type is prepended to keep name uniqueness between different
+// release storage types. An example of clash when not using the type:
+// https://github.com/helm/helm/issues/6435.
+// This key is used to uniquely identify storage objects.
+func makeKey(rlsname string, version int) string {
+ return fmt.Sprintf("%s.%s.v%d", HelmStorageType, rlsname, version)
+}
+
+// Init initializes a new storage backend with the driver d.
+// If d is nil, the default in-memory driver is used.
+func Init(d driver.Driver) *Storage {
+ // default driver is in memory
+ if d == nil {
+ d = driver.NewMemory()
+ }
+ return &Storage{
+ Driver: d,
+ Log: func(_ string, _ ...interface{}) {},
+ }
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/strvals/doc.go b/vendor/helm.sh/helm/v3/pkg/strvals/doc.go
new file mode 100644
index 000000000..f17290587
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/strvals/doc.go
@@ -0,0 +1,32 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package strvals provides tools for working with strval lines.
+
+Helm supports a compressed format for YAML settings which we call strvals.
+The format is roughly like this:
+
+ name=value,topname.subname=value
+
+The above is equivalent to the YAML document
+
+ name: value
+ topname:
+ subname: value
+
+This package provides a parser and utilities for converting the strvals format
+to other formats.
+*/
+package strvals
diff --git a/vendor/helm.sh/helm/v3/pkg/strvals/parser.go b/vendor/helm.sh/helm/v3/pkg/strvals/parser.go
new file mode 100644
index 000000000..457b99f94
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/strvals/parser.go
@@ -0,0 +1,446 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strvals
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/yaml"
+)
+
// ErrNotList indicates that a non-list was treated as a list. It is used as
// a sentinel by valList so callers can fall back to scalar parsing.
var ErrNotList = errors.New("not a list")
+
+// ToYAML takes a string of arguments and converts to a YAML document.
+func ToYAML(s string) (string, error) {
+ m, err := Parse(s)
+ if err != nil {
+ return "", err
+ }
+ d, err := yaml.Marshal(m)
+ return strings.TrimSuffix(string(d), "\n"), err
+}
+
// Parse parses a set line.
//
// A set line is of the form name1=value1,name2=value2
func Parse(s string) (map[string]interface{}, error) {
	vals := map[string]interface{}{}
	scanner := bytes.NewBufferString(s)
	// stringBool=false: values are coerced to bool/int64/nil where possible.
	t := newParser(scanner, vals, false)
	err := t.parse()
	return vals, err
}

// ParseString parses a set line and forces a string value.
//
// A set line is of the form name1=value1,name2=value2
func ParseString(s string) (map[string]interface{}, error) {
	vals := map[string]interface{}{}
	scanner := bytes.NewBufferString(s)
	// stringBool=true: every scalar stays a raw string.
	t := newParser(scanner, vals, true)
	err := t.parse()
	return vals, err
}

// ParseInto parses a strvals line and merges the result into dest.
//
// If the strval string has a key that exists in dest, it overwrites the
// dest version.
func ParseInto(s string, dest map[string]interface{}) error {
	scanner := bytes.NewBufferString(s)
	t := newParser(scanner, dest, false)
	return t.parse()
}
+
// ParseFile parses a set line, but its final value is loaded from the file at the path specified by the original value.
//
// A set line is of the form name1=path1,name2=path2
//
// When the files at path1 and path2 contained "val1" and "val2" respectively, the set line is consumed as
// name1=val1,name2=val2
func ParseFile(s string, reader RunesValueReader) (map[string]interface{}, error) {
	vals := map[string]interface{}{}
	scanner := bytes.NewBufferString(s)
	// reader turns each raw value (a path) into the file's contents.
	t := newFileParser(scanner, vals, reader)
	err := t.parse()
	return vals, err
}

// ParseIntoString parses a strvals line and merges the result into dest.
//
// This method always returns a string as the value.
func ParseIntoString(s string, dest map[string]interface{}) error {
	scanner := bytes.NewBufferString(s)
	t := newParser(scanner, dest, true)
	return t.parse()
}

// ParseIntoFile parses a filevals line and merges the result into dest.
//
// This method always returns a string as the value.
func ParseIntoFile(s string, dest map[string]interface{}, reader RunesValueReader) error {
	scanner := bytes.NewBufferString(s)
	t := newFileParser(scanner, dest, reader)
	return t.parse()
}
+
// RunesValueReader is a function that takes the given value (a slice of runes)
// and returns the parsed value
type RunesValueReader func([]rune) (interface{}, error)

// parser is a simple parser that takes a strvals line and parses it into a
// map representation.
//
// where sc is the source of the original data being parsed
// where data is the final parsed data from the parses with correct types
type parser struct {
	sc     *bytes.Buffer
	data   map[string]interface{}
	reader RunesValueReader
}

// newParser builds a parser whose reader coerces scalar values via typedVal;
// stringBool forces every value to remain a string.
func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser {
	stringConverter := func(rs []rune) (interface{}, error) {
		return typedVal(rs, stringBool), nil
	}
	return &parser{sc: sc, data: data, reader: stringConverter}
}

// newFileParser builds a parser with a caller-supplied value reader
// (used by ParseFile to load values from files).
func newFileParser(sc *bytes.Buffer, data map[string]interface{}, reader RunesValueReader) *parser {
	return &parser{sc: sc, data: data, reader: reader}
}
+
+func (t *parser) parse() error {
+ for {
+ err := t.key(t.data)
+ if err == nil {
+ continue
+ }
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+}
+
// runeSet builds a membership set from the given runes.
func runeSet(rs []rune) map[rune]bool {
	set := make(map[rune]bool, len(rs))
	for _, r := range rs {
		set[r] = true
	}
	return set
}
+
// key parses one key expression from the scanner and stores the result in
// data. It dispatches on the rune that terminated the key: '=' means a
// scalar/list value follows, '[' a list index, '.' a nested map, and ','
// a missing value (error). A panic during parsing (e.g. a failed type
// assertion when a scalar is re-used as a map or list) is recovered into
// an error via the named return.
func (t *parser) key(data map[string]interface{}) (reterr error) {
	defer func() {
		if r := recover(); r != nil {
			reterr = fmt.Errorf("unable to parse key: %s", r)
		}
	}()
	stop := runeSet([]rune{'=', '[', ',', '.'})
	for {
		switch k, last, err := runesUntil(t.sc, stop); {
		case err != nil:
			if len(k) == 0 {
				return err
			}
			return errors.Errorf("key %q has no value", string(k))
			//set(data, string(k), "")
			//return err
		case last == '[':
			// We are in a list index context, so we need to set an index.
			i, err := t.keyIndex()
			if err != nil {
				return errors.Wrap(err, "error parsing index")
			}
			kk := string(k)
			// Find or create target list
			list := []interface{}{}
			if _, ok := data[kk]; ok {
				list = data[kk].([]interface{})
			}

			// Now we need to get the value after the ].
			list, err = t.listItem(list, i)
			set(data, kk, list)
			return err
		case last == '=':
			//End of key. Consume =, Get value.
			// FIXME: Get value list first
			vl, e := t.valList()
			switch e {
			case nil:
				set(data, string(k), vl)
				return nil
			case io.EOF:
				// "key=" at end of input: store empty string, propagate EOF.
				set(data, string(k), "")
				return e
			case ErrNotList:
				// Not a brace list; read a plain scalar value instead.
				rs, e := t.val()
				if e != nil && e != io.EOF {
					return e
				}
				v, e := t.reader(rs)
				set(data, string(k), v)
				return e
			default:
				return e
			}

		case last == ',':
			// No value given. Set the value to empty string. Return error.
			set(data, string(k), "")
			return errors.Errorf("key %q has no value (cannot end with ,)", string(k))
		case last == '.':
			// First, create or find the target map.
			inner := map[string]interface{}{}
			if _, ok := data[string(k)]; ok {
				inner = data[string(k)].(map[string]interface{})
			}

			// Recurse
			e := t.key(inner)
			if len(inner) == 0 {
				return errors.Errorf("key map %q has no value", string(k))
			}
			set(data, string(k), inner)
			return e
		}
	}
}
+
// set stores val under key in data; empty keys are dropped rather than
// stored.
func set(data map[string]interface{}, key string, val interface{}) {
	if key == "" {
		return
	}
	data[key] = val
}
+
// setIndex stores val at position index of list, growing the list (padding
// with nils) when index is past the end. Negative indices are rejected, and
// any panic caused by an out-of-range allocation is recovered into an error.
func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) {
	// There are possible index values that are out of range on a target system
	// causing a panic. This will catch the panic and return an error instead.
	// The value of the index that causes a panic varies from system to system.
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("error processing index %d: %s", index, r)
		}
	}()

	if index < 0 {
		return list, fmt.Errorf("negative %d index not allowed", index)
	}
	if index >= len(list) {
		grown := make([]interface{}, index+1)
		copy(grown, list)
		list = grown
	}
	list[index] = val
	return list, nil
}
+
+func (t *parser) keyIndex() (int, error) {
+ // First, get the key.
+ stop := runeSet([]rune{']'})
+ v, _, err := runesUntil(t.sc, stop)
+ if err != nil {
+ return 0, err
+ }
+ // v should be the index
+ return strconv.Atoi(string(v))
+
+}
// listItem parses the value destined for index i of list, dispatching on the
// rune that follows the closing ']': '=' means a scalar or brace-list value,
// '[' a nested list, '.' a nested map. Any other terminator (or trailing
// data before it) is a parse error.
func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) {
	if i < 0 {
		return list, fmt.Errorf("negative %d index not allowed", i)
	}
	stop := runeSet([]rune{'[', '.', '='})
	switch k, last, err := runesUntil(t.sc, stop); {
	case len(k) > 0:
		return list, errors.Errorf("unexpected data at end of array index: %q", k)
	case err != nil:
		return list, err
	case last == '=':
		vl, e := t.valList()
		switch e {
		case nil:
			return setIndex(list, i, vl)
		case io.EOF:
			// "name[i]=" at end of input: store empty string.
			return setIndex(list, i, "")
		case ErrNotList:
			// Not a brace list; read a plain scalar value instead.
			rs, e := t.val()
			if e != nil && e != io.EOF {
				return list, e
			}
			v, e := t.reader(rs)
			if e != nil {
				return list, e
			}
			return setIndex(list, i, v)
		default:
			return list, e
		}
	case last == '[':
		// now we have a nested list. Read the index and handle.
		nextI, err := t.keyIndex()
		if err != nil {
			return list, errors.Wrap(err, "error parsing index")
		}
		var crtList []interface{}
		if len(list) > i {
			// If nested list already exists, take the value of list to next cycle.
			existed := list[i]
			if existed != nil {
				crtList = list[i].([]interface{})
			}
		}
		// Now we need to get the value after the ].
		list2, err := t.listItem(crtList, nextI)
		if err != nil {
			return list, err
		}
		return setIndex(list, i, list2)
	case last == '.':
		// We have a nested object. Send to t.key
		inner := map[string]interface{}{}
		if len(list) > i {
			var ok bool
			inner, ok = list[i].(map[string]interface{})
			if !ok {
				// We have indices out of order. Initialize empty value.
				list[i] = map[string]interface{}{}
				inner = list[i].(map[string]interface{})
			}
		}

		// Recurse
		e := t.key(inner)
		if e != nil {
			return list, e
		}
		return setIndex(list, i, inner)
	default:
		return nil, errors.Errorf("parse error: unexpected token %v", last)
	}
}
+
+func (t *parser) val() ([]rune, error) {
+ stop := runeSet([]rune{','})
+ v, _, err := runesUntil(t.sc, stop)
+ return v, err
+}
+
// valList parses a brace-delimited list value ("{a,b,c}"). If the next rune
// is not '{', it is unread and ErrNotList is returned so the caller can fall
// back to scalar parsing. A list that hits EOF before '}' is an error.
func (t *parser) valList() ([]interface{}, error) {
	r, _, e := t.sc.ReadRune()
	if e != nil {
		return []interface{}{}, e
	}

	if r != '{' {
		t.sc.UnreadRune()
		return []interface{}{}, ErrNotList
	}

	list := []interface{}{}
	stop := runeSet([]rune{',', '}'})
	for {
		switch rs, last, err := runesUntil(t.sc, stop); {
		case err != nil:
			if err == io.EOF {
				err = errors.New("list must terminate with '}'")
			}
			return list, err
		case last == '}':
			// If this is followed by ',', consume it.
			if r, _, e := t.sc.ReadRune(); e == nil && r != ',' {
				t.sc.UnreadRune()
			}
			v, e := t.reader(rs)
			list = append(list, v)
			return list, e
		case last == ',':
			v, e := t.reader(rs)
			if e != nil {
				return list, e
			}
			list = append(list, v)
		}
	}
}
+
+func runesUntil(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) {
+ v := []rune{}
+ for {
+ switch r, _, e := in.ReadRune(); {
+ case e != nil:
+ return v, r, e
+ case inMap(r, stop):
+ return v, r, nil
+ case r == '\\':
+ next, _, e := in.ReadRune()
+ if e != nil {
+ return v, next, e
+ }
+ v = append(v, next)
+ default:
+ v = append(v, r)
+ }
+ }
+}
+
// inMap reports whether k is a member of m.
func inMap(k rune, m map[rune]bool) bool {
	_, found := m[k]
	return found
}
+
// typedVal converts a raw rune value into its typed form: true/false/null
// (case-insensitive) become bool/nil, "0" becomes int64(0), and other
// numeric strings without a leading zero are parsed as int64. When st is
// true, every value remains a raw string.
func typedVal(v []rune, st bool) interface{} {
	val := string(v)

	if st {
		return val
	}

	switch {
	case strings.EqualFold(val, "true"):
		return true
	case strings.EqualFold(val, "false"):
		return false
	case strings.EqualFold(val, "null"):
		return nil
	case strings.EqualFold(val, "0"):
		return int64(0)
	}

	// Values with a leading zero (e.g. "0777") are kept as strings to
	// preserve their formatting; everything else is tried as an integer.
	if len(val) != 0 && val[0] != '0' {
		if iv, err := strconv.ParseInt(val, 10, 64); err == nil {
			return iv
		}
	}

	return val
}
diff --git a/vendor/helm.sh/helm/v3/pkg/time/time.go b/vendor/helm.sh/helm/v3/pkg/time/time.go
new file mode 100644
index 000000000..44f3fedfb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/time/time.go
@@ -0,0 +1,91 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package time contains a wrapper for time.Time in the standard library and
+// associated methods. This package mainly exists to workaround an issue in Go
+// where the serializer doesn't omit an empty value for time:
+// https://github.com/golang/go/issues/11939. As such, this can be removed if a
+// proposal is ever accepted for Go
+package time
+
+import (
+ "bytes"
+ "time"
+)
+
// emptyString contains an empty JSON string value (`""`), emitted/accepted
// by the JSON methods below in place of the zero time.
var emptyString = `""`
+
// Time is a convenience wrapper around stdlib time, but with different
// marshalling and unmarshaling for zero values: the zero time serializes
// to the JSON empty string instead of the zero-time literal.
type Time struct {
	time.Time
}
+
// Now returns the current time. It is a convenience wrapper around time.Now()
func Now() Time {
	return Time{time.Now()}
}

// MarshalJSON serializes the time via the stdlib encoder, except that the
// zero time is emitted as the JSON empty string ("").
func (t Time) MarshalJSON() ([]byte, error) {
	if t.Time.IsZero() {
		return []byte(emptyString), nil
	}

	return t.Time.MarshalJSON()
}

// UnmarshalJSON accepts JSON null and the empty string ("") as the zero
// time; any other value is delegated to the stdlib parser.
func (t *Time) UnmarshalJSON(b []byte) error {
	if bytes.Equal(b, []byte("null")) {
		return nil
	}
	// If it is empty, we don't have to set anything since time.Time is not a
	// pointer and will be set to the zero value
	if bytes.Equal([]byte(emptyString), b) {
		return nil
	}

	return t.Time.UnmarshalJSON(b)
}
+
// Parse mirrors time.Parse, wrapping the result in this package's Time.
func Parse(layout, value string) (Time, error) {
	t, err := time.Parse(layout, value)
	return Time{Time: t}, err
}

// ParseInLocation mirrors time.ParseInLocation, wrapping the result.
func ParseInLocation(layout, value string, loc *time.Location) (Time, error) {
	t, err := time.ParseInLocation(layout, value, loc)
	return Time{Time: t}, err
}

// Date mirrors time.Date, wrapping the result.
func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time {
	return Time{Time: time.Date(year, month, day, hour, min, sec, nsec, loc)}
}

// Unix mirrors time.Unix, wrapping the result.
func Unix(sec int64, nsec int64) Time { return Time{Time: time.Unix(sec, nsec)} }

// The methods below delegate to the embedded time.Time, re-wrapping any
// time.Time result so arithmetic stays within this package's Time type.

func (t Time) Add(d time.Duration) Time { return Time{Time: t.Time.Add(d)} }
func (t Time) AddDate(years int, months int, days int) Time {
	return Time{Time: t.Time.AddDate(years, months, days)}
}
func (t Time) After(u Time) bool { return t.Time.After(u.Time) }
func (t Time) Before(u Time) bool { return t.Time.Before(u.Time) }
func (t Time) Equal(u Time) bool { return t.Time.Equal(u.Time) }
func (t Time) In(loc *time.Location) Time { return Time{Time: t.Time.In(loc)} }
func (t Time) Local() Time { return Time{Time: t.Time.Local()} }
func (t Time) Round(d time.Duration) Time { return Time{Time: t.Time.Round(d)} }
func (t Time) Sub(u Time) time.Duration { return t.Time.Sub(u.Time) }
func (t Time) Truncate(d time.Duration) Time { return Time{Time: t.Time.Truncate(d)} }
func (t Time) UTC() Time { return Time{Time: t.Time.UTC()} }