summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/main.yaml54
-rw-r--r--Makefile17
-rw-r--r--build/conformance/e2e-runner/run.go5
-rw-r--r--build/conformance/kubernetes/edge_skip_case.yaml1
-rw-r--r--cloud/cmd/admission/app/options/options_test.go128
-rw-r--r--cloud/cmd/admission/app/server_test.go67
-rw-r--r--cloud/cmd/cloudcore/app/server.go11
-rw-r--r--cloud/pkg/cloudhub/authorization/authorizer.go134
-rw-r--r--cloud/pkg/cloudhub/authorization/authorizer_test.go185
-rw-r--r--cloud/pkg/cloudhub/authorization/config.go125
-rw-r--r--cloud/pkg/cloudhub/authorization/config_test.go54
-rw-r--r--cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer.go45
-rw-r--r--cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer_test.go74
-rw-r--r--cloud/pkg/cloudhub/authorization/resource_attributes.go202
-rw-r--r--cloud/pkg/cloudhub/authorization/resource_attributes_test.go198
-rw-r--r--cloud/pkg/cloudhub/cloudhub.go52
-rw-r--r--cloud/pkg/cloudhub/common/helper_test.go279
-rw-r--r--cloud/pkg/cloudhub/handler/message_handler.go38
-rw-r--r--cloud/pkg/cloudhub/servers/httpserver/server.go16
-rw-r--r--cloud/pkg/cloudhub/servers/httpserver/signcerts.go76
-rw-r--r--cloud/pkg/common/client/client.go8
-rw-r--r--cloud/pkg/common/client/impersonation.go122
-rw-r--r--cloud/pkg/common/context/context.go43
-rw-r--r--cloud/pkg/devicecontroller/controller/upstream.go3
-rw-r--r--cloud/pkg/dynamiccontroller/application/application.go74
-rw-r--r--cloud/pkg/dynamiccontroller/application/application_test.go70
-rw-r--r--cloud/pkg/dynamiccontroller/config/config.go8
-rw-r--r--cloud/pkg/dynamiccontroller/dynamiccontroller.go4
-rw-r--r--cloud/pkg/edgecontroller/controller/upstream.go133
-rw-r--r--cloud/pkg/edgecontroller/manager/configmap_test.go2
-rw-r--r--cloud/pkg/edgecontroller/manager/node_test.go2
-rw-r--r--cloud/pkg/edgecontroller/manager/pod_test.go2
-rw-r--r--cloud/pkg/edgecontroller/manager/rule_test.go2
-rw-r--r--cloud/pkg/edgecontroller/manager/ruleendpoint_test.go2
-rw-r--r--cloud/pkg/edgecontroller/manager/secret_test.go2
-rw-r--r--cloud/pkg/router/listener/http.go244
-rw-r--r--cloud/pkg/router/provider/eventbus/eventbus.go9
-rw-r--r--cloud/pkg/router/provider/servicebus/servicebus.go9
-rw-r--r--common/constants/default.go3
-rw-r--r--docs/images/proposals/authorizer-chain.pngbin0 -> 166923 bytes
-rw-r--r--docs/images/proposals/cloudhub-enhancement-design.pngbin0 -> 167430 bytes
-rw-r--r--docs/images/proposals/edge-pod-get.pngbin0 -> 14145 bytes
-rw-r--r--docs/images/proposals/keadm-get-pod.pngbin0 -> 17690 bytes
-rw-r--r--docs/images/proposals/keadm-restart-pod.pngbin0 -> 12614 bytes
-rw-r--r--docs/proposals/cloudhub-enhancement.md71
-rw-r--r--docs/proposals/device-crd-v1beta1.md4
-rw-r--r--docs/proposals/edgepodgetandrestart.md136
-rw-r--r--edge/cmd/edgecore/app/server.go3
-rw-r--r--edge/pkg/edgehub/certificate/certmanager.go31
-rw-r--r--edge/pkg/edgehub/certificate/certmanager_test.go81
-rw-r--r--edge/pkg/metamanager/client/pod.go5
-rw-r--r--edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/store.go49
-rw-r--r--edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/util/patch.go178
-rw-r--r--edge/pkg/metamanager/process.go7
-rw-r--r--go.mod2
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/check_test.go230
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/collect.go4
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/collect_test.go145
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/debug_test.go46
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/diagnose_test.go137
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/get_flags.go6
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/get_flags_test.go187
-rw-r--r--keadm/cmd/keadm/app/cmd/debug/get_test.go246
-rw-r--r--pkg/apis/componentconfig/cloudcore/v1alpha1/default.go11
-rw-r--r--pkg/apis/componentconfig/cloudcore/v1alpha1/types.go28
-rw-r--r--pkg/metaserver/application.go7
-rw-r--r--pkg/security/token/token.go79
-rw-r--r--pkg/security/token/token_test.go110
-rw-r--r--staging/src/github.com/kubeedge/beehive/pkg/core/model/message.go2
-rw-r--r--staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/influxdb2/handler.go3
-rw-r--r--staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/redis/handler.go2
-rw-r--r--staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/tdengine/handler.go2
-rw-r--r--staging/src/github.com/kubeedge/mapper-framework/_template/mapper/device/device.go4
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/client/quic.go5
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/client/ws.go10
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/conn/quic.go6
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/conn/ws.go5
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/mux/mux.go24
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/server/quic.go5
-rw-r--r--staging/src/github.com/kubeedge/viaduct/pkg/server/ws.go5
-rwxr-xr-xtests/scripts/conformance_e2e.sh89
-rw-r--r--vendor/golang.org/x/tools/container/intsets/sparse.go1114
-rw-r--r--vendor/k8s.io/kubernetes/pkg/api/v1/persistentvolume/util.go157
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/OWNERS8
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/doc.go19
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go27
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/register.go55
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/types.go74
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go62
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go22
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go63
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v0/types.go56
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.conversion.go43
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go51
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/conversion.go40
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go24
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go63
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go73
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.conversion.go109
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go68
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.defaults.go33
-rw-r--r--vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go68
-rw-r--r--vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go279
-rw-r--r--vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl11
-rw-r--r--vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/default.go66
-rw-r--r--vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go30
-rw-r--r--vendor/k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking/controller.go197
-rw-r--r--vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/OWNERS8
-rw-r--r--vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/config.go158
-rw-r--r--vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go42
-rw-r--r--vendor/k8s.io/kubernetes/pkg/registry/authorization/util/helpers.go87
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/OWNERS8
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go494
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go186
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset.go62
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/metrics.go49
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go344
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go505
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go162
-rw-r--r--vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go667
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/LICENSE23
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/README.md1
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/graph.go153
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/linear.go74
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/directed_acyclic.go83
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/edgeholder.go122
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/simple.go45
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/undirected.go242
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go186
-rw-r--r--vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go86
-rw-r--r--vendor/modules.txt18
131 files changed, 10734 insertions, 351 deletions
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 7931451bd..1b5feaec0 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -391,4 +391,56 @@ jobs:
name: ${{ matrix.container-runtime }}-e2e-test-logs
path: |
/tmp/cloudcore.log
- /tmp/edgecore.log \ No newline at end of file
+ /tmp/edgecore.log
+
+ conformance_e2e_test:
+ runs-on: ubuntu-22.04
+ timeout-minutes: 40
+ name: conformance e2e test
+ needs: image-prepare
+ env:
+ GO111MODULE: on
+ steps:
+ - name: Install Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.21.x
+
+ - uses: actions/cache@v3
+ with:
+ path: ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+
+ - name: Checkout code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Cleanup images
+ run: docker system prune -a -f
+
+ - name: Retrieve saved kubeedge/build-tools image
+ uses: actions/download-artifact@v3
+ with:
+ name: build-tools-docker-artifact
+ path: /home/runner/build-tools
+
+ - name: Docker load kubeedge/build-tools image
+ run: |
+ docker load < /home/runner/build-tools/build-tools.tar
+
+ - name: Enable cri config in containerd service
+ run: |
+ containerd config default | sudo tee /etc/containerd/config.toml && sudo systemctl restart containerd.service
+
+ - name: Run conformance e2e
+ run: |
+ export KIND_IMAGE=kindest/node:v1.28.0
+ make conformance_e2e
+
+ - name: Upload conformance e2e test results
+ uses: actions/upload-artifact@v3
+ if: always()
+ with:
+ name: kube-conformance-e2e-results
+ path: /tmp/results/ \ No newline at end of file
diff --git a/Makefile b/Makefile
index 548a93fe6..3cc9605b5 100644
--- a/Makefile
+++ b/Makefile
@@ -466,3 +466,20 @@ else
keadm_compatibility_e2e:
tests/scripts/keadm_compatibility_e2e.sh ${CLOUD_EDGE_VERSION}
endif
+
+define CONFORMANCE_E2E_HELP_INFO
+# conformance_e2e test.
+#
+# Example:
+# make conformance_e2e
+# make conformance_e2e HELP=y
+#
+endef
+.PHONY: conformance_e2e
+ifeq ($(HELP),y)
+conformance_e2e:
+ @echo "$$CONFORMANCE_E2E_HELP_INFO"
+else
+conformance_e2e:
+ tests/scripts/conformance_e2e.sh ${KIND_IMAGE}
+endif \ No newline at end of file
diff --git a/build/conformance/e2e-runner/run.go b/build/conformance/e2e-runner/run.go
index 75018c960..613ced340 100644
--- a/build/conformance/e2e-runner/run.go
+++ b/build/conformance/e2e-runner/run.go
@@ -33,6 +33,7 @@ import (
)
const (
+ parallelEnvKey = "E2E_PARALLEL"
dryRunEnvKey = "E2E_DRYRUN"
skipEnvKey = "E2E_SKIP"
focusEnvKey = "E2E_FOCUS"
@@ -139,6 +140,10 @@ func makeCmd(w io.Writer) (*exec.Cmd, error) {
ginkgoArgs = append(ginkgoArgs, "--dryRun=true")
}
+ if parallelEnvValue := util.GetEnvWithDefault(parallelEnvKey, ""); len(parallelEnvValue) > 0 {
+ ginkgoArgs = append(ginkgoArgs, parallelEnvValue)
+ }
+
extraArgs := []string{
"--report-dir=" + util.GetEnvWithDefault(resultsDirEnvKey, defaultResultsDir),
"--report-prefix=" + util.GetEnvWithDefault(reportPrefixEnvKey, defaultReportPrefix),
diff --git a/build/conformance/kubernetes/edge_skip_case.yaml b/build/conformance/kubernetes/edge_skip_case.yaml
index b3cacec5b..7464bd926 100644
--- a/build/conformance/kubernetes/edge_skip_case.yaml
+++ b/build/conformance/kubernetes/edge_skip_case.yaml
@@ -20,3 +20,4 @@
- codename: Should recreate evicted statefulset
- codename: should execute poststart exec hook properly
- codename: should execute prestop exec hook properly
+- codename: should not cause race condition when used for configmaps \ No newline at end of file
diff --git a/cloud/cmd/admission/app/options/options_test.go b/cloud/cmd/admission/app/options/options_test.go
new file mode 100644
index 000000000..084231cf7
--- /dev/null
+++ b/cloud/cmd/admission/app/options/options_test.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewAdmissionOptions(t *testing.T) {
+ assert := assert.New(t)
+
+ opt := NewAdmissionOptions()
+ assert.NotNil(opt, "Expected NewAdmissionOptions to return a non-nil value")
+ assert.Equal(int32(0), opt.Port, "Expected Port to be 0 by default")
+ assert.False(opt.PrintVersion, "Expected PrintVersion to be false by default")
+ assert.Equal("", opt.Master)
+ assert.Equal("", opt.Kubeconfig)
+ assert.Equal("", opt.CertFile)
+ assert.Equal("", opt.KeyFile)
+ assert.Equal("", opt.CaCertFile)
+ assert.Equal("", opt.AdmissionServiceName)
+ assert.Equal("", opt.AdmissionServiceNamespace)
+ assert.Equal("", opt.SchedulerName)
+}
+
+func TestAdmissionOptions_Flags(t *testing.T) {
+ assert := assert.New(t)
+
+ opt := NewAdmissionOptions()
+ fss := opt.Flags()
+ fs := fss.FlagSet("admission")
+
+ assert.NotNil(fs, "Expected Flags to return a non-nil FlagSet")
+
+ flags := []struct {
+ name string
+ defaultVal string
+ usage string
+ }{
+ {
+ name: "master",
+ defaultVal: "",
+ usage: "The address of the Kubernetes API server (overrides any value in kubeconfig)",
+ },
+ {
+ name: "kubeconfig",
+ defaultVal: "",
+ usage: "Path to kubeconfig file with authorization and master location information.",
+ },
+ {
+ name: "tls-cert-file",
+ defaultVal: "",
+ usage: "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert).",
+ },
+ {
+ name: "tls-private-key-file",
+ defaultVal: "",
+ usage: "File containing the default x509 private key matching --tls-cert-file.",
+ },
+ {
+ name: "ca-cert-file",
+ defaultVal: "",
+ usage: "File containing the x509 Certificate for HTTPS.",
+ },
+ {
+ name: "port",
+ defaultVal: "443",
+ usage: "the port used by admission-controller-server.",
+ },
+ {
+ name: "webhook-namespace",
+ defaultVal: "kubeedge",
+ usage: "The namespace of this webhook",
+ },
+ {
+ name: "webhook-service-name",
+ defaultVal: "kubeedge-admission-service",
+ usage: "The name of this admission service",
+ },
+ }
+
+ for _, f := range flags {
+ flag := fs.Lookup(f.name)
+ assert.NotNil(flag, "Expected '%s' flag to be present in the FlagSet", f.name)
+ assert.Equal(f.name, flag.Name)
+ assert.Equal(f.defaultVal, flag.DefValue)
+ assert.Equal(f.usage, flag.Usage)
+ }
+
+ err := fs.Parse([]string{
+ "--master=http://localhost:8080",
+ "--kubeconfig=/path/to/kubeconfig",
+ "--tls-cert-file=/path/to/cert",
+ "--tls-private-key-file=/path/to/key",
+ "--ca-cert-file=/path/to/ca",
+ "--port=8443",
+ "--webhook-namespace=test-namespace",
+ "--webhook-service-name=test-service",
+ })
+ assert.NoError(err)
+
+ assert.Equal("http://localhost:8080", opt.Master)
+ assert.Equal("/path/to/kubeconfig", opt.Kubeconfig)
+ assert.Equal("/path/to/cert", opt.CertFile)
+ assert.Equal("/path/to/key", opt.KeyFile)
+ assert.Equal("/path/to/ca", opt.CaCertFile)
+ assert.Equal(int32(8443), opt.Port)
+ assert.Equal("test-namespace", opt.AdmissionServiceNamespace)
+ assert.Equal("test-service", opt.AdmissionServiceName)
+ assert.Equal(false, opt.PrintVersion, "Expected PrintVersion to be false by default")
+ assert.Equal("", opt.SchedulerName, "Expected SchedulerName to be an empty string by default")
+}
diff --git a/cloud/cmd/admission/app/server_test.go b/cloud/cmd/admission/app/server_test.go
new file mode 100644
index 000000000..6277b16f5
--- /dev/null
+++ b/cloud/cmd/admission/app/server_test.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/component-base/cli/globalflag"
+
+ "github.com/kubeedge/kubeedge/cloud/cmd/admission/app/options"
+ "github.com/kubeedge/kubeedge/pkg/version/verflag"
+)
+
+func TestNewAdmissionCommand(t *testing.T) {
+ assert := assert.New(t)
+
+ cmd := NewAdmissionCommand()
+ assert.NotNil(cmd)
+ assert.Equal("admission", cmd.Use)
+ assert.Equal(cmd.Long, `Admission leverage the feature of Dynamic Admission Control from kubernetes, start it
+if want to admission control some kubeedge resources.`)
+
+ fs := cmd.Flags()
+ assert.NotNil(fs, "Command should have flags")
+ namedFs := options.NewAdmissionOptions().Flags()
+ verflag.AddFlags(namedFs.FlagSet("global"))
+ globalflag.AddGlobalFlags(namedFs.FlagSet("global"), cmd.Name())
+
+ for _, f := range namedFs.FlagSets {
+ fs.AddFlagSet(f)
+ }
+
+ for _, f := range namedFs.FlagSets {
+ f.VisitAll(func(flag *pflag.Flag) {
+ assert.NotNil(fs.Lookup(flag.Name), "Flag %s should be added to the command", flag.Name)
+ })
+ }
+
+ usage := &bytes.Buffer{}
+ cmd.SetOut(usage)
+ err := cmd.Usage()
+ assert.NoError(err)
+ assert.Contains(usage.String(), "Usage:\n admission")
+
+ help := &bytes.Buffer{}
+ cmd.SetOut(help)
+ err = cmd.Help()
+ assert.NoError(err)
+ assert.Contains(help.String(), "Admission leverage the feature of Dynamic Admission Control from kubernetes")
+}
diff --git a/cloud/cmd/cloudcore/app/server.go b/cloud/cmd/cloudcore/app/server.go
index 4a0efbeb8..ca2765b7e 100644
--- a/cloud/cmd/cloudcore/app/server.go
+++ b/cloud/cmd/cloudcore/app/server.go
@@ -97,7 +97,10 @@ kubernetes controller which manages devices so that the device metadata/status d
// To help debugging, immediately log version
klog.Infof("Version: %+v", version.Get())
- client.InitKubeEdgeClient(config.KubeAPIConfig)
+ enableImpersonation := config.Modules.CloudHub.Authorization != nil &&
+ config.Modules.CloudHub.Authorization.Enable &&
+ !config.Modules.CloudHub.Authorization.Debug
+ client.InitKubeEdgeClient(config.KubeAPIConfig, enableImpersonation)
// Negotiate TunnelPort for multi cloudcore instances
waitTime := rand.Int31n(10)
@@ -161,6 +164,10 @@ kubernetes controller which manages devices so that the device metadata/status d
// registerModules register all the modules started in cloudcore
func registerModules(c *v1alpha1.CloudCoreConfig) {
+ enableAuthorization := c.Modules.CloudHub.Authorization != nil &&
+ c.Modules.CloudHub.Authorization.Enable &&
+ !c.Modules.CloudHub.Authorization.Debug
+
cloudhub.Register(c.Modules.CloudHub)
edgecontroller.Register(c.Modules.EdgeController)
devicecontroller.Register(c.Modules.DeviceController)
@@ -168,7 +175,7 @@ func registerModules(c *v1alpha1.CloudCoreConfig) {
synccontroller.Register(c.Modules.SyncController)
cloudstream.Register(c.Modules.CloudStream, c.CommonConfig)
router.Register(c.Modules.Router)
- dynamiccontroller.Register(c.Modules.DynamicController)
+ dynamiccontroller.Register(c.Modules.DynamicController, enableAuthorization)
policycontroller.Register(client.CrdConfig)
}
diff --git a/cloud/pkg/cloudhub/authorization/authorizer.go b/cloud/pkg/cloudhub/authorization/authorizer.go
new file mode 100644
index 000000000..9376e6f3e
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/authorizer.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "context"
+ "crypto/tls"
+ stdx509 "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "net/http"
+
+ "k8s.io/apiserver/pkg/authentication/request/x509"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/endpoints/request"
+ certutil "k8s.io/client-go/util/cert"
+ "k8s.io/klog/v2"
+ "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+
+ beehivemodel "github.com/kubeedge/beehive/pkg/core/model"
+ cloudhubmodel "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
+ hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
+ "github.com/kubeedge/viaduct/pkg/conn"
+)
+
+type cloudhubAuthorizer struct {
+ enabled bool
+ debug bool
+ authz authorizer.Authorizer
+}
+
+func (r *cloudhubAuthorizer) AdmitMessage(message beehivemodel.Message, hubInfo cloudhubmodel.HubInfo) error {
+ if !r.enabled {
+ return nil
+ }
+
+ err := r.admitMessage(message, hubInfo)
+ if err == nil {
+ return nil
+ }
+
+ klog.Error(err.Error())
+ if r.debug {
+ return nil
+ }
+ return err
+}
+
+func (r *cloudhubAuthorizer) AuthenticateConnection(connection conn.Connection) error {
+ if !r.enabled {
+ return nil
+ }
+
+ err := r.authenticateConnection(connection)
+ if err == nil {
+ return nil
+ }
+
+ klog.Error(err.Error())
+ if r.debug {
+ return nil
+ }
+ return err
+}
+
+// admitMessage determines whether the message should be admitted.
+func (r *cloudhubAuthorizer) admitMessage(message beehivemodel.Message, hubInfo cloudhubmodel.HubInfo) error {
+ klog.V(4).Infof("message: %s: authorization start", message.Header.ID)
+
+ attrs, err := getAuthorizerAttributes(message.Router, hubInfo)
+ if err != nil {
+ return fmt.Errorf("node %q transfer message failed: %v", hubInfo.NodeID, err)
+ }
+
+ ctx := request.WithUser(context.TODO(), attrs.GetUser())
+ authorized, reason, err := r.authz.Authorize(ctx, attrs)
+ if err != nil {
+ return fmt.Errorf("node %q authz failed: %v", hubInfo.NodeID, err)
+ }
+
+ if authorized != authorizer.DecisionAllow {
+ return fmt.Errorf("node %q deny: %s", hubInfo.NodeID, reason)
+ }
+
+ klog.V(4).Infof("message: %s: authorization succeeded", message.Header.ID)
+ return nil
+}
+
+// authenticateConnection authenticates the new connection by certificates
+func (r *cloudhubAuthorizer) authenticateConnection(connection conn.Connection) error {
+ peerCerts := connection.ConnectionState().PeerCertificates
+ nodeID := connection.ConnectionState().Headers.Get("node_id")
+
+ klog.V(4).Infof("node %q: authentication start", nodeID)
+ switch len(peerCerts) {
+ case 0:
+ return fmt.Errorf("node %q: no client certificate provided", nodeID)
+ case 1:
+ default:
+		return fmt.Errorf("node %q: intermediate certificates are not supported", nodeID)
+ }
+
+ options := x509.DefaultVerifyOptions()
+	// ca could be available until CloudHub starts
+ options.Roots = stdx509.NewCertPool()
+ options.Roots.AppendCertsFromPEM(pem.EncodeToMemory(&pem.Block{Type: certutil.CertificateBlockType, Bytes: hubconfig.Config.Ca}))
+
+ authenticator := x509.New(options, x509.CommonNameUserConversion)
+ resp, ok, err := authenticator.AuthenticateRequest(&http.Request{TLS: &tls.ConnectionState{PeerCertificates: peerCerts}})
+ if err != nil || !ok {
+ return fmt.Errorf("node %q: unable to verify peer connection by client certificates: %v", nodeID, err)
+ }
+
+ if resp.User.GetName() != constants.NodesUserPrefix+nodeID {
+ return fmt.Errorf("node %q: common name of peer certificate didn't match node ID", nodeID)
+ }
+
+ klog.V(4).Infof("node %q: authentication succeeded", nodeID)
+ return nil
+}
diff --git a/cloud/pkg/cloudhub/authorization/authorizer_test.go b/cloud/pkg/cloudhub/authorization/authorizer_test.go
new file mode 100644
index 000000000..f2de9276a
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/authorizer_test.go
@@ -0,0 +1,185 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "math/big"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/gorilla/websocket"
+ "k8s.io/apiserver/pkg/authorization/authorizerfactory"
+ "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+
+ beehivemodel "github.com/kubeedge/beehive/pkg/core/model"
+ cloudhubmodel "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
+ hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
+ "github.com/kubeedge/viaduct/pkg/conn"
+)
+
+func TestAdmitMessage(t *testing.T) {
+ tests := []struct {
+ name string
+ authz cloudhubAuthorizer
+ message beehivemodel.Message
+ hubInfo cloudhubmodel.HubInfo
+ allow bool
+ }{
+ {
+ name: "authz is disabled",
+ authz: cloudhubAuthorizer{enabled: false},
+ allow: true,
+ },
+ {
+ name: "debug mode",
+ authz: cloudhubAuthorizer{enabled: true, debug: true, authz: authorizerfactory.NewAlwaysDenyAuthorizer()},
+ allow: true,
+ },
+ {
+ name: "authz reject",
+ authz: cloudhubAuthorizer{enabled: true, authz: authorizerfactory.NewAlwaysDenyAuthorizer()},
+ message: beehivemodel.Message{Router: beehivemodel.MessageRoute{Operation: beehivemodel.QueryOperation, Resource: "ns/configmap/test"}},
+ allow: false,
+ },
+ {
+ name: "authz accept",
+ authz: cloudhubAuthorizer{enabled: true, authz: authorizerfactory.NewAlwaysAllowAuthorizer()},
+ message: beehivemodel.Message{Router: beehivemodel.MessageRoute{Operation: beehivemodel.QueryOperation, Resource: "ns/configmap/test"}},
+ allow: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.authz.AdmitMessage(tt.message, tt.hubInfo)
+ if (err == nil) != tt.allow {
+ t.Errorf("AdmitMessage(): expect allow=%+v, got err=%+v", tt.allow, err)
+ }
+ })
+ }
+}
+
+func TestAuthenticateConnection(t *testing.T) {
+ const testNodeName = "test"
+ cert, err := makeTestCert(constants.NodesUserPrefix + testNodeName)
+ if err != nil {
+ t.Errorf("make test cert failed: %v", err)
+ }
+
+ headers := http.Header{}
+ headers.Add("node_id", testNodeName)
+ tests := []struct {
+ name string
+ authz cloudhubAuthorizer
+ connState conn.ConnectionState
+ allow bool
+ }{
+ {
+ name: "authz is disabled",
+ authz: cloudhubAuthorizer{enabled: false},
+ allow: true,
+ },
+ {
+ name: "debug mode",
+ authz: cloudhubAuthorizer{enabled: true, debug: true},
+ allow: true,
+ },
+ {
+ name: "authz reject",
+ authz: cloudhubAuthorizer{enabled: true},
+ connState: conn.ConnectionState{
+ Headers: headers,
+ PeerCertificates: []*x509.Certificate{},
+ },
+ allow: false,
+ },
+ {
+ name: "authz accept",
+ authz: cloudhubAuthorizer{enabled: true},
+ connState: conn.ConnectionState{
+ Headers: headers,
+ PeerCertificates: []*x509.Certificate{cert},
+ },
+ allow: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := conn.NewWSConn(&conn.ConnectionOptions{
+ Base: &websocket.Conn{},
+ State: &tt.connState,
+ })
+ err := tt.authz.AuthenticateConnection(c)
+ if (err == nil) != tt.allow {
+ t.Errorf("AuthenticateConnection(): expect allow=%+v, got err=%+v", tt.allow, err)
+ }
+ })
+ }
+}
+
+func makeTestCert(cn string) (*x509.Certificate, error) {
+ rootCaPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, err
+ }
+
+ template := &x509.Certificate{
+ Subject: pkix.Name{CommonName: "root-ca"},
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour),
+ KeyUsage: x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+ rootCaCertDer, err := x509.CreateCertificate(rand.Reader, template, template, &rootCaPrivateKey.PublicKey, rootCaPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ rootCaCert, err := x509.ParseCertificate(rootCaCertDer)
+ if err != nil {
+ return nil, err
+ }
+
+ serviceCertPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, err
+ }
+ template = &x509.Certificate{
+ Subject: pkix.Name{CommonName: cn},
+ SerialNumber: big.NewInt(2),
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour),
+ }
+ serviceCertDer, err := x509.CreateCertificate(rand.Reader, template, rootCaCert, &serviceCertPrivateKey.PublicKey, rootCaPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ serviceCert, err := x509.ParseCertificate(serviceCertDer)
+ if err != nil {
+ return nil, err
+ }
+
+ hubconfig.Config.Ca = rootCaCertDer
+ return serviceCert, nil
+}
diff --git a/cloud/pkg/cloudhub/authorization/config.go b/cloud/pkg/cloudhub/authorization/config.go
new file mode 100644
index 000000000..b138571df
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/config.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "fmt"
+
+ "k8s.io/apiserver/pkg/apis/apiserver"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/authorization/authorizerfactory"
+ "k8s.io/apiserver/pkg/authorization/union"
+ "k8s.io/client-go/informers"
+ "k8s.io/kubernetes/pkg/auth/nodeidentifier"
+ apiserverauthorizer "k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
+ "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
+ "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
+ "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
+
+ beehivemodel "github.com/kubeedge/beehive/pkg/core/model"
+ cloudhubmodel "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
+ "github.com/kubeedge/viaduct/pkg/conn"
+)
+
+// Authorizer provides authorization enhancements for CloudHub
+type Authorizer interface {
+ // AdmitMessage determines whether the message should be admitted
+ AdmitMessage(message beehivemodel.Message, info cloudhubmodel.HubInfo) error
+ // AuthenticateConnection authenticates the new connection
+ AuthenticateConnection(connection conn.Connection) error
+}
+
+// Config Authorizer's configurations
+type Config struct {
+ Enabled bool
+ // Debug Authorizer logs errors but always allows messages
+ Debug bool
+ AuthorizationModes []string
+ VersionedInformerFactory informers.SharedInformerFactory
+}
+
+// New creates new Authorizer.
+// It translates the configured mode names into apiserver authorizer
+// configurations, assembles the underlying authorizer chain, and wraps it in a
+// cloudhubAuthorizer that honors the Enabled/Debug switches.
+func (c Config) New() (Authorizer, error) {
+	authorizers := make([]apiserver.AuthorizerConfiguration, 0, len(c.AuthorizationModes))
+	for _, mode := range c.AuthorizationModes {
+		authorizers = append(authorizers, apiserver.AuthorizerConfiguration{
+			Type: apiserver.AuthorizerType(mode),
+		})
+	}
+	config := apiserverauthorizer.Config{
+		VersionedInformerFactory: c.VersionedInformerFactory,
+		AuthorizationConfiguration: &apiserver.AuthorizationConfiguration{
+			Authorizers: authorizers,
+		},
+	}
+	// The rule resolver is not used by CloudHub, so it is discarded here.
+	authz, _, err := assembleAuthorizer(config)
+	if err != nil {
+		return nil, err
+	}
+	return &cloudhubAuthorizer{
+		enabled: c.Enabled,
+		debug:   c.Debug,
+		authz:   authz,
+	}, nil
+}
+
+// assembleAuthorizer builds the authorizer chain and rule-resolver chain from
+// the configured modes, mirroring the mode handling in
+// k8s.io/kubernetes/pkg/kubeapiserver/authorizer. The kubeedge resource
+// authorizer is always appended at the tail so kubeedge custom messages are
+// admitted even when no standard mode has an opinion.
+func assembleAuthorizer(config apiserverauthorizer.Config) (authorizer.Authorizer, authorizer.RuleResolver, error) {
+	if len(config.AuthorizationConfiguration.Authorizers) == 0 {
+		return nil, nil, fmt.Errorf("at least one authorization mode must be passed")
+	}
+
+	var (
+		authorizers   []authorizer.Authorizer
+		ruleResolvers []authorizer.RuleResolver
+	)
+
+	for _, authzConfig := range config.AuthorizationConfiguration.Authorizers {
+		// Keep cases in sync with constant list in k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go.
+		switch authzConfig.Type {
+		case apiserver.AuthorizerType(modes.ModeNode):
+			node.RegisterMetrics()
+			graph := node.NewGraph()
+			node.AddGraphEventHandlers(
+				graph,
+				config.VersionedInformerFactory.Core().V1().Nodes(),
+				config.VersionedInformerFactory.Core().V1().Pods(),
+				config.VersionedInformerFactory.Core().V1().PersistentVolumes(),
+				config.VersionedInformerFactory.Storage().V1().VolumeAttachments(),
+			)
+			nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())
+			authorizers = append(authorizers, nodeAuthorizer)
+			ruleResolvers = append(ruleResolvers, nodeAuthorizer)
+
+		case apiserver.AuthorizerType(modes.ModeAlwaysAllow):
+			alwaysAllowAuthorizer := authorizerfactory.NewAlwaysAllowAuthorizer()
+			authorizers = append(authorizers, alwaysAllowAuthorizer)
+			ruleResolvers = append(ruleResolvers, alwaysAllowAuthorizer)
+		case apiserver.AuthorizerType(modes.ModeAlwaysDeny):
+			alwaysDenyAuthorizer := authorizerfactory.NewAlwaysDenyAuthorizer()
+			authorizers = append(authorizers, alwaysDenyAuthorizer)
+			ruleResolvers = append(ruleResolvers, alwaysDenyAuthorizer)
+		default:
+			return nil, nil, fmt.Errorf("unknown authorization mode %s specified", authzConfig.Type)
+		}
+	}
+
+	// put kubeedgeResourceAuthorizer at the tail of authorizer chain to allow kubeedge custom messages.
+	// The type is stateless, so a single instance serves as both authorizer and rule resolver.
+	keAuthorizer := &kubeedgeResourceAuthorizer{}
+	authorizers = append(authorizers, keAuthorizer)
+	ruleResolvers = append(ruleResolvers, keAuthorizer)
+
+	return union.New(authorizers...), union.NewRuleResolvers(ruleResolvers...), nil
+}
diff --git a/cloud/pkg/cloudhub/authorization/config_test.go b/cloud/pkg/cloudhub/authorization/config_test.go
new file mode 100644
index 000000000..cec6f9485
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/config_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "testing"
+
+ "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+
+ "github.com/kubeedge/kubeedge/cloud/pkg/common/informers"
+)
+
+// TestNewAuthorizer exercises Config.New with the supported authorization
+// modes wired to a fake informer factory.
+func TestNewAuthorizer(t *testing.T) {
+	tests := []struct {
+		name    string
+		config  Config
+		wantErr bool
+	}{
+		{
+			name: "all supported modes",
+			config: Config{
+				AuthorizationModes:       []string{constants.ModeNode, constants.ModeAlwaysAllow, constants.ModeAlwaysDeny},
+				VersionedInformerFactory: informers.NewFakeInformerManager().GetKubeInformerFactory(),
+			},
+			// NOTE(review): an error is expected here even though every mode is
+			// supported — presumably due to the fake informer factory; confirm intent.
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := tt.config.New()
+			if err != nil {
+				if !tt.wantErr {
+					t.Errorf("New(): unexpected error: %v", err)
+				}
+				return
+			}
+		})
+	}
+}
diff --git a/cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer.go b/cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer.go
new file mode 100644
index 000000000..d112797a9
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/klog/v2"
+)
+
+// kubeedgeResourceAuthorizer allows requests that operate on kubeedge-specific
+// (non-Kubernetes) resources. It is stateless and is appended at the tail of
+// the authorizer chain built by assembleAuthorizer.
+type kubeedgeResourceAuthorizer struct {
+}
+
+// Authorize allows any request marked as a kubeedge resource request (see
+// isKubeedgeResourceAttributes) and returns DecisionNoOpinion for everything
+// else so later/earlier authorizers in the union decide.
+func (kubeedgeResourceAuthorizer) Authorize(_ context.Context, attrs authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
+	// allows all the kubeedge custom requests
+	if isKubeedgeResourceAttributes(attrs) {
+		klog.V(4).Infof("allow kubeedge request. verb=%s resource=%s, subresource=%s", attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource())
+		return authorizer.DecisionAllow, "", nil
+	}
+
+	klog.V(4).Infof("deny request. verb=%s resource=%s, subresource=%s", attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource())
+	return authorizer.DecisionNoOpinion, fmt.Sprintf("unknown request: verb=%s resource=%s, subresource=%s", attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()), nil
+}
+
+// RulesFor is unsupported: it returns incomplete=true and an error, as this
+// authorizer cannot enumerate rules per user.
+func (kubeedgeResourceAuthorizer) RulesFor(user.Info, string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
+	return nil, nil, true, errors.New("kubeedge resource authorizer does not support user rule resolution")
+}
diff --git a/cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer_test.go b/cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer_test.go
new file mode 100644
index 000000000..886ccbdb2
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/kubeedge_resource_authorizer_test.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "context"
+ "testing"
+
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+func TestAuthorize(t *testing.T) {
+ var authz kubeedgeResourceAuthorizer
+
+ tests := []struct {
+ name string
+ attrs authorizer.Attributes
+ decision authorizer.Decision
+ wantErr bool
+ }{
+ {
+ name: "kubeedge message",
+ attrs: &authorizer.AttributesRecord{
+ User: &user.DefaultInfo{Extra: map[string][]string{kubeedgeResourceKey: nil}},
+ },
+ decision: authorizer.DecisionAllow,
+ },
+ {
+ name: "nonkubeedge message",
+ attrs: &authorizer.AttributesRecord{},
+ decision: authorizer.DecisionNoOpinion,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ decision, _, err := authz.Authorize(context.Background(), tt.attrs)
+ if err != nil {
+ if !tt.wantErr {
+ t.Errorf("Authorize(): unexpect error: %v", err)
+ }
+ return
+ }
+
+ if decision != tt.decision {
+ t.Errorf("Authorize() got = %v, want %v", decision, tt.decision)
+ }
+ })
+ }
+}
+
+func TestRulesFor(t *testing.T) {
+ var authz kubeedgeResourceAuthorizer
+
+ _, _, _, err := authz.RulesFor(&user.DefaultInfo{}, "")
+ if err == nil {
+ t.Error("RulesFor() should not support user rule resolution")
+ }
+}
diff --git a/cloud/pkg/cloudhub/authorization/resource_attributes.go b/cloud/pkg/cloudhub/authorization/resource_attributes.go
new file mode 100644
index 000000000..c7236728b
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/resource_attributes.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "fmt"
+ "strings"
+
+ certificatesv1 "k8s.io/api/certificates/v1"
+ coordinationv1 "k8s.io/api/coordination/v1"
+ v1 "k8s.io/api/core/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+ "k8s.io/kubernetes/pkg/apis/authorization"
+ "k8s.io/kubernetes/pkg/registry/authorization/util"
+
+ beehivemodel "github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common"
+ cloudhubmodel "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
+ taskutil "github.com/kubeedge/kubeedge/cloud/pkg/taskmanager/util"
+ commonconstants "github.com/kubeedge/kubeedge/common/constants"
+ "github.com/kubeedge/kubeedge/pkg/metaserver"
+)
+
+const (
+ kubeedgeResourceKey = "kubeedgeResource"
+)
+
+// getAuthorizerAttributes maps a beehive message to k8s api request.
+// Kubeedge custom messages become non-resource attributes tagged with
+// kubeedgeResourceKey in the user's extra data; everything else is translated
+// into standard Kubernetes resource attributes. The request user is the node
+// identity (system:node:<NodeID> in the system:nodes group).
+func getAuthorizerAttributes(router beehivemodel.MessageRoute, hubInfo cloudhubmodel.HubInfo) (authorizer.Attributes, error) {
+	var (
+		resAttrs    *authorization.ResourceAttributes
+		nonResAttrs *authorization.NonResourceAttributes
+		extra       = make(map[string]authorization.ExtraValue)
+		err         error
+	)
+	if isKubeedgeResourceMessage(router) {
+		nonResAttrs = getKubeedgeResourceAttributes(router)
+
+		// put this key into extra to tells authorizers that this message operates kubeedge resource
+		extra[kubeedgeResourceKey] = authorization.ExtraValue{}
+	} else {
+		resAttrs, err = getBuiltinResourceAttributes(router)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	spec := authorization.SubjectAccessReviewSpec{
+		ResourceAttributes:    resAttrs,
+		NonResourceAttributes: nonResAttrs,
+		User:                  constants.NodesUserPrefix + hubInfo.NodeID,
+		Groups:                []string{constants.NodesGroup},
+		Extra:                 extra,
+	}
+	attrs := util.AuthorizationAttributesFrom(spec)
+	return &attrs, nil
+}
+
+// isKubeedgeResourceMessage judges whether a message accesses the kubeedge's
+// own (non-Kubernetes) resources, checked by operation, source, and resource
+// path in turn.
+func isKubeedgeResourceMessage(router beehivemodel.MessageRoute) bool {
+	// Operations that are kubeedge-internal regardless of resource.
+	switch router.Operation {
+	case beehivemodel.ResponseOperation, beehivemodel.ResponseErrorOperation, beehivemodel.UploadOperation,
+		taskutil.TaskPrePull, taskutil.TaskUpgrade, cloudhubmodel.OpKeepalive:
+		return true
+	}
+	// Sources whose messages are always kubeedge-internal (metaserver, device twin).
+	switch router.Source {
+	case metaserver.MetaServerSource, cloudhubmodel.ResTwin:
+		return true
+	}
+	// K8s CA distribution and CSI volume messages are kubeedge-managed.
+	if router.Resource == beehivemodel.ResourceTypeK8sCA || common.IsVolumeResource(router.Resource) {
+		return true
+	}
+
+	_, resourceType, resourceName := splitResource(router.Resource)
+	switch resourceType {
+	case beehivemodel.ResourceTypeRuleStatus:
+		return true
+	}
+	// kubeedge allows node to update a list of pod status
+	if resourceType == beehivemodel.ResourceTypePodStatus && resourceName == "" {
+		return true
+	}
+
+	return false
+}
+
+// getKubeedgeResourceAttributes represents a kubeedge custom message as a
+// non-resource request: the message resource becomes the path and the
+// operation becomes the verb.
+func getKubeedgeResourceAttributes(router beehivemodel.MessageRoute) *authorization.NonResourceAttributes {
+	return &authorization.NonResourceAttributes{
+		Path: router.Resource,
+		Verb: router.Operation,
+	}
+}
+
+// getBuiltinResourceAttributes maps a beehive message onto the equivalent
+// Kubernetes resource request (verb plus group/version/resource/subresource)
+// so it can be evaluated by the standard apiserver authorizers. It returns an
+// error for resource types or operations it does not recognize.
+func getBuiltinResourceAttributes(router beehivemodel.MessageRoute) (*authorization.ResourceAttributes, error) {
+	namespace, resourceType, resourceName := splitResource(router.Resource)
+	switch router.Operation {
+	// nodestatus, podstatus is not allowed to insert
+	case beehivemodel.InsertOperation:
+		switch resourceType {
+		case beehivemodel.ResourceTypePodStatus:
+			resourceType = beehivemodel.ResourceTypePod
+		case beehivemodel.ResourceTypeNodeStatus:
+			resourceType = beehivemodel.ResourceTypeNode
+		}
+	}
+
+	kubeRes, ok := resourceTypeToKubeResources[resourceType]
+	if !ok {
+		return nil, fmt.Errorf("unknown resource type %q", resourceType)
+	}
+
+	var verb string
+	switch router.Operation {
+	case beehivemodel.InsertOperation:
+		verb = "create"
+	case beehivemodel.DeleteOperation:
+		verb = "delete"
+	case beehivemodel.UpdateOperation:
+		verb = "update"
+	case beehivemodel.PatchOperation:
+		verb = "patch"
+	case beehivemodel.QueryOperation:
+		verb = "get"
+		// the actual verb for serviceaccounts/token is `create`
+		if resourceType == beehivemodel.ResourceTypeServiceAccountToken {
+			verb = "create"
+		}
+	default:
+		return nil, fmt.Errorf("unknown operation %q", router.Operation)
+	}
+
+	// Cluster-scoped resources must not carry a namespace in the attributes.
+	if !kubeRes.namespaced {
+		namespace = ""
+	}
+	return &authorization.ResourceAttributes{
+		Namespace:   namespace,
+		Verb:        verb,
+		Group:       kubeRes.groupVersion.Group,
+		Version:     kubeRes.groupVersion.Version,
+		Resource:    kubeRes.resource,
+		Subresource: kubeRes.subresource,
+		Name:        resourceName,
+	}, nil
+}
+
+// splitResource parses a message resource of the form
+// "namespace/resourceType/resourceName". Missing trailing segments yield empty
+// strings; segments beyond the third are ignored.
+func splitResource(resource string) (namespace string, resourceType string, resourceName string) {
+	sli := strings.Split(resource, "/")
+	// Pad to at least three elements so the indexing below is always in range.
+	for i := len(sli); i < 3; i++ {
+		sli = append(sli, "")
+	}
+	namespace, resourceType, resourceName = sli[0], sli[1], sli[2]
+	return
+}
+
+// isKubeedgeResourceAttributes reports whether the attributes were produced
+// from a kubeedge custom message, detected via the marker key that
+// getAuthorizerAttributes places in the user's extra data.
+func isKubeedgeResourceAttributes(attrs authorizer.Attributes) bool {
+	if attrs == nil || attrs.GetUser() == nil {
+		return false
+	}
+	_, ok := attrs.GetUser().GetExtra()[kubeedgeResourceKey]
+	return ok
+}
+
+// kubeResource describes how a beehive resource type maps onto a Kubernetes
+// API resource: the resource/subresource names, its group/version, and whether
+// it is namespaced (the namespace is stripped for cluster-scoped resources).
+type kubeResource struct {
+	resource     string
+	subresource  string
+	groupVersion schema.GroupVersion
+	namespaced   bool
+}
+
+// resourceTypeToKubeResources maps beehive resource type names to their
+// Kubernetes API equivalents; consulted by getBuiltinResourceAttributes.
+var resourceTypeToKubeResources = map[string]kubeResource{
+	beehivemodel.ResourceTypeNodeStatus:               {resource: "nodes", subresource: "status", groupVersion: v1.SchemeGroupVersion, namespaced: false},
+	beehivemodel.ResourceTypePodStatus:                {resource: "pods", subresource: "status", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	beehivemodel.ResourceTypeConfigmap:                {resource: "configmaps", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	beehivemodel.ResourceTypeSecret:                   {resource: "secrets", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	beehivemodel.ResourceTypeServiceAccountToken:      {resource: "serviceaccounts", subresource: "token", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	commonconstants.ResourceTypePersistentVolume:      {resource: "persistentvolumes", groupVersion: v1.SchemeGroupVersion, namespaced: false},
+	commonconstants.ResourceTypePersistentVolumeClaim: {resource: "persistentvolumeclaims", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	commonconstants.ResourceTypeVolumeAttachment:      {resource: "volumeattachments", groupVersion: storagev1.SchemeGroupVersion, namespaced: false},
+	beehivemodel.ResourceTypeNode:                     {resource: "nodes", groupVersion: v1.SchemeGroupVersion, namespaced: false},
+	beehivemodel.ResourceTypePod:                      {resource: "pods", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	beehivemodel.ResourceTypeNodePatch:                {resource: "nodes", subresource: "status", groupVersion: v1.SchemeGroupVersion, namespaced: false},
+	beehivemodel.ResourceTypePodPatch:                 {resource: "pods", subresource: "status", groupVersion: v1.SchemeGroupVersion, namespaced: true},
+	beehivemodel.ResourceTypeLease:                    {resource: "leases", groupVersion: coordinationv1.SchemeGroupVersion, namespaced: true},
+	beehivemodel.ResourceTypeCSR:                      {resource: "certificatesigningrequests", groupVersion: certificatesv1.SchemeGroupVersion, namespaced: false},
+}
diff --git a/cloud/pkg/cloudhub/authorization/resource_attributes_test.go b/cloud/pkg/cloudhub/authorization/resource_attributes_test.go
new file mode 100644
index 000000000..45f21a3d2
--- /dev/null
+++ b/cloud/pkg/cloudhub/authorization/resource_attributes_test.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "testing"
+
+ "k8s.io/kubernetes/pkg/apis/authorization"
+
+ "github.com/kubeedge/beehive/pkg/core/model"
+ cloudhubmodel "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
+)
+
+func TestGetBuiltinResourceAttributes(t *testing.T) {
+ tests := []struct {
+ name string
+ router model.MessageRoute
+ want authorization.ResourceAttributes
+ wantErr bool
+ }{
+ {
+ name: "secret query message",
+ router: model.MessageRoute{Operation: model.QueryOperation, Resource: "ns/secret/sc"},
+ want: authorization.ResourceAttributes{Namespace: "ns", Name: "sc", Verb: "get", Group: "", Version: "v1", Resource: "secrets"},
+ },
+ {
+ name: "patch node message",
+ router: model.MessageRoute{Operation: model.PatchOperation, Resource: "default/nodepatch/node"},
+ want: authorization.ResourceAttributes{Namespace: "", Name: "node", Verb: "patch", Group: "", Version: "v1", Resource: "nodes", Subresource: "status"},
+ },
+ {
+ name: "insert nodestatus message",
+ router: model.MessageRoute{Operation: model.InsertOperation, Resource: "default/nodestatus/node"},
+ want: authorization.ResourceAttributes{Namespace: "", Name: "node", Verb: "create", Group: "", Version: "v1", Resource: "nodes"},
+ },
+ {
+ name: "insert podstatus message",
+ router: model.MessageRoute{Operation: model.InsertOperation, Resource: "default/podstatus/pod"},
+ want: authorization.ResourceAttributes{Namespace: "default", Name: "pod", Verb: "create", Group: "", Version: "v1", Resource: "pods"},
+ },
+ {
+ name: "secret patch message",
+ router: model.MessageRoute{Operation: model.DeleteOperation, Resource: "ns/secret/sc"},
+ want: authorization.ResourceAttributes{Namespace: "ns", Name: "sc", Verb: "delete", Group: "", Version: "v1", Resource: "secrets"},
+ },
+ {
+ name: "secret update message",
+ router: model.MessageRoute{Operation: model.UpdateOperation, Resource: "ns/secret/sc"},
+ want: authorization.ResourceAttributes{Namespace: "ns", Name: "sc", Verb: "update", Group: "", Version: "v1", Resource: "secrets"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := getBuiltinResourceAttributes(tt.router)
+ if err != nil {
+ if !tt.wantErr {
+ t.Errorf("getBuiltinResourceAttributes(): unexpect error: %v", err)
+ }
+ return
+ }
+ if tt.wantErr || *got != tt.want {
+ t.Errorf("getBuiltinResourceAttributes() got = %v, want %v", *got, tt.want)
+ }
+ })
+ }
+}
+
+func TestGetKubeedgeResourceAttributes(t *testing.T) {
+ tests := []struct {
+ name string
+ router model.MessageRoute
+ want authorization.NonResourceAttributes
+ }{
+ {
+ name: "keepalive message",
+ router: model.MessageRoute{Operation: cloudhubmodel.OpKeepalive, Resource: "test"},
+ want: authorization.NonResourceAttributes{Verb: cloudhubmodel.OpKeepalive, Path: "test"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := getKubeedgeResourceAttributes(tt.router)
+ if *got != tt.want {
+ t.Errorf("getKubeedgeResourceAttributes() got = %v, want %v", *got, tt.want)
+ }
+ })
+ }
+}
+
+// TestIsKubeedgeResourceMessage checks classification of messages as kubeedge
+// custom resources vs. built-in Kubernetes resources.
+func TestIsKubeedgeResourceMessage(t *testing.T) {
+	tests := []struct {
+		name   string
+		router model.MessageRoute
+		result bool
+	}{
+		{
+			name:   "keepalive message",
+			router: model.MessageRoute{Operation: cloudhubmodel.OpKeepalive},
+			result: true,
+		},
+		{
+			name:   "device twin message",
+			router: model.MessageRoute{Source: cloudhubmodel.ResTwin},
+			result: true,
+		},
+		{
+			name:   "k8s ca message",
+			router: model.MessageRoute{Resource: model.ResourceTypeK8sCA},
+			result: true,
+		},
+		{
+			name:   "rule status message",
+			router: model.MessageRoute{Resource: "ns/rulestatus/rs"},
+			result: true,
+		},
+		{
+			name:   "configmap message",
+			router: model.MessageRoute{Operation: model.QueryOperation, Resource: "ns/configmap/test-cm", Source: "edged", Group: "meta"},
+			result: false,
+		},
+		{
+			name:   "podstatus list message",
+			router: model.MessageRoute{Operation: model.UpdateOperation, Resource: "ns/podstatus"},
+			result: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := isKubeedgeResourceMessage(tt.router)
+			if got != tt.result {
+				t.Errorf("isKubeedgeResourceMessage() got = %v, want %v", got, tt.result)
+			}
+		})
+	}
+}
+
+func TestGetAuthorizerAttributes(t *testing.T) {
+ tests := []struct {
+ name string
+ router model.MessageRoute
+ hubInfo cloudhubmodel.HubInfo
+ wantErr bool
+ isKubeedgeMessage bool
+ }{
+ {
+ name: "invalid message",
+ router: model.MessageRoute{},
+ wantErr: true,
+ },
+ {
+ name: "device twin message",
+ router: model.MessageRoute{Source: cloudhubmodel.ResTwin},
+ isKubeedgeMessage: true,
+ },
+ {
+ name: "configmap message",
+ router: model.MessageRoute{Operation: model.QueryOperation, Resource: "ns/configmap/test-cm", Source: "edged", Group: "meta"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := getAuthorizerAttributes(tt.router, tt.hubInfo)
+ if err != nil {
+ if !tt.wantErr {
+ t.Errorf("getAuthorizerAttributes(): unexpect error: %v", err)
+ }
+ return
+ }
+
+ if isKubeedgeResourceAttributes(got) != tt.isKubeedgeMessage {
+ t.Errorf("getAuthorizerAttributes() got = %v, want %v", got, tt.isKubeedgeMessage)
+ }
+ })
+ }
+}
diff --git a/cloud/pkg/cloudhub/cloudhub.go b/cloud/pkg/cloudhub/cloudhub.go
index c3fcc0186..41e0debfa 100644
--- a/cloud/pkg/cloudhub/cloudhub.go
+++ b/cloud/pkg/cloudhub/cloudhub.go
@@ -1,13 +1,17 @@
package cloudhub
import (
+ "errors"
+ "fmt"
"os"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
+ "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"github.com/kubeedge/beehive/pkg/core"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
+ "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/authorization"
hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/dispatcher"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/handler"
@@ -22,6 +26,7 @@ import (
)
var DoneTLSTunnelCerts = make(chan bool, 1)
+var sessionMgr *session.Manager
type cloudHub struct {
enable bool
@@ -45,9 +50,17 @@ func newCloudHub(enable bool) *cloudHub {
sessionManager, objectSyncInformer.Lister(),
clusterObjectSyncInformer.Lister(), client.GetCRDClient())
+ config := getAuthConfig()
+ authorizer, err := config.New()
+ if err != nil {
+ panic(fmt.Sprintf("unable to create new authorizer for CloudHub: %v", err))
+ }
+
messageHandler := handler.NewMessageHandler(
int(hubconfig.Config.KeepaliveInterval),
- sessionManager, client.GetCRDClient(), messageDispatcher)
+ sessionManager, client.GetCRDClient(),
+ messageDispatcher, authorizer)
+ sessionMgr = sessionManager
ch := &cloudHub{
enable: enable,
@@ -98,7 +111,7 @@ func (ch *cloudHub) Start() {
close(DoneTLSTunnelCerts)
// generate Token
- if err := httpserver.GenerateToken(); err != nil {
+ if err := httpserver.GenerateAndRefresh(beehiveContext.GetContext()); err != nil {
klog.Exit(err)
}
@@ -113,3 +126,38 @@ func (ch *cloudHub) Start() {
go udsserver.StartServer(hubconfig.Config.UnixSocket.Address)
}
}
+
+// getAuthConfig builds the authorization.Config for CloudHub from the hub
+// configuration. When authorization is disabled or no mode is enabled, the
+// mode list falls back to AlwaysAllow so Config.New never receives an empty
+// chain.
+func getAuthConfig() authorization.Config {
+	enabled := hubconfig.Config.Authorization != nil && hubconfig.Config.Authorization.Enable
+	debug := enabled && hubconfig.Config.Authorization.Debug
+	builtinInformerFactory := informers.GetInformersManager().GetKubeInformerFactory()
+
+	var authorizationModes []string
+	if enabled {
+		for _, modeConfig := range hubconfig.Config.Authorization.Modes {
+			if modeConfig.Node != nil && modeConfig.Node.Enable {
+				authorizationModes = append(authorizationModes, modes.ModeNode)
+			}
+		}
+	}
+	if len(authorizationModes) == 0 {
+		authorizationModes = []string{modes.ModeAlwaysAllow}
+	}
+
+	return authorization.Config{
+		Enabled:                  enabled,
+		Debug:                    debug,
+		AuthorizationModes:       authorizationModes,
+		VersionedInformerFactory: builtinInformerFactory,
+	}
+}
+
+// GetSessionManager returns the session manager created during CloudHub
+// initialization, or an error if newCloudHub has not run yet.
+func GetSessionManager() (*session.Manager, error) {
+	if sessionMgr != nil {
+		return sessionMgr, nil
+	}
+	return nil, errors.New("cloudhub not initialized")
+}
diff --git a/cloud/pkg/cloudhub/common/helper_test.go b/cloud/pkg/cloudhub/common/helper_test.go
new file mode 100644
index 000000000..6ed9cdef8
--- /dev/null
+++ b/cloud/pkg/cloudhub/common/helper_test.go
@@ -0,0 +1,279 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ beehivemodel "github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
+ edgecon "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/constants"
+ "github.com/kubeedge/kubeedge/common/constants"
+)
+
+func TestIsVolumeResource(t *testing.T) {
+ assert := assert.New(t)
+
+ validResource := "test/" + constants.CSIResourceTypeVolume + "/resource"
+ invalidResource := "test/resourcePath/resource"
+
+ assert.True(IsVolumeResource(validResource))
+ assert.False(IsVolumeResource(invalidResource))
+}
+
+func TestGetMessageUID(t *testing.T) {
+ assert := assert.New(t)
+ cases := []struct {
+ name string
+ msg beehivemodel.Message
+ stdResult string
+ hasError bool
+ }{
+ {
+ name: "Valid UID",
+ msg: beehivemodel.Message{
+ Content: &v1.ObjectMeta{
+ UID: "test-uid",
+ },
+ },
+ stdResult: "test-uid",
+ hasError: false,
+ },
+ {
+ name: "Invalid content type",
+ msg: beehivemodel.Message{
+ Content: "",
+ },
+ stdResult: "",
+ hasError: true,
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.name, func(t *testing.T) {
+ result, err := GetMessageUID(test.msg)
+ if test.hasError {
+ assert.Error(err)
+ } else {
+ assert.NoError(err)
+ }
+ assert.Equal(test.stdResult, result)
+ })
+ }
+}
+
+func TestGetMessageDeletionTimestamp(t *testing.T) {
+ assert := assert.New(t)
+
+ now := v1.Now()
+ cases := []struct {
+ name string
+ msg beehivemodel.Message
+ stdResult *v1.Time
+ hasError bool
+ }{
+ {
+ name: "Valid DeletionTimestamp",
+ msg: beehivemodel.Message{
+ Content: &v1.ObjectMeta{
+ DeletionTimestamp: &now,
+ },
+ },
+ stdResult: &now,
+ hasError: false,
+ },
+ {
+ name: "Invalid content type",
+ msg: beehivemodel.Message{
+ Content: "",
+ },
+ stdResult: nil,
+ hasError: true,
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.name, func(t *testing.T) {
+ result, err := GetMessageDeletionTimestamp(&test.msg)
+ if test.hasError {
+ assert.Error(err)
+ } else {
+ assert.NoError(err)
+ }
+ assert.Equal(test.stdResult, result)
+ })
+ }
+}
+
+func TestTrimMessage(t *testing.T) {
+ assert := assert.New(t)
+
+ cases := []struct {
+ name string
+ resource string
+ stdResult string
+ }{
+ {
+ name: "Valid resource",
+ resource: "node/test-node/namespace/pod/test-pod",
+ stdResult: "namespace/pod/test-pod",
+ },
+ {
+ name: "Invalid length of resource",
+ resource: "node/nodeName",
+ stdResult: "node/nodeName",
+ },
+ {
+ name: "Resource is not starting with node",
+ resource: "namespace/pod/test-pod-two",
+ stdResult: "namespace/pod/test-pod-two",
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.name, func(t *testing.T) {
+ msg := beehivemodel.NewMessage("")
+ msg.SetResourceOperation(test.resource, "operation")
+ TrimMessage(msg)
+ assert.Equal(test.stdResult, msg.GetResource())
+ })
+ }
+}
+
+func TestConstructConnectMessage(t *testing.T) {
+ assert := assert.New(t)
+
+ nodeID := "test-node-id"
+ info := &model.HubInfo{NodeID: nodeID}
+
+ msg := ConstructConnectMessage(info, true)
+ assert.NotNil(msg)
+ assert.Equal(model.SrcCloudHub, msg.GetSource())
+ assert.Equal(model.GpResource, msg.GetGroup())
+ assert.Equal(model.NewResource(model.ResNode, nodeID, nil), msg.GetResource())
+
+ var body map[string]interface{}
+ err := json.Unmarshal(msg.GetContent().([]byte), &body)
+ assert.NoError(err)
+ assert.Equal(model.OpConnect, body["event_type"])
+ assert.Equal(nodeID, body["client_id"])
+}
+
+func TestDeepCopy(t *testing.T) {
+ assert := assert.New(t)
+
+ msg := beehivemodel.NewMessage("sample message")
+ msg.FillBody("sample content")
+
+ dc := DeepCopy(msg)
+ assert.NotNil(dc)
+ assert.Equal(msg.GetID(), dc.GetID())
+ assert.Equal(msg.GetContent(), dc.GetContent())
+}
+
+func TestAckMessageKeyFunc(t *testing.T) {
+ assert := assert.New(t)
+
+ cases := []struct {
+ name string
+ obj interface{}
+ stdResult string
+ hasError bool
+ }{
+ {
+ name: "Valid message with GroupResource",
+ obj: &beehivemodel.Message{
+ Header: beehivemodel.MessageHeader{ID: "test-id"},
+ Router: beehivemodel.MessageRoute{Group: edgecon.GroupResource},
+ Content: &v1.ObjectMeta{
+ UID: "test-uid",
+ },
+ },
+ stdResult: "test-uid",
+ hasError: false,
+ },
+ {
+ name: "Invalid object type",
+ obj: "invalid",
+ stdResult: "",
+ hasError: true,
+ },
+ {
+ name: "Message without GroupResource",
+ obj: &beehivemodel.Message{
+ Header: beehivemodel.MessageHeader{ID: "test-id"},
+ Router: beehivemodel.MessageRoute{Group: "other-group"},
+ },
+ stdResult: "",
+ hasError: true,
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.name, func(t *testing.T) {
+ result, err := AckMessageKeyFunc(test.obj)
+ if test.hasError {
+ assert.Error(err)
+ } else {
+ assert.NoError(err)
+ }
+ assert.Equal(test.stdResult, result)
+ })
+ }
+}
+
+func TestNoAckMessageKeyFunc(t *testing.T) {
+ assert := assert.New(t)
+
+ cases := []struct {
+ name string
+ obj interface{}
+ stdResult string
+ hasError bool
+ }{
+ {
+ name: "Valid message",
+ obj: &beehivemodel.Message{
+ Header: beehivemodel.MessageHeader{ID: "test-id"},
+ },
+ stdResult: "test-id",
+ hasError: false,
+ },
+ {
+ name: "Invalid object type",
+ obj: "invalid",
+ stdResult: "",
+ hasError: true,
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.name, func(t *testing.T) {
+ result, err := NoAckMessageKeyFunc(test.obj)
+ if test.hasError {
+ assert.Error(err)
+ } else {
+ assert.NoError(err)
+ }
+ assert.Equal(test.stdResult, result)
+ })
+ }
+}
diff --git a/cloud/pkg/cloudhub/handler/message_handler.go b/cloud/pkg/cloudhub/handler/message_handler.go
index 7f0cd6168..8f56c0cf1 100644
--- a/cloud/pkg/cloudhub/handler/message_handler.go
+++ b/cloud/pkg/cloudhub/handler/message_handler.go
@@ -17,14 +17,18 @@ limitations under the License.
package handler
import (
+ "context"
"time"
+ "github.com/avast/retry-go"
"k8s.io/klog/v2"
+ "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/authorization"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/dispatcher"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/session"
+ "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/controller"
reliableclient "github.com/kubeedge/kubeedge/pkg/client/clientset/versioned"
"github.com/kubeedge/viaduct/pkg/conn"
"github.com/kubeedge/viaduct/pkg/mux"
@@ -51,12 +55,14 @@ func NewMessageHandler(
KeepaliveInterval int,
manager *session.Manager,
reliableClient reliableclient.Interface,
- dispatcher dispatcher.MessageDispatcher) Handler {
+ dispatcher dispatcher.MessageDispatcher,
+ authorizer authorization.Authorizer) Handler {
messageHandler := &messageHandler{
KeepaliveInterval: KeepaliveInterval,
SessionManager: manager,
MessageDispatcher: dispatcher,
reliableClient: reliableClient,
+ authorizer: authorizer,
}
// init handler that process upstream message
@@ -76,6 +82,9 @@ type messageHandler struct {
// reliableClient
reliableClient reliableclient.Interface
+
+ // authorizer
+ authorizer authorization.Authorizer
}
// initServerEntries register handler func
@@ -96,8 +105,15 @@ func (mh *messageHandler) HandleMessage(container *mux.MessageContainer, _ mux.R
klog.V(4).Infof("[messageHandler]get msg from node(%s): %+v", nodeID, container.Message)
+ hubInfo := model.HubInfo{ProjectID: projectID, NodeID: nodeID}
+
+ if err := mh.authorizer.AdmitMessage(*container.Message, hubInfo); err != nil {
+ klog.Errorf("The message is rejected by CloudHub: node=%q, message=(%+v), error=%v", nodeID, container.Message.Router, err)
+ return
+ }
+
// dispatch upstream message
- mh.MessageDispatcher.DispatchUpstream(container.Message, &model.HubInfo{ProjectID: projectID, NodeID: nodeID})
+ mh.MessageDispatcher.DispatchUpstream(container.Message, &hubInfo)
}
// HandleConnection is invoked when a new connection is established
@@ -105,6 +121,11 @@ func (mh *messageHandler) HandleConnection(connection conn.Connection) {
nodeID := connection.ConnectionState().Headers.Get("node_id")
projectID := connection.ConnectionState().Headers.Get("project_id")
+ if err := mh.authorizer.AuthenticateConnection(connection); err != nil {
+ klog.Errorf("The connection is rejected by CloudHub: node=%q, error=%v", nodeID, err)
+ return
+ }
+
if mh.SessionManager.ReachLimit() {
klog.Errorf("Fail to serve node %s, reach node limit", nodeID)
return
@@ -131,6 +152,19 @@ func (mh *messageHandler) HandleConnection(connection conn.Connection) {
keepaliveInterval, nodeMessagePool, mh.reliableClient)
// add node session to the session manager
mh.SessionManager.AddSession(nodeSession)
+ go func() {
+ err := retry.Do(
+ func() error {
+ return controller.UpdateAnnotation(context.TODO(), nodeID)
+ },
+ retry.Delay(1*time.Second),
+ retry.Attempts(3),
+ retry.DelayType(retry.FixedDelay),
+ )
+ if err != nil {
+ klog.Errorf(err.Error())
+ }
+ }()
// start session for each edge node and it will keep running until
// it encounters some Transport Error from underlying connection.
diff --git a/cloud/pkg/cloudhub/servers/httpserver/server.go b/cloud/pkg/cloudhub/servers/httpserver/server.go
index 4c9cddd5f..8afa1a61a 100644
--- a/cloud/pkg/cloudhub/servers/httpserver/server.go
+++ b/cloud/pkg/cloudhub/servers/httpserver/server.go
@@ -37,6 +37,7 @@ import (
hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
"github.com/kubeedge/kubeedge/common/constants"
"github.com/kubeedge/kubeedge/common/types"
+ "github.com/kubeedge/kubeedge/pkg/security/token"
)
// StartHTTPServer starts the http service
@@ -142,6 +143,7 @@ func verifyCertSubject(cert *x509.Certificate, nodeName string) error {
func verifyAuthorization(w http.ResponseWriter, r *http.Request) bool {
authorizationHeader := r.Header.Get("authorization")
if authorizationHeader == "" {
+ klog.Warning("token validation failure, token is empty")
w.WriteHeader(http.StatusUnauthorized)
if _, err := w.Write([]byte("Invalid authorization token")); err != nil {
klog.Errorf("failed to write http response, err: %v", err)
@@ -150,20 +152,16 @@ func verifyAuthorization(w http.ResponseWriter, r *http.Request) bool {
}
bearerToken := strings.Split(authorizationHeader, " ")
if len(bearerToken) != 2 {
+ klog.Warning("token validation failure, token cannot be splited")
w.WriteHeader(http.StatusUnauthorized)
if _, err := w.Write([]byte("Invalid authorization token")); err != nil {
klog.Errorf("failed to write http response, err: %v", err)
}
return false
}
- token, err := jwt.Parse(bearerToken[1], func(token *jwt.Token) (interface{}, error) {
- if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
- return nil, fmt.Errorf("there was an error")
- }
- caKey := hubconfig.Config.CaKey
- return caKey, nil
- })
+ valid, err := token.Verify(bearerToken[1], hubconfig.Config.CaKey)
if err != nil {
+ klog.Warning("token validation failure, ", err.Error())
if err == jwt.ErrSignatureInvalid {
w.WriteHeader(http.StatusUnauthorized)
if _, err := w.Write([]byte("Invalid authorization token")); err != nil {
@@ -175,10 +173,10 @@ func verifyAuthorization(w http.ResponseWriter, r *http.Request) bool {
if _, err := w.Write([]byte("Invalid authorization token")); err != nil {
klog.Errorf("Write body error %v", err)
}
-
return false
}
- if !token.Valid {
+ if !valid {
+ klog.Warning("token validation failure, valid is false")
w.WriteHeader(http.StatusUnauthorized)
if _, err := w.Write([]byte("Invalid authorization token")); err != nil {
klog.Errorf("Write body error %v", err)
diff --git a/cloud/pkg/cloudhub/servers/httpserver/signcerts.go b/cloud/pkg/cloudhub/servers/httpserver/signcerts.go
index d9e1d4d59..93d309eac 100644
--- a/cloud/pkg/cloudhub/servers/httpserver/signcerts.go
+++ b/cloud/pkg/cloudhub/servers/httpserver/signcerts.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,24 +13,21 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-
package httpserver
import (
- "crypto/sha256"
+ "context"
"crypto/x509"
- "encoding/hex"
"fmt"
"net"
- "strings"
"time"
- "github.com/golang-jwt/jwt"
certutil "k8s.io/client-go/util/cert"
"k8s.io/klog/v2"
hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
"github.com/kubeedge/kubeedge/common/constants"
+ "github.com/kubeedge/kubeedge/pkg/security/token"
)
// SignCerts creates server's certificate and key
@@ -60,28 +57,13 @@ func getIps(advertiseAddress []string) (Ips []net.IP) {
return
}
-// GenerateToken will create a token consisting of caHash and jwt Token and save it to secret
-func GenerateToken() error {
- // set double TokenRefreshDuration as expirationTime, which can guarantee that the validity period
- // of the token obtained at anytime is greater than or equal to TokenRefreshDuration
- expiresAt := time.Now().Add(time.Hour * hubconfig.Config.CloudHub.TokenRefreshDuration * 2).Unix()
-
- token := jwt.New(jwt.SigningMethodHS256)
-
- token.Claims = jwt.StandardClaims{
- ExpiresAt: expiresAt,
- }
-
- keyPEM := getCaKey()
- tokenString, err := token.SignedString(keyPEM)
-
+// GenerateAndRefresh creates a token and saves it to a secret, then starts a timer to refresh the token periodically.
+func GenerateAndRefresh(ctx context.Context) error {
+ caHashToken, err := token.Create(hubconfig.Config.Ca, hubconfig.Config.CaKey,
+ hubconfig.Config.CloudHub.TokenRefreshDuration)
if err != nil {
- return fmt.Errorf("failed to generate the token for EdgeCore register, err: %v", err)
+ return fmt.Errorf("failed to generate the token for edgecore register, err: %v", err)
}
-
- caHash := getCaHash()
- // combine caHash and tokenString into caHashAndToken
- caHashToken := strings.Join([]string{caHash, tokenString}, ".")
// save caHashAndToken to secret
err = CreateTokenSecret([]byte(caHashToken))
if err != nil {
@@ -91,42 +73,18 @@ func GenerateToken() error {
t := time.NewTicker(time.Hour * hubconfig.Config.CloudHub.TokenRefreshDuration)
go func() {
for {
- <-t.C
- refreshedCaHashToken := refreshToken()
- if err := CreateTokenSecret([]byte(refreshedCaHashToken)); err != nil {
- klog.Exitf("Failed to create the ca token for edgecore register, err: %v", err)
+ select {
+ case <-t.C:
+ caHashToken, err = token.Create(hubconfig.Config.Ca, hubconfig.Config.CaKey,
+ hubconfig.Config.CloudHub.TokenRefreshDuration)
+ if err != nil {
+ klog.Errorf("failed to refresh the token for edgecore register, err: %v", err)
+ }
+ case <-ctx.Done():
+ break
}
}
}()
klog.Info("Succeed to creating token")
return nil
}
-
-func refreshToken() string {
- claims := &jwt.StandardClaims{}
- expirationTime := time.Now().Add(time.Hour * hubconfig.Config.CloudHub.TokenRefreshDuration * 2)
- claims.ExpiresAt = expirationTime.Unix()
- token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
- keyPEM := getCaKey()
- tokenString, err := token.SignedString(keyPEM)
- if err != nil {
- klog.Errorf("Failed to generate token signed by caKey, err: %v", err)
- }
- caHash := getCaHash()
- //put caHash in token
- caHashAndToken := strings.Join([]string{caHash, tokenString}, ".")
- return caHashAndToken
-}
-
-// getCaHash gets ca-hash
-func getCaHash() string {
- caDER := hubconfig.Config.Ca
- digest := sha256.Sum256(caDER)
- return hex.EncodeToString(digest[:])
-}
-
-// getCaKey gets caKey to encrypt token
-func getCaKey() []byte {
- caKey := hubconfig.Config.CaKey
- return caKey
-}
diff --git a/cloud/pkg/common/client/client.go b/cloud/pkg/common/client/client.go
index 5caac1ccd..19279ceaf 100644
--- a/cloud/pkg/common/client/client.go
+++ b/cloud/pkg/common/client/client.go
@@ -47,7 +47,7 @@ var (
CrdConfig *rest.Config
)
-func InitKubeEdgeClient(config *cloudcoreConfig.KubeAPIConfig) {
+func InitKubeEdgeClient(config *cloudcoreConfig.KubeAPIConfig, enableImpersonation bool) {
initOnce.Do(func() {
kubeConfig, err := clientcmd.BuildConfigFromFlags(config.Master, config.KubeConfig)
if err != nil {
@@ -59,15 +59,15 @@ func InitKubeEdgeClient(config *cloudcoreConfig.KubeAPIConfig) {
KubeConfig = kubeConfig
- dynamicClient = dynamic.NewForConfigOrDie(kubeConfig)
+ dynamicClient = newForDynamicConfigOrDie(kubeConfig, enableImpersonation)
kubeConfig.ContentType = runtime.ContentTypeProtobuf
- kubeClient = kubernetes.NewForConfigOrDie(kubeConfig)
+ kubeClient = newForK8sConfigOrDie(kubeConfig, enableImpersonation)
crdKubeConfig := rest.CopyConfig(kubeConfig)
crdKubeConfig.ContentType = runtime.ContentTypeJSON
CrdConfig = crdKubeConfig
- crdClient = crdClientset.NewForConfigOrDie(crdKubeConfig)
+ crdClient = newForCrdConfigOrDie(crdKubeConfig, enableImpersonation)
authKubeConfig, err = clientcmd.BuildConfigFromFlags(kubeConfig.Host, "")
if err != nil {
diff --git a/cloud/pkg/common/client/impersonation.go b/cloud/pkg/common/client/impersonation.go
new file mode 100644
index 000000000..a905a48a1
--- /dev/null
+++ b/cloud/pkg/common/client/impersonation.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "net/http"
+ "strings"
+
+ authenticationv1 "k8s.io/api/authentication/v1"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
+
+ crdClientset "github.com/kubeedge/kubeedge/pkg/client/clientset/versioned"
+)
+
+func newForK8sConfigOrDie(c *rest.Config, enableImpersonation bool) *kubernetes.Clientset {
+ configShallowCopy := *c
+
+ if configShallowCopy.UserAgent == "" {
+ configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ httpClient, err := httpClientFor(&configShallowCopy, enableImpersonation)
+ if err != nil {
+ panic(err)
+ }
+
+ cs, err := kubernetes.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+func newForDynamicConfigOrDie(c *rest.Config, enableImpersonation bool) *dynamic.DynamicClient {
+ configShallowCopy := dynamic.ConfigFor(c)
+ httpClient, err := httpClientFor(configShallowCopy, enableImpersonation)
+ if err != nil {
+ panic(err)
+ }
+
+ cs, err := dynamic.NewForConfigAndClient(configShallowCopy, httpClient)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+func newForCrdConfigOrDie(c *rest.Config, enableImpersonation bool) *crdClientset.Clientset {
+ configShallowCopy := *c
+
+ if configShallowCopy.UserAgent == "" {
+ configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ httpClient, err := httpClientFor(&configShallowCopy, enableImpersonation)
+ if err != nil {
+ panic(err)
+ }
+
+ cs, err := crdClientset.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+func httpClientFor(c *rest.Config, enableImpersonation bool) (*http.Client, error) {
+ transport, err := rest.TransportFor(c)
+ if err != nil {
+ return nil, err
+ }
+
+ return &http.Client{
+ Transport: &impersonationRoundTripper{
+ enable: enableImpersonation,
+ rt: transport,
+ },
+ Timeout: c.Timeout,
+ }, nil
+}
+
// impersonationRoundTripper wraps an http.RoundTripper and, when enable is
// true, copies impersonation values found in the request context into
// Kubernetes impersonation headers before the request is sent (see RoundTrip).
type impersonationRoundTripper struct {
	enable bool
	rt http.RoundTripper
}
+
+func (r *impersonationRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ // extract user and group from context and set impersonation headers
+ var userStr, groupStr string
+ user := req.Context().Value(authenticationv1.ImpersonateUserHeader)
+ if user != nil && r.enable {
+ userStr = user.(string)
+ req.Header.Set(authenticationv1.ImpersonateUserHeader, userStr)
+ }
+ group := req.Context().Value(authenticationv1.ImpersonateGroupHeader)
+ if group != nil && r.enable {
+ groupStr = group.(string)
+ for _, g := range strings.Split(groupStr, "|") {
+ req.Header.Set(authenticationv1.ImpersonateGroupHeader, g)
+ }
+ }
+
+ klog.V(4).Infof("KubeClient: request.method=%s, request.path=%s, user=%q, group= %q", req.Method, req.URL.Path, userStr, groupStr)
+ return r.rt.RoundTrip(req)
+}
diff --git a/cloud/pkg/common/context/context.go b/cloud/pkg/common/context/context.go
new file mode 100644
index 000000000..f15c47eb5
--- /dev/null
+++ b/cloud/pkg/common/context/context.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package context
+
+import (
+ "context"
+ "strings"
+
+ authenticationv1 "k8s.io/api/authentication/v1"
+ "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+
+ "github.com/kubeedge/beehive/pkg/core/model"
+)
+
+// WithEdgeNode injects node ID into context
+func WithEdgeNode(ctx context.Context, nodeID string) context.Context {
+ ctx = context.WithValue(ctx, authenticationv1.ImpersonateUserHeader, constants.NodesUserPrefix+nodeID)
+ ctx = context.WithValue(ctx, authenticationv1.ImpersonateGroupHeader, constants.NodesGroup)
+ return ctx
+}
+
+// FromMessage injects node ID into context. Resource must start with `node` and message source
+func FromMessage(ctx context.Context, msg model.Message) context.Context {
+ tokens := strings.Split(msg.Router.Resource, "/")
+ if len(tokens) >= 2 && tokens[0] == "node" {
+ ctx = WithEdgeNode(ctx, tokens[1])
+ }
+ return ctx
+}
diff --git a/cloud/pkg/devicecontroller/controller/upstream.go b/cloud/pkg/devicecontroller/controller/upstream.go
index 6f2298095..817abc352 100644
--- a/cloud/pkg/devicecontroller/controller/upstream.go
+++ b/cloud/pkg/devicecontroller/controller/upstream.go
@@ -26,6 +26,7 @@ import (
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
keclient "github.com/kubeedge/kubeedge/cloud/pkg/common/client"
+ utilcontext "github.com/kubeedge/kubeedge/cloud/pkg/common/context"
"github.com/kubeedge/kubeedge/cloud/pkg/common/messagelayer"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
"github.com/kubeedge/kubeedge/cloud/pkg/devicecontroller/config"
@@ -174,7 +175,7 @@ func (uc *UpstreamController) updateDeviceStatus() {
klog.Errorf("Failed to marshal device status %v", deviceStatus)
continue
}
- err = uc.crdClient.DevicesV1beta1().RESTClient().Patch(MergePatchType).Namespace(cacheDevice.Namespace).Resource(ResourceTypeDevices).Name(cacheDevice.Name).Body(body).Do(context.Background()).Error()
+ err = uc.crdClient.DevicesV1beta1().RESTClient().Patch(MergePatchType).Namespace(cacheDevice.Namespace).Resource(ResourceTypeDevices).Name(cacheDevice.Name).Body(body).Do(utilcontext.FromMessage(context.Background(), msg)).Error()
if err != nil {
klog.Errorf("Failed to patch device status %v of device %v in namespace %v, err: %v", deviceStatus, deviceID, cacheDevice.Namespace, err)
continue
diff --git a/cloud/pkg/dynamiccontroller/application/application.go b/cloud/pkg/dynamiccontroller/application/application.go
index 00b48fb48..f2ddf7605 100644
--- a/cloud/pkg/dynamiccontroller/application/application.go
+++ b/cloud/pkg/dynamiccontroller/application/application.go
@@ -5,6 +5,7 @@ import (
"fmt"
"strings"
+ authorizationv1 "k8s.io/api/authorization/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -13,11 +14,14 @@ import (
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
+ "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"github.com/kubeedge/beehive/pkg/core/model"
"github.com/kubeedge/kubeedge/cloud/pkg/common/client"
+ utilcontext "github.com/kubeedge/kubeedge/cloud/pkg/common/context"
"github.com/kubeedge/kubeedge/cloud/pkg/common/messagelayer"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
+ "github.com/kubeedge/kubeedge/cloud/pkg/dynamiccontroller/config"
"github.com/kubeedge/kubeedge/cloud/pkg/dynamiccontroller/filter"
"github.com/kubeedge/kubeedge/edge/pkg/common/message"
"github.com/kubeedge/kubeedge/pkg/metaserver"
@@ -59,6 +63,14 @@ func (c *Center) Process(msg model.Message) {
klog.Infof("[metaserver/ApplicationCenter] get a Application %v", app.String())
+ if config.Config.EnableAuthorization {
+ nodeID, err := messagelayer.GetNodeID(msg)
+ if err != nil || nodeID != app.Nodename {
+ klog.Errorf("[metaserver/authorization]failed to process Application(%+v), %v", app, err)
+ return
+ }
+ }
+
if passthrough.IsPassThroughPath(app.Key, string(app.Verb)) {
resp, err := c.passThroughRequest(app)
if err != nil {
@@ -95,12 +107,15 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
if err := app.OptionTo(option); err != nil {
return nil, err
}
- list, err := c.dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), *option)
+ list, err := c.dynamicClient.Resource(gvr).Namespace(ns).List(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), *option)
if err != nil {
return nil, fmt.Errorf("get current list error: %v", err)
}
return list, nil
case metaserver.Watch:
+ if err := c.checkNodePermission(app); err != nil {
+ return nil, err
+ }
listener, err := applicationToListener(app)
if err != nil {
return nil, err
@@ -115,7 +130,7 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
if err := app.OptionTo(option); err != nil {
return nil, err
}
- retObj, err := c.dynamicClient.Resource(gvr).Namespace(ns).Get(context.TODO(), name, *option)
+ retObj, err := c.dynamicClient.Resource(gvr).Namespace(ns).Get(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), name, *option)
if err != nil {
return nil, err
}
@@ -132,9 +147,9 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
var retObj interface{}
var err error
if app.Subresource == "" {
- retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Create(context.TODO(), obj, *option)
+ retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Create(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), obj, *option)
} else {
- retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Create(context.TODO(), obj, *option, app.Subresource)
+ retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Create(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), obj, *option, app.Subresource)
}
if err != nil {
return nil, err
@@ -145,7 +160,7 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
if err := app.OptionTo(&option); err != nil {
return nil, err
}
- if err := c.dynamicClient.Resource(gvr).Namespace(ns).Delete(context.TODO(), name, *option); err != nil {
+ if err := c.dynamicClient.Resource(gvr).Namespace(ns).Delete(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), name, *option); err != nil {
return nil, err
}
return nil, nil
@@ -161,9 +176,9 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
var retObj interface{}
var err error
if app.Subresource == "" {
- retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Update(context.TODO(), obj, *option)
+ retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Update(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), obj, *option)
} else {
- retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Update(context.TODO(), obj, *option, app.Subresource)
+ retObj, err = c.dynamicClient.Resource(gvr).Namespace(ns).Update(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), obj, *option, app.Subresource)
}
if err != nil {
return nil, err
@@ -178,7 +193,7 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
if err := app.ReqBodyTo(obj); err != nil {
return nil, err
}
- retObj, err := c.dynamicClient.Resource(gvr).Namespace(ns).UpdateStatus(context.TODO(), obj, *option)
+ retObj, err := c.dynamicClient.Resource(gvr).Namespace(ns).UpdateStatus(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), obj, *option)
if err != nil {
return nil, err
}
@@ -188,7 +203,7 @@ func (c *Center) ProcessApplication(app *metaserver.Application) (interface{}, e
if err := app.OptionTo(pi); err != nil {
return nil, err
}
- retObj, err := c.dynamicClient.Resource(gvr).Namespace(ns).Patch(context.TODO(), pi.Name, pi.PatchType, pi.Data, pi.Options, pi.Subresources...)
+ retObj, err := c.dynamicClient.Resource(gvr).Namespace(ns).Patch(utilcontext.WithEdgeNode(context.TODO(), app.Nodename), pi.Name, pi.PatchType, pi.Data, pi.Options, pi.Subresources...)
if err != nil {
return nil, err
}
@@ -204,7 +219,7 @@ func (c *Center) passThroughRequest(app *metaserver.Application) (interface{}, e
return nil, fmt.Errorf("converting kubeClient to *kubernetes.Clientset type failed")
}
verb := strings.ToUpper(string(app.Verb))
- return kubeClient.RESTClient().Verb(verb).AbsPath(app.Key).Body(app.ReqBody).Do(context.TODO()).Raw()
+ return kubeClient.RESTClient().Verb(verb).AbsPath(app.Key).Body(app.ReqBody).Do(utilcontext.WithEdgeNode(context.TODO(), app.Nodename)).Raw()
}
// Response update application, generate and send resp message to edge
@@ -264,6 +279,9 @@ func (c *Center) ProcessWatchSync(msg model.Message) error {
// add listener for new added watch app
for _, watchApp := range addedWatchApp {
+ if config.Config.EnableAuthorization && nodeID != watchApp.Nodename {
+ return fmt.Errorf("node name %q is not allowed", watchApp.Nodename)
+ }
err := c.processWatchApp(&watchApp)
if err != nil {
watchApp.Status = metaserver.Rejected
@@ -314,6 +332,10 @@ func (c *Center) getWatchDiff(allWatchAppInEdge map[string]metaserver.Applicatio
}
func (c *Center) processWatchApp(watchApp *metaserver.Application) error {
+ if err := c.checkNodePermission(watchApp); err != nil {
+ return err
+ }
+
watchApp.Status = metaserver.InProcessing
listener, err := applicationToListener(watchApp)
if err != nil {
@@ -327,6 +349,38 @@ func (c *Center) processWatchApp(watchApp *metaserver.Application) error {
return nil
}
+func (c *Center) checkNodePermission(app *metaserver.Application) error {
+ if !config.Config.EnableAuthorization {
+ return nil
+ }
+ gvr, ns, name := metaserver.ParseKey(app.Key)
+
+ subjectAccessReview := &authorizationv1.SubjectAccessReview{
+ Spec: authorizationv1.SubjectAccessReviewSpec{
+ ResourceAttributes: &authorizationv1.ResourceAttributes{
+ Namespace: ns,
+ Verb: string(app.Verb),
+ Group: gvr.Group,
+ Version: gvr.Version,
+ Resource: gvr.Resource,
+ Subresource: app.Subresource,
+ Name: name,
+ },
+ User: constants.NodesUserPrefix + app.Nodename,
+ Groups: []string{constants.NodesGroup},
+ },
+ }
+ ret, err := c.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), subjectAccessReview, metav1.CreateOptions{})
+ if err != nil {
+ return fmt.Errorf("node %s permission check failed: %v", app.Nodename, err)
+ }
+ if !ret.Status.Allowed {
+ return fmt.Errorf("node %q is not allowed to access this resource", app.Nodename)
+ }
+
+ return nil
+}
+
func applicationToListener(app *metaserver.Application) (*SelectorListener, error) {
var option = new(metav1.ListOptions)
if err := app.OptionTo(option); err != nil {
diff --git a/cloud/pkg/dynamiccontroller/application/application_test.go b/cloud/pkg/dynamiccontroller/application/application_test.go
index bf9234469..a4d234243 100644
--- a/cloud/pkg/dynamiccontroller/application/application_test.go
+++ b/cloud/pkg/dynamiccontroller/application/application_test.go
@@ -1,17 +1,23 @@
package application
import (
+ "errors"
"io"
"net/http"
"reflect"
"strings"
"testing"
+ "k8s.io/api/authorization/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
fakerest "k8s.io/client-go/rest/fake"
+ clientgotesting "k8s.io/client-go/testing"
+ "github.com/kubeedge/kubeedge/cloud/pkg/dynamiccontroller/config"
"github.com/kubeedge/kubeedge/pkg/metaserver"
)
@@ -84,3 +90,67 @@ func TestCenter_passThroughRequest(t *testing.T) {
})
}
}
+
+func TestCheckNodePermission(t *testing.T) {
+ originalEnableAuthorization := config.Config.EnableAuthorization
+ config.Config.EnableAuthorization = true
+ defer func() {
+ config.Config.EnableAuthorization = originalEnableAuthorization
+ }()
+
+ tests := []struct {
+ name string
+ app *metaserver.Application
+ allowed bool
+ err error
+ wantErr bool
+ }{
+ {
+ name: "get version success",
+ app: &metaserver.Application{
+ Verb: "get",
+ Key: "/version",
+ Subresource: "",
+ Nodename: "test-node",
+ },
+ allowed: true,
+ wantErr: false,
+ }, {
+ name: "get version success",
+ app: &metaserver.Application{
+ Verb: "get",
+ Key: "/version",
+ Subresource: "",
+ Nodename: "test-node",
+ },
+ allowed: true,
+ err: errors.New("permission denied"),
+ wantErr: true,
+ }, {
+ name: "get configmap failed",
+ app: &metaserver.Application{
+ Verb: "get",
+ Key: "/core/v1/configmaps/ns/test-cm",
+ Subresource: "",
+ Nodename: "test-node",
+ },
+ allowed: false,
+ wantErr: true,
+ },
+ }
+
+ fakeClientSet := fake.NewSimpleClientset()
+ center := &Center{kubeClient: fakeClientSet}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fakeClientSet.PrependReactor("create", "subjectaccessreviews", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
+ return true, &v1.SubjectAccessReview{Status: v1.SubjectAccessReviewStatus{Allowed: tt.allowed}}, tt.err
+ })
+
+ err := center.checkNodePermission(tt.app)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("checkNodePermission() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/cloud/pkg/dynamiccontroller/config/config.go b/cloud/pkg/dynamiccontroller/config/config.go
index 34f96af40..479486c17 100644
--- a/cloud/pkg/dynamiccontroller/config/config.go
+++ b/cloud/pkg/dynamiccontroller/config/config.go
@@ -26,13 +26,15 @@ var Config Configure
var once sync.Once
type Configure struct {
- DynamicController *configv1alpha1.DynamicController
+ DynamicController *configv1alpha1.DynamicController
+ EnableAuthorization bool
}
-func InitConfigure(dc *configv1alpha1.DynamicController) {
+func InitConfigure(dc *configv1alpha1.DynamicController, enableAuthorization bool) {
once.Do(func() {
Config = Configure{
- DynamicController: dc,
+ DynamicController: dc,
+ EnableAuthorization: enableAuthorization,
}
})
}
diff --git a/cloud/pkg/dynamiccontroller/dynamiccontroller.go b/cloud/pkg/dynamiccontroller/dynamiccontroller.go
index 873c01944..6c6557874 100644
--- a/cloud/pkg/dynamiccontroller/dynamiccontroller.go
+++ b/cloud/pkg/dynamiccontroller/dynamiccontroller.go
@@ -46,8 +46,8 @@ var (
dynamicController *DynamicController
)
-func Register(dc *configv1alpha1.DynamicController) {
- config.InitConfigure(dc)
+func Register(dc *configv1alpha1.DynamicController, enableAuthorization bool) {
+ config.InitConfigure(dc, enableAuthorization)
dynamicController = newDynamicController(dc.Enable)
core.Register(dynamicController)
}
diff --git a/cloud/pkg/edgecontroller/controller/upstream.go b/cloud/pkg/edgecontroller/controller/upstream.go
index 48cd2d7e6..d249814fe 100644
--- a/cloud/pkg/edgecontroller/controller/upstream.go
+++ b/cloud/pkg/edgecontroller/controller/upstream.go
@@ -42,6 +42,7 @@ import (
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
apimachineryType "k8s.io/apimachinery/pkg/types"
+ patchtypes "k8s.io/apimachinery/pkg/types"
k8sinformer "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
coordinationlisters "k8s.io/client-go/listers/coordination/v1"
@@ -51,24 +52,28 @@ import (
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
"github.com/kubeedge/kubeedge/cloud/pkg/common/client"
+ utilcontext "github.com/kubeedge/kubeedge/cloud/pkg/common/context"
"github.com/kubeedge/kubeedge/cloud/pkg/common/messagelayer"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
"github.com/kubeedge/kubeedge/cloud/pkg/devicecontroller/controller"
"github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/constants"
"github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/types"
routerrule "github.com/kubeedge/kubeedge/cloud/pkg/router/rule"
+ comconstants "github.com/kubeedge/kubeedge/common/constants"
common "github.com/kubeedge/kubeedge/common/constants"
edgeapi "github.com/kubeedge/kubeedge/common/types"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/cloudcore/v1alpha1"
rulesv1 "github.com/kubeedge/kubeedge/pkg/apis/rules/v1"
crdClientset "github.com/kubeedge/kubeedge/pkg/client/clientset/versioned"
"github.com/kubeedge/kubeedge/pkg/metaserver/util"
+ kubeedgeutil "github.com/kubeedge/kubeedge/pkg/util"
)
// SortedContainerStatuses define A type to help sort container statuses based on container names.
type SortedContainerStatuses []v1.ContainerStatus
-func (s SortedContainerStatuses) Len() int { return len(s) }
+func (s SortedContainerStatuses) Len() int { return len(s) }
+
func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortedContainerStatuses) Less(i, j int) bool {
@@ -296,7 +301,7 @@ func (uc *UpstreamController) updateRuleStatus() {
continue
}
var rule *rulesv1.Rule
- rule, err = uc.crdClient.RulesV1().Rules(namespace).Get(context.Background(), ruleID, metaV1.GetOptions{})
+ rule, err = uc.crdClient.RulesV1().Rules(namespace).Get(utilcontext.FromMessage(context.Background(), msg), ruleID, metaV1.GetOptions{})
if err != nil {
klog.Warningf("message: %s process failure, get rule with error: %s, namespaces: %s name: %s", msg.GetID(), err, namespace, ruleID)
continue
@@ -324,7 +329,7 @@ func (uc *UpstreamController) updateRuleStatus() {
klog.Warningf("message: %s process failure, content marshal err: %s", msg.GetID(), err)
continue
}
- _, err = uc.crdClient.RulesV1().Rules(namespace).Patch(context.Background(), ruleID, controller.MergePatchType, body, metaV1.PatchOptions{})
+ _, err = uc.crdClient.RulesV1().Rules(namespace).Patch(utilcontext.FromMessage(context.Background(), msg), ruleID, controller.MergePatchType, body, metaV1.PatchOptions{})
if err != nil {
klog.Warningf("message: %s process failure, update ruleStatus failed with error: %s, namespace: %s, name: %s", msg.GetID(), err, namespace, ruleID)
} else {
@@ -359,7 +364,7 @@ func (uc *UpstreamController) updatePodStatus() {
switch msg.GetOperation() {
case model.UpdateOperation:
for _, podStatus := range podStatuses {
- getPod, err := uc.kubeClient.CoreV1().Pods(namespace).Get(context.Background(), podStatus.Name, metaV1.GetOptions{})
+ getPod, err := uc.kubeClient.CoreV1().Pods(namespace).Get(utilcontext.FromMessage(context.Background(), msg), podStatus.Name, metaV1.GetOptions{})
if (err == nil && getPod.UID != podStatus.UID) || errors.IsNotFound(err) {
klog.Warningf("message: %s, pod not found, namespace: %s, name: %s", msg.GetID(), namespace, podStatus.Name)
@@ -431,7 +436,7 @@ func (uc *UpstreamController) updatePodStatus() {
uc.normalizePodStatus(getPod, &status)
getPod.Status = status
- if updatedPod, err := uc.kubeClient.CoreV1().Pods(getPod.Namespace).UpdateStatus(context.Background(), getPod, metaV1.UpdateOptions{}); err != nil {
+ if updatedPod, err := uc.kubeClient.CoreV1().Pods(getPod.Namespace).UpdateStatus(utilcontext.FromMessage(context.Background(), msg), getPod, metaV1.UpdateOptions{}); err != nil {
uc.podStatusResponse(msg, err)
klog.Warningf("message: %s, update pod status failed with error: %s, namespace: %s, name: %s", msg.GetID(), err, getPod.Namespace, getPod.Name)
} else {
@@ -442,7 +447,7 @@ func (uc *UpstreamController) updatePodStatus() {
if updatedPod.DeletionTimestamp != nil && (status.Phase == v1.PodSucceeded || status.Phase == v1.PodFailed) {
if uc.isPodNotRunning(status.ContainerStatuses) {
- if err := uc.kubeClient.CoreV1().Pods(updatedPod.Namespace).Delete(context.Background(), updatedPod.Name, *metaV1.NewDeleteOptions(0)); err != nil {
+ if err := uc.kubeClient.CoreV1().Pods(updatedPod.Namespace).Delete(utilcontext.FromMessage(context.Background(), msg), updatedPod.Name, *metaV1.NewDeleteOptions(0)); err != nil {
klog.Warningf("message: %s, graceful delete pod failed with error: %s, namespace: %s, name: %s", msg.GetID(), err, updatedPod.Namespace, updatedPod.Name)
} else {
klog.Infof("message: %s, pod delete successfully, namespace: %s, name: %s", msg.GetID(), updatedPod.Namespace, updatedPod.Name)
@@ -462,9 +467,49 @@ func (uc *UpstreamController) updatePodStatus() {
}
// createNode create new edge node to kubernetes
-func (uc *UpstreamController) createNode(name string, node *v1.Node) (*v1.Node, error) {
+func (uc *UpstreamController) createNode(nodeID, name string, node *v1.Node) (*v1.Node, error) {
+	// The NodeRestriction admission plugin forbids the kubelet from changing
+	// kubernetes.io-reserved labels, so add those labels separately after node creation.
+ kubernetesReversedLabels := make(map[string]string)
+ for k, v := range node.Labels {
+ // todo check labels against whitelist
+ parts := strings.Split(k, "/")
+ if len(parts) != 2 || !(parts[0] == "kubernetes.io" || strings.HasSuffix(parts[0], ".kubernetes.io")) {
+ continue
+ }
+ kubernetesReversedLabels[k] = v
+ delete(node.Labels, k)
+ }
+ defer func() {
+ if node == nil {
+ return
+ }
+ if node.Labels == nil {
+ node.Labels = make(map[string]string)
+ }
+ for k, v := range kubernetesReversedLabels {
+ node.Labels[k] = v
+ }
+ }()
+
node.Name = name
- return uc.kubeClient.CoreV1().Nodes().Create(context.Background(), node, metaV1.CreateOptions{})
+ hostnameOverride := kubeedgeutil.GetHostname()
+ localIP, err := kubeedgeutil.GetLocalIP(hostnameOverride)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get cloudcore localIP with err:%v", err)
+ }
+ if node.Annotations == nil {
+ node.Annotations = make(map[string]string)
+ }
+ node.Annotations[common.EdgeMappingCloudKey] = localIP
+ node, err = uc.kubeClient.CoreV1().Nodes().Create(utilcontext.WithEdgeNode(context.Background(), nodeID), node, metaV1.CreateOptions{})
+ if err == nil && len(kubernetesReversedLabels) > 0 {
+ patchBytes, err := json.Marshal(map[string]interface{}{"metadata": map[string]interface{}{"labels": kubernetesReversedLabels}})
+ if err == nil {
+ node, err = uc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), name, patchtypes.MergePatchType, patchBytes, metaV1.PatchOptions{})
+ }
+ }
+ return node, err
}
// updateNodeStatus update node status
@@ -494,10 +539,15 @@ func (uc *UpstreamController) updateNodeStatus() {
klog.Warningf("message: %s process failure, get resource name failed with error: %s", msg.GetID(), err)
continue
}
+ nodeID, err := messagelayer.GetNodeID(msg)
+ if err != nil {
+ klog.Warningf("message: %s process failure, get node ID failed with error: %s", msg.GetID(), err)
+ continue
+ }
switch msg.GetOperation() {
case model.InsertOperation:
- _, err := uc.kubeClient.CoreV1().Nodes().Get(context.Background(), name, metaV1.GetOptions{})
+ _, err := uc.kubeClient.CoreV1().Nodes().Get(utilcontext.FromMessage(context.Background(), msg), name, metaV1.GetOptions{})
if err == nil {
klog.Infof("node: %s already exists, do nothing", name)
uc.nodeMsgResponse(name, namespace, common.MessageSuccessfulContent, msg)
@@ -520,7 +570,7 @@ func (uc *UpstreamController) updateNodeStatus() {
continue
}
- if _, err = uc.createNode(name, node); err != nil {
+ if _, err = uc.createNode(nodeID, name, node); err != nil {
errLog := fmt.Sprintf("create node %s error: %v , register node failed", name, err)
klog.Error(errLog)
uc.nodeMsgResponse(name, namespace, errLog, msg)
@@ -537,7 +587,7 @@ func (uc *UpstreamController) updateNodeStatus() {
continue
}
- getNode, err := uc.kubeClient.CoreV1().Nodes().Get(context.Background(), name, metaV1.GetOptions{})
+ getNode, err := uc.kubeClient.CoreV1().Nodes().Get(utilcontext.FromMessage(context.Background(), msg), name, metaV1.GetOptions{})
if errors.IsNotFound(err) {
klog.Warningf("message: %s process failure, node %s not found", msg.GetID(), name)
continue
@@ -587,7 +637,7 @@ func (uc *UpstreamController) updateNodeStatus() {
getNode.Status = nodeStatusRequest.Status
- node, err := uc.kubeClient.CoreV1().Nodes().UpdateStatus(context.Background(), getNode, metaV1.UpdateOptions{})
+ node, err := uc.kubeClient.CoreV1().Nodes().UpdateStatus(utilcontext.FromMessage(context.Background(), msg), getNode, metaV1.UpdateOptions{})
if err != nil {
klog.Warningf("message: %s process failure, update node failed with error: %s, namespace: %s, name: %s", msg.GetID(), err, getNode.Namespace, getNode.Name)
continue
@@ -634,11 +684,11 @@ func kubeClientGet(uc *UpstreamController, namespace string, name string, queryT
case model.ResourceTypeSecret:
obj, err = uc.secretLister.Secrets(namespace).Get(name)
case common.ResourceTypePersistentVolume:
- obj, err = uc.kubeClient.CoreV1().PersistentVolumes().Get(context.Background(), name, metaV1.GetOptions{})
+ obj, err = uc.kubeClient.CoreV1().PersistentVolumes().Get(utilcontext.FromMessage(context.Background(), msg), name, metaV1.GetOptions{})
case common.ResourceTypePersistentVolumeClaim:
- obj, err = uc.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), name, metaV1.GetOptions{})
+ obj, err = uc.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(utilcontext.FromMessage(context.Background(), msg), name, metaV1.GetOptions{})
case common.ResourceTypeVolumeAttachment:
- obj, err = uc.kubeClient.StorageV1().VolumeAttachments().Get(context.Background(), name, metaV1.GetOptions{})
+ obj, err = uc.kubeClient.StorageV1().VolumeAttachments().Get(utilcontext.FromMessage(context.Background(), msg), name, metaV1.GetOptions{})
case model.ResourceTypeNode:
obj, err = uc.nodeLister.Get(name)
case model.ResourceTypeServiceAccountToken:
@@ -772,7 +822,7 @@ func (uc *UpstreamController) getServiceAccountToken(namespace string, name stri
return nil, err
}
- tokenRequest, err := uc.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, &tr, metaV1.CreateOptions{})
+ tokenRequest, err := uc.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(utilcontext.FromMessage(context.TODO(), msg), name, &tr, metaV1.CreateOptions{})
if err != nil {
klog.Errorf("apiserver get service account token failed: err %v", err)
return nil, err
@@ -842,6 +892,11 @@ func (uc *UpstreamController) registerNode() {
klog.Warningf("message: %s process failure, get resource name failed with error: %v", msg.GetID(), err)
continue
}
+ nodeID, err := messagelayer.GetNodeID(msg)
+ if err != nil {
+ klog.Warningf("message: %s process failure, get node ID failed with error: %s", msg.GetID(), err)
+ continue
+ }
node := &v1.Node{}
err = json.Unmarshal(data, node)
@@ -852,7 +907,7 @@ func (uc *UpstreamController) registerNode() {
continue
}
- resp, err := uc.createNode(name, node)
+ resp, err := uc.createNode(nodeID, name, node)
if err != nil {
klog.Errorf("create node %s error: %v , register node failed", name, err)
}
@@ -896,7 +951,7 @@ func (uc *UpstreamController) patchNode() {
continue
}
- node, err := uc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), name, apimachineryType.StrategicMergePatchType, patchBytes, metaV1.PatchOptions{}, "status")
+ node, err := uc.kubeClient.CoreV1().Nodes().Patch(utilcontext.FromMessage(context.TODO(), msg), name, apimachineryType.StrategicMergePatchType, patchBytes, metaV1.PatchOptions{}, "status")
if err != nil {
klog.Errorf("message: %s process failure, patch node failed with error: %v, namespace: %s, name: %s", msg.GetID(), err, namespace, name)
}
@@ -949,7 +1004,7 @@ func (uc *UpstreamController) updateNode() {
switch msg.GetOperation() {
case model.UpdateOperation:
- getNode, err := uc.kubeClient.CoreV1().Nodes().Get(context.Background(), name, metaV1.GetOptions{})
+ getNode, err := uc.kubeClient.CoreV1().Nodes().Get(utilcontext.FromMessage(context.Background(), msg), name, metaV1.GetOptions{})
if errors.IsNotFound(err) {
klog.Warningf("message: %s process failure, node %s not found", msg.GetID(), name)
continue
@@ -977,7 +1032,7 @@ func (uc *UpstreamController) updateNode() {
klog.Warningf("marshal node data failed with err: %s", err)
continue
}
- node, err := uc.kubeClient.CoreV1().Nodes().Patch(context.Background(), getNode.Name, apimachineryType.StrategicMergePatchType, byteNode, metaV1.PatchOptions{})
+ node, err := uc.kubeClient.CoreV1().Nodes().Patch(utilcontext.FromMessage(context.Background(), msg), getNode.Name, apimachineryType.StrategicMergePatchType, byteNode, metaV1.PatchOptions{})
if err != nil {
klog.Warningf("message: %s process failure, update node failed with error: %s, namespace: %s, name: %s", msg.GetID(), err, getNode.Namespace, getNode.Name)
continue
@@ -1039,7 +1094,7 @@ func (uc *UpstreamController) patchPod() {
continue
}
- updatedPod, err := uc.kubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, apimachineryType.StrategicMergePatchType, patchBytes, metaV1.PatchOptions{}, "status")
+ updatedPod, err := uc.kubeClient.CoreV1().Pods(namespace).Patch(utilcontext.FromMessage(context.TODO(), msg), name, apimachineryType.StrategicMergePatchType, patchBytes, metaV1.PatchOptions{}, "status")
if err != nil {
klog.Errorf("message: %s process failure, patch pod failed with error: %v, namespace: %s, name: %s", msg.GetID(), err, namespace, name)
}
@@ -1088,7 +1143,7 @@ func (uc *UpstreamController) createPod() {
continue
}
- createPod, err := uc.kubeClient.CoreV1().Pods(namespace).Create(context.TODO(), &pod, metaV1.CreateOptions{})
+ createPod, err := uc.kubeClient.CoreV1().Pods(namespace).Create(utilcontext.FromMessage(context.TODO(), msg), &pod, metaV1.CreateOptions{})
if err != nil {
klog.Errorf("message: %s process failure, create pod failed with error: %v, namespace: %s, name: %s", msg.GetID(), err, namespace, name)
}
@@ -1149,7 +1204,7 @@ func (uc *UpstreamController) deletePod() {
}
var resMsg *model.Message
- err = uc.kubeClient.CoreV1().Pods(namespace).Delete(context.Background(), name, deleteOptions)
+ err = uc.kubeClient.CoreV1().Pods(namespace).Delete(utilcontext.FromMessage(context.Background(), msg), name, deleteOptions)
if err != nil && !errors.IsNotFound(err) && !strings.Contains(err.Error(), "The object might have been deleted and then recreated") {
klog.Warningf("Failed to delete pod, namespace: %s, name: %s, err: %v", namespace, name, err)
resMsg = model.NewMessage(msg.GetID()).
@@ -1219,7 +1274,7 @@ func (uc *UpstreamController) createOrUpdateLease() {
switch msg.GetOperation() {
case model.InsertOperation:
- resp, err := uc.kubeClient.CoordinationV1().Leases(namespace).Create(context.TODO(), lease, metaV1.CreateOptions{})
+ resp, err := uc.kubeClient.CoordinationV1().Leases(namespace).Create(utilcontext.FromMessage(context.TODO(), msg), lease, metaV1.CreateOptions{})
if err != nil {
klog.Errorf("create lease %s failed, error: %v", name, err)
}
@@ -1235,7 +1290,7 @@ func (uc *UpstreamController) createOrUpdateLease() {
klog.V(4).Infof("message: %s, create lease successfully, namespace: %s, name: %s", msg.GetID(), namespace, name)
case model.UpdateOperation:
- resp, err := uc.kubeClient.CoordinationV1().Leases(namespace).Update(context.TODO(), lease, metaV1.UpdateOptions{})
+ resp, err := uc.kubeClient.CoordinationV1().Leases(namespace).Update(utilcontext.FromMessage(context.TODO(), msg), lease, metaV1.UpdateOptions{})
if err != nil {
klog.Errorf("Update lease %s failed, error: %v", name, err)
}
@@ -1322,7 +1377,7 @@ func (uc *UpstreamController) processCSR() {
continue
}
- csrResp, err := uc.kubeClient.CertificatesV1().CertificateSigningRequests().Create(context.Background(), csr, metaV1.CreateOptions{})
+ csrResp, err := uc.kubeClient.CertificatesV1().CertificateSigningRequests().Create(utilcontext.FromMessage(context.Background(), msg), csr, metaV1.CreateOptions{})
if err != nil {
klog.Errorf("create CertificateSigningRequests %s failed, error: %s", name, err)
}
@@ -1465,6 +1520,32 @@ func (uc *UpstreamController) nodeMsgResponse(nodeName, namespace, content strin
}
}
+func UpdateAnnotation(ctx context.Context, nodeName string) error {
+ node, err := client.GetKubeClient().CoreV1().Nodes().Get(ctx, nodeName, metaV1.GetOptions{})
+ if err != nil {
+ return fmt.Errorf("failed to get node:%s,err:%v", nodeName, err)
+ }
+ hostnameOverride := kubeedgeutil.GetHostname()
+ localIP, err := kubeedgeutil.GetLocalIP(hostnameOverride)
+ if err != nil {
+ return fmt.Errorf("failed to get cloudcore localIP with err:%v", err)
+ }
+ if value, ok := node.Annotations[comconstants.EdgeMappingCloudKey]; ok {
+ if value == localIP {
+ return nil
+ }
+ }
+ if node.Annotations == nil {
+ node.Annotations = make(map[string]string)
+ }
+ node.Annotations[comconstants.EdgeMappingCloudKey] = localIP
+ _, err = client.GetKubeClient().CoreV1().Nodes().Update(ctx, node, metaV1.UpdateOptions{})
+ if err != nil {
+ return fmt.Errorf("failed to update node:%s with err:%v", nodeName, err)
+ }
+ return nil
+}
+
// NewUpstreamController create UpstreamController from config
func NewUpstreamController(config *v1alpha1.EdgeController, factory k8sinformer.SharedInformerFactory) (*UpstreamController, error) {
uc := &UpstreamController{
diff --git a/cloud/pkg/edgecontroller/manager/configmap_test.go b/cloud/pkg/edgecontroller/manager/configmap_test.go
index 28f571169..8fac7113e 100644
--- a/cloud/pkg/edgecontroller/manager/configmap_test.go
+++ b/cloud/pkg/edgecontroller/manager/configmap_test.go
@@ -85,7 +85,7 @@ func TestNewConfigMapManager(t *testing.T) {
QPS: 100,
Burst: 200,
ContentType: "application/vnd.kubernetes.protobuf",
- })
+ }, false)
client.DefaultGetRestMapper = func() (mapper meta.RESTMapper, err error) { return nil, nil }
diff --git a/cloud/pkg/edgecontroller/manager/node_test.go b/cloud/pkg/edgecontroller/manager/node_test.go
index d0ac50b1b..bc3a2ebc7 100644
--- a/cloud/pkg/edgecontroller/manager/node_test.go
+++ b/cloud/pkg/edgecontroller/manager/node_test.go
@@ -78,7 +78,7 @@ func TestNewNodesManager(t *testing.T) {
QPS: 100,
Burst: 200,
ContentType: "application/vnd.kubernetes.protobuf",
- })
+ }, false)
client.DefaultGetRestMapper = func() (mapper meta.RESTMapper, err error) { return nil, nil }
diff --git a/cloud/pkg/edgecontroller/manager/pod_test.go b/cloud/pkg/edgecontroller/manager/pod_test.go
index 1541bed7b..1b7b9dcc9 100644
--- a/cloud/pkg/edgecontroller/manager/pod_test.go
+++ b/cloud/pkg/edgecontroller/manager/pod_test.go
@@ -221,7 +221,7 @@ func TestNewPodManager(t *testing.T) {
QPS: 100,
Burst: 200,
ContentType: "application/vnd.kubernetes.protobuf",
- })
+ }, false)
client.DefaultGetRestMapper = func() (mapper meta.RESTMapper, err error) { return nil, nil }
diff --git a/cloud/pkg/edgecontroller/manager/rule_test.go b/cloud/pkg/edgecontroller/manager/rule_test.go
index b3155de6a..536775af1 100644
--- a/cloud/pkg/edgecontroller/manager/rule_test.go
+++ b/cloud/pkg/edgecontroller/manager/rule_test.go
@@ -85,7 +85,7 @@ func TestNewRuleManager(t *testing.T) {
QPS: 100,
Burst: 200,
ContentType: "application/vnd.kubernetes.protobuf",
- })
+ }, false)
client.DefaultGetRestMapper = func() (mapper meta.RESTMapper, err error) { return nil, nil }
diff --git a/cloud/pkg/edgecontroller/manager/ruleendpoint_test.go b/cloud/pkg/edgecontroller/manager/ruleendpoint_test.go
index 2614bbc35..67df7a9f3 100644
--- a/cloud/pkg/edgecontroller/manager/ruleendpoint_test.go
+++ b/cloud/pkg/edgecontroller/manager/ruleendpoint_test.go
@@ -85,7 +85,7 @@ func TestNewRuleEndpointManager(t *testing.T) {
QPS: 100,
Burst: 200,
ContentType: "application/vnd.kubernetes.protobuf",
- })
+ }, false)
client.DefaultGetRestMapper = func() (mapper meta.RESTMapper, err error) { return nil, nil }
diff --git a/cloud/pkg/edgecontroller/manager/secret_test.go b/cloud/pkg/edgecontroller/manager/secret_test.go
index 8cb679797..82cde4b0e 100644
--- a/cloud/pkg/edgecontroller/manager/secret_test.go
+++ b/cloud/pkg/edgecontroller/manager/secret_test.go
@@ -84,7 +84,7 @@ func TestNewSecretManager(t *testing.T) {
QPS: 100,
Burst: 200,
ContentType: "application/vnd.kubernetes.protobuf",
- })
+ }, false)
client.DefaultGetRestMapper = func() (mapper meta.RESTMapper, err error) { return nil, nil }
diff --git a/cloud/pkg/router/listener/http.go b/cloud/pkg/router/listener/http.go
index b7b703646..688db9be5 100644
--- a/cloud/pkg/router/listener/http.go
+++ b/cloud/pkg/router/listener/http.go
@@ -1,18 +1,27 @@
package listener
import (
+ "bytes"
+ "context"
+ "errors"
"fmt"
"io"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
+ "github.com/avast/retry-go"
"github.com/google/uuid"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
+ "github.com/kubeedge/kubeedge/cloud/pkg/common/client"
routerConfig "github.com/kubeedge/kubeedge/cloud/pkg/router/config"
"github.com/kubeedge/kubeedge/cloud/pkg/router/utils"
+ "github.com/kubeedge/kubeedge/common/constants"
+ "github.com/kubeedge/kubeedge/pkg/util"
)
const MaxMessageBytes = 12 * (1 << 20)
@@ -98,85 +107,132 @@ func (rh *RestHandler) matchedPath(uri string) (string, bool) {
}
func (rh *RestHandler) httpHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Transfer-Encoding", "chunked")
uriSections := strings.Split(r.RequestURI, "/")
if len(uriSections) < 2 {
// URL format incorrect
- klog.Warningf("url format incorrect: %s", r.URL.String())
- w.WriteHeader(http.StatusNotFound)
- if _, err := w.Write([]byte("Request error")); err != nil {
- klog.Errorf("Response write error: %s, %s", r.RequestURI, err.Error())
- }
+ err := fmt.Errorf("url format incorrect: %s", r.URL.String())
+ writeErr(w, r, http.StatusNotFound, err)
return
}
- matchPath, exist := rh.matchedPath(r.RequestURI)
- if !exist {
- klog.Warningf("URL format incorrect: %s", r.RequestURI)
- w.WriteHeader(http.StatusNotFound)
- if _, err := w.Write([]byte("Request error")); err != nil {
- klog.Errorf("Response write error: %s, %s", r.RequestURI, err.Error())
- }
- return
- }
- v, ok := rh.handlers.Load(matchPath)
- if !ok {
- klog.Warningf("No matched handler for path: %s", matchPath)
- return
- }
- handle, ok := v.(Handle)
- if !ok {
- klog.Errorf("invalid convert to Handle. match path: %s", matchPath)
- return
- }
aReaderCloser := http.MaxBytesReader(w, r.Body, MaxMessageBytes)
b, err := io.ReadAll(aReaderCloser)
if err != nil {
- klog.Errorf("request error, write result: %v", err)
- w.WriteHeader(http.StatusBadRequest)
- if _, err = w.Write([]byte("Request error,body is null")); err != nil {
- klog.Errorf("Response write error: %s, %s", r.RequestURI, err.Error())
- }
+ writeErr(w, r, http.StatusBadRequest, err)
return
}
- if isNodeName(uriSections[1]) {
- params := make(map[string]interface{})
- msgID := uuid.New().String()
- params["messageID"] = msgID
- params["request"] = r
- params["timeout"] = rh.restTimeout
- params["data"] = b
+ edgeNodeName := uriSections[1]
+ err = retry.Do(
+ func() error {
+ targetCloudCoreIP, err := GetEdgeToCloudCoreIP(r.Context(), edgeNodeName)
+ if err != nil {
+ return err
+ }
- v, err := handle(params)
- if err != nil {
- klog.Errorf("handle request error, msg id: %s, err: %v", msgID, err)
- return
- }
- response, ok := v.(*http.Response)
- if !ok {
- klog.Errorf("response convert error, msg id: %s", msgID)
- return
- }
- body, err := io.ReadAll(io.LimitReader(response.Body, MaxMessageBytes))
- if err != nil {
- klog.Errorf("response body read error, msg id: %s, reason: %v", msgID, err)
- return
- }
- for key, values := range response.Header {
- for _, value := range values {
- w.Header().Add(key, value)
+ hostnameOverride := util.GetHostname()
+ localIP, err := util.GetLocalIP(hostnameOverride)
+ if err != nil {
+ return fmt.Errorf("failed to get cloudcore localIP with err:%v", err)
}
- }
- w.WriteHeader(response.StatusCode)
- if _, err = w.Write(body); err != nil {
- klog.Errorf("response body write error, msg id: %s, reason: %v", msgID, err)
- return
- }
- klog.Infof("response to client, msg id: %s, write result: success", msgID)
- } else {
- w.WriteHeader(http.StatusNotFound)
- _, err = w.Write([]byte("No rule match"))
- klog.Infof("no rule match, write result: %v", err)
+ if targetCloudCoreIP != localIP {
+ var url string
+ if r.TLS != nil {
+ url = "https://" + targetCloudCoreIP
+ } else {
+ url = "http://" + targetCloudCoreIP
+ }
+ url += ":" + strconv.Itoa(rh.port) + r.RequestURI
+ reqBody := io.NopCloser(bytes.NewBuffer(b))
+ forwardReq, err := http.NewRequest(r.Method, url, reqBody)
+ if err != nil {
+ return fmt.Errorf("failed to create forward request: %v", err)
+ }
+
+ forwardReq.TLS = r.TLS
+ forwardReq.Header = make(http.Header)
+ for key, values := range r.Header {
+ forwardReq.Header[key] = values
+ }
+ return requestForward(targetCloudCoreIP, w, forwardReq)
+ }
+
+ matchPath, exist := rh.matchedPath(r.RequestURI)
+ if !exist {
+ klog.Warningf("URL format incorrect: %s", r.RequestURI)
+ w.WriteHeader(http.StatusNotFound)
+ if _, err := w.Write([]byte("Request error")); err != nil {
+ klog.Errorf("Response write error: %s, %s", r.RequestURI, err.Error())
+ }
+ return nil
+ }
+ v, ok := rh.handlers.Load(matchPath)
+ if !ok {
+ klog.Warningf("No matched handler for path: %s", matchPath)
+ return nil
+ }
+ handle, ok := v.(Handle)
+ if !ok {
+ klog.Errorf("invalid convert to Handle. match path: %s", matchPath)
+ return nil
+ }
+
+ if isNodeName(uriSections[1]) {
+ params := make(map[string]interface{})
+ msgID := uuid.New().String()
+ params["messageID"] = msgID
+ params["request"] = r
+ params["timeout"] = rh.restTimeout
+ params["data"] = b
+
+ v, err := handle(params)
+ if err != nil {
+ klog.Errorf("handle request error, msg id: %s, err: %v", msgID, err)
+ return nil
+ }
+ response, ok := v.(*http.Response)
+ if !ok {
+ klog.Errorf("response convert error, msg id: %s", msgID)
+ return nil
+ }
+ body, err := io.ReadAll(io.LimitReader(response.Body, MaxMessageBytes))
+ if err != nil {
+ klog.Errorf("response body read error, msg id: %s, reason: %v", msgID, err)
+ return nil
+ }
+ for key, values := range response.Header {
+ for _, value := range values {
+ w.Header().Add(key, value)
+ }
+ }
+
+ if response.StatusCode != http.StatusOK {
+ errMsg := string(body)
+ return errors.New(errMsg)
+ }
+
+ w.WriteHeader(response.StatusCode)
+ if _, err = w.Write(body); err != nil {
+ klog.Errorf("response body write error, msg id: %s, reason: %v", msgID, err)
+ return nil
+ }
+ klog.Infof("response to client, msg id: %s, write result: success", msgID)
+ return nil
+ }
+ w.WriteHeader(http.StatusNotFound)
+ _, err = w.Write([]byte("No rule match"))
+ klog.Infof("no rule match, write result: %v", err)
+ return nil
+ },
+ retry.Delay(1*time.Second),
+ retry.Attempts(3),
+ retry.DelayType(retry.FixedDelay),
+ )
+
+ if err != nil {
+ writeErr(w, r, http.StatusInternalServerError, err)
+ return
}
}
@@ -196,3 +252,61 @@ func (rh *RestHandler) IsMatch(key interface{}, message interface{}) bool {
func isNodeName(_ string) bool {
return true
}
+
+func GetEdgeToCloudCoreIP(ctx context.Context, nodeName string) (string, error) {
+ node, err := client.GetKubeClient().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+ if err != nil {
+ return "", fmt.Errorf("failed to get node:%s,err:%v", nodeName, err)
+ }
+ cloudCoreIP, ok := node.Annotations[constants.EdgeMappingCloudKey]
+ if !ok {
+ return "", fmt.Errorf("no corresponding cloudcore was found for edgeNode:%s", nodeName)
+ }
+ return cloudCoreIP, nil
+}
+
+func requestForward(targetCloudCoreIP string, w http.ResponseWriter, forwardReq *http.Request) error {
+ httpClient := &http.Client{}
+ resp, err := httpClient.Do(forwardReq)
+ if err != nil {
+ return fmt.Errorf("failed to forward request: %v", err)
+ }
+ defer func(Body io.ReadCloser) {
+ err := Body.Close()
+ if err != nil {
+ klog.Errorf("failed to close resp.Body with err:%v", err)
+ }
+ }(resp.Body)
+
+ for key, values := range resp.Header {
+ for _, value := range values {
+ w.Header().Add(key, value)
+ }
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ bodyBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("error reading body:%v", err)
+ }
+ errMsg := string(bodyBytes)
+ return errors.New(errMsg)
+ }
+
+ w.WriteHeader(resp.StatusCode)
+ _, err = io.Copy(w, resp.Body)
+ if err != nil {
+ return fmt.Errorf("failed to copy resp.Body to writer with err:%v", err)
+ }
+
+ klog.Infof("forwarded request to %s successfully", targetCloudCoreIP)
+ return nil
+}
+
+func writeErr(w http.ResponseWriter, r *http.Request, statusCode int, err error) {
+ klog.Errorf(err.Error())
+ w.WriteHeader(statusCode)
+ if _, err := w.Write([]byte(err.Error())); err != nil {
+ klog.Errorf("Response write error: %s, %s", r.RequestURI, err.Error())
+ }
+}
diff --git a/cloud/pkg/router/provider/eventbus/eventbus.go b/cloud/pkg/router/provider/eventbus/eventbus.go
index e6ca0012b..955587c23 100644
--- a/cloud/pkg/router/provider/eventbus/eventbus.go
+++ b/cloud/pkg/router/provider/eventbus/eventbus.go
@@ -9,6 +9,7 @@ import (
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
"github.com/kubeedge/kubeedge/cloud/pkg/router/constants"
"github.com/kubeedge/kubeedge/cloud/pkg/router/listener"
@@ -143,6 +144,14 @@ func (eb *EventBus) GoToTarget(data map[string]interface{}, _ chan struct{}) (in
msg.SetResourceOperation(resource, publishOperation)
msg.FillBody(string(body))
msg.SetRoute(modules.RouterSourceEventBus, modules.UserGroup)
+
+ sessionMgr, err := cloudhub.GetSessionManager()
+ if err != nil {
+ return nil, err
+ }
+ if _, exists := sessionMgr.GetSession(nodeName); !exists {
+ return nil, fmt.Errorf("cloudcore doesn't have session for node:%s", nodeName)
+ }
beehiveContext.Send(modules.CloudHubModuleName, *msg)
return nil, nil
}
diff --git a/cloud/pkg/router/provider/servicebus/servicebus.go b/cloud/pkg/router/provider/servicebus/servicebus.go
index 250f39786..557ab6e51 100644
--- a/cloud/pkg/router/provider/servicebus/servicebus.go
+++ b/cloud/pkg/router/provider/servicebus/servicebus.go
@@ -12,6 +12,7 @@ import (
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
"github.com/kubeedge/kubeedge/cloud/pkg/router/constants"
"github.com/kubeedge/kubeedge/cloud/pkg/router/listener"
@@ -146,6 +147,14 @@ func (sb *ServiceBus) GoToTarget(data map[string]interface{}, stop chan struct{}
msg.SetResourceOperation(resource, request.Method)
msg.FillBody(request)
msg.SetRoute(modules.RouterSourceServiceBus, modules.UserGroup)
+
+ sessionMgr, err := cloudhub.GetSessionManager()
+ if err != nil {
+ return nil, err
+ }
+ if _, exists := sessionMgr.GetSession(nodeName); !exists {
+ return nil, fmt.Errorf("cloudcore doesn't have session for node:%s", nodeName)
+ }
beehiveContext.Send(modules.CloudHubModuleName, *msg)
if stop != nil {
listener.MessageHandlerInstance.SetCallback(messageID, func(message *model.Message) {
diff --git a/common/constants/default.go b/common/constants/default.go
index 5e254d8fa..db91af7b3 100644
--- a/common/constants/default.go
+++ b/common/constants/default.go
@@ -16,7 +16,8 @@ const (
SystemName = "kubeedge"
SystemNamespace = SystemName
- CloudConfigMapName = "cloudcore"
+ CloudConfigMapName = "cloudcore"
+ EdgeMappingCloudKey = "cloudcore"
// runtime
DockerContainerRuntime = "docker"
diff --git a/docs/images/proposals/authorizer-chain.png b/docs/images/proposals/authorizer-chain.png
new file mode 100644
index 000000000..dac10f5a9
--- /dev/null
+++ b/docs/images/proposals/authorizer-chain.png
Binary files differ
diff --git a/docs/images/proposals/cloudhub-enhancement-design.png b/docs/images/proposals/cloudhub-enhancement-design.png
new file mode 100644
index 000000000..89087f49e
--- /dev/null
+++ b/docs/images/proposals/cloudhub-enhancement-design.png
Binary files differ
diff --git a/docs/images/proposals/edge-pod-get.png b/docs/images/proposals/edge-pod-get.png
new file mode 100644
index 000000000..24c1b7cc3
--- /dev/null
+++ b/docs/images/proposals/edge-pod-get.png
Binary files differ
diff --git a/docs/images/proposals/keadm-get-pod.png b/docs/images/proposals/keadm-get-pod.png
new file mode 100644
index 000000000..f953bb86b
--- /dev/null
+++ b/docs/images/proposals/keadm-get-pod.png
Binary files differ
diff --git a/docs/images/proposals/keadm-restart-pod.png b/docs/images/proposals/keadm-restart-pod.png
new file mode 100644
index 000000000..560119df8
--- /dev/null
+++ b/docs/images/proposals/keadm-restart-pod.png
Binary files differ
diff --git a/docs/proposals/cloudhub-enhancement.md b/docs/proposals/cloudhub-enhancement.md
new file mode 100644
index 000000000..9b6f16e03
--- /dev/null
+++ b/docs/proposals/cloudhub-enhancement.md
@@ -0,0 +1,71 @@
+---
+title: Authorization Enhancement of CloudCore Websocket API
+authors:
+ - "spambot000"
+approvers: {}
+creation-date: 2024-04-06
+last-updated: 2024-06-22
+
+---
+# Authorization Enhancement for CloudCore Websocket API
+
+## Motivation
+CloudCore is the bridge between edge nodes and api server. However, it cannot restrict the access to cluster resources for a specific node. To address this issue, it is necessary to make an enhancement on CloudCore websocket API.
+
+#### Goal
+- Alpha
+
+Support node authorization mode. CloudCore can restrict an edge node from operating the resources owned by other edge nodes. It is planned to implement this feature before release 1.18.
+- Beta
+
+Support RBAC authorization mode. KubeEdge users can utilize RBAC configurations to limit the access to their custom resources. There are still some implementation details to be discussed.
+
+## Design detail for alpha
+
+![arch.png](../images/proposals/cloudhub-enhancement-design.png)
+
+### Summary
+
+CloudHub is the entrance of CloudCore websocket API so that we can identify the sender of messages and check whether the sender has sufficient permissions.
+
+### Authentication
+
+To establish the websocket connection with CloudCore, EdgeCore must provide an X509 certificate, which is signed by CloudCore. By verifying the client certificates and parsing the `Subject` fields, CloudHub can identify different nodes.
+
+### Authorization
+
+Most of CloudHub APIs read/write k8s resources. To limit access to k8s resources, reusing the existing mechanism is a good choice. `User impersonation` can override the user info, which minimizes the permissions of requests made by a privileged user.
+
+Some CloudHub APIs won't directly access k8s resources, so we must manually check the node permissions. Code reuse can decrease the complexity of development and maintenance. The API server employs several authorization modes to authorize requests: `Node`, `ABAC`, `RBAC`, `Webhook`. `NodeAuthorizer` implements the `Node` authorization mode, which prevents a node from reading resources that are not related to the pods deployed on it. The `unionAuthzHandler` provides an approach to organize all of these authorization modes. Additionally, an authorizer for KubeEdge custom resources is necessary to bypass the authorizer chain.
+
+The following graph demonstrates how CloudHub handles different requests by the authorizer chain.
+
+![arch.png](../images/proposals/authorizer-chain.png)
+
+## Configurations
+
+This feature may introduce following configurations:
+
+```yaml
+kubeAPIConfig:
+ ...
+modules:
+ cloudhub:
+ authorization:
+ // optional, default to false, toggle authorization
+ enable: true
+ // optional, default to false, do authorization but always allow all the requests
+ debug: false
+ // required, an authorizer chain
+ authorizers:
+ // node authorization mode
+ - node:
+ enable: true
+ ...
+```
+
+## Compatibility
+
+- By default, this feature is disabled.
+- This feature relies on the `Common Name` field of the client certificate to identify edge nodes. Older versions of EdgeCore (<1.16) will try to create a certificate with the same `Common Name`. When upgrading, you can manually generate a new client certificate and replace the old one with it.
+- To safely adapt to this feature, you can switch `debug` on. When authorization fails, CloudCore only records a log entry and the requests are processed normally.
diff --git a/docs/proposals/device-crd-v1beta1.md b/docs/proposals/device-crd-v1beta1.md
index 84eacf261..1a722a308 100644
--- a/docs/proposals/device-crd-v1beta1.md
+++ b/docs/proposals/device-crd-v1beta1.md
@@ -342,8 +342,8 @@ spec:
nodeName: worker-node1
properties:
- name: temp
- collectCycle: 2000
- reportCycle: 2000
+ collectCycle: 2000 # 2000 stands for 2000 milliseconds (2 seconds)
+ reportCycle: 2000 # 2000 stands for 2000 milliseconds (2 seconds)
desired:
value: "30"
reportToCloud: true
diff --git a/docs/proposals/edgepodgetandrestart.md b/docs/proposals/edgepodgetandrestart.md
new file mode 100644
index 000000000..562862c67
--- /dev/null
+++ b/docs/proposals/edgepodgetandrestart.md
@@ -0,0 +1,136 @@
+---
+title: Add pod restart and status query functions for the edge node
+authors:
+- "@luomengY"
+approvers:
+creation-date: 2024-04-15
+last-updated: 2024-04-15
+status: implementable
+---
+
+# Add pod restart and status query functions for the edge node
+
+
+## Motivation
+
+When the edge node is offline, it is not possible to query the status of the edge node pod and restart the pod of the edge node through kubectl in the cloud. This feature provides support for querying and restarting the pod status of the edge node.
+
+### Goals
+
+- By using the `keadm ctl get pod [flags]` command on edge nodes, the status of pod can be queried.
+- By using the `keadm ctl restart pod [flags]` command on edge nodes, pod can be restarted.
+
+## Background and challenges
+
+- In edge computing, the network environment is usually poor, and edge nodes are offline most of the time. Currently, KubeEdge does not support querying the status of pods when the cloud-edge connection is offline.
+- When the edge node goes offline, patching the pod state of the edge node to the apiserver will fail. However, if the patch update is not done at the edge, the pod state in the edge node's metabase sqlite will not be updated. Even though KubeEdge provides a metaserver and considers edge autonomy, the obtained pod state is still the state before going offline. Therefore, it is necessary to consider the patch update of the pod state of the edge node when it goes offline.
+- When the edge node goes offline, users cannot restart the pod of the edge node. In many scenarios, users expect KubeEdge to support pod restart on the edge node. Here, we do not recommend deleting the pod at the edge node. Considering excessive permissions, edge pod restart only stops the containers in the pod, rather than killing the podsandbox.
+
+## Design Details
+
+### Keadm ctl get pod design.
+
+1. Add the ctl get pod subcommand to keadm:
+
+ ```
+ "keadm ctl get pod" command get pods in edge node
+
+ Usage:
+ keadm ctl get pod [flags]
+
+ Flags:
+ -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace
+ -h, --help help for pod
+ -n, --namespace string Specify a namespace (default "default")
+ -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file, custom-columns, custom-columns-file, wide)
+ -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
+ ```
+2. Get pod scheme design
+
+ <img src="../images/proposals/keadm-get-pod.png">
+
+ - When the `keadm ctl get pod [flags]` command is executed, a Restful request will be issued to MetaServer.
+ - MetaServer determines whether edge and cloud are online on the network.
+ - If the edge and cloud networks are connected, MetaServer will forward the restful request through a proxy to ApiServer, and then request a return result from ApiServer.
+ - If the edge node goes offline, MetaServer will retrieve pod data from the edge metabase sqlite.
+ - After obtaining the results, format them using kubectl's print format and output them to the console.
+
+3. example
+
+ ```
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod
+ NAME READY STATUS RESTARTS AGE
+ mysql-0 0/1 CrashLoopBackOff 47 (55s ago) 140m
+ nginx-deployment-7b79f6fd7f-wpm62 1/1 Running 0 139m
+
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod -owide -A
+ NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ default mysql-0 0/1 CrashLoopBackOff 43 (2m55s ago) 138m 10.88.0.2 centos-edgenode1 <none> <none>
+ default nginx-deployment-7b79f6fd7f-wpm62 1/1 Running 0 137m 10.88.0.3 centos-edgenode1 <none> <none>
+ kube-system kube-proxy-lrhf2 1/1 Running 0 6h27m 192.168.52.100 centos-edgenode1 <none> <none>
+ kubeedge edge-eclipse-mosquitto-4p96z 1/1 Running 0 6h42m 192.168.52.100 centos-edgenode1 <none> <none>
+ kubeedge edgemesh-agent-rtwr2 1/1 Running 0 5h43m 192.168.52.100 centos-edgenode1 <none> <none>
+ kubesphere-monitoring-system node-exporter-pwcfm 2/2 Running 0 128m 192.168.52.100 centos-edgenode1 <none> <none>
+
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod -n kubeedge -l k8s-app=kubeedge,kubeedge=edgemesh-agent -owide
+ NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ edgemesh-agent-rtwr2 1/1 Running 0 5h49m 192.168.52.100 centos-edgenode1 <none> <none>
+ ```
+
+### Keadm ctl restart pod design.
+
+1. Add the ctl restart pod subcommand to keadm:
+
+ ```
+ "keadm ctl restart pod" command delete pods in edge node
+
+ Usage:
+ keadm ctl restart pod [flags]
+
+ Flags:
+ -h, --help help for pod
+ -n, --namespace string Specify a namespace (default "default")
+ ```
+
+2. Restart pod scheme design
+
+ <img src="../images/proposals/keadm-restart-pod.png">
+
+ - After executing `keadm ctl restart pod [flags]`, initiate a Restful API request to MetaServer to retrieve pod data.
+ - Create an `internalapi.RuntimeService` through `remote.NewRemoteRuntimeService`.
+ - Use the `io.kubernetes.pod.name` and `io.kubernetes.pod.namespace` tag selectors to filter the containers in the `remoteRuntimeService` interface that need to be restarted in the pod.
+ - After obtaining the container list, using `remoteRuntimeService.StopContainer` to stop containers.
+
+3. example
+
+ ```
+ [root@centos-edgenode1 kubeedge]# keadm ctl restart pod -n kubeedge edge-eclipse-mosquitto-j2db9
+ 4b9efa598c80ffc59705a1e49aeba0b5fec2db6513905c1cceb8aee7a2ae453d
+ b63fa1d05f0163b5556663c33827e8df673d8c8c386da49c3b3ddf3ccd7efb84
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod -n kubeedge edge-eclipse-mosquitto-j2db9
+ kubeedge edge-eclipse-mosquitto-j2db9 1/1 Running 2 (1m ago) 11d
+
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod -l k8s-app=kubeedge,kubeedge=edgemesh-agent -owide -A
+ NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ kubeedge edgemesh-agent-rtwr2 1/1 Running 0 5h52m 192.168.52.100 centos-edgenode1 <none> <none>
+ [root@centos-edgenode1 kubeedge]# keadm ctl restart pod -n kubeedge edgemesh-agent-rtwr2
+ 689c25f7ca270b539dd4ae9288ba101ab5ca341140d09ee6497385446bac6f30
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod -l k8s-app=kubeedge,kubeedge=edgemesh-agent -owide -A
+ NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ kubeedge edgemesh-agent-rtwr2 1/1 Running 1 (7s ago) 5h52m 192.168.52.100 centos-edgenode1 <none> <none>
+
+ [root@centos-edgenode1 kubeedge]# keadm ctl restart pod -n kubeedge edgemesh-agent-rtwr2
+ 4d6c7f11c98bc44902b87268b87a2a2091c3389eb1fa4f58325fb001ff655924
+ [root@centos-edgenode1 kubeedge]# keadm ctl get pod -l k8s-app=kubeedge,kubeedge=edgemesh-agent -owide -A
+ NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ kubeedge edgemesh-agent-rtwr2 1/1 Running 2 (14s ago) 5h54m 192.168.52.100 centos-edgenode1 <none> <none>
+ ```
+
+### Design of pod's status patch when edge nodes are offline.
+
+The design of obtaining pods at edge nodes when they are offline is as follows:
+
+ <img src="../images/proposals/edge-pod-get.png">
+
+When the edge node is offline, we get the pod from the edge node. In the `metaserver`, we get the data of the `pod` and `podpatch` from SQLite, merge them into the latest pod, and then return it to the requester, because `podpatch` contains the latest pod status.
+`podpatch` is the pod status reported by `StatusManager` from `edged`. MetaManager will persist `podpatch` to SQLite regardless of whether the edge node is online or offline. \ No newline at end of file
diff --git a/edge/cmd/edgecore/app/server.go b/edge/cmd/edgecore/app/server.go
index 6af9719de..9a86e5c9a 100644
--- a/edge/cmd/edgecore/app/server.go
+++ b/edge/cmd/edgecore/app/server.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"os"
+ "strings"
ps "github.com/shirou/gopsutil/v3/process"
"github.com/spf13/cobra"
@@ -87,7 +88,7 @@ offering HTTP client capabilities to components of cloud to reach HTTP servers r
if err != nil {
klog.Exit(err)
}
- config.Modules.EdgeHub.Token = string(token)
+ config.Modules.EdgeHub.Token = strings.TrimSpace(string(token))
}
if errs := validation.ValidateEdgeCoreConfiguration(config); len(errs) > 0 {
diff --git a/edge/pkg/edgehub/certificate/certmanager.go b/edge/pkg/edgehub/certificate/certmanager.go
index fb7bc5d1f..21f0abaa8 100644
--- a/edge/pkg/edgehub/certificate/certmanager.go
+++ b/edge/pkg/edgehub/certificate/certmanager.go
@@ -5,17 +5,14 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
- "crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
- "encoding/hex"
"encoding/pem"
"fmt"
"io"
nethttp "net/http"
"os"
- "strings"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -27,6 +24,7 @@ import (
"github.com/kubeedge/kubeedge/edge/pkg/edgehub/common/certutil"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub/common/http"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha2"
+ "github.com/kubeedge/kubeedge/pkg/security/token"
)
// jitteryDuration uses some jitter to set the rotation threshold so each node
@@ -125,13 +123,9 @@ func (cm *CertManager) applyCerts() error {
}
// validate the CA certificate by hashcode
- tokenParts := strings.Split(cm.token, ".")
- if len(tokenParts) != 4 {
- return fmt.Errorf("token credentials are in the wrong format")
- }
- ok, hash, newHash := ValidateCACerts(cacert, tokenParts[0])
- if !ok {
- return fmt.Errorf("failed to validate CA certificate. tokenCAhash: %s, CAhash: %s", hash, newHash)
+ realToken, err := token.VerifyCAAndGetRealToken(cm.token, cacert)
+ if err != nil {
+ return err
}
// save the ca.crt to file
@@ -146,7 +140,7 @@ func (cm *CertManager) applyCerts() error {
// get the edge.crt
caPem := pem.EncodeToMemory(&pem.Block{Bytes: cacert, Type: cert.CertificateBlockType})
- pk, edgeCert, err := cm.GetEdgeCert(cm.certURL, caPem, tls.Certificate{}, strings.Join(tokenParts[1:], "."))
+ pk, edgeCert, err := cm.GetEdgeCert(cm.certURL, caPem, tls.Certificate{}, realToken)
if err != nil {
return fmt.Errorf("failed to get edge certificate from the cloudcore, error: %v", err)
}
@@ -312,18 +306,3 @@ func (cm *CertManager) getCSR() (*ecdsa.PrivateKey, []byte, error) {
return pk, csr, nil
}
-
-// ValidateCACerts validates the CA certificate by hash code
-func ValidateCACerts(cacerts []byte, hash string) (bool, string, string) {
- if len(cacerts) == 0 && hash == "" {
- return true, "", ""
- }
-
- newHash := hashCA(cacerts)
- return hash == newHash, hash, newHash
-}
-
-func hashCA(cacerts []byte) string {
- digest := sha256.Sum256(cacerts)
- return hex.EncodeToString(digest[:])
-}
diff --git a/edge/pkg/edgehub/certificate/certmanager_test.go b/edge/pkg/edgehub/certificate/certmanager_test.go
deleted file mode 100644
index 38286b529..000000000
--- a/edge/pkg/edgehub/certificate/certmanager_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Copyright 2022 The KubeEdge Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package certificate
-
-import (
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "os"
- "testing"
-
- "github.com/kubeedge/kubeedge/edge/pkg/common/util"
-)
-
-func init() {
- _, err := os.Stat("/tmp/edge.crt")
- if err != nil {
- err := util.GenerateTestCertificate("/tmp/", "edge", "edge")
-
- if err != nil {
- fmt.Printf("Failed to create certificate: %v\n", err)
- }
- }
-}
-
-func TestValidateCACerts(t *testing.T) {
- cacert, err := os.ReadFile("/tmp/edge.crt")
- if err != nil {
- t.Fatalf("Failed to load certificate: %v", err)
- }
- digest := sha256.Sum256(cacert)
- hash := hex.EncodeToString(digest[:])
-
- tests := []struct {
- cacert []byte
- hash string
- want bool
- ttName string
- }{
- {
- cacert: make([]byte, 0),
- hash: "",
- want: true,
- ttName: "empty cacert and empty hash",
- },
- {
- cacert: cacert,
- hash: hash,
- want: true,
- ttName: "valid cacert and hash",
- },
- {
- cacert: cacert,
- hash: "invalid",
- want: false,
- ttName: "invalid hash",
- },
- }
- for _, tt := range tests {
- t.Run("", func(t *testing.T) {
- got, _, _ := ValidateCACerts(tt.cacert, tt.hash)
- if got != tt.want {
- t.Errorf("ValidateCACerts = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/edge/pkg/metamanager/client/pod.go b/edge/pkg/metamanager/client/pod.go
index dafb51513..b355f62a1 100644
--- a/edge/pkg/metamanager/client/pod.go
+++ b/edge/pkg/metamanager/client/pod.go
@@ -2,6 +2,7 @@ package client
import (
"encoding/json"
+ "errors"
"fmt"
"reflect"
"strings"
@@ -127,6 +128,10 @@ func (c *pods) Patch(name string, patchBytes []byte) (*corev1.Pod, error) {
return nil, fmt.Errorf("parse message to pod failed, err: %v", err)
}
+ if resp.Router.Operation == model.ResponseErrorOperation {
+ return nil, errors.New(string(content))
+ }
+
return handlePodResp(resource, content)
}
diff --git a/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/store.go b/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/store.go
index a983f7388..2962332c9 100644
--- a/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/store.go
+++ b/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/store.go
@@ -14,9 +14,13 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
+ "k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog/v2"
+ "github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/kubeedge/edge/pkg/metamanager/dao"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/imitator"
+ patchutil "github.com/kubeedge/kubeedge/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/util"
"github.com/kubeedge/kubeedge/pkg/metaserver"
"github.com/kubeedge/kubeedge/pkg/metaserver/util"
)
@@ -56,17 +60,26 @@ func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions,
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.Predicate)
}
-func (s *store) Get(_ context.Context, key string, _ storage.GetOptions, objPtr runtime.Object) error {
+func (s *store) Get(ctx context.Context, key string, _ storage.GetOptions, objPtr runtime.Object) error {
resp, err := s.client.Get(context.TODO(), key)
if err != nil || len(*resp.Kvs) == 0 {
klog.Error(err)
return err
}
unstrObj := objPtr.(*unstructured.Unstructured)
- return runtime.DecodeInto(s.codec, []byte((*resp.Kvs)[0].Value), unstrObj)
+ if err = runtime.DecodeInto(s.codec, []byte((*resp.Kvs)[0].Value), unstrObj); err != nil {
+ return err
+ }
+
+ if unstrObj.GetKind() == "Pod" {
+ if err = MergePatchedResource(ctx, unstrObj, model.ResourceTypePodPatch); err != nil {
+ return err
+ }
+ }
+ return nil
}
-func (s *store) GetList(_ context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
+func (s *store) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
klog.Infof("get a list req, key=%v", key)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
@@ -91,6 +104,12 @@ func (s *store) GetList(_ context.Context, key string, opts storage.ListOptions,
return err
}
+ if unstrObj.GetKind() == "Pod" {
+ if err = MergePatchedResource(ctx, &unstrObj, model.ResourceTypePodPatch); err != nil {
+ return err
+ }
+ }
+
labelSet := labels.Set(unstrObj.GetLabels())
if !opts.Predicate.Label.Matches(labelSet) {
continue
@@ -141,3 +160,27 @@ func newStore() *store {
}
return &s
}
+
+func MergePatchedResource(ctx context.Context, originalObj *unstructured.Unstructured, resourceTypePatch string) error {
+ resKey := fmt.Sprintf("%s/%s/%s", originalObj.GetNamespace(), resourceTypePatch, originalObj.GetName())
+ var metas *[]string
+ metas, err := dao.QueryMeta("key", resKey)
+ if err != nil {
+ return err
+ }
+ if len(*metas) > 0 {
+ defaultScheme := scheme.Scheme
+ defaulter := runtime.ObjectDefaulter(defaultScheme)
+ updatedResource := new(unstructured.Unstructured)
+ GroupVersionKind := originalObj.GroupVersionKind()
+ schemaReferenceObj, err := defaultScheme.New(GroupVersionKind)
+ if err != nil {
+ return fmt.Errorf("failed to build schema reference object, err: %+v", err)
+ }
+ if err = patchutil.StrategicPatchObject(ctx, defaulter, originalObj, []byte((*metas)[0]), updatedResource, schemaReferenceObj, ""); err != nil {
+ return err
+ }
+ originalObj = updatedResource
+ }
+ return nil
+}
diff --git a/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/util/patch.go b/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/util/patch.go
new file mode 100644
index 000000000..b023d288d
--- /dev/null
+++ b/edge/pkg/metamanager/metaserver/kubernetes/storage/sqlite/util/patch.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+@CHANGELOG
+KubeEdge Authors: To merge the patchBytes of StrategicMergePatchType into the original resource,
+This file is derived from K8S apiserver code with reduced set of methods
+Changes done are
+1. Package util got some functions from "k8s.io/apiserver/pkg/endpoints/handlers/patch.go"
+and made some variant
+*/
+
+package util
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/mergepatch"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apiserver/pkg/warning"
+ kjson "sigs.k8s.io/json"
+)
+
+func StrategicPatchObject(
+ requestContext context.Context,
+ defaulter runtime.ObjectDefaulter,
+ originalObject runtime.Object,
+ patchBytes []byte,
+ objToUpdate runtime.Object,
+ schemaReferenceObj runtime.Object,
+ validationDirective string,
+) error {
+ originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject)
+ if err != nil {
+ return err
+ }
+
+ patchMap := make(map[string]interface{})
+ var strictErrs []error
+ if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
+ strictErrs, err = kjson.UnmarshalStrict(patchBytes, &patchMap)
+ if err != nil {
+ return errors.NewBadRequest(err.Error())
+ }
+ } else {
+ if err = kjson.UnmarshalCaseSensitivePreserveInts(patchBytes, &patchMap); err != nil {
+ return errors.NewBadRequest(err.Error())
+ }
+ }
+
+ return applyPatchToObject(requestContext, defaulter, originalObjMap, patchMap, objToUpdate, schemaReferenceObj, strictErrs, validationDirective)
+}
+
+// applyPatchToObject applies a strategic merge patch of <patchMap> to
+// <originalMap> and stores the result in <objToUpdate>.
+// NOTE: <objToUpdate> must be a versioned object.
+func applyPatchToObject(
+ requestContext context.Context,
+ defaulter runtime.ObjectDefaulter,
+ originalMap map[string]interface{},
+ patchMap map[string]interface{},
+ objToUpdate runtime.Object,
+ schemaReferenceObj runtime.Object,
+ strictErrs []error,
+ validationDirective string,
+) error {
+ patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj)
+ if err != nil {
+ return interpretStrategicMergePatchError(err)
+ }
+
+ // Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object
+ converter := runtime.DefaultUnstructuredConverter
+ returnUnknownFields := validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict
+ if err := converter.FromUnstructuredWithValidation(patchedObjMap, objToUpdate, returnUnknownFields); err != nil {
+ strictError, isStrictError := runtime.AsStrictDecodingError(err)
+ switch {
+ case !isStrictError:
+ // disregard any strictErrs, because it's an incomplete
+ // list of strict errors given that we don't know what fields were
+ // unknown because StrategicMergeMapPatch failed.
+ // Non-strict errors trump in this case.
+ return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
+ field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()),
+ })
+ case validationDirective == metav1.FieldValidationWarn:
+ addStrictDecodingWarnings(requestContext, append(strictErrs, strictError.Errors()...))
+ default:
+ strictDecodingError := runtime.NewStrictDecodingError(append(strictErrs, strictError.Errors()...))
+ return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
+ field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), strictDecodingError.Error()),
+ })
+ }
+ } else if len(strictErrs) > 0 {
+ switch {
+ case validationDirective == metav1.FieldValidationWarn:
+ addStrictDecodingWarnings(requestContext, strictErrs)
+ default:
+ return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
+ field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
+ })
+ }
+ }
+
+ // Decoding from JSON to a versioned object would apply defaults, so we do the same here
+ defaulter.Default(objToUpdate)
+
+ return nil
+}
+
+// addStrictDecodingWarnings confirms that the error is a strict decoding error
+// and if so adds a warning for each strict decoding violation.
+func addStrictDecodingWarnings(requestContext context.Context, errs []error) {
+ for _, e := range errs {
+ yamlWarnings := parseYAMLWarnings(e.Error())
+ for _, w := range yamlWarnings {
+ warning.AddWarning(requestContext, "", w)
+ }
+ }
+}
+
+// parseYAMLWarnings takes the strict decoding errors from the yaml decoder's output
+// and parses each individual warnings, or leaves the warning as is if
+// it does not look like a yaml strict decoding error.
+func parseYAMLWarnings(errString string) []string {
+ var trimmedString string
+ if trimmedShortString := strings.TrimPrefix(errString, shortPrefix); len(trimmedShortString) < len(errString) {
+ trimmedString = trimmedShortString
+ } else if trimmedLongString := strings.TrimPrefix(errString, longPrefix); len(trimmedLongString) < len(errString) {
+ trimmedString = trimmedLongString
+ } else {
+ // not a yaml error, return as-is
+ return []string{errString}
+ }
+
+ splitStrings := strings.Split(trimmedString, "\n")
+ for i, s := range splitStrings {
+ splitStrings[i] = strings.TrimSpace(s)
+ }
+ return splitStrings
+}
+
+// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code.
+func interpretStrategicMergePatchError(err error) error {
+ switch err {
+ case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat:
+ return errors.NewBadRequest(err.Error())
+ case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys:
+ return errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false)
+ default:
+ return err
+ }
+}
+
+const (
+ // shortPrefix is one possible beginning of yaml unmarshal strict errors.
+ shortPrefix = "yaml: unmarshal errors:\n"
+ // longPrefix is the other possible beginning of yaml unmarshal strict errors.
+ longPrefix = "error converting YAML to JSON: yaml: unmarshal errors:\n"
+)
diff --git a/edge/pkg/metamanager/process.go b/edge/pkg/metamanager/process.go
index 16bf596c7..dab76bd9c 100644
--- a/edge/pkg/metamanager/process.go
+++ b/edge/pkg/metamanager/process.go
@@ -257,7 +257,12 @@ func (m *metaManager) processPatch(message model.Message) {
feedbackError(err, message)
return
}
- sendToCloud(&message)
+
+ if connect.IsConnected() {
+ sendToCloud(&message)
+ } else {
+ feedbackError(connect.ErrConnectionLost, message)
+ }
}
func (m *metaManager) processResponse(message model.Message) {
diff --git a/go.mod b/go.mod
index b882368dd..6033f55b9 100644
--- a/go.mod
+++ b/go.mod
@@ -278,7 +278,7 @@ require (
k8s.io/pod-security-admission v0.0.0 // indirect
k8s.io/system-validators v1.8.0 // indirect
oras.land/oras-go v1.2.3 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
)
diff --git a/keadm/cmd/keadm/app/cmd/debug/check_test.go b/keadm/cmd/keadm/app/cmd/debug/check_test.go
new file mode 100644
index 000000000..f41fe4b5e
--- /dev/null
+++ b/keadm/cmd/keadm/app/cmd/debug/check_test.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package debug
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
+)
+
+func TestNewCheck(t *testing.T) {
+ assert := assert.New(t)
+ cmd := NewCheck()
+
+ assert.NotNil(cmd)
+ assert.Equal("check", cmd.Use)
+ assert.Equal(edgeCheckShortDescription, cmd.Short)
+ assert.Equal(edgeCheckLongDescription, cmd.Long)
+ assert.Equal(edgeCheckExample, cmd.Example)
+
+ for _, v := range common.CheckObjectMap {
+ subCmd := NewSubEdgeCheck(CheckObject(v))
+ cmd.AddCommand(subCmd)
+
+ assert.NotNil(subCmd)
+ assert.Equal(v.Use, subCmd.Use)
+ assert.Equal(v.Desc, subCmd.Short)
+
+ flags := subCmd.Flags()
+ assert.NotNil(flags)
+
+ switch v.Use {
+ case common.ArgCheckAll:
+ // Verify domain flag
+ flag := flags.Lookup("domain")
+ assert.NotNil(flag)
+ assert.Equal("www.github.com", flag.DefValue)
+ assert.Equal("d", flag.Shorthand)
+ assert.Equal("specify test domain", flag.Usage)
+
+ // Verify IP flag
+ flag = flags.Lookup("ip")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("i", flag.Shorthand)
+ assert.Equal("specify test ip", flag.Usage)
+
+ // Verify cloud-hub-server flag
+ flag = flags.Lookup("cloud-hub-server")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("s", flag.Shorthand)
+ assert.Equal("specify cloudhub server", flag.Usage)
+
+ // Verify dns-ip flag
+ flag = flags.Lookup("dns-ip")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("D", flag.Shorthand)
+ assert.Equal("specify test dns ip", flag.Usage)
+
+ // Verify config flag
+ flag = flags.Lookup("config")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("c", flag.Shorthand)
+ expectedUsage := fmt.Sprintf("Specify configuration file, default is %s", common.EdgecoreConfigPath)
+ assert.Equal(expectedUsage, flag.Usage)
+
+ case common.ArgCheckDNS:
+ // Verify domain flag
+ flag := flags.Lookup("domain")
+ assert.NotNil(flag)
+ assert.Equal("www.github.com", flag.DefValue)
+ assert.Equal("d", flag.Shorthand)
+ assert.Equal("specify test domain", flag.Usage)
+
+ // Verify dns-ip flag
+ flag = flags.Lookup("dns-ip")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("D", flag.Shorthand)
+ assert.Equal("specify test dns ip", flag.Usage)
+
+ case common.ArgCheckNetwork:
+ // Verify IP flag
+ flag := flags.Lookup("ip")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("i", flag.Shorthand)
+ assert.Equal("specify test ip", flag.Usage)
+
+ // Verify cloud-hub-server flag
+ flag = flags.Lookup("cloud-hub-server")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("s", flag.Shorthand)
+ assert.Equal("specify cloudhub server", flag.Usage)
+
+ // Verify config flag
+ flag = flags.Lookup("config")
+ assert.NotNil(flag)
+ assert.Equal("", flag.DefValue)
+ assert.Equal("c", flag.Shorthand)
+ expectedUsage := fmt.Sprintf("Specify configuration file, default is %s", common.EdgecoreConfigPath)
+ assert.Equal(expectedUsage, flag.Usage)
+ }
+ }
+}
+
+func TestNewSubEdgeCheck(t *testing.T) {
+ assert := assert.New(t)
+
+ testCases := []struct {
+ use string
+ expectedDefValue map[string]string
+ expectedShorthand map[string]string
+ expectedUsage map[string]string
+ }{
+ {
+ use: "all",
+ expectedDefValue: map[string]string{
+ "domain": "www.github.com",
+ "ip": "",
+ "cloud-hub-server": "",
+ "dns-ip": "",
+ "config": "",
+ },
+ expectedShorthand: map[string]string{
+ "domain": "d",
+ "ip": "i",
+ "cloud-hub-server": "s",
+ "dns-ip": "D",
+ "config": "c",
+ },
+ expectedUsage: map[string]string{
+ "domain": "specify test domain",
+ "ip": "specify test ip",
+ "cloud-hub-server": "specify cloudhub server",
+ "dns-ip": "specify test dns ip",
+ "config": fmt.Sprintf("Specify configuration file, default is %s", common.EdgecoreConfigPath),
+ },
+ },
+ {
+ use: "dns",
+ expectedDefValue: map[string]string{
+ "domain": "www.github.com",
+ "dns-ip": "",
+ },
+ expectedShorthand: map[string]string{
+ "domain": "d",
+ "dns-ip": "D",
+ },
+ expectedUsage: map[string]string{
+ "domain": "specify test domain",
+ "dns-ip": "specify test dns ip",
+ },
+ },
+ {
+ use: "network",
+ expectedDefValue: map[string]string{
+ "ip": "",
+ "cloud-hub-server": "",
+ "config": "",
+ },
+ expectedShorthand: map[string]string{
+ "ip": "i",
+ "cloud-hub-server": "s",
+ "config": "c",
+ },
+ expectedUsage: map[string]string{
+ "ip": "specify test ip",
+ "cloud-hub-server": "specify cloudhub server",
+ "config": fmt.Sprintf("Specify configuration file, default is %s", common.EdgecoreConfigPath),
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.use, func(t *testing.T) {
+ checkObj := CheckObject{
+ Use: tc.use,
+ Desc: fmt.Sprintf("Check %s item", tc.use),
+ }
+ cmd := NewSubEdgeCheck(checkObj)
+
+ assert.NotNil(cmd)
+
+ flags := cmd.Flags()
+ assert.NotNil(flags)
+
+ for flagName, expectedDefValue := range tc.expectedDefValue {
+ t.Run(flagName, func(t *testing.T) {
+ flag := flags.Lookup(flagName)
+ assert.NotNilf(flag, "Flag %s should exist", flagName)
+
+ assert.Equal(expectedDefValue, flag.DefValue)
+ assert.Equal(tc.expectedShorthand[flagName], flag.Shorthand)
+ assert.Equal(tc.expectedUsage[flagName], flag.Usage)
+ })
+ }
+ })
+ }
+}
+
+func TestNewCheckOptions(t *testing.T) {
+ assert := assert.New(t)
+ co := NewCheckOptions()
+ assert.NotNil(co)
+
+ assert.Equal("www.github.com", co.Domain)
+ assert.Equal(1, co.Timeout)
+}
diff --git a/keadm/cmd/keadm/app/cmd/debug/collect.go b/keadm/cmd/keadm/app/cmd/debug/collect.go
index 1aee5f14a..74a2d6768 100644
--- a/keadm/cmd/keadm/app/cmd/debug/collect.go
+++ b/keadm/cmd/keadm/app/cmd/debug/collect.go
@@ -46,14 +46,12 @@ func NewCollect() *cobra.Command {
return cmd
}
-// dd flags
+// add flags
func addCollectOtherFlags(cmd *cobra.Command, collectOptions *common.CollectOptions) {
cmd.Flags().StringVarP(&collectOptions.Config, common.EdgecoreConfig, "c", collectOptions.Config,
fmt.Sprintf("Specify configuration file, default is %s", common.EdgecoreConfigPath))
cmd.Flags().BoolVarP(&collectOptions.Detail, "detail", "d", false,
"Whether to print internal log output")
- //cmd.Flags().StringVar(&collectOptions.OutputPath, "output-path", collectOptions.OutputPath,
- // "Cache data and store data compression packages in a directory that default to the current directory")
cmd.Flags().StringVarP(&collectOptions.OutputPath, "output-path", "o", collectOptions.OutputPath,
"Cache data and store data compression packages in a directory that default to the current directory")
cmd.Flags().StringVarP(&collectOptions.LogPath, "log-path", "l", util.KubeEdgeLogPath,
diff --git a/keadm/cmd/keadm/app/cmd/debug/collect_test.go b/keadm/cmd/keadm/app/cmd/debug/collect_test.go
new file mode 100644
index 000000000..028cbc303
--- /dev/null
+++ b/keadm/cmd/keadm/app/cmd/debug/collect_test.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package debug
+
+import (
+ "testing"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
+ "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/util"
+)
+
+func TestCollect_NewCollect(t *testing.T) {
+ assert := assert.New(t)
+ cmd := NewCollect()
+
+ assert.NotNil(cmd)
+ assert.Equal("collect", cmd.Use)
+ assert.Equal("Obtain all the data of the current node", cmd.Short)
+ assert.Equal(edgecollectLongDescription, cmd.Long)
+ assert.Equal(edgecollectExample, cmd.Example)
+ assert.NotNil(cmd.Run)
+
+ subcommands := cmd.Commands()
+ assert.Empty(subcommands)
+
+ expectedFlags := []struct {
+ flagName string
+ shorthand string
+ defaultVal string
+ expectedVal string
+ }{
+ {
+ flagName: "config",
+ shorthand: "c",
+ defaultVal: common.EdgecoreConfigPath,
+ expectedVal: common.EdgecoreConfigPath,
+ },
+ {
+ flagName: "detail",
+ shorthand: "d",
+ defaultVal: "false",
+ expectedVal: "false",
+ },
+ {
+ flagName: "output-path",
+ shorthand: "o",
+ defaultVal: ".",
+ expectedVal: ".",
+ },
+ {
+ flagName: "log-path",
+ shorthand: "l",
+ defaultVal: util.KubeEdgeLogPath,
+ expectedVal: util.KubeEdgeLogPath,
+ },
+ }
+
+ for _, tt := range expectedFlags {
+ t.Run(tt.flagName, func(t *testing.T) {
+ flag := cmd.Flag(tt.flagName)
+ assert.Equal(tt.flagName, flag.Name)
+ assert.Equal(tt.defaultVal, flag.DefValue)
+ assert.Equal(tt.expectedVal, flag.Value.String())
+ assert.Equal(tt.shorthand, flag.Shorthand)
+ })
+ }
+}
+
+func TestCollect_AddCollectOtherFlags(t *testing.T) {
+ assert := assert.New(t)
+ cmd := &cobra.Command{}
+
+ co := newCollectOptions()
+ addCollectOtherFlags(cmd, co)
+
+ expectedFlags := []struct {
+ flagName string
+ shorthand string
+ defaultVal string
+ expectedVal string
+ }{
+ {
+ flagName: "config",
+ shorthand: "c",
+ defaultVal: common.EdgecoreConfigPath,
+ expectedVal: common.EdgecoreConfigPath,
+ },
+ {
+ flagName: "detail",
+ shorthand: "d",
+ defaultVal: "false",
+ expectedVal: "false",
+ },
+ {
+ flagName: "output-path",
+ shorthand: "o",
+ defaultVal: ".",
+ expectedVal: ".",
+ },
+ {
+ flagName: "log-path",
+ shorthand: "l",
+ defaultVal: util.KubeEdgeLogPath,
+ expectedVal: util.KubeEdgeLogPath,
+ },
+ }
+
+ for _, tt := range expectedFlags {
+ t.Run(tt.flagName, func(t *testing.T) {
+ flag := cmd.Flag(tt.flagName)
+ assert.Equal(tt.flagName, flag.Name)
+ assert.Equal(tt.defaultVal, flag.DefValue)
+ assert.Equal(tt.expectedVal, flag.Value.String())
+ assert.Equal(tt.shorthand, flag.Shorthand)
+ })
+ }
+}
+
+func TestCollect_NewCollectOptions(t *testing.T) {
+ assert := assert.New(t)
+
+ co := newCollectOptions()
+ assert.NotNil(co)
+
+ assert.Equal(common.EdgecoreConfigPath, co.Config)
+ assert.Equal(".", co.OutputPath)
+ assert.Equal(false, co.Detail)
+}
diff --git a/keadm/cmd/keadm/app/cmd/debug/debug_test.go b/keadm/cmd/keadm/app/cmd/debug/debug_test.go
new file mode 100644
index 000000000..8709320d2
--- /dev/null
+++ b/keadm/cmd/keadm/app/cmd/debug/debug_test.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package debug
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewEdgeDebug(t *testing.T) {
+ assert := assert.New(t)
+
+ cmd := NewEdgeDebug()
+
+ assert.NotNil(cmd)
+ assert.Equal("debug", cmd.Use)
+ assert.Equal(edgeDebugShortDescription, cmd.Short)
+ assert.Equal(edgeDebugLongDescription, cmd.Long)
+
+ expectedSubCommands := []string{"get", "diagnose", "check", "collect"}
+ for _, subCmd := range expectedSubCommands {
+ found := false
+ for _, cmd := range cmd.Commands() {
+ if cmd.Use == subCmd {
+ found = true
+ break
+ }
+ }
+ assert.True(found)
+ }
+}
diff --git a/keadm/cmd/keadm/app/cmd/debug/diagnose_test.go b/keadm/cmd/keadm/app/cmd/debug/diagnose_test.go
new file mode 100644
index 000000000..d4832bfd7
--- /dev/null
+++ b/keadm/cmd/keadm/app/cmd/debug/diagnose_test.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package debug
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
+)
+
+func TestNewDiagnose(t *testing.T) {
+ assert := assert.New(t)
+ cmd := NewDiagnose()
+
+ assert.NotNil(cmd)
+ assert.Equal("diagnose", cmd.Use)
+ assert.Equal(edgeDiagnoseShortDescription, cmd.Short)
+ assert.Equal(edgeDiagnoseLongDescription, cmd.Long)
+ assert.Equal(edgeDiagnoseExample, cmd.Example)
+
+ subcommands := cmd.Commands()
+ assert.NotNil(subcommands)
+}
+
+func TestNewSubDiagnose(t *testing.T) {
+ assert := assert.New(t)
+
+ cases := []struct {
+ use string
+ expectedDefValue map[string]string
+ expectedShorthand map[string]string
+ expectedUsage map[string]string
+ }{
+ {
+ use: common.ArgDiagnoseNode,
+ expectedDefValue: map[string]string{
+ common.EdgecoreConfig: common.EdgecoreConfigPath,
+ },
+ expectedShorthand: map[string]string{
+ common.EdgecoreConfig: "c",
+ },
+ expectedUsage: map[string]string{
+ common.EdgecoreConfig: fmt.Sprintf("Specify configuration file, default is %s", common.EdgecoreConfigPath),
+ },
+ },
+ {
+ use: common.ArgDiagnosePod,
+ expectedDefValue: map[string]string{
+ "namespace": "default",
+ },
+ expectedShorthand: map[string]string{
+ "namespace": "n",
+ },
+ expectedUsage: map[string]string{
+ "namespace": "specify namespace",
+ },
+ },
+ {
+ use: common.ArgDiagnoseInstall,
+ expectedDefValue: map[string]string{
+ "dns-ip": "",
+ "domain": "",
+ "ip": "",
+ "cloud-hub-server": "",
+ },
+ expectedShorthand: map[string]string{
+ "dns-ip": "D",
+ "domain": "d",
+ "ip": "i",
+ "cloud-hub-server": "s",
+ },
+ expectedUsage: map[string]string{
+ "dns-ip": "specify test dns server ip",
+ "domain": "specify test domain",
+ "ip": "specify test ip",
+ "cloud-hub-server": "specify cloudhub server",
+ },
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.use, func(t *testing.T) {
+ diagnoseObj := Diagnose{
+ Use: test.use,
+ Desc: fmt.Sprintf("Diagnose %s", test.use),
+ }
+ cmd := NewSubDiagnose(diagnoseObj)
+
+ assert.NotNil(cmd)
+ assert.Equal(diagnoseObj.Use, cmd.Use)
+ assert.Equal(diagnoseObj.Desc, cmd.Short)
+
+ flags := cmd.Flags()
+ assert.NotNil(flags)
+
+ for flagName, expectedDefValue := range test.expectedDefValue {
+ t.Run(flagName, func(t *testing.T) {
+ flag := flags.Lookup(flagName)
+ assert.NotNil(flag)
+
+ assert.Equal(expectedDefValue, flag.DefValue)
+ assert.Equal(test.expectedShorthand[flagName], flag.Shorthand)
+ assert.Equal(test.expectedUsage[flagName], flag.Usage)
+ })
+ }
+ })
+ }
+}
+
+func TestNewDiagnoseOptions(t *testing.T) {
+ assert := assert.New(t)
+
+ do := NewDiagnoseOptions()
+ assert.NotNil(do)
+
+ assert.Equal("default", do.Namespace)
+ assert.Equal(common.EdgecoreConfigPath, do.Config)
+ assert.Equal("", do.CheckOptions.IP)
+ assert.Equal(3, do.CheckOptions.Timeout)
+}
diff --git a/keadm/cmd/keadm/app/cmd/debug/get_flags.go b/keadm/cmd/keadm/app/cmd/debug/get_flags.go
index 0ed31e2a9..7051ba6eb 100644
--- a/keadm/cmd/keadm/app/cmd/debug/get_flags.go
+++ b/keadm/cmd/keadm/app/cmd/debug/get_flags.go
@@ -101,13 +101,13 @@ type HumanPrintFlags struct {
// AllowedFormats returns more customized formating options
func (f *HumanPrintFlags) AllowedFormats() []string {
- return []string{"wide"}
+ return []string{FormatTypeWIDE}
}
// ToPrinter receives an outputFormat and returns a printer capable of
// handling human-readable output.
func (f *HumanPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) {
- if len(outputFormat) > 0 && outputFormat != "wide" {
+ if len(outputFormat) > 0 && outputFormat != FormatTypeWIDE {
return nil, genericclioptions.NoCompatiblePrinterError{Options: f, AllowedFormats: f.AllowedFormats()}
}
@@ -130,7 +130,7 @@ func (f *HumanPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrint
Kind: f.Kind,
WithKind: showKind,
NoHeaders: f.NoHeaders,
- Wide: outputFormat == "wide",
+ Wide: outputFormat == FormatTypeWIDE,
WithNamespace: f.WithNamespace,
ColumnLabels: columnLabels,
ShowLabels: showLabels,
diff --git a/keadm/cmd/keadm/app/cmd/debug/get_flags_test.go b/keadm/cmd/keadm/app/cmd/debug/get_flags_test.go
new file mode 100644
index 000000000..b0fa7701b
--- /dev/null
+++ b/keadm/cmd/keadm/app/cmd/debug/get_flags_test.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+@CHANGELOG
+KubeEdge Authors: To create keadm debug get function like kubectl get,
+This file is derived from K8S Kubectl code with reduced set of methods
+Changes done are
+1. Package edged got some functions from "k8s.io/kubectl/pkg/cmd/get/get_flags.go"
+and made some variant
+*/
+
+package debug
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+)
+
+func TestAllowedFormats(t *testing.T) {
+ assert := assert.New(t)
+ printFlags := &PrintFlags{
+ JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(),
+ NamePrintFlags: genericclioptions.NewNamePrintFlags(""),
+ TemplateFlags: genericclioptions.NewKubeTemplatePrintFlags(),
+ HumanReadableFlags: NewHumanPrintFlags(),
+ }
+
+ formats := printFlags.AllowedFormats()
+ expectedFormats := append(printFlags.JSONYamlPrintFlags.AllowedFormats(), printFlags.HumanReadableFlags.AllowedFormats()...)
+
+ assert.Equal(expectedFormats, formats)
+}
+
+func TestToPrinter(t *testing.T) {
+ assert := assert.New(t)
+
+ printFlags := &PrintFlags{
+ JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(),
+ NamePrintFlags: genericclioptions.NewNamePrintFlags(""),
+ TemplateFlags: genericclioptions.NewKubeTemplatePrintFlags(),
+ HumanReadableFlags: NewHumanPrintFlags(),
+ NoHeaders: new(bool),
+ OutputFormat: new(string),
+ }
+
+ *printFlags.OutputFormat = "json"
+ printer, err := printFlags.ToPrinter()
+ assert.NoError(err)
+ assert.NotNil(printer)
+
+ *printFlags.OutputFormat = "yaml"
+ printer, err = printFlags.ToPrinter()
+ assert.NoError(err)
+ assert.NotNil(printer)
+
+ *printFlags.OutputFormat = FormatTypeWIDE
+ printer, err = printFlags.ToPrinter()
+ assert.NoError(err)
+ assert.NotNil(printer)
+
+ *printFlags.OutputFormat = "unsupported"
+ printer, err = printFlags.ToPrinter()
+ assert.Error(err)
+ assert.Nil(printer)
+
+ *printFlags.OutputFormat = FormatTypeWIDE
+ *printFlags.NoHeaders = true
+ printer, err = printFlags.ToPrinter()
+ assert.NoError(err)
+ assert.NotNil(printer)
+
+ *printFlags.NoHeaders = false
+
+ *printFlags.OutputFormat = ""
+ printer, err = printFlags.ToPrinter()
+ assert.NoError(err)
+ assert.NotNil(printer)
+}
+
+func TestNewGetPrintFlags(t *testing.T) {
+ assert := assert.New(t)
+
+ printFlags := NewGetPrintFlags()
+ assert.NotNil(printFlags)
+
+ assert.IsType(&genericclioptions.JSONYamlPrintFlags{}, printFlags.JSONYamlPrintFlags)
+ assert.IsType(&genericclioptions.NamePrintFlags{}, printFlags.NamePrintFlags)
+ assert.IsType(&genericclioptions.KubeTemplatePrintFlags{}, printFlags.TemplateFlags)
+ assert.IsType(&HumanPrintFlags{}, printFlags.HumanReadableFlags)
+
+ assert.NotNil(printFlags.OutputFormat)
+ assert.Equal("", *printFlags.OutputFormat)
+
+ assert.NotNil(printFlags.NoHeaders)
+ assert.Equal(false, *printFlags.NoHeaders)
+
+ assert.Equal(printFlags.HumanReadableFlags.NoHeaders, false)
+ assert.Equal(printFlags.HumanReadableFlags.WithNamespace, false)
+ assert.Equal(*printFlags.HumanReadableFlags.ShowLabels, false)
+ assert.Equal(*printFlags.HumanReadableFlags.SortBy, "")
+ assert.Equal(*printFlags.HumanReadableFlags.ShowKind, false)
+ assert.Empty(*printFlags.HumanReadableFlags.ColumnLabels)
+}
+
+func TestHumanPrintFlags_AllowedFormats(t *testing.T) {
+ assert := assert.New(t)
+
+ humanPrintFlags := &HumanPrintFlags{}
+ formats := humanPrintFlags.AllowedFormats()
+
+ expectedFormats := []string{FormatTypeWIDE}
+ assert.Equal(expectedFormats, formats)
+}
+
+func TestNewHumanPrintFlags(t *testing.T) {
+ assert := assert.New(t)
+
+ humanPrintFlags := NewHumanPrintFlags()
+ assert.NotNil(humanPrintFlags)
+
+ assert.Equal(humanPrintFlags.NoHeaders, false)
+ assert.Equal(humanPrintFlags.WithNamespace, false)
+ assert.Equal(humanPrintFlags.ColumnLabels, &[]string{})
+ assert.Equal(humanPrintFlags.Kind, schema.GroupKind{})
+ assert.Equal(*humanPrintFlags.ShowLabels, false)
+ assert.Equal(*humanPrintFlags.SortBy, "")
+ assert.Equal(*humanPrintFlags.ShowKind, false)
+}
+
+func TestHumanPrintFlags_ToPrinter(t *testing.T) {
+ assert := assert.New(t)
+
+ humanPrintFlags := &HumanPrintFlags{
+ ShowKind: new(bool),
+ ShowLabels: new(bool),
+ SortBy: new(string),
+ ColumnLabels: new([]string),
+ NoHeaders: false,
+ Kind: schema.GroupKind{},
+ WithNamespace: false,
+ }
+
+ outputFormat := FormatTypeWIDE
+ printer, err := humanPrintFlags.ToPrinter(outputFormat)
+ assert.NoError(err)
+ assert.NotNil(printer)
+
+ outputFormat = ""
+ printer, err = humanPrintFlags.ToPrinter(outputFormat)
+ assert.NoError(err)
+ assert.NotNil(printer)
+
+ outputFormat = "unsupported"
+ printer, err = humanPrintFlags.ToPrinter(outputFormat)
+ assert.Error(err)
+ assert.Nil(printer)
+}
+
+func TestHumanPrintFlags_EnsureWithNamespace(t *testing.T) {
+ assert := assert.New(t)
+ humanPrintFlags := &HumanPrintFlags{}
+
+ err := humanPrintFlags.EnsureWithNamespace()
+
+ assert.NoError(err)
+ assert.Equal(humanPrintFlags.WithNamespace, true)
+}
+
+func Test_EnsureWithNamespace(t *testing.T) {
+ assert := assert.New(t)
+
+ printFlags := NewGetPrintFlags()
+ err := printFlags.EnsureWithNamespace()
+
+ assert.NoError(err)
+}
diff --git a/keadm/cmd/keadm/app/cmd/debug/get_test.go b/keadm/cmd/keadm/app/cmd/debug/get_test.go
index 31e01d264..e99668a41 100644
--- a/keadm/cmd/keadm/app/cmd/debug/get_test.go
+++ b/keadm/cmd/keadm/app/cmd/debug/get_test.go
@@ -14,11 +14,177 @@ limitations under the License.
package debug
import (
- "reflect"
+ "errors"
"testing"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+
+ edgecoreCfg "github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha2"
)
+func TestNewCmdDebugGet(t *testing.T) {
+ assert := assert.New(t)
+ cmd := NewCmdDebugGet()
+
+ assert.NotNil(cmd)
+ assert.Equal("get", cmd.Use)
+ assert.Equal("Display one or many resources", cmd.Short)
+ assert.Equal(debugGetLong, cmd.Long)
+ assert.Equal(debugGetExample, cmd.Example)
+
+ assert.NotNil(cmd.Run)
+
+ getOption := NewGetOptions()
+
+ flag := cmd.Flag("namespace")
+ assert.NotNil(flag)
+ assert.Equal(getOption.Namespace, flag.DefValue)
+ assert.Equal("namespace", flag.Name)
+ assert.Equal("n", flag.Shorthand)
+
+ flag = cmd.Flag("output")
+ assert.NotNil(flag)
+ assert.Equal(*getOption.PrintFlags.OutputFormat, flag.DefValue)
+ assert.Equal("output", flag.Name)
+ assert.Equal("o", flag.Shorthand)
+
+ flag = cmd.Flag("selector")
+ assert.NotNil(flag)
+ assert.Equal(getOption.LabelSelector, flag.DefValue)
+ assert.Equal("selector", flag.Name)
+ assert.Equal("l", flag.Shorthand)
+
+ flag = cmd.Flag("edgedb-path")
+ assert.NotNil(flag)
+ assert.Equal(getOption.DataPath, flag.DefValue)
+ assert.Equal("edgedb-path", flag.Name)
+ assert.Equal("p", flag.Shorthand)
+
+ flag = cmd.Flag("all-namespaces")
+ assert.NotNil(flag)
+ assert.Equal(getOption.AllNamespace, flag.DefValue == "true")
+ assert.Equal("all-namespaces", flag.Name)
+ assert.Equal("A", flag.Shorthand)
+}
+
+func TestCheckErr(t *testing.T) {
+ assert := assert.New(t)
+
+ mockHandler := func(msg string, exitCode int) {
+ t.Errorf("handleErr should not be called for nil error")
+ }
+ CheckErr(nil, mockHandler)
+
+ expectedMsg := "Test error"
+ expectedExitCode := DefaultErrorExitCode
+ expectedErr := errors.New(expectedMsg)
+
+ var handledMsg string
+ var handledExitCode int
+
+ mockHandler = func(msg string, exitCode int) {
+ handledMsg = msg
+ handledExitCode = exitCode
+ }
+
+ CheckErr(expectedErr, mockHandler)
+
+ assert.Equal(expectedMsg, handledMsg)
+ assert.Equal(expectedExitCode, handledExitCode)
+}
+
+func TestAddGetOtherFlags(t *testing.T) {
+ getOption := NewGetOptions()
+ cmd := &cobra.Command{}
+
+ addGetOtherFlags(cmd, getOption)
+
+ assert := assert.New(t)
+
+ flag := cmd.Flag("namespace")
+ assert.NotNil(flag)
+ assert.Equal(getOption.Namespace, flag.DefValue)
+ assert.Equal("namespace", flag.Name)
+ assert.Equal("n", flag.Shorthand)
+
+ flag = cmd.Flag("output")
+ assert.NotNil(flag)
+ assert.Equal(*getOption.PrintFlags.OutputFormat, flag.DefValue)
+ assert.Equal("output", flag.Name)
+ assert.Equal("o", flag.Shorthand)
+
+ flag = cmd.Flag("selector")
+ assert.NotNil(flag)
+ assert.Equal(getOption.LabelSelector, flag.DefValue)
+ assert.Equal("selector", flag.Name)
+ assert.Equal("l", flag.Shorthand)
+
+ flag = cmd.Flag("edgedb-path")
+ assert.NotNil(flag)
+ assert.Equal(getOption.DataPath, flag.DefValue)
+ assert.Equal("edgedb-path", flag.Name)
+ assert.Equal("p", flag.Shorthand)
+
+ flag = cmd.Flag("all-namespaces")
+ assert.NotNil(flag)
+ assert.Equal(getOption.AllNamespace, flag.DefValue == "true")
+ assert.Equal("all-namespaces", flag.Name)
+ assert.Equal("A", flag.Shorthand)
+}
+
+func TestNewGetOptions(t *testing.T) {
+ assert := assert.New(t)
+ opts := NewGetOptions()
+
+ assert.NotNil(opts)
+ assert.Equal(opts.Namespace, "default")
+ assert.Equal(opts.DataPath, edgecoreCfg.DataBaseDataSource)
+ assert.Equal(opts.PrintFlags, NewGetPrintFlags())
+}
+
+func TestIsAllowedFormat(t *testing.T) {
+ assert := assert.New(t)
+ getOptions := NewGetOptions()
+
+ tests := []struct {
+ format string
+ expected bool
+ }{
+ {
+ "yaml",
+ true,
+ },
+ {
+ "json",
+ true,
+ },
+ {
+ "wide",
+ true,
+ },
+ {
+ "xml",
+ false,
+ },
+ {
+ "",
+ false,
+ },
+ {
+ "plain",
+ false,
+ },
+ }
+
+ for _, test := range tests {
+ stdResult := getOptions.IsAllowedFormat(test.format)
+ assert.Equal(test.expected, stdResult)
+ }
+}
+
func TestSplitSelectorParameters(t *testing.T) {
+ assert := assert.New(t)
type args struct {
args string
}
@@ -37,22 +203,37 @@ func TestSplitSelectorParameters(t *testing.T) {
{Key: "key3", Value: "value3", Exist: true},
},
wantErr: false,
- }, {
+ },
+ {
name: "testWithoutLabel",
args: args{args: "key1"},
want: []Selector{},
wantErr: false,
- }, {
+ },
+ {
name: "testWithEmptyValue",
args: args{args: "key1!="},
want: []Selector{{Key: "key1", Value: "", Exist: false}},
wantErr: false,
- }, {
+ },
+ {
name: "testWithMoreThanOneLabel",
args: args{args: "key1=value1=,key2=value2"},
want: nil,
wantErr: true,
},
+ {
+ name: "testEmptyString",
+ args: args{args: ""},
+ want: []Selector{},
+ wantErr: false,
+ },
+ {
+ name: "testOnlyCommas",
+ args: args{args: ",,,"},
+ want: []Selector{},
+ wantErr: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -61,9 +242,60 @@ func TestSplitSelectorParameters(t *testing.T) {
t.Errorf("SplitSelectorParameters() error = %v, wantErr %v", err, tt.wantErr)
return
}
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("SplitSelectorParameters() = %v, want %v", got, tt.want)
- }
+ assert.Equal(tt.want, got)
+ })
+ }
+}
+
+func TestIsExistName(t *testing.T) {
+ tests := []struct {
+ name string
+ resNames []string
+ key string
+ expected bool
+ }{
+ {
+ name: "Key exists in resNames",
+ resNames: []string{"pod1", "pod2", "pod3"},
+ key: "pod1",
+ expected: true,
+ },
+ {
+ name: "Key does not exist in resNames",
+ resNames: []string{"pod1", "pod2", "pod3"},
+ key: "pod4",
+ expected: false,
+ },
+ {
+ name: "Empty resNames",
+ resNames: []string{},
+ key: "pod1",
+ expected: false,
+ },
+ {
+ name: "Empty key",
+ resNames: []string{"pod1", "pod2", "pod3"},
+ key: "",
+ expected: false,
+ },
+ {
+ name: "Key is substring of one element in resNames",
+ resNames: []string{"pod123", "pod2", "pod3"},
+ key: "pod1",
+ expected: false,
+ },
+ {
+ name: "ResNames contains special characters",
+ resNames: []string{"pod@123", "pod#2", "pod$3"},
+ key: "pod@123",
+ expected: true,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ result := isExistName(test.resNames, test.key)
+ assert.Equal(t, test.expected, result)
})
}
}
diff --git a/pkg/apis/componentconfig/cloudcore/v1alpha1/default.go b/pkg/apis/componentconfig/cloudcore/v1alpha1/default.go
index e8963577c..ed91a4e19 100644
--- a/pkg/apis/componentconfig/cloudcore/v1alpha1/default.go
+++ b/pkg/apis/componentconfig/cloudcore/v1alpha1/default.go
@@ -80,6 +80,17 @@ func NewDefaultCloudCoreConfig() *CloudCoreConfig {
Port: 10002,
Address: "0.0.0.0",
},
+ Authorization: &CloudHubAuthorization{
+ Enable: false,
+ Debug: true,
+ Modes: []AuthorizationMode{
+ {
+ Node: &NodeAuthorization{
+ Enable: true,
+ },
+ },
+ },
+ },
},
EdgeController: &EdgeController{
Enable: true,
diff --git a/pkg/apis/componentconfig/cloudcore/v1alpha1/types.go b/pkg/apis/componentconfig/cloudcore/v1alpha1/types.go
index b39ec3475..a1819fe96 100644
--- a/pkg/apis/componentconfig/cloudcore/v1alpha1/types.go
+++ b/pkg/apis/componentconfig/cloudcore/v1alpha1/types.go
@@ -151,6 +151,8 @@ type CloudHub struct {
// TokenRefreshDuration indicates the interval of cloudcore token refresh, unit is hour
// default 12h
TokenRefreshDuration time.Duration `json:"tokenRefreshDuration,omitempty"`
+ // Authorization authz configurations
+ Authorization *CloudHubAuthorization `json:"authorization,omitempty"`
}
// CloudHubQUIC indicates the quic server config
@@ -205,6 +207,32 @@ type CloudHubHTTPS struct {
Port uint32 `json:"port,omitempty"`
}
+// CloudHubAuthorization CloudHub authz configurations
+type CloudHubAuthorization struct {
+ // Enable indicates whether enable CloudHub Authorization
+ // default false
+ Enable bool `json:"enable"`
+ // Debug only logs errors but always allow messages
+ // default false
+ Debug bool `json:"debug"`
+ // Modes a list of authorization modes will be used
+ // default node
+ Modes []AuthorizationMode `json:"modes"`
+}
+
+// AuthorizationMode indicates an authorization mdoe
+type AuthorizationMode struct {
+ // Node node authorization
+ Node *NodeAuthorization `json:"node,omitempty"`
+}
+
+// NodeAuthorization node authorization
+type NodeAuthorization struct {
+ // Enable enables node authorization
+ // default true
+ Enable bool `json:"enable"`
+}
+
// EdgeController indicates the config of EdgeController module
type EdgeController struct {
// Enable indicates whether EdgeController is enabled,
diff --git a/pkg/metaserver/application.go b/pkg/metaserver/application.go
index 39debd754..fdc087fd7 100644
--- a/pkg/metaserver/application.go
+++ b/pkg/metaserver/application.go
@@ -32,6 +32,7 @@ import (
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/kubeedge/cloud/pkg/common/messagelayer"
)
// Application record the resources that are in applying for requesting to be transferred down from the cloud, please:
@@ -247,6 +248,12 @@ func MsgToApplication(msg model.Message) (*Application, error) {
if err != nil {
return nil, err
}
+
+ nodeID, err := messagelayer.GetNodeID(msg)
+ if err != nil {
+ nodeID = app.Nodename
+ }
+ app.Nodename = nodeID
return app, nil
}
diff --git a/pkg/security/token/token.go b/pkg/security/token/token.go
new file mode 100644
index 000000000..1fd183fb9
--- /dev/null
+++ b/pkg/security/token/token.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package token
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang-jwt/jwt"
+)
+
+// Create creates a new token consisting of the CA hash and a JWT token.
+func Create(ca, caKey []byte, intervalTime time.Duration) (string, error) {
+ // set double intervalTime as expirationTime, which can guarantee that the validity period
+ // of the token obtained at anytime is greater than or equal to intervalTime.
+ expiresAt := time.Now().Add(time.Hour * intervalTime * 2).Unix()
+
+ token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.StandardClaims{
+ ExpiresAt: expiresAt,
+ })
+
+ tokenString, err := token.SignedString(caKey)
+ if err != nil {
+ return "", err
+ }
+
+ // combine caHash and tokenString into caHashAndToken
+ return strings.Join([]string{hashCA(ca), tokenString}, "."), nil
+}
+
+func hashCA(ca []byte) string {
+ digest := sha256.Sum256(ca)
+ return hex.EncodeToString(digest[:])
+}
+
+// Verify verifies the token is valid
+func Verify(token string, caKey []byte) (bool, error) {
+ jwtToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
+ if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+ return nil, fmt.Errorf("invalid token method type, want *jwt.SigningMethodHMAC, but is %T", token.Method)
+ }
+ return caKey, nil
+ })
+ if err != nil {
+ // return the original error for the caller to determine.
+ return false, err
+ }
+ return jwtToken.Valid, nil
+}
+
+// VerifyCAAndGetRealToken verifies that the hash of the CA certificate matches the hash
+// embedded in the token, then returns the real token with the CA-hash prefix removed.
+func VerifyCAAndGetRealToken(token string, ca []byte) (string, error) {
+ tokenParts := strings.Split(token, ".")
+ if len(tokenParts) != 4 {
+ return "", fmt.Errorf("token %s credentials are in the wrong format", token)
+ }
+ if currentHash := hashCA(ca); currentHash != tokenParts[0] {
+ return "", fmt.Errorf("failed to validate CA certificate. tokenCAhash: %s, CAhash: %s",
+ tokenParts[0], currentHash)
+ }
+ return strings.Join(tokenParts[1:], "."), nil
+}
diff --git a/pkg/security/token/token_test.go b/pkg/security/token/token_test.go
new file mode 100644
index 000000000..e9352474c
--- /dev/null
+++ b/pkg/security/token/token_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2024 The KubeEdge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package token
+
+import (
+ "encoding/pem"
+ "testing"
+)
+
+const (
+ testCA = `-----BEGIN CERTIFICATE-----
+MIIDEDCCAfigAwIBAgIIHmr3g3dw7rYwDQYJKoZIhvcNAQELBQAwJjEOMAwGA1UE
+BhMFQ2hpbmExCTAHBgNVBAoTADEJMAcGA1UECxMAMB4XDTI0MDQwODA5NTY1MloX
+DTM0MDQwNjA5NTY1MlowJjEOMAwGA1UEBhMFQ2hpbmExCTAHBgNVBAoTADEJMAcG
+A1UECxMAMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm2td7Yn3tTv0
+g1d6MxQBqESl/flEvt7G1gFWoXHHzSN9+jh75Y1meHkuLu6LeYYuQMdFiHzra/jM
+mN78RJToOW96yH97x9F+YstCStKdMh3D04vmiXqwdkzIFXvbcFol1mXP8r72R8z+
+odjPr/EwDNI0KSzTtZfoKIalwCDzqX+WPOgRKaCyTHs01dNHSQhdyhG9oTdeDtIL
+e6HNqxA966jMF6p/giHSUrcec41XxxZPfHZ5sppaSIMxabBS/M/lMlav2ZMfr6+y
+szP33/CRnbn45d767wyH9P0kbWrdU9IPN9vGD7QKfNfcoN2FLHgkkoXOJl/AXJfF
+BftXWs0qoQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUegaSgp7zhR9AwLcVBKjraccqbkMwDQYJKoZIhvcNAQELBQAD
+ggEBAI/I2Ln//zxUhMY9JwM57sDDQ7Vimc+uWSgrtqhiGOGMzhBFREr1dS5UE1a2
+dMMh566lBuQAT7hyOC9EqL+zbHAcGZGUyIqByIKv9W2HMNnTOGZ3XbPJNV6DH/wX
+66Jv9dvNf+EVj0PhJvRmn6QslbVrOmAtmylllTXDJnoULX2+ZAgHNS2+p3rnXCas
+Nh52RfmjaH1sH7e1zvVKvOpTCKbuArzjspSdJ1ssnWYnrLtkAvz7PZEDL88fFmre
+uhDkSogDJI/yC8m+6lnvYdLWuDAkVREP39XZ/7KtJFLEeBikRhsRK2BOnVidPFDM
+rFqlS7gD0cPmIEo2wgkh3pKaxNE=
+-----END CERTIFICATE-----`
+
+ testCAKey = `-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAm2td7Yn3tTv0g1d6MxQBqESl/flEvt7G1gFWoXHHzSN9+jh7
+5Y1meHkuLu6LeYYuQMdFiHzra/jMmN78RJToOW96yH97x9F+YstCStKdMh3D04vm
+iXqwdkzIFXvbcFol1mXP8r72R8z+odjPr/EwDNI0KSzTtZfoKIalwCDzqX+WPOgR
+KaCyTHs01dNHSQhdyhG9oTdeDtILe6HNqxA966jMF6p/giHSUrcec41XxxZPfHZ5
+sppaSIMxabBS/M/lMlav2ZMfr6+yszP33/CRnbn45d767wyH9P0kbWrdU9IPN9vG
+D7QKfNfcoN2FLHgkkoXOJl/AXJfFBftXWs0qoQIDAQABAoIBAQCUnP8M48+cWj89
+1EkCTJAlIbeD+nY0+XsyKcd3yv/d9aFBwf8fCq3AZ0e1Et8FjjvuL14a3DCVZyvk
+xdx9i9HfEe1biSOId5cdyvSR7YDo6jNVtsH0FgBkrpjoii3T6i+iKmrE2LtQ/wFB
+K7u0prFmoR3FfZrXWvFgxxf5dsjn+p3nJQdDZbWjAcZJdf7T78EtnbQ4uzCAQRTW
+hsmfI8OPTzyf/FLTkscjNzP6GVMWP9x017TfKgucqwPt8FeKqc9Si+fEZ3GDQiSi
+KZHSXGIjO3MxCPDX4XW4sfkP5+iB8OEZHUHN5SHBAnTu2sR5hnwnPj2y1jcL7T3s
+Fv3vnTWRAoGBAMW0+L7PZ3foEVi8IL9pkB24+3u6CIIrc98a7YKbLfWmDSmBQis/
+5BRSvuf1l99EZDbVHE6xkhecSOP8cmIrZ1NjfUgXloaDkHg4hLgtQ+ZIPnSOHifP
+M+DO6re+yCkngxgklofUSxX4STviPJiewZayVTwWjpbK74fv8KrraooNAoGBAMk+
+goOH8aUf2QeyCHrROQAb8QXF3hRbyimFES/7eSdlFIy/bROnT9cdcSviyzrvBzwY
+3SIEcp7g3ZbJb+dFaUObE/nY6EJP+moCCOurBKjzJeRQ1zlFzb08c4lCAU7/l28z
+iHSQJavQof/mvcR5Pi81h5VFuFeFgchBYoHChCHlAoGBAJ9Om8DkzrLHxHKD5L9Y
+CFBq5flkhcadzNhRkmBTOk1eZ+yxwuemq9nUcw/lzWKScU3dmtmuK9HqlLFgkaqY
+3sFKwYB9wUTSbm7w28CseLHuNKUmfxYE2AClumwkxpSiyfeCQ+lfHsGtNxWRztIL
+2mHbgOLSKkNHcotOw9Z1q3thAoGAGA/dUxTCE9hG/uCOmwDBK/4rR2FtOEnxVh2O
+/Im45rjzSBDrXdo3daUTjwfC/PzvhIQEjLiza8O/OvRC6QgnmenE7a69tpARhPNR
+VbxRBlJsSWxRD4wFGYdM2TCHL4bn+GfU/PrvRiff9tUEA6XrhYGFAJghfnV8GxGW
+UaWMXvECgYEAlhQUBLZ/ZTPXQWb7VA5Mur/s0ptrs4CAP+KBc9xyBfKrPEmjMcxJ
+2788MXYbnGzwpY0Nk/ruUNJ+PFPy4GgGWnZp7Fqi5qGIrFkuazQaulRQJwTZkIzJ
+x0KJ+pjtT+89L1r7murZAJcPL+TyRYeg295NTfcjAfSdcBfIjzURYVg=
+-----END RSA PRIVATE KEY-----`
+)
+
+func TestToken(t *testing.T) {
+ var token string
+
+ _, caDer := pem.Decode([]byte(testCA))
+ _, cakeyDer := pem.Decode([]byte(testCAKey))
+
+ t.Run("test Create", func(t *testing.T) {
+ var err error
+ token, err = Create(caDer, cakeyDer, 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(token) == 0 {
+ t.Fatal("failed to get token")
+ }
+ })
+
+ t.Run("test VerifyCAAndGetRealToken", func(t *testing.T) {
+ var err error
+ token, err = VerifyCAAndGetRealToken(token, caDer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(token) == 0 {
+ t.Fatal("failed to get token")
+ }
+ })
+
+ t.Run("test Verify", func(t *testing.T) {
+ b, err := Verify(token, cakeyDer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !b {
+ t.Fatalf("invalid token %s", token)
+ }
+ })
+}
diff --git a/staging/src/github.com/kubeedge/beehive/pkg/core/model/message.go b/staging/src/github.com/kubeedge/beehive/pkg/core/model/message.go
index 57ce2a5e9..d4ebce420 100644
--- a/staging/src/github.com/kubeedge/beehive/pkg/core/model/message.go
+++ b/staging/src/github.com/kubeedge/beehive/pkg/core/model/message.go
@@ -254,7 +254,7 @@ func (msg *Message) NewRespByMessage(message *Message, content interface{}) *Mes
// NewErrorMessage returns a new error message by a message received
func NewErrorMessage(message *Message, errContent string) *Message {
- return NewMessage(message.Header.ParentID).
+ return NewMessage(message.GetID()).
SetResourceOperation(message.Router.Resource, ResponseErrorOperation).
FillBody(errContent)
}
diff --git a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/influxdb2/handler.go b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/influxdb2/handler.go
index 287bb5616..53d950967 100644
--- a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/influxdb2/handler.go
+++ b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/influxdb2/handler.go
@@ -21,7 +21,6 @@ import (
"time"
"k8s.io/klog/v2"
-
"github.com/kubeedge/Template/driver"
"github.com/kubeedge/mapper-framework/pkg/common"
)
@@ -37,7 +36,7 @@ func DataHandler(ctx context.Context, twin *common.Twin, client *driver.Customiz
klog.Errorf("init database client err: %v", err)
return
}
- reportCycle := time.Duration(twin.Property.ReportCycle)
+ reportCycle := time.Millisecond * time.Duration(twin.Property.ReportCycle)
if reportCycle == 0 {
reportCycle = common.DefaultReportCycle
}
diff --git a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/redis/handler.go b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/redis/handler.go
index 9363f6f0e..dfec8af78 100644
--- a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/redis/handler.go
+++ b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/redis/handler.go
@@ -37,7 +37,7 @@ func DataHandler(ctx context.Context, twin *common.Twin, client *driver.Customiz
klog.Errorf("init redis database client err: %v", err)
return
}
- reportCycle := time.Duration(twin.Property.ReportCycle)
+ reportCycle := time.Millisecond * time.Duration(twin.Property.ReportCycle)
if reportCycle == 0 {
reportCycle = common.DefaultReportCycle
}
diff --git a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/tdengine/handler.go b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/tdengine/handler.go
index 2840a6ed3..ba6732659 100644
--- a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/tdengine/handler.go
+++ b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/data/dbmethod/tdengine/handler.go
@@ -37,7 +37,7 @@ func DataHandler(ctx context.Context, twin *common.Twin, client *driver.Customiz
klog.Errorf("init database client err: %v", err)
return
}
- reportCycle := time.Duration(twin.Property.ReportCycle)
+ reportCycle := time.Millisecond * time.Duration(twin.Property.ReportCycle)
if reportCycle == 0 {
reportCycle = common.DefaultReportCycle
}
diff --git a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/device/device.go b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/device/device.go
index 4888fb664..9f909d57f 100644
--- a/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/device/device.go
+++ b/staging/src/github.com/kubeedge/mapper-framework/_template/mapper/device/device.go
@@ -145,7 +145,7 @@ func dataHandler(ctx context.Context, dev *driver.CustomizedDev) {
ObservedDesired: twin.ObservedDesired,
VisitorConfig: &visitorConfig,
Topic: fmt.Sprintf(common.TopicTwinUpdate, dev.Instance.ID),
- CollectCycle: time.Duration(twin.Property.CollectCycle),
+ CollectCycle: time.Millisecond * time.Duration(twin.Property.CollectCycle),
ReportToCloud: twin.Property.ReportToCloud,
}
go twinData.Run(ctx)
@@ -202,7 +202,7 @@ func pushHandler(ctx context.Context, twin *common.Twin, client *driver.Customiz
klog.Errorf("init publish method err: %v", err)
return
}
- reportCycle := time.Duration(twin.Property.ReportCycle)
+ reportCycle := time.Millisecond * time.Duration(twin.Property.ReportCycle)
if reportCycle == 0 {
reportCycle = common.DefaultReportCycle
}
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/client/quic.go b/staging/src/github.com/kubeedge/viaduct/pkg/client/quic.go
index c95c09afc..7716f948e 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/client/quic.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/client/quic.go
@@ -121,8 +121,9 @@ func (c *QuicClient) Connect() (conn.Connection, error) {
Consumer: c.options.Consumer,
Handler: c.options.Handler,
State: &conn.ConnectionState{
- State: api.StatConnected,
- Headers: c.exOpts.Header,
+ State: api.StatConnected,
+ Headers: c.exOpts.Header,
+ PeerCertificates: session.ConnectionState().PeerCertificates,
},
AutoRoute: c.options.AutoRoute,
}), nil
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/client/ws.go b/staging/src/github.com/kubeedge/viaduct/pkg/client/ws.go
index 2991771d4..17cbefcf0 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/client/ws.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/client/ws.go
@@ -1,6 +1,7 @@
package client
import (
+ "crypto/x509"
"fmt"
"io"
@@ -49,6 +50,10 @@ func (c *WSClient) Connect() (conn.Connection, error) {
if c.exOpts.Callback != nil {
c.exOpts.Callback(wsConn, resp)
}
+ var peerCerts []*x509.Certificate
+ if resp != nil && resp.TLS != nil {
+ peerCerts = resp.TLS.PeerCertificates
+ }
return conn.NewConnection(&conn.ConnectionOptions{
ConnType: api.ProtocolTypeWS,
ConnUse: c.options.ConnUse,
@@ -57,8 +62,9 @@ func (c *WSClient) Connect() (conn.Connection, error) {
Handler: c.options.Handler,
CtrlLane: lane.NewLane(api.ProtocolTypeWS, wsConn),
State: &conn.ConnectionState{
- State: api.StatConnected,
- Headers: c.exOpts.Header.Clone(),
+ State: api.StatConnected,
+ Headers: c.exOpts.Header.Clone(),
+ PeerCertificates: peerCerts,
},
AutoRoute: c.options.AutoRoute,
}), nil
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/conn/quic.go b/staging/src/github.com/kubeedge/viaduct/pkg/conn/quic.go
index 3f7dd68a3..f0177183b 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/conn/quic.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/conn/quic.go
@@ -244,8 +244,9 @@ func (conn *QuicConnection) handleMessage(stream *smgr.Stream) {
conn.handler = mux.MuxDefault
}
conn.handler.ServeConn(&mux.MessageRequest{
- Header: conn.state.Headers,
- Message: msg,
+ Header: conn.state.Headers,
+ PeerCertificates: conn.state.PeerCertificates,
+ Message: msg,
}, &responseWriter{
Type: api.ProtocolTypeQuic,
Van: stream.Stream,
@@ -341,6 +342,5 @@ func (conn *QuicConnection) LocalAddr() net.Addr {
}
func (conn *QuicConnection) ConnectionState() ConnectionState {
- conn.state.PeerCertificates = conn.session.Sess.ConnectionState().PeerCertificates
return *conn.state
}
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/conn/ws.go b/staging/src/github.com/kubeedge/viaduct/pkg/conn/ws.go
index 25edde31a..a48f68b30 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/conn/ws.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/conn/ws.go
@@ -139,8 +139,9 @@ func (conn *WSConnection) handleMessage() {
conn.handler = mux.MuxDefault
}
conn.handler.ServeConn(&mux.MessageRequest{
- Header: conn.state.Headers,
- Message: msg,
+ Header: conn.state.Headers,
+ PeerCertificates: conn.state.PeerCertificates,
+ Message: msg,
}, &responseWriter{
Type: api.ProtocolTypeWS,
Van: conn.wsConn,
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/mux/mux.go b/staging/src/github.com/kubeedge/viaduct/pkg/mux/mux.go
index d88bdb3a7..57918dbed 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/mux/mux.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/mux/mux.go
@@ -1,6 +1,7 @@
package mux
import (
+ "crypto/x509"
"fmt"
"net/http"
@@ -18,14 +19,16 @@ type Handler interface {
}
type MessageRequest struct {
- Header http.Header
- Message *model.Message
+ Header http.Header
+ PeerCertificates []*x509.Certificate
+ Message *model.Message
}
type MessageContainer struct {
- Header http.Header
- Message *model.Message
- parameters map[string]string
+ Header http.Header
+ PeerCertificates []*x509.Certificate
+ Message *model.Message
+ parameters map[string]string
}
type MessageMux struct {
@@ -62,11 +65,12 @@ func (mux *MessageMux) extractParameters(expression *MessageExpression, resource
return parameters
}
-func (mux *MessageMux) wrapMessage(header http.Header, msg *model.Message, params map[string]string) *MessageContainer {
+func (mux *MessageMux) wrapMessage(req *MessageRequest, params map[string]string) *MessageContainer {
return &MessageContainer{
- Message: msg,
- parameters: params,
- Header: header,
+ Message: req.Message,
+ parameters: params,
+ Header: req.Header,
+ PeerCertificates: req.PeerCertificates,
}
}
@@ -81,7 +85,7 @@ func (mux *MessageMux) dispatch(req *MessageRequest, writer ResponseWriter) erro
// extract parameters
parameters := mux.extractParameters(entry.pattern.resExpr, req.Message.GetResource())
// wrap message
- container := mux.wrapMessage(req.Header, req.Message, parameters)
+ container := mux.wrapMessage(req, parameters)
// call user handle of entry
entry.handleFunc(container, writer)
return nil
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/server/quic.go b/staging/src/github.com/kubeedge/viaduct/pkg/server/quic.go
index 8f81ca747..b40d4c420 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/server/quic.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/server/quic.go
@@ -125,8 +125,9 @@ func (srv *QuicServer) handleSession(session quic.Session) {
CtrlLane: lane.NewLane(api.ProtocolTypeQuic, ctrlStream),
Handler: srv.options.Handler,
State: &conn.ConnectionState{
- State: api.StatConnected,
- Headers: header,
+ State: api.StatConnected,
+ Headers: header,
+ PeerCertificates: session.ConnectionState().PeerCertificates,
},
AutoRoute: srv.options.AutoRoute,
OnReadTransportErr: srv.options.OnReadTransportErr,
diff --git a/staging/src/github.com/kubeedge/viaduct/pkg/server/ws.go b/staging/src/github.com/kubeedge/viaduct/pkg/server/ws.go
index 619602803..d33ea04a2 100644
--- a/staging/src/github.com/kubeedge/viaduct/pkg/server/ws.go
+++ b/staging/src/github.com/kubeedge/viaduct/pkg/server/ws.go
@@ -87,8 +87,9 @@ func (srv *WSServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
Handler: srv.options.Handler,
CtrlLane: lane.NewLane(api.ProtocolTypeWS, wsConn),
State: &conn.ConnectionState{
- State: api.StatConnected,
- Headers: req.Header.Clone(),
+ State: api.StatConnected,
+ Headers: req.Header.Clone(),
+ PeerCertificates: req.TLS.PeerCertificates,
},
AutoRoute: srv.options.AutoRoute,
OnReadTransportErr: srv.options.OnReadTransportErr,
diff --git a/tests/scripts/conformance_e2e.sh b/tests/scripts/conformance_e2e.sh
new file mode 100755
index 000000000..68296382d
--- /dev/null
+++ b/tests/scripts/conformance_e2e.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+# Copyright 2024 The KubeEdge Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+set -x
+
+KUBEEDGE_ROOT=$PWD
+TEST_DIR=$(realpath $(dirname $0)/..)
+
+GOPATH=${GOPATH:-$(go env GOPATH)}
+KIND_IMAGE=${1:-"kindest/node:v1.28.0"}
+VERSION=$(git rev-parse --short=12 HEAD)
+
+function cleanup() {
+ bash ${KUBEEDGE_ROOT}/tests/scripts/cleanup.sh
+}
+
+function validate_ip() {
+ local ip=$1
+ if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+cleanup
+
+ENABLE_DAEMON=true bash -x ${KUBEEDGE_ROOT}/hack/local-up-kubeedge.sh ${KIND_IMAGE} || {
+ echo "failed to start cluster !!!"
+ exit 1
+}
+
+trap cleanup EXIT
+
+sleep 10
+
+MASTER_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' test-control-plane`
+if [ -z "$MASTER_IP" ] || ! validate_ip "$MASTER_IP"; then
+ echo "error when get master ip: $MASTER_IP"
+ exit 1
+fi
+
+if [ ! -f "$HOME/.kube/config" ]; then
+ echo "not found kubeconfig file"
+ exit 1
+fi
+
+export KUBECONFIG=$HOME/.kube/config
+
+if [ ! -d "/tmp/results" ]; then
+ mkdir -p /tmp/results
+fi
+
+rm -rf /tmp/results/*
+
+function run_conformance_test() {
+ local image_name=$1
+ local tag_name=$2
+
+ docker build -t "$image_name:$tag_name" -f ${KUBEEDGE_ROOT}/build/conformance/Dockerfile .
+
+ docker run --rm \
+ --env E2E_SKIP="\[Serial\]" \
+ --env E2E_PARALLEL="-p" \
+ --env CHECK_EDGECORE_ENVIRONMENT="false" \
+ --env ACK_GINKGO_RC="true" \
+ --env KUBECONFIG=/root/.kube/config \
+ --env RESULTS_DIR=/tmp/results \
+ --env E2E_EXTRA_ARGS="--kube-master=https://${MASTER_IP}:6443" \
+ -v ${KUBECONFIG}:/root/.kube/config \
+ -v /tmp/results:/tmp/results \
+ --network host "$image_name:$tag_name"
+}
+
+run_conformance_test "kubeedge/conformance-test" ${VERSION} || { echo "Conformance test failed with exit code $?"; exit 1; } \ No newline at end of file
diff --git a/vendor/golang.org/x/tools/container/intsets/sparse.go b/vendor/golang.org/x/tools/container/intsets/sparse.go
new file mode 100644
index 000000000..d5fe156ed
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/sparse.go
@@ -0,0 +1,1114 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package intsets provides Sparse, a compact and fast representation
+// for sparse sets of int values.
+//
+// The time complexity of the operations Len, Insert, Remove and Has
+// is in O(n) but in practice those methods are faster and more
+// space-efficient than equivalent operations on sets based on the Go
+// map type. The IsEmpty, Min, Max, Clear and TakeMin operations
+// require constant time.
+package intsets // import "golang.org/x/tools/container/intsets"
+
+// TODO(adonovan):
+// - Add InsertAll(...int), RemoveAll(...int)
+// - Add 'bool changed' results for {Intersection,Difference}With too.
+//
+// TODO(adonovan): implement Dense, a dense bit vector with a similar API.
+// The space usage would be proportional to Max(), not Len(), and the
+// implementation would be based upon big.Int.
+//
+// TODO(adonovan): opt: make UnionWith and Difference faster.
+// These are the hot-spots for go/pointer.
+
+import (
+ "bytes"
+ "fmt"
+ "math/bits"
+)
+
+// A Sparse is a set of int values.
+// Sparse operations (even queries) are not concurrency-safe.
+//
+// The zero value for Sparse is a valid empty set.
+//
+// Sparse sets must be copied using the Copy method, not by assigning
+// a Sparse value.
+type Sparse struct {
+ // An uninitialized Sparse represents an empty set.
+ // An empty set may also be represented by
+ // root.next == root.prev == &root.
+ //
+ // The root is always the block with the smallest offset.
+ // It can be empty, but only if it is the only block; in that case, offset is
+ // MaxInt (which is not a valid offset).
+ root block
+}
+
+type word uintptr
+
+const (
+ _m = ^word(0)
+ bitsPerWord = 8 << (_m>>8&1 + _m>>16&1 + _m>>32&1)
+ bitsPerBlock = 256 // optimal value for go/pointer solver performance
+ wordsPerBlock = bitsPerBlock / bitsPerWord
+)
+
+// Limit values of implementation-specific int type.
+const (
+ MaxInt = int(^uint(0) >> 1)
+ MinInt = -MaxInt - 1
+)
+
+// popcount returns the number of set bits in w.
+func popcount(x word) int {
+ // Avoid OnesCount(uint): don't assume uint = uintptr.
+ if bitsPerWord == 32 {
+ return bits.OnesCount32(uint32(x))
+ } else {
+ return bits.OnesCount64(uint64(x))
+ }
+}
+
+// nlz returns the number of leading zeros of x.
+func nlz(x word) int {
+ // Avoid LeadingZeros(uint): don't assume uint = uintptr.
+ if bitsPerWord == 32 {
+ return bits.LeadingZeros32(uint32(x))
+ } else {
+ return bits.LeadingZeros64(uint64(x))
+ }
+}
+
+// ntz returns the number of trailing zeros of x.
+func ntz(x word) int {
+ // Avoid TrailingZeros(uint): don't assume uint = uintptr.
+ if bitsPerWord == 32 {
+ return bits.TrailingZeros32(uint32(x))
+ } else {
+ return bits.TrailingZeros64(uint64(x))
+ }
+}
+
+// -- block ------------------------------------------------------------
+
+// A set is represented as a circular doubly-linked list of blocks,
+// each containing an offset and a bit array of fixed size
+// bitsPerBlock; the blocks are ordered by increasing offset.
+//
+// The set contains an element x iff the block whose offset is x - (x
+// mod bitsPerBlock) has the bit (x mod bitsPerBlock) set, where mod
+// is the Euclidean remainder.
+//
+// A block may only be empty transiently.
+type block struct {
+ offset int // offset mod bitsPerBlock == 0
+ bits [wordsPerBlock]word // contains at least one set bit
+ next, prev *block // doubly-linked list of blocks
+}
+
+// wordMask returns the word index (in block.bits)
+// and single-bit mask for the block's ith bit.
+func wordMask(i uint) (w uint, mask word) {
+ w = i / bitsPerWord
+ mask = 1 << (i % bitsPerWord)
+ return
+}
+
+// insert sets the block b's ith bit and
+// returns true if it was not already set.
+func (b *block) insert(i uint) bool {
+ w, mask := wordMask(i)
+ if b.bits[w]&mask == 0 {
+ b.bits[w] |= mask
+ return true
+ }
+ return false
+}
+
+// remove clears the block's ith bit and
+// returns true if the bit was previously set.
+// NB: may leave the block empty.
+func (b *block) remove(i uint) bool {
+ w, mask := wordMask(i)
+ if b.bits[w]&mask != 0 {
+ b.bits[w] &^= mask
+ return true
+ }
+ return false
+}
+
+// has reports whether the block's ith bit is set.
+func (b *block) has(i uint) bool {
+ w, mask := wordMask(i)
+ return b.bits[w]&mask != 0
+}
+
+// empty reports whether b.len()==0, but more efficiently.
+func (b *block) empty() bool {
+ for _, w := range b.bits {
+ if w != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// len returns the number of set bits in block b.
+func (b *block) len() int {
+ var l int
+ for _, w := range b.bits {
+ l += popcount(w)
+ }
+ return l
+}
+
+// max returns the maximum element of the block.
+// The block must not be empty.
+func (b *block) max() int {
+ bi := b.offset + bitsPerBlock
+ // Decrement bi by number of high zeros in last.bits.
+ for i := len(b.bits) - 1; i >= 0; i-- {
+ if w := b.bits[i]; w != 0 {
+ return bi - nlz(w) - 1
+ }
+ bi -= bitsPerWord
+ }
+ panic("BUG: empty block")
+}
+
+// min returns the minimum element of the block,
+// and also removes it if take is set.
+// The block must not be initially empty.
+// NB: may leave the block empty.
+func (b *block) min(take bool) int {
+ for i, w := range b.bits {
+ if w != 0 {
+ tz := ntz(w)
+ if take {
+ b.bits[i] = w &^ (1 << uint(tz))
+ }
+ return b.offset + i*bitsPerWord + tz
+ }
+ }
+ panic("BUG: empty block")
+}
+
+// lowerBound returns the smallest element of the block that is greater than or
+// equal to the element corresponding to the ith bit. If there is no such
+// element, the second return value is false.
+func (b *block) lowerBound(i uint) (int, bool) {
+ w := i / bitsPerWord
+ bit := i % bitsPerWord
+
+ if val := b.bits[w] >> bit; val != 0 {
+ return b.offset + int(i) + ntz(val), true
+ }
+
+ for w++; w < wordsPerBlock; w++ {
+ if val := b.bits[w]; val != 0 {
+ return b.offset + int(w*bitsPerWord) + ntz(val), true
+ }
+ }
+
+ return 0, false
+}
+
+// forEach calls f for each element of block b.
+// f must not mutate b's enclosing Sparse.
+func (b *block) forEach(f func(int)) {
+ for i, w := range b.bits {
+ offset := b.offset + i*bitsPerWord
+ for bi := 0; w != 0 && bi < bitsPerWord; bi++ {
+ if w&1 != 0 {
+ f(offset)
+ }
+ offset++
+ w >>= 1
+ }
+ }
+}
+
+// offsetAndBitIndex returns the offset of the block that would
+// contain x and the bit index of x within that block.
+func offsetAndBitIndex(x int) (int, uint) {
+ mod := x % bitsPerBlock
+ if mod < 0 {
+ // Euclidean (non-negative) remainder
+ mod += bitsPerBlock
+ }
+ return x - mod, uint(mod)
+}
+
+// -- Sparse --------------------------------------------------------------
+
+// none is a shared, empty, sentinel block that indicates the end of a block
+// list.
+var none block
+
+// Dummy type used to generate an implicit panic. This must be defined at the
+// package level; if it is defined inside a function, it prevents the inlining
+// of that function.
+type to_copy_a_sparse_you_must_call_its_Copy_method struct{}
+
+// init ensures s is properly initialized.
+func (s *Sparse) init() {
+ root := &s.root
+ if root.next == nil {
+ root.offset = MaxInt
+ root.next = root
+ root.prev = root
+ } else if root.next.prev != root {
+ // Copying a Sparse x leads to pernicious corruption: the
+ // new Sparse y shares the old linked list, but iteration
+ // on y will never encounter &y.root so it goes into a
+ // loop. Fail fast before this occurs.
+ // We don't want to call panic here because it prevents the
+ // inlining of this function.
+ _ = (interface{}(nil)).(to_copy_a_sparse_you_must_call_its_Copy_method)
+ }
+}
+
+func (s *Sparse) first() *block {
+ s.init()
+ if s.root.offset == MaxInt {
+ return &none
+ }
+ return &s.root
+}
+
+// next returns the next block in the list, or end if b is the last block.
+func (s *Sparse) next(b *block) *block {
+ if b.next == &s.root {
+ return &none
+ }
+ return b.next
+}
+
+// prev returns the previous block in the list, or end if b is the first block.
+func (s *Sparse) prev(b *block) *block {
+ if b.prev == &s.root {
+ return &none
+ }
+ return b.prev
+}
+
+// IsEmpty reports whether the set s is empty.
+func (s *Sparse) IsEmpty() bool {
+ return s.root.next == nil || s.root.offset == MaxInt
+}
+
+// Len returns the number of elements in the set s.
+func (s *Sparse) Len() int {
+ var l int
+ for b := s.first(); b != &none; b = s.next(b) {
+ l += b.len()
+ }
+ return l
+}
+
+// Max returns the maximum element of the set s, or MinInt if s is empty.
+func (s *Sparse) Max() int {
+ if s.IsEmpty() {
+ return MinInt
+ }
+ return s.root.prev.max()
+}
+
+// Min returns the minimum element of the set s, or MaxInt if s is empty.
+func (s *Sparse) Min() int {
+ if s.IsEmpty() {
+ return MaxInt
+ }
+ return s.root.min(false)
+}
+
+// LowerBound returns the smallest element >= x, or MaxInt if there is no such
+// element.
+func (s *Sparse) LowerBound(x int) int {
+ offset, i := offsetAndBitIndex(x)
+ for b := s.first(); b != &none; b = s.next(b) {
+ if b.offset > offset {
+ return b.min(false)
+ }
+ if b.offset == offset {
+ if y, ok := b.lowerBound(i); ok {
+ return y
+ }
+ }
+ }
+ return MaxInt
+}
+
+// block returns the block that would contain offset,
+// or nil if s contains no such block.
+// Precondition: offset is a multiple of bitsPerBlock.
+func (s *Sparse) block(offset int) *block {
+ for b := s.first(); b != &none && b.offset <= offset; b = s.next(b) {
+ if b.offset == offset {
+ return b
+ }
+ }
+ return nil
+}
+
+// Insert adds x to the set s, and reports whether the set grew.
+func (s *Sparse) Insert(x int) bool {
+ offset, i := offsetAndBitIndex(x)
+
+ b := s.first()
+ for ; b != &none && b.offset <= offset; b = s.next(b) {
+ if b.offset == offset {
+ return b.insert(i)
+ }
+ }
+
+ // Insert new block before b.
+ new := s.insertBlockBefore(b)
+ new.offset = offset
+ return new.insert(i)
+}
+
+// removeBlock removes a block and returns the block that followed it (or end if
+// it was the last block).
+func (s *Sparse) removeBlock(b *block) *block {
+ if b != &s.root {
+ b.prev.next = b.next
+ b.next.prev = b.prev
+ if b.next == &s.root {
+ return &none
+ }
+ return b.next
+ }
+
+ first := s.root.next
+ if first == &s.root {
+ // This was the only block.
+ s.Clear()
+ return &none
+ }
+ s.root.offset = first.offset
+ s.root.bits = first.bits
+ if first.next == &s.root {
+ // Single block remaining.
+ s.root.next = &s.root
+ s.root.prev = &s.root
+ } else {
+ s.root.next = first.next
+ first.next.prev = &s.root
+ }
+ return &s.root
+}
+
+// Remove removes x from the set s, and reports whether the set shrank.
+func (s *Sparse) Remove(x int) bool {
+ offset, i := offsetAndBitIndex(x)
+ if b := s.block(offset); b != nil {
+ if !b.remove(i) {
+ return false
+ }
+ if b.empty() {
+ s.removeBlock(b)
+ }
+ return true
+ }
+ return false
+}
+
+// Clear removes all elements from the set s.
+func (s *Sparse) Clear() {
+ s.root = block{
+ offset: MaxInt,
+ next: &s.root,
+ prev: &s.root,
+ }
+}
+
+// If set s is non-empty, TakeMin sets *p to the minimum element of
+// the set s, removes that element from the set and returns true.
+// Otherwise, it returns false and *p is undefined.
+//
+// This method may be used for iteration over a worklist like so:
+//
+// var x int
+// for worklist.TakeMin(&x) { use(x) }
+func (s *Sparse) TakeMin(p *int) bool {
+ if s.IsEmpty() {
+ return false
+ }
+ *p = s.root.min(true)
+ if s.root.empty() {
+ s.removeBlock(&s.root)
+ }
+ return true
+}
+
+// Has reports whether x is an element of the set s.
+func (s *Sparse) Has(x int) bool {
+ offset, i := offsetAndBitIndex(x)
+ if b := s.block(offset); b != nil {
+ return b.has(i)
+ }
+ return false
+}
+
+// forEach applies function f to each element of the set s in order.
+//
+// f must not mutate s. Consequently, forEach is not safe to expose
+// to clients. In any case, using "range s.AppendTo()" allows more
+// natural control flow with continue/break/return.
+func (s *Sparse) forEach(f func(int)) {
+ for b := s.first(); b != &none; b = s.next(b) {
+ b.forEach(f)
+ }
+}
+
+// Copy sets s to the value of x.
+func (s *Sparse) Copy(x *Sparse) {
+ if s == x {
+ return
+ }
+
+ xb := x.first()
+ sb := s.first()
+ for xb != &none {
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ xb = x.next(xb)
+ sb = s.next(sb)
+ }
+ s.discardTail(sb)
+}
+
+// insertBlockBefore returns a new block, inserting it before next.
+// If next is the root, the root is replaced. If next is end, the block is
+// inserted at the end.
+func (s *Sparse) insertBlockBefore(next *block) *block {
+ if s.IsEmpty() {
+ if next != &none {
+ panic("BUG: passed block with empty set")
+ }
+ return &s.root
+ }
+
+ if next == &s.root {
+ // Special case: we need to create a new block that will become the root
+	// block. The old root block becomes the second block.
+ second := s.root
+ s.root = block{
+ next: &second,
+ }
+ if second.next == &s.root {
+ s.root.prev = &second
+ } else {
+ s.root.prev = second.prev
+ second.next.prev = &second
+ second.prev = &s.root
+ }
+ return &s.root
+ }
+ if next == &none {
+ // Insert before root.
+ next = &s.root
+ }
+ b := new(block)
+ b.next = next
+ b.prev = next.prev
+ b.prev.next = b
+ next.prev = b
+ return b
+}
+
+// discardTail removes block b and all its successors from s.
+func (s *Sparse) discardTail(b *block) {
+ if b != &none {
+ if b == &s.root {
+ s.Clear()
+ } else {
+ b.prev.next = &s.root
+ s.root.prev = b.prev
+ }
+ }
+}
+
+// IntersectionWith sets s to the intersection s ∩ x.
+func (s *Sparse) IntersectionWith(x *Sparse) {
+ if s == x {
+ return
+ }
+
+ xb := x.first()
+ sb := s.first()
+ for xb != &none && sb != &none {
+ switch {
+ case xb.offset < sb.offset:
+ xb = x.next(xb)
+
+ case xb.offset > sb.offset:
+ sb = s.removeBlock(sb)
+
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] & sb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb = s.next(sb)
+ } else {
+ // sb will be overwritten or removed
+ }
+
+ xb = x.next(xb)
+ }
+ }
+
+ s.discardTail(sb)
+}
+
+// Intersection sets s to the intersection x ∩ y.
+func (s *Sparse) Intersection(x, y *Sparse) {
+ switch {
+ case s == x:
+ s.IntersectionWith(y)
+ return
+ case s == y:
+ s.IntersectionWith(x)
+ return
+ case x == y:
+ s.Copy(x)
+ return
+ }
+
+ xb := x.first()
+ yb := y.first()
+ sb := s.first()
+ for xb != &none && yb != &none {
+ switch {
+ case xb.offset < yb.offset:
+ xb = x.next(xb)
+ continue
+ case xb.offset > yb.offset:
+ yb = y.next(yb)
+ continue
+ }
+
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] & yb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb = s.next(sb)
+ } else {
+ // sb will be overwritten or removed
+ }
+
+ xb = x.next(xb)
+ yb = y.next(yb)
+ }
+
+ s.discardTail(sb)
+}
+
+// Intersects reports whether s ∩ x ≠ ∅.
+func (s *Sparse) Intersects(x *Sparse) bool {
+ sb := s.first()
+ xb := x.first()
+ for sb != &none && xb != &none {
+ switch {
+ case xb.offset < sb.offset:
+ xb = x.next(xb)
+ case xb.offset > sb.offset:
+ sb = s.next(sb)
+ default:
+ for i := range sb.bits {
+ if sb.bits[i]&xb.bits[i] != 0 {
+ return true
+ }
+ }
+ sb = s.next(sb)
+ xb = x.next(xb)
+ }
+ }
+ return false
+}
+
+// UnionWith sets s to the union s ∪ x, and reports whether s grew.
+func (s *Sparse) UnionWith(x *Sparse) bool {
+ if s == x {
+ return false
+ }
+
+ var changed bool
+ xb := x.first()
+ sb := s.first()
+ for xb != &none {
+ if sb != &none && sb.offset == xb.offset {
+ for i := range xb.bits {
+ union := sb.bits[i] | xb.bits[i]
+ if sb.bits[i] != union {
+ sb.bits[i] = union
+ changed = true
+ }
+ }
+ xb = x.next(xb)
+ } else if sb == &none || sb.offset > xb.offset {
+ sb = s.insertBlockBefore(sb)
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ changed = true
+
+ xb = x.next(xb)
+ }
+ sb = s.next(sb)
+ }
+ return changed
+}
+
+// Union sets s to the union x ∪ y.
+func (s *Sparse) Union(x, y *Sparse) {
+ switch {
+ case x == y:
+ s.Copy(x)
+ return
+ case s == x:
+ s.UnionWith(y)
+ return
+ case s == y:
+ s.UnionWith(x)
+ return
+ }
+
+ xb := x.first()
+ yb := y.first()
+ sb := s.first()
+ for xb != &none || yb != &none {
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ switch {
+ case yb == &none || (xb != &none && xb.offset < yb.offset):
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ xb = x.next(xb)
+
+ case xb == &none || (yb != &none && yb.offset < xb.offset):
+ sb.offset = yb.offset
+ sb.bits = yb.bits
+ yb = y.next(yb)
+
+ default:
+ sb.offset = xb.offset
+ for i := range xb.bits {
+ sb.bits[i] = xb.bits[i] | yb.bits[i]
+ }
+ xb = x.next(xb)
+ yb = y.next(yb)
+ }
+ sb = s.next(sb)
+ }
+
+ s.discardTail(sb)
+}
+
+// DifferenceWith sets s to the difference s ∖ x.
+func (s *Sparse) DifferenceWith(x *Sparse) {
+ if s == x {
+ s.Clear()
+ return
+ }
+
+ xb := x.first()
+ sb := s.first()
+ for xb != &none && sb != &none {
+ switch {
+ case xb.offset > sb.offset:
+ sb = s.next(sb)
+
+ case xb.offset < sb.offset:
+ xb = x.next(xb)
+
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := sb.bits[i] & ^xb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum == 0 {
+ sb = s.removeBlock(sb)
+ } else {
+ sb = s.next(sb)
+ }
+ xb = x.next(xb)
+ }
+ }
+}
+
+// Difference sets s to the difference x ∖ y.
+func (s *Sparse) Difference(x, y *Sparse) {
+ switch {
+ case x == y:
+ s.Clear()
+ return
+ case s == x:
+ s.DifferenceWith(y)
+ return
+ case s == y:
+ var y2 Sparse
+ y2.Copy(y)
+ s.Difference(x, &y2)
+ return
+ }
+
+ xb := x.first()
+ yb := y.first()
+ sb := s.first()
+ for xb != &none && yb != &none {
+ if xb.offset > yb.offset {
+ // y has block, x has &none
+ yb = y.next(yb)
+ continue
+ }
+
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+
+ switch {
+ case xb.offset < yb.offset:
+ // x has block, y has &none
+ sb.bits = xb.bits
+
+ sb = s.next(sb)
+
+ default:
+ // x and y have corresponding blocks
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] & ^yb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb = s.next(sb)
+ } else {
+ // sb will be overwritten or removed
+ }
+
+ yb = y.next(yb)
+ }
+ xb = x.next(xb)
+ }
+
+ for xb != &none {
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = s.next(sb)
+
+ xb = x.next(xb)
+ }
+
+ s.discardTail(sb)
+}
+
+// SymmetricDifferenceWith sets s to the symmetric difference s ∆ x.
+func (s *Sparse) SymmetricDifferenceWith(x *Sparse) {
+ if s == x {
+ s.Clear()
+ return
+ }
+
+ sb := s.first()
+ xb := x.first()
+ for xb != &none && sb != &none {
+ switch {
+ case sb.offset < xb.offset:
+ sb = s.next(sb)
+ case xb.offset < sb.offset:
+ nb := s.insertBlockBefore(sb)
+ nb.offset = xb.offset
+ nb.bits = xb.bits
+ xb = x.next(xb)
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := sb.bits[i] ^ xb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum == 0 {
+ sb = s.removeBlock(sb)
+ } else {
+ sb = s.next(sb)
+ }
+ xb = x.next(xb)
+ }
+ }
+
+ for xb != &none { // append the tail of x to s
+ sb = s.insertBlockBefore(sb)
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = s.next(sb)
+ xb = x.next(xb)
+ }
+}
+
+// SymmetricDifference sets s to the symmetric difference x ∆ y.
+func (s *Sparse) SymmetricDifference(x, y *Sparse) {
+ switch {
+ case x == y:
+ s.Clear()
+ return
+ case s == x:
+ s.SymmetricDifferenceWith(y)
+ return
+ case s == y:
+ s.SymmetricDifferenceWith(x)
+ return
+ }
+
+ sb := s.first()
+ xb := x.first()
+ yb := y.first()
+ for xb != &none && yb != &none {
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ switch {
+ case yb.offset < xb.offset:
+ sb.offset = yb.offset
+ sb.bits = yb.bits
+ sb = s.next(sb)
+ yb = y.next(yb)
+ case xb.offset < yb.offset:
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = s.next(sb)
+ xb = x.next(xb)
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] ^ yb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb.offset = xb.offset
+ sb = s.next(sb)
+ }
+ xb = x.next(xb)
+ yb = y.next(yb)
+ }
+ }
+
+ for xb != &none { // append the tail of x to s
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = s.next(sb)
+ xb = x.next(xb)
+ }
+
+ for yb != &none { // append the tail of y to s
+ if sb == &none {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = yb.offset
+ sb.bits = yb.bits
+ sb = s.next(sb)
+ yb = y.next(yb)
+ }
+
+ s.discardTail(sb)
+}
+
+// SubsetOf reports whether s ∖ x = ∅.
+func (s *Sparse) SubsetOf(x *Sparse) bool {
+ if s == x {
+ return true
+ }
+
+ sb := s.first()
+ xb := x.first()
+ for sb != &none {
+ switch {
+ case xb == &none || xb.offset > sb.offset:
+ return false
+ case xb.offset < sb.offset:
+ xb = x.next(xb)
+ default:
+ for i := range sb.bits {
+ if sb.bits[i]&^xb.bits[i] != 0 {
+ return false
+ }
+ }
+ sb = s.next(sb)
+ xb = x.next(xb)
+ }
+ }
+ return true
+}
+
+// Equals reports whether the sets s and t have the same elements.
+func (s *Sparse) Equals(t *Sparse) bool {
+ if s == t {
+ return true
+ }
+ sb := s.first()
+ tb := t.first()
+ for {
+ switch {
+ case sb == &none && tb == &none:
+ return true
+ case sb == &none || tb == &none:
+ return false
+ case sb.offset != tb.offset:
+ return false
+ case sb.bits != tb.bits:
+ return false
+ }
+
+ sb = s.next(sb)
+ tb = t.next(tb)
+ }
+}
+
+// String returns a human-readable description of the set s.
+func (s *Sparse) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ s.forEach(func(x int) {
+ if buf.Len() > 1 {
+ buf.WriteByte(' ')
+ }
+ fmt.Fprintf(&buf, "%d", x)
+ })
+ buf.WriteByte('}')
+ return buf.String()
+}
+
+// BitString returns the set as a string of 1s and 0s denoting the sum
+// of the i'th powers of 2, for each i in s. A radix point, always
+// preceded by a digit, appears if the sum is non-integral.
+//
+// Examples:
+//
+// {}.BitString() = "0"
+// {4,5}.BitString() = "110000"
+// {-3}.BitString() = "0.001"
+// {-3,0,4,5}.BitString() = "110001.001"
+func (s *Sparse) BitString() string {
+ if s.IsEmpty() {
+ return "0"
+ }
+
+ min, max := s.Min(), s.Max()
+ var nbytes int
+ if max > 0 {
+ nbytes = max
+ }
+ nbytes++ // zero bit
+ radix := nbytes
+ if min < 0 {
+ nbytes += len(".") - min
+ }
+
+ b := make([]byte, nbytes)
+ for i := range b {
+ b[i] = '0'
+ }
+ if radix < nbytes {
+ b[radix] = '.'
+ }
+ s.forEach(func(x int) {
+ if x >= 0 {
+ x += len(".")
+ }
+ b[radix-x] = '1'
+ })
+ return string(b)
+}
+
+// GoString returns a string showing the internal representation of
+// the set s.
+func (s *Sparse) GoString() string {
+ var buf bytes.Buffer
+ for b := s.first(); b != &none; b = s.next(b) {
+ fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p",
+ b, b.offset, b.next, b.prev)
+ for _, w := range b.bits {
+ fmt.Fprintf(&buf, " 0%016x", w)
+ }
+ fmt.Fprintf(&buf, "}\n")
+ }
+ return buf.String()
+}
+
+// AppendTo returns the result of appending the elements of s to slice
+// in order.
+func (s *Sparse) AppendTo(slice []int) []int {
+ s.forEach(func(x int) {
+ slice = append(slice, x)
+ })
+ return slice
+}
+
+// -- Testing/debugging ------------------------------------------------
+
+// check returns an error if the representation invariants of s are violated.
+func (s *Sparse) check() error {
+ s.init()
+ if s.root.empty() {
+ // An empty set must have only the root block with offset MaxInt.
+ if s.root.next != &s.root {
+ return fmt.Errorf("multiple blocks with empty root block")
+ }
+ if s.root.offset != MaxInt {
+ return fmt.Errorf("empty set has offset %d, should be MaxInt", s.root.offset)
+ }
+ return nil
+ }
+ for b := s.first(); ; b = s.next(b) {
+ if b.offset%bitsPerBlock != 0 {
+ return fmt.Errorf("bad offset modulo: %d", b.offset)
+ }
+ if b.empty() {
+ return fmt.Errorf("empty block")
+ }
+ if b.prev.next != b {
+ return fmt.Errorf("bad prev.next link")
+ }
+ if b.next.prev != b {
+ return fmt.Errorf("bad next.prev link")
+ }
+ if b.next == &s.root {
+ break
+ }
+ if b.offset >= b.next.offset {
+ return fmt.Errorf("bad offset order: b.offset=%d, b.next.offset=%d",
+ b.offset, b.next.offset)
+ }
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/persistentvolume/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/persistentvolume/util.go
new file mode 100644
index 000000000..7d55e7778
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/v1/persistentvolume/util.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package persistentvolume
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+func getClaimRefNamespace(pv *corev1.PersistentVolume) string {
+ if pv.Spec.ClaimRef != nil {
+ return pv.Spec.ClaimRef.Namespace
+ }
+ return ""
+}
+
+// Visitor is called with each object's namespace and name, and returns true if visiting should continue
+type Visitor func(namespace, name string, kubeletVisible bool) (shouldContinue bool)
+
+func skipEmptyNames(visitor Visitor) Visitor {
+ return func(namespace, name string, kubeletVisible bool) bool {
+ if len(name) == 0 {
+ // continue visiting
+ return true
+ }
+ // delegate to visitor
+ return visitor(namespace, name, kubeletVisible)
+ }
+}
+
+// VisitPVSecretNames invokes the visitor function with the name of every secret
+// referenced by the PV spec. If visitor returns false, visiting is short-circuited.
+// Returns true if visiting completed, false if visiting was short-circuited.
+func VisitPVSecretNames(pv *corev1.PersistentVolume, visitor Visitor) bool {
+ visitor = skipEmptyNames(visitor)
+ source := &pv.Spec.PersistentVolumeSource
+ switch {
+ case source.AzureFile != nil:
+ if source.AzureFile.SecretNamespace != nil && len(*source.AzureFile.SecretNamespace) > 0 {
+ if len(source.AzureFile.SecretName) > 0 && !visitor(*source.AzureFile.SecretNamespace, source.AzureFile.SecretName, true /* kubeletVisible */) {
+ return false
+ }
+ } else {
+ if len(source.AzureFile.SecretName) > 0 && !visitor(getClaimRefNamespace(pv), source.AzureFile.SecretName, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ return true
+ case source.CephFS != nil:
+ if source.CephFS.SecretRef != nil {
+ // previously persisted PV objects use claimRef namespace
+ ns := getClaimRefNamespace(pv)
+ if len(source.CephFS.SecretRef.Namespace) > 0 {
+ // use the secret namespace if namespace is set
+ ns = source.CephFS.SecretRef.Namespace
+ }
+ if !visitor(ns, source.CephFS.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ case source.Cinder != nil:
+ if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Namespace, source.Cinder.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ case source.FlexVolume != nil:
+ if source.FlexVolume.SecretRef != nil {
+ // previously persisted PV objects use claimRef namespace
+ ns := getClaimRefNamespace(pv)
+ if len(source.FlexVolume.SecretRef.Namespace) > 0 {
+ // use the secret namespace if namespace is set
+ ns = source.FlexVolume.SecretRef.Namespace
+ }
+ if !visitor(ns, source.FlexVolume.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ case source.RBD != nil:
+ if source.RBD.SecretRef != nil {
+ // previously persisted PV objects use claimRef namespace
+ ns := getClaimRefNamespace(pv)
+ if len(source.RBD.SecretRef.Namespace) > 0 {
+ // use the secret namespace if namespace is set
+ ns = source.RBD.SecretRef.Namespace
+ }
+ if !visitor(ns, source.RBD.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ case source.ScaleIO != nil:
+ if source.ScaleIO.SecretRef != nil {
+ ns := getClaimRefNamespace(pv)
+ if source.ScaleIO.SecretRef != nil && len(source.ScaleIO.SecretRef.Namespace) > 0 {
+ ns = source.ScaleIO.SecretRef.Namespace
+ }
+ if !visitor(ns, source.ScaleIO.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ case source.ISCSI != nil:
+ if source.ISCSI.SecretRef != nil {
+ // previously persisted PV objects use claimRef namespace
+ ns := getClaimRefNamespace(pv)
+ if len(source.ISCSI.SecretRef.Namespace) > 0 {
+ // use the secret namespace if namespace is set
+ ns = source.ISCSI.SecretRef.Namespace
+ }
+ if !visitor(ns, source.ISCSI.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ case source.StorageOS != nil:
+ if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Namespace, source.StorageOS.SecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ case source.CSI != nil:
+ if source.CSI.ControllerPublishSecretRef != nil {
+ if !visitor(source.CSI.ControllerPublishSecretRef.Namespace, source.CSI.ControllerPublishSecretRef.Name, false /* kubeletVisible */) {
+ return false
+ }
+ }
+ if source.CSI.ControllerExpandSecretRef != nil {
+ if !visitor(source.CSI.ControllerExpandSecretRef.Namespace, source.CSI.ControllerExpandSecretRef.Name, false /* kubeletVisible */) {
+ return false
+ }
+ }
+
+ if source.CSI.NodePublishSecretRef != nil {
+ if !visitor(source.CSI.NodePublishSecretRef.Namespace, source.CSI.NodePublishSecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ if source.CSI.NodeStageSecretRef != nil {
+ if !visitor(source.CSI.NodeStageSecretRef.Namespace, source.CSI.NodeStageSecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ if source.CSI.NodeExpandSecretRef != nil {
+ if !visitor(source.CSI.NodeExpandSecretRef.Namespace, source.CSI.NodeExpandSecretRef.Name, true /* kubeletVisible */) {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/abac/OWNERS
new file mode 100644
index 000000000..2fa50ca5b
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# approval on api packages bubbles to api-approvers
+reviewers:
+ - sig-auth-authorizers-approvers
+ - sig-auth-authorizers-reviewers
+labels:
+ - sig/auth
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/doc.go
new file mode 100644
index 000000000..9d24e1135
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package abac
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go
new file mode 100644
index 000000000..b89ab1cbd
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package latest
+
+import (
+	// Init the abac api package
+ _ "k8s.io/kubernetes/pkg/apis/abac"
+ _ "k8s.io/kubernetes/pkg/apis/abac/v0"
+ _ "k8s.io/kubernetes/pkg/apis/abac/v1beta1"
+)
+
+// TODO: this file is totally wrong, it should look like other latest files.
+// lavalamp is in the middle of fixing this code, so wait for the new way of doing things.
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/register.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/register.go
new file mode 100644
index 000000000..8dacb5803
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package abac
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+// GroupName is the API group for abac
+const GroupName = "abac.authorization.kubernetes.io"
+
+// SchemeGroupVersion is the API group version used to register abac internal
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Scheme is the default instance of runtime.Scheme to which types in the abac API group are api.Registry.
+// TODO: remove this, abac should not have its own scheme.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+func init() {
+ // TODO: delete this, abac should not have its own scheme.
+ addKnownTypes(Scheme)
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Policy{},
+ )
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/types.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/types.go
new file mode 100644
index 000000000..3f094b724
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/types.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package abac
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy contains a single ABAC policy rule
+type Policy struct {
+ metav1.TypeMeta
+
+ // Spec describes the policy rule
+ Spec PolicySpec
+}
+
+// PolicySpec contains the attributes for a policy rule
+type PolicySpec struct {
+
+ // User is the username this rule applies to.
+ // Either user or group is required to match the request.
+ // "*" matches all users.
+ User string
+
+ // Group is the group this rule applies to.
+ // Either user or group is required to match the request.
+ // "*" matches all groups.
+ Group string
+
+ // Readonly matches readonly requests when true, and all requests when false
+ Readonly bool
+
+ // APIGroup is the name of an API group. APIGroup, Resource, and Namespace are required to match resource requests.
+ // "*" matches all API groups
+ APIGroup string
+
+ // Resource is the name of a resource. APIGroup, Resource, and Namespace are required to match resource requests.
+ // "*" matches all resources
+ Resource string
+
+ // Namespace is the name of a namespace. APIGroup, Resource, and Namespace are required to match resource requests.
+ // "*" matches all namespaces (including unnamespaced requests)
+ Namespace string
+
+ // NonResourcePath matches non-resource request paths.
+ // "*" matches all paths
+ // "/foo/*" matches all subpaths of foo
+ NonResourcePath string
+
+ // TODO: "expires" string in RFC3339 format.
+
+ // TODO: want a way to allow some users to restart containers of a pod but
+ // not delete or modify it.
+
+ // TODO: want a way to allow a controller to create a pod based only on a
+ // certain podTemplates.
+
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go
new file mode 100644
index 000000000..13b537502
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v0
+
+import (
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/kubernetes/pkg/apis/abac"
+)
+
+// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated,
+// but we don't want a client library (which must include types), depending on a server library
+const allAuthenticated = "system:authenticated"
+
+func Convert_v0_Policy_To_abac_Policy(in *Policy, out *abac.Policy, s conversion.Scope) error {
+ out.Spec.User = in.User
+ out.Spec.Group = in.Group
+ out.Spec.Namespace = in.Namespace
+ out.Spec.Resource = in.Resource
+ out.Spec.Readonly = in.Readonly
+
+ // In v0, unspecified user and group matches all authenticated subjects
+ if len(in.User) == 0 && len(in.Group) == 0 {
+ out.Spec.Group = allAuthenticated
+ }
+ // In v0, user or group of * matches all authenticated subjects
+ if in.User == "*" || in.Group == "*" {
+ out.Spec.Group = allAuthenticated
+ out.Spec.User = ""
+ }
+
+ // In v0, leaving namespace empty matches all namespaces
+ if len(in.Namespace) == 0 {
+ out.Spec.Namespace = "*"
+ }
+ // In v0, leaving resource empty matches all resources
+ if len(in.Resource) == 0 {
+ out.Spec.Resource = "*"
+ }
+ // Any rule in v0 should match all API groups
+ out.Spec.APIGroup = "*"
+
+ // In v0, leaving namespace and resource blank allows non-resource paths
+ if len(in.Namespace) == 0 && len(in.Resource) == 0 {
+ out.Spec.NonResourcePath = "*"
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go
new file mode 100644
index 000000000..44aa923c3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=false
+// +k8s:deepcopy-gen=package
+
+// +groupName=abac.authorization.kubernetes.io
+
+package v0 // import "k8s.io/kubernetes/pkg/apis/abac/v0"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go
new file mode 100644
index 000000000..3d22251c4
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v0
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/kubernetes/pkg/apis/abac"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "abac.authorization.kubernetes.io"
+
+// SchemeGroupVersion is the API group version used to register abac v0
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v0"}
+
+func init() {
+ // TODO: Delete this init function, abac should not have its own scheme.
+ utilruntime.Must(addKnownTypes(abac.Scheme))
+
+ utilruntime.Must(RegisterConversions(abac.Scheme))
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ SchemeBuilder runtime.SchemeBuilder
+	// localSchemeBuilder is a pointer to SchemeBuilder instance. Using localSchemeBuilder
+ // defaulting and conversion init funcs are registered as well.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Policy{},
+ )
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/types.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/types.go
new file mode 100644
index 000000000..3ebf30fea
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/types.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+package v0
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy contains a single ABAC policy rule
+type Policy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // User is the username this rule applies to.
+ // Either user or group is required to match the request.
+ // "*" matches all users.
+ // +optional
+ User string `json:"user,omitempty"`
+
+ // Group is the group this rule applies to.
+ // Either user or group is required to match the request.
+ // "*" matches all groups.
+ // +optional
+ Group string `json:"group,omitempty"`
+
+ // Readonly matches readonly requests when true, and all requests when false
+ // +optional
+ Readonly bool `json:"readonly,omitempty"`
+
+ // Resource is the name of a resource
+ // "*" matches all resources
+ // +optional
+ Resource string `json:"resource,omitempty"`
+
+ // Namespace is the name of a namespace
+ // "*" matches all namespaces (including unnamespaced requests)
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.conversion.go
new file mode 100644
index 000000000..1111f6b07
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.conversion.go
@@ -0,0 +1,43 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v0
+
+import (
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ abac "k8s.io/kubernetes/pkg/apis/abac"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddConversionFunc((*Policy)(nil), (*abac.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v0_Policy_To_abac_Policy(a.(*Policy), b.(*abac.Policy), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go
new file mode 100644
index 000000000..0e2ad7bca
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go
@@ -0,0 +1,51 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v0
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+ if in == nil {
+ return nil
+ }
+ out := new(Policy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/conversion.go
new file mode 100644
index 000000000..6508d479a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/conversion.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/kubernetes/pkg/apis/abac"
+)
+
+// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated,
+// but we don't want a client library (which must include types) depending on a server library
+const allAuthenticated = "system:authenticated"
+
+// Convert_v1beta1_Policy_To_abac_Policy converts a versioned v1beta1 Policy
+// into the internal abac representation. After the generated field-by-field
+// conversion it applies v1beta1-specific semantics: a wildcard ("*") user or
+// group is rewritten to the system:authenticated group (and the user cleared)
+// so that "*" never matches unauthenticated subjects.
+func Convert_v1beta1_Policy_To_abac_Policy(in *Policy, out *abac.Policy, s conversion.Scope) error {
+	if err := autoConvert_v1beta1_Policy_To_abac_Policy(in, out, s); err != nil {
+		return err
+	}
+
+	// In v1beta1, * user or group maps to all authenticated subjects
+	if in.Spec.User == "*" || in.Spec.Group == "*" {
+		out.Spec.Group = allAuthenticated
+		out.Spec.User = ""
+	}
+
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go
new file mode 100644
index 000000000..62ac4e21d
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/abac
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=abac.authorization.kubernetes.io
+
+package v1beta1 // import "k8s.io/kubernetes/pkg/apis/abac/v1beta1"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go
new file mode 100644
index 000000000..ff20882c6
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/kubernetes/pkg/apis/abac"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "abac.authorization.kubernetes.io"
+
+// SchemeGroupVersion is the API group and version for abac v1beta1
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+func init() {
+ // TODO: Delete this init function, abac should not have its own scheme.
+ utilruntime.Must(addKnownTypes(abac.Scheme))
+
+ utilruntime.Must(RegisterConversions(abac.Scheme))
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ SchemeBuilder runtime.SchemeBuilder
+ // localSchemeBuilder is a pointer to the SchemeBuilder instance. Using localSchemeBuilder
+ // defaulting and conversion init funcs are registered as well.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes, RegisterDefaults)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Policy{},
+ )
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go
new file mode 100644
index 000000000..6e7c7239c
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy contains a single ABAC policy rule
+type Policy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec describes the policy rule
+ Spec PolicySpec `json:"spec"`
+}
+
+// PolicySpec contains the attributes for a policy rule
+type PolicySpec struct {
+ // User is the username this rule applies to.
+ // Either user or group is required to match the request.
+ // "*" matches all users.
+ // +optional
+ User string `json:"user,omitempty"`
+
+ // Group is the group this rule applies to.
+ // Either user or group is required to match the request.
+ // "*" matches all groups.
+ // +optional
+ Group string `json:"group,omitempty"`
+
+ // Readonly matches readonly requests when true, and all requests when false
+ // +optional
+ Readonly bool `json:"readonly,omitempty"`
+
+ // APIGroup is the name of an API group. APIGroup, Resource, and Namespace are required to match resource requests.
+ // "*" matches all API groups
+ // +optional
+ APIGroup string `json:"apiGroup,omitempty"`
+
+ // Resource is the name of a resource. APIGroup, Resource, and Namespace are required to match resource requests.
+ // "*" matches all resources
+ // +optional
+ Resource string `json:"resource,omitempty"`
+
+ // Namespace is the name of a namespace. APIGroup, Resource, and Namespace are required to match resource requests.
+ // "*" matches all namespaces (including unnamespaced requests)
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+
+ // NonResourcePath matches non-resource request paths.
+ // "*" matches all paths
+ // "/foo/*" matches all subpaths of foo
+ // +optional
+ NonResourcePath string `json:"nonResourcePath,omitempty"`
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.conversion.go
new file mode 100644
index 000000000..4bb4d8b54
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,109 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ abac "k8s.io/kubernetes/pkg/apis/abac"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*abac.Policy)(nil), (*Policy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_abac_Policy_To_v1beta1_Policy(a.(*abac.Policy), b.(*Policy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PolicySpec)(nil), (*abac.PolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_PolicySpec_To_abac_PolicySpec(a.(*PolicySpec), b.(*abac.PolicySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*abac.PolicySpec)(nil), (*PolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_abac_PolicySpec_To_v1beta1_PolicySpec(a.(*abac.PolicySpec), b.(*PolicySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*Policy)(nil), (*abac.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Policy_To_abac_Policy(a.(*Policy), b.(*abac.Policy), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_Policy_To_abac_Policy(in *Policy, out *abac.Policy, s conversion.Scope) error {
+ if err := Convert_v1beta1_PolicySpec_To_abac_PolicySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_abac_Policy_To_v1beta1_Policy(in *abac.Policy, out *Policy, s conversion.Scope) error {
+ if err := Convert_abac_PolicySpec_To_v1beta1_PolicySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_abac_Policy_To_v1beta1_Policy is an autogenerated conversion function.
+func Convert_abac_Policy_To_v1beta1_Policy(in *abac.Policy, out *Policy, s conversion.Scope) error {
+ return autoConvert_abac_Policy_To_v1beta1_Policy(in, out, s)
+}
+
+func autoConvert_v1beta1_PolicySpec_To_abac_PolicySpec(in *PolicySpec, out *abac.PolicySpec, s conversion.Scope) error {
+ out.User = in.User
+ out.Group = in.Group
+ out.Readonly = in.Readonly
+ out.APIGroup = in.APIGroup
+ out.Resource = in.Resource
+ out.Namespace = in.Namespace
+ out.NonResourcePath = in.NonResourcePath
+ return nil
+}
+
+// Convert_v1beta1_PolicySpec_To_abac_PolicySpec is an autogenerated conversion function.
+func Convert_v1beta1_PolicySpec_To_abac_PolicySpec(in *PolicySpec, out *abac.PolicySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_PolicySpec_To_abac_PolicySpec(in, out, s)
+}
+
+func autoConvert_abac_PolicySpec_To_v1beta1_PolicySpec(in *abac.PolicySpec, out *PolicySpec, s conversion.Scope) error {
+ out.User = in.User
+ out.Group = in.Group
+ out.Readonly = in.Readonly
+ out.APIGroup = in.APIGroup
+ out.Resource = in.Resource
+ out.Namespace = in.Namespace
+ out.NonResourcePath = in.NonResourcePath
+ return nil
+}
+
+// Convert_abac_PolicySpec_To_v1beta1_PolicySpec is an autogenerated conversion function.
+func Convert_abac_PolicySpec_To_v1beta1_PolicySpec(in *abac.PolicySpec, out *PolicySpec, s conversion.Scope) error {
+ return autoConvert_abac_PolicySpec_To_v1beta1_PolicySpec(in, out, s)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..f4cc7b93f
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,68 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.Spec = in.Spec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+ if in == nil {
+ return nil
+ }
+ out := new(Policy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicySpec) DeepCopyInto(out *PolicySpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec.
+func (in *PolicySpec) DeepCopy() *PolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.defaults.go
new file mode 100644
index 000000000..198b5be4a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go
new file mode 100644
index 000000000..40c5b58fd
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go
@@ -0,0 +1,68 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package abac
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.Spec = in.Spec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+ if in == nil {
+ return nil
+ }
+ out := new(Policy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicySpec) DeepCopyInto(out *PolicySpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec.
+func (in *PolicySpec) DeepCopy() *PolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go
new file mode 100644
index 000000000..b69c14419
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go
@@ -0,0 +1,279 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package abac authorizes Kubernetes API actions using an Attribute-based access control scheme.
+package abac
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/kubernetes/pkg/apis/abac"
+
+ // Import latest API for init/side-effects
+ _ "k8s.io/kubernetes/pkg/apis/abac/latest"
+ "k8s.io/kubernetes/pkg/apis/abac/v0"
+)
+
+// policyLoadError records where in a policy file a load failure occurred.
+type policyLoadError struct {
+	path string
+	line int
+	data []byte
+	err  error
+}
+
+// Error renders the failure for display. A negative line number means the
+// error was not tied to a specific line (e.g. a scanner failure).
+func (e policyLoadError) Error() string {
+	if e.line < 0 {
+		return fmt.Sprintf("error reading policy file %s: %v", e.path, e.err)
+	}
+	return fmt.Sprintf("error reading policy file %s, line %d: %s: %v", e.path, e.line, string(e.data), e.err)
+}
+
+// PolicyList is simply a slice of Policy structs.
+type PolicyList []*abac.Policy
+
+// NewFromFile attempts to create a policy list from the given file.
+//
+// The file is read line by line; blank lines and '#' comments are skipped.
+// Each remaining line is decoded as a versioned Policy; lines that carry no
+// apiVersion/kind (or an unregistered one) are retried as legacy unversioned
+// (v0) policies and converted to the internal form, with a single warning
+// logged per file.
+//
+// TODO: Have policies be created via an API call and stored in REST storage.
+func NewFromFile(path string) (PolicyList, error) {
+	// File format is one map per line. This allows easy concatenation of files,
+	// comments in files, and identification of errors by line number.
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+	pl := make(PolicyList, 0)
+
+	decoder := abac.Codecs.UniversalDecoder()
+
+	// i tracks the 1-based line number for error reporting.
+	i := 0
+	unversionedLines := 0
+	for scanner.Scan() {
+		i++
+		p := &abac.Policy{}
+		b := scanner.Bytes()
+
+		// skip comment lines and blank lines
+		trimmed := strings.TrimSpace(string(b))
+		if len(trimmed) == 0 || strings.HasPrefix(trimmed, "#") {
+			continue
+		}
+
+		decodedObj, _, err := decoder.Decode(b, nil, nil)
+		if err != nil {
+			// Only fall back to the legacy v0 path for "not versioned/registered"
+			// errors; anything else is a hard parse failure for this line.
+			if !(runtime.IsMissingVersion(err) || runtime.IsMissingKind(err) || runtime.IsNotRegisteredError(err)) {
+				return nil, policyLoadError{path, i, b, err}
+			}
+			unversionedLines++
+			// Migrate unversioned policy object
+			oldPolicy := &v0.Policy{}
+			if err := runtime.DecodeInto(decoder, b, oldPolicy); err != nil {
+				return nil, policyLoadError{path, i, b, err}
+			}
+			if err := abac.Scheme.Convert(oldPolicy, p, nil); err != nil {
+				return nil, policyLoadError{path, i, b, err}
+			}
+			pl = append(pl, p)
+			continue
+		}
+
+		decodedPolicy, ok := decodedObj.(*abac.Policy)
+		if !ok {
+			return nil, policyLoadError{path, i, b, fmt.Errorf("unrecognized object: %#v", decodedObj)}
+		}
+		pl = append(pl, decodedPolicy)
+	}
+
+	if unversionedLines > 0 {
+		klog.Warningf("Policy file %s contained unversioned rules. See docs/admin/authorization.md#abac-mode for ABAC file format details.", path)
+	}
+
+	// Line -1 marks an error not attributable to a specific line.
+	if err := scanner.Err(); err != nil {
+		return nil, policyLoadError{path, -1, nil, err}
+	}
+	return pl, nil
+}
+
+// matches reports whether a single policy permits the given request:
+// the subject, the verb, and exactly one of the resource / non-resource
+// clauses must all match.
+func matches(p abac.Policy, a authorizer.Attributes) bool {
+	if !subjectMatches(p, a.GetUser()) {
+		return false
+	}
+	if !verbMatches(p, a) {
+		return false
+	}
+	// Resource and non-resource requests are mutually exclusive, at most one will match a policy
+	return resourceMatches(p, a) || nonResourceMatches(p, a)
+}
+
+// subjectMatches returns true if specified user and group properties in the policy match the attributes
+// Note: a policy that names neither a user nor a group matches nothing —
+// at least one of the two clauses below must set matched to true.
+func subjectMatches(p abac.Policy, user user.Info) bool {
+	matched := false
+
+	// Requests without an authenticated user never match.
+	if user == nil {
+		return false
+	}
+	username := user.GetName()
+	groups := user.GetGroups()
+
+	// If the policy specified a user, ensure it matches
+	if len(p.Spec.User) > 0 {
+		if p.Spec.User == "*" {
+			matched = true
+		} else {
+			matched = p.Spec.User == username
+			if !matched {
+				return false
+			}
+		}
+	}
+
+	// If the policy specified a group, ensure it matches
+	if len(p.Spec.Group) > 0 {
+		if p.Spec.Group == "*" {
+			matched = true
+		} else {
+			// The user must belong to at least one of the named groups.
+			matched = false
+			for _, group := range groups {
+				if p.Spec.Group == group {
+					matched = true
+					break
+				}
+			}
+			if !matched {
+				return false
+			}
+		}
+	}
+
+	return matched
+}
+
+// verbMatches reports whether the policy covers the request's verb class:
+// read-only requests are permitted by every policy, while writes require
+// the policy itself not to be marked readonly.
+func verbMatches(p abac.Policy, a authorizer.Attributes) bool {
+	// TODO: match on verb
+	return a.IsReadOnly() || !p.Spec.Readonly
+}
+
+// nonResourceMatches reports whether the policy's NonResourcePath clause
+// covers the request's URL path.
+func nonResourceMatches(p abac.Policy, a authorizer.Attributes) bool {
+	// A non-resource policy cannot match a resource request
+	if a.IsResourceRequest() {
+		return false
+	}
+	policyPath := p.Spec.NonResourcePath
+	switch {
+	case policyPath == "*":
+		// Allow wildcard match
+		return true
+	case policyPath == a.GetPath():
+		// Allow exact match
+		return true
+	case strings.HasSuffix(policyPath, "*") && strings.HasPrefix(a.GetPath(), strings.TrimRight(policyPath, "*")):
+		// Allow a trailing * subpath match
+		return true
+	}
+	return false
+}
+
+// resourceMatches reports whether the policy's namespace, resource, and API
+// group clauses all cover the request; "*" in any clause matches everything.
+func resourceMatches(p abac.Policy, a authorizer.Attributes) bool {
+	// A resource policy cannot match a non-resource request
+	if !a.IsResourceRequest() {
+		return false
+	}
+	namespaceOK := p.Spec.Namespace == "*" || p.Spec.Namespace == a.GetNamespace()
+	resourceOK := p.Spec.Resource == "*" || p.Spec.Resource == a.GetResource()
+	apiGroupOK := p.Spec.APIGroup == "*" || p.Spec.APIGroup == a.GetAPIGroup()
+	return namespaceOK && resourceOK && apiGroupOK
+}
+
+// Authorize implements authorizer.Authorize
+// The request is allowed as soon as any policy in the list matches its
+// attributes; otherwise DecisionNoOpinion is returned so that other
+// authorizers in a chain may still decide.
+func (pl PolicyList) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+	for _, p := range pl {
+		if matches(*p, a) {
+			return authorizer.DecisionAllow, "", nil
+		}
+	}
+	return authorizer.DecisionNoOpinion, "No policy matched.", nil
+	// TODO: Benchmark how much time policy matching takes with a medium size
+	// policy file, compared to other steps such as encoding/decoding.
+	// Then, add Caching only if needed.
+}
+
+// RulesFor returns rules for the given user and namespace.
+// For every policy whose subject matches the user and whose namespace clause
+// covers the given namespace (or is "*"), the policy's resource and/or
+// non-resource clause is translated into a rule. The boolean (incomplete)
+// result is always false: the policy list can be enumerated exhaustively.
+func (pl PolicyList) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
+	var (
+		resourceRules    []authorizer.ResourceRuleInfo
+		nonResourceRules []authorizer.NonResourceRuleInfo
+	)
+
+	for _, p := range pl {
+		if subjectMatches(*p, user) {
+			if p.Spec.Namespace == "*" || p.Spec.Namespace == namespace {
+				if len(p.Spec.Resource) > 0 {
+					r := authorizer.DefaultResourceRuleInfo{
+						Verbs:     getVerbs(p.Spec.Readonly),
+						APIGroups: []string{p.Spec.APIGroup},
+						Resources: []string{p.Spec.Resource},
+					}
+					var resourceRule authorizer.ResourceRuleInfo = &r
+					resourceRules = append(resourceRules, resourceRule)
+				}
+				if len(p.Spec.NonResourcePath) > 0 {
+					r := authorizer.DefaultNonResourceRuleInfo{
+						Verbs:           getVerbs(p.Spec.Readonly),
+						NonResourceURLs: []string{p.Spec.NonResourcePath},
+					}
+					var nonResourceRule authorizer.NonResourceRuleInfo = &r
+					nonResourceRules = append(nonResourceRules, nonResourceRule)
+				}
+			}
+		}
+	}
+	return resourceRules, nonResourceRules, false, nil
+}
+
+// getVerbs maps a policy's readonly flag to the verb list it grants:
+// read-only policies grant get/list/watch, all others grant every verb.
+func getVerbs(isReadOnly bool) []string {
+	if !isReadOnly {
+		return []string{"*"}
+	}
+	return []string{"get", "list", "watch"}
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl
new file mode 100644
index 000000000..14993be27
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl
@@ -0,0 +1,11 @@
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:authenticated", "nonResourcePath": "*", "readonly": true}}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:unauthenticated", "nonResourcePath": "*", "readonly": true}}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*" }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"scheduler", "namespace": "*", "resource": "pods", "readonly": true }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"scheduler", "namespace": "*", "resource": "bindings" }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "pods", "readonly": true }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "services", "readonly": true }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "endpoints", "readonly": true }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "events" }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"alice", "namespace": "projectCaribou", "resource": "*", "apiGroup": "*" }}
+{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"bob", "namespace": "projectCaribou", "resource": "*", "apiGroup": "*", "readonly": true }} \ No newline at end of file
diff --git a/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/default.go b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/default.go
new file mode 100644
index 000000000..5fa994c73
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/default.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodeidentifier
+
+import (
+ "strings"
+
+ "k8s.io/apiserver/pkg/authentication/user"
+)
+
+// NewDefaultNodeIdentifier returns a default NodeIdentifier implementation,
+// which returns isNode=true if the user groups contain the system:nodes group
+// and the user name matches the format system:node:<nodeName>, and populates
+// nodeName if isNode is true
+func NewDefaultNodeIdentifier() NodeIdentifier {
+ return defaultNodeIdentifier{}
+}
+
+// defaultNodeIdentifier implements NodeIdentifier
+type defaultNodeIdentifier struct{}
+
+// nodeUserNamePrefix is the prefix for usernames in the form `system:node:<nodeName>`
+const nodeUserNamePrefix = "system:node:"
+
+// NodeIdentity returns isNode=true if the user groups contain the system:nodes
+// group and the user name matches the format system:node:<nodeName>, and
+// populates nodeName if isNode is true
+func (defaultNodeIdentifier) NodeIdentity(u user.Info) (string, bool) {
+ // Make sure we're a node, and can parse the node name
+ if u == nil {
+ return "", false
+ }
+
+ userName := u.GetName()
+ if !strings.HasPrefix(userName, nodeUserNamePrefix) {
+ return "", false
+ }
+
+ isNode := false
+ for _, g := range u.GetGroups() {
+ if g == user.NodesGroup {
+ isNode = true
+ break
+ }
+ }
+ if !isNode {
+ return "", false
+ }
+
+ nodeName := strings.TrimPrefix(userName, nodeUserNamePrefix)
+ return nodeName, true
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go
new file mode 100644
index 000000000..df10a88a8
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodeidentifier
+
+import (
+ "k8s.io/apiserver/pkg/authentication/user"
+)
+
+// NodeIdentifier determines node information from a given user
+type NodeIdentifier interface {
+ // NodeIdentity determines node information from the given user.Info.
+ // nodeName is the name of the Node API object associated with the user.Info,
+ // and may be empty if a specific node cannot be determined.
+ // isNode is true if the user.Info represents an identity issued to a node.
+ NodeIdentity(user.Info) (nodeName string, isNode bool)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking/controller.go b/vendor/k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking/controller.go
new file mode 100644
index 000000000..c45fdfcb3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking/controller.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package legacytokentracking
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "golang.org/x/time/rate"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/wait"
+ corev1informers "k8s.io/client-go/informers/core/v1"
+ "k8s.io/client-go/kubernetes"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/clock"
+)
+
+const (
+ ConfigMapName = "kube-apiserver-legacy-service-account-token-tracking"
+ ConfigMapDataKey = "since"
+ dateFormat = "2006-01-02"
+)
+
+var (
+ queueKey = metav1.NamespaceSystem + "/" + ConfigMapName
+)
+
+// Controller maintains a timestamp value configmap `kube-apiserver-legacy-service-account-token-tracking`
+// in `kube-system` to indicates if the tracking for legacy tokens is enabled in
+// the cluster. For HA clusters, the configmap will be eventually created after
+// all controller instances have enabled the feature. When disabling this
+// feature, existing configmap will be deleted.
+type Controller struct {
+ configMapClient corev1client.ConfigMapsGetter
+ configMapInformer cache.SharedIndexInformer
+ configMapCache cache.Indexer
+ configMapSynced cache.InformerSynced
+ queue workqueue.RateLimitingInterface
+
+ // rate limiter controls the rate limit of the creation of the configmap.
+	// this is useful in multi-apiserver clusters to prevent the configmap from
+	// existing in a cluster with mixed enabled/disabled controllers. otherwise, those
+	// apiservers will fight to create/delete it until all apiservers are enabled
+	// or disabled.
+ creationRatelimiter *rate.Limiter
+ clock clock.Clock
+}
+
+// NewController returns a Controller struct.
+func NewController(cs kubernetes.Interface) *Controller {
+ return newController(cs, clock.RealClock{}, rate.NewLimiter(rate.Every(30*time.Minute), 1))
+}
+
+func newController(cs kubernetes.Interface, cl clock.Clock, limiter *rate.Limiter) *Controller {
+ informer := corev1informers.NewFilteredConfigMapInformer(cs, metav1.NamespaceSystem, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(options *metav1.ListOptions) {
+ options.FieldSelector = fields.OneTermEqualSelector("metadata.name", ConfigMapName).String()
+ })
+
+ c := &Controller{
+ configMapClient: cs.CoreV1(),
+ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "legacy_token_tracking_controller"),
+ configMapInformer: informer,
+ configMapCache: informer.GetIndexer(),
+ configMapSynced: informer.HasSynced,
+ creationRatelimiter: limiter,
+ clock: cl,
+ }
+
+ informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ c.enqueue()
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ c.enqueue()
+ },
+ DeleteFunc: func(obj interface{}) {
+ c.enqueue()
+ },
+ })
+
+ return c
+}
+
+func (c *Controller) enqueue() {
+ c.queue.Add(queueKey)
+}
+
+// Run starts the controller sync loop.
+func (c *Controller) Run(stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrash()
+ defer c.queue.ShutDown()
+
+ klog.Info("Starting legacy_token_tracking_controller")
+ defer klog.Infof("Shutting down legacy_token_tracking_controller")
+
+ go c.configMapInformer.Run(stopCh)
+ if !cache.WaitForNamedCacheSync("configmaps", stopCh, c.configMapSynced) {
+ return
+ }
+
+ go wait.Until(c.runWorker, time.Second, stopCh)
+
+ c.queue.Add(queueKey)
+
+ <-stopCh
+ klog.Info("Ending legacy_token_tracking_controller")
+}
+
+func (c *Controller) runWorker() {
+ for c.processNext() {
+ }
+}
+
+func (c *Controller) processNext() bool {
+ key, quit := c.queue.Get()
+ if quit {
+ return false
+ }
+ defer c.queue.Done(key)
+
+ if err := c.syncConfigMap(); err != nil {
+ utilruntime.HandleError(fmt.Errorf("while syncing ConfigMap %q, err: %w", key, err))
+ c.queue.AddRateLimited(key)
+ return true
+ }
+ c.queue.Forget(key)
+ return true
+}
+
+func (c *Controller) syncConfigMap() error {
+ obj, exists, err := c.configMapCache.GetByKey(queueKey)
+ if err != nil {
+ return err
+ }
+
+ now := c.clock.Now()
+ if !exists {
+ r := c.creationRatelimiter.ReserveN(now, 1)
+ if delay := r.DelayFrom(now); delay > 0 {
+ c.queue.AddAfter(queueKey, delay)
+ r.CancelAt(now)
+ return nil
+ }
+
+ if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
+ Data: map[string]string{ConfigMapDataKey: now.UTC().Format(dateFormat)},
+ }, metav1.CreateOptions{}); err != nil {
+ if apierrors.IsAlreadyExists(err) {
+ return nil
+ }
+ // don't consume the creationRatelimiter for an unsuccessful attempt
+ r.CancelAt(now)
+ return err
+ }
+ } else {
+ configMap := obj.(*corev1.ConfigMap)
+ if _, err = time.Parse(dateFormat, configMap.Data[ConfigMapDataKey]); err != nil {
+ configMap := configMap.DeepCopy()
+ if configMap.Data == nil {
+ configMap.Data = map[string]string{}
+ }
+ configMap.Data[ConfigMapDataKey] = now.UTC().Format(dateFormat)
+ if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
+ if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/OWNERS
new file mode 100644
index 000000000..55d3fb23c
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - sig-auth-authorizers-approvers
+reviewers:
+ - sig-auth-authorizers-reviewers
+labels:
+ - sig/auth
diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/config.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/config.go
new file mode 100644
index 000000000..b2bc689e4
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/config.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizer
+
+import (
+ "errors"
+ "fmt"
+
+ utilnet "k8s.io/apimachinery/pkg/util/net"
+ "k8s.io/apimachinery/pkg/util/wait"
+ authzconfig "k8s.io/apiserver/pkg/apis/apiserver"
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/authorization/authorizerfactory"
+ "k8s.io/apiserver/pkg/authorization/union"
+ webhookutil "k8s.io/apiserver/pkg/util/webhook"
+ "k8s.io/apiserver/plugin/pkg/authorizer/webhook"
+ versionedinformers "k8s.io/client-go/informers"
+ "k8s.io/kubernetes/pkg/auth/authorizer/abac"
+ "k8s.io/kubernetes/pkg/auth/nodeidentifier"
+ "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
+ "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
+ "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+ "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
+)
+
+// Config contains the data on how to authorize a request to the Kube API Server
+type Config struct {
+ // Options for ModeABAC
+
+ // Path to an ABAC policy file.
+ PolicyFile string
+
+ // Options for ModeWebhook
+
+ // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic.
+ // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed
+ // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded.
+ WebhookRetryBackoff *wait.Backoff
+
+ VersionedInformerFactory versionedinformers.SharedInformerFactory
+
+ // Optional field, custom dial function used to connect to webhook
+ CustomDial utilnet.DialFunc
+
+ // AuthorizationConfiguration stores the configuration for the Authorizer chain
+ // It will deprecate most of the above flags when GA
+ AuthorizationConfiguration *authzconfig.AuthorizationConfiguration
+}
+
+// New returns the right sort of union of multiple authorizer.Authorizer objects
+// based on the authorizationMode or an error.
+func (config Config) New() (authorizer.Authorizer, authorizer.RuleResolver, error) {
+ if len(config.AuthorizationConfiguration.Authorizers) == 0 {
+ return nil, nil, fmt.Errorf("at least one authorization mode must be passed")
+ }
+
+ var (
+ authorizers []authorizer.Authorizer
+ ruleResolvers []authorizer.RuleResolver
+ )
+
+ // Add SystemPrivilegedGroup as an authorizing group
+ superuserAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
+ authorizers = append(authorizers, superuserAuthorizer)
+
+ for _, configuredAuthorizer := range config.AuthorizationConfiguration.Authorizers {
+ // Keep cases in sync with constant list in k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go.
+ switch configuredAuthorizer.Type {
+ case authzconfig.AuthorizerType(modes.ModeNode):
+ node.RegisterMetrics()
+ graph := node.NewGraph()
+ node.AddGraphEventHandlers(
+ graph,
+ config.VersionedInformerFactory.Core().V1().Nodes(),
+ config.VersionedInformerFactory.Core().V1().Pods(),
+ config.VersionedInformerFactory.Core().V1().PersistentVolumes(),
+ config.VersionedInformerFactory.Storage().V1().VolumeAttachments(),
+ )
+ nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())
+ authorizers = append(authorizers, nodeAuthorizer)
+ ruleResolvers = append(ruleResolvers, nodeAuthorizer)
+
+ case authzconfig.AuthorizerType(modes.ModeAlwaysAllow):
+ alwaysAllowAuthorizer := authorizerfactory.NewAlwaysAllowAuthorizer()
+ authorizers = append(authorizers, alwaysAllowAuthorizer)
+ ruleResolvers = append(ruleResolvers, alwaysAllowAuthorizer)
+ case authzconfig.AuthorizerType(modes.ModeAlwaysDeny):
+ alwaysDenyAuthorizer := authorizerfactory.NewAlwaysDenyAuthorizer()
+ authorizers = append(authorizers, alwaysDenyAuthorizer)
+ ruleResolvers = append(ruleResolvers, alwaysDenyAuthorizer)
+ case authzconfig.AuthorizerType(modes.ModeABAC):
+ abacAuthorizer, err := abac.NewFromFile(config.PolicyFile)
+ if err != nil {
+ return nil, nil, err
+ }
+ authorizers = append(authorizers, abacAuthorizer)
+ ruleResolvers = append(ruleResolvers, abacAuthorizer)
+ case authzconfig.AuthorizerType(modes.ModeWebhook):
+ if config.WebhookRetryBackoff == nil {
+ return nil, nil, errors.New("retry backoff parameters for authorization webhook has not been specified")
+ }
+ clientConfig, err := webhookutil.LoadKubeconfig(*configuredAuthorizer.Webhook.ConnectionInfo.KubeConfigFile, config.CustomDial)
+ if err != nil {
+ return nil, nil, err
+ }
+ var decisionOnError authorizer.Decision
+ switch configuredAuthorizer.Webhook.FailurePolicy {
+ case authzconfig.FailurePolicyNoOpinion:
+ decisionOnError = authorizer.DecisionNoOpinion
+ case authzconfig.FailurePolicyDeny:
+ decisionOnError = authorizer.DecisionDeny
+ default:
+ return nil, nil, fmt.Errorf("unknown failurePolicy %q", configuredAuthorizer.Webhook.FailurePolicy)
+ }
+ webhookAuthorizer, err := webhook.New(clientConfig,
+ configuredAuthorizer.Webhook.SubjectAccessReviewVersion,
+ configuredAuthorizer.Webhook.AuthorizedTTL.Duration,
+ configuredAuthorizer.Webhook.UnauthorizedTTL.Duration,
+ *config.WebhookRetryBackoff,
+ decisionOnError,
+ configuredAuthorizer.Webhook.MatchConditions,
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ authorizers = append(authorizers, webhookAuthorizer)
+ ruleResolvers = append(ruleResolvers, webhookAuthorizer)
+ case authzconfig.AuthorizerType(modes.ModeRBAC):
+ rbacAuthorizer := rbac.New(
+ &rbac.RoleGetter{Lister: config.VersionedInformerFactory.Rbac().V1().Roles().Lister()},
+ &rbac.RoleBindingLister{Lister: config.VersionedInformerFactory.Rbac().V1().RoleBindings().Lister()},
+ &rbac.ClusterRoleGetter{Lister: config.VersionedInformerFactory.Rbac().V1().ClusterRoles().Lister()},
+ &rbac.ClusterRoleBindingLister{Lister: config.VersionedInformerFactory.Rbac().V1().ClusterRoleBindings().Lister()},
+ )
+ authorizers = append(authorizers, rbacAuthorizer)
+ ruleResolvers = append(ruleResolvers, rbacAuthorizer)
+ default:
+ return nil, nil, fmt.Errorf("unknown authorization mode %s specified", configuredAuthorizer.Type)
+ }
+ }
+
+ return union.New(authorizers...), union.NewRuleResolvers(ruleResolvers...), nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go
new file mode 100644
index 000000000..501b98a95
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import "k8s.io/apimachinery/pkg/util/sets"
+
+const (
+ // ModeAlwaysAllow is the mode to set all requests as authorized
+ ModeAlwaysAllow string = "AlwaysAllow"
+ // ModeAlwaysDeny is the mode to set no requests as authorized
+ ModeAlwaysDeny string = "AlwaysDeny"
+ // ModeABAC is the mode to use Attribute Based Access Control to authorize
+ ModeABAC string = "ABAC"
+ // ModeWebhook is the mode to make an external webhook call to authorize
+ ModeWebhook string = "Webhook"
+ // ModeRBAC is the mode to use Role Based Access Control to authorize
+ ModeRBAC string = "RBAC"
+ // ModeNode is an authorization mode that authorizes API requests made by kubelets.
+ ModeNode string = "Node"
+)
+
+// AuthorizationModeChoices is the list of supported authorization modes
+var AuthorizationModeChoices = []string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC, ModeWebhook, ModeRBAC, ModeNode}
+
+// IsValidAuthorizationMode returns true if the given authorization mode is a valid one for the apiserver
+func IsValidAuthorizationMode(authzMode string) bool {
+ return sets.NewString(AuthorizationModeChoices...).Has(authzMode)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/helpers.go
new file mode 100644
index 000000000..f939f7b82
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/helpers.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
+)
+
+// ResourceAttributesFrom combines the API object information and the user.Info from the context to build a full authorizer.AttributesRecord for resource access
+func ResourceAttributesFrom(user user.Info, in authorizationapi.ResourceAttributes) authorizer.AttributesRecord {
+ return authorizer.AttributesRecord{
+ User: user,
+ Verb: in.Verb,
+ Namespace: in.Namespace,
+ APIGroup: in.Group,
+ APIVersion: matchAllVersionIfEmpty(in.Version),
+ Resource: in.Resource,
+ Subresource: in.Subresource,
+ Name: in.Name,
+ ResourceRequest: true,
+ }
+}
+
+// NonResourceAttributesFrom combines the API object information and the user.Info from the context to build a full authorizer.AttributesRecord for non resource access
+func NonResourceAttributesFrom(user user.Info, in authorizationapi.NonResourceAttributes) authorizer.AttributesRecord {
+ return authorizer.AttributesRecord{
+ User: user,
+ ResourceRequest: false,
+ Path: in.Path,
+ Verb: in.Verb,
+ }
+}
+
+func convertToUserInfoExtra(extra map[string]authorizationapi.ExtraValue) map[string][]string {
+ if extra == nil {
+ return nil
+ }
+ ret := map[string][]string{}
+ for k, v := range extra {
+ ret[k] = []string(v)
+ }
+
+ return ret
+}
+
+// AuthorizationAttributesFrom takes a spec and returns the proper authz attributes to check it.
+func AuthorizationAttributesFrom(spec authorizationapi.SubjectAccessReviewSpec) authorizer.AttributesRecord {
+ userToCheck := &user.DefaultInfo{
+ Name: spec.User,
+ Groups: spec.Groups,
+ UID: spec.UID,
+ Extra: convertToUserInfoExtra(spec.Extra),
+ }
+
+ var authorizationAttributes authorizer.AttributesRecord
+ if spec.ResourceAttributes != nil {
+ authorizationAttributes = ResourceAttributesFrom(userToCheck, *spec.ResourceAttributes)
+ } else {
+ authorizationAttributes = NonResourceAttributesFrom(userToCheck, *spec.NonResourceAttributes)
+ }
+
+ return authorizationAttributes
+}
+
+// matchAllVersionIfEmpty returns a "*" if the version is unspecified
+func matchAllVersionIfEmpty(version string) string {
+ if len(version) == 0 {
+ return "*"
+ }
+ return version
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/OWNERS b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/OWNERS
new file mode 100644
index 000000000..ff6413c7f
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - sig-auth-node-isolation-approvers
+reviewers:
+ - sig-auth-node-isolation-reviewers
+labels:
+ - sig/auth
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go
new file mode 100644
index 000000000..85a4b8084
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go
@@ -0,0 +1,494 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+ "sync"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/component-helpers/storage/ephemeral"
+ "k8s.io/dynamic-resource-allocation/resourceclaim"
+ pvutil "k8s.io/kubernetes/pkg/api/v1/persistentvolume"
+ podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+ "k8s.io/kubernetes/third_party/forked/gonum/graph/simple"
+)
+
+// namedVertex implements graph.Node and remembers the type, namespace, and name of its related API object
+type namedVertex struct {
+ name string
+ namespace string
+ id int
+ vertexType vertexType
+}
+
+func newNamedVertex(vertexType vertexType, namespace, name string, id int) *namedVertex {
+ return &namedVertex{
+ vertexType: vertexType,
+ name: name,
+ namespace: namespace,
+ id: id,
+ }
+}
+func (n *namedVertex) ID() int {
+ return n.id
+}
+func (n *namedVertex) String() string {
+ if len(n.namespace) == 0 {
+ return vertexTypes[n.vertexType] + ":" + n.name
+ }
+ return vertexTypes[n.vertexType] + ":" + n.namespace + "/" + n.name
+}
+
+// destinationEdge is a graph edge that includes a denormalized reference to the final destination vertex.
+// This should only be used when there is a single leaf vertex reachable from T.
+type destinationEdge struct {
+ F graph.Node
+ T graph.Node
+ Destination graph.Node
+}
+
+func newDestinationEdge(from, to, destination graph.Node) graph.Edge {
+ return &destinationEdge{F: from, T: to, Destination: destination}
+}
+func (e *destinationEdge) From() graph.Node { return e.F }
+func (e *destinationEdge) To() graph.Node { return e.T }
+func (e *destinationEdge) Weight() float64 { return 0 }
+func (e *destinationEdge) DestinationID() int { return e.Destination.ID() }
+
+// Graph holds graph vertices and a way to look up a vertex for a particular API type/namespace/name.
+// All edges point toward the vertices representing Kubernetes nodes:
+//
+// node <- pod
+// pod <- secret,configmap,pvc
+// pvc <- pv
+// pv <- secret
+type Graph struct {
+ lock sync.RWMutex
+ graph *simple.DirectedAcyclicGraph
+ // vertices is a map of type -> namespace -> name -> vertex
+ vertices map[vertexType]namespaceVertexMapping
+
+ // destinationEdgeIndex is a map of vertex -> set of destination IDs
+ destinationEdgeIndex map[int]*intSet
+ // destinationEdgeThreshold is the minimum number of distinct destination IDs at which to maintain an index
+ destinationEdgeThreshold int
+}
+
+// namespaceVertexMapping is a map of namespace -> name -> vertex
+type namespaceVertexMapping map[string]nameVertexMapping
+
+// nameVertexMapping is a map of name -> vertex
+type nameVertexMapping map[string]*namedVertex
+
+func NewGraph() *Graph {
+ return &Graph{
+ vertices: map[vertexType]namespaceVertexMapping{},
+ graph: simple.NewDirectedAcyclicGraph(0, 0),
+
+ destinationEdgeIndex: map[int]*intSet{},
+ // experimentally determined to be the point at which iteration adds an order of magnitude to the authz check.
+ // since maintaining indexes costs time/memory while processing graph changes, we don't want to make this too low.
+ destinationEdgeThreshold: 200,
+ }
+}
+
+// vertexType indicates the type of the API object the vertex represents.
+// represented as a byte to minimize space used in the vertices.
+type vertexType byte
+
+const (
+ configMapVertexType vertexType = iota
+ nodeVertexType
+ podVertexType
+ pvcVertexType
+ pvVertexType
+ resourceClaimVertexType
+ secretVertexType
+ vaVertexType
+ serviceAccountVertexType
+)
+
+var vertexTypes = map[vertexType]string{
+ configMapVertexType: "configmap",
+ nodeVertexType: "node",
+ podVertexType: "pod",
+ pvcVertexType: "pvc",
+ pvVertexType: "pv",
+ resourceClaimVertexType: "resourceclaim",
+ secretVertexType: "secret",
+ vaVertexType: "volumeattachment",
+ serviceAccountVertexType: "serviceAccount",
+}
+
+// must be called under a write lock
+func (g *Graph) getOrCreateVertex_locked(vertexType vertexType, namespace, name string) *namedVertex {
+ if vertex, exists := g.getVertex_rlocked(vertexType, namespace, name); exists {
+ return vertex
+ }
+ return g.createVertex_locked(vertexType, namespace, name)
+}
+
+// must be called under a read lock
+func (g *Graph) getVertex_rlocked(vertexType vertexType, namespace, name string) (*namedVertex, bool) {
+ vertex, exists := g.vertices[vertexType][namespace][name]
+ return vertex, exists
+}
+
+// must be called under a write lock
+func (g *Graph) createVertex_locked(vertexType vertexType, namespace, name string) *namedVertex {
+ typedVertices, exists := g.vertices[vertexType]
+ if !exists {
+ typedVertices = namespaceVertexMapping{}
+ g.vertices[vertexType] = typedVertices
+ }
+
+ namespacedVertices, exists := typedVertices[namespace]
+ if !exists {
+ namespacedVertices = map[string]*namedVertex{}
+ typedVertices[namespace] = namespacedVertices
+ }
+
+ vertex := newNamedVertex(vertexType, namespace, name, g.graph.NewNodeID())
+ namespacedVertices[name] = vertex
+ g.graph.AddNode(vertex)
+
+ return vertex
+}
+
+// must be called under write lock
+func (g *Graph) deleteVertex_locked(vertexType vertexType, namespace, name string) {
+ vertex, exists := g.getVertex_rlocked(vertexType, namespace, name)
+ if !exists {
+ return
+ }
+
+ // find existing neighbors with a single edge (meaning we are their only neighbor)
+ neighborsToRemove := []graph.Node{}
+ edgesToRemoveFromIndexes := []graph.Edge{}
+ g.graph.VisitFrom(vertex, func(neighbor graph.Node) bool {
+ // this downstream neighbor has only one edge (which must be from us), so remove them as well
+ if g.graph.Degree(neighbor) == 1 {
+ neighborsToRemove = append(neighborsToRemove, neighbor)
+ }
+ return true
+ })
+ g.graph.VisitTo(vertex, func(neighbor graph.Node) bool {
+ if g.graph.Degree(neighbor) == 1 {
+ // this upstream neighbor has only one edge (which must be to us), so remove them as well
+ neighborsToRemove = append(neighborsToRemove, neighbor)
+ } else {
+ // decrement the destination edge index on this neighbor if the edge between us was a destination edge
+ edgesToRemoveFromIndexes = append(edgesToRemoveFromIndexes, g.graph.EdgeBetween(vertex, neighbor))
+ }
+ return true
+ })
+
+ // remove the vertex
+ g.removeVertex_locked(vertex)
+
+ // remove neighbors that are now edgeless
+ for _, neighbor := range neighborsToRemove {
+ g.removeVertex_locked(neighbor.(*namedVertex))
+ }
+
+ // remove edges from destination indexes for neighbors that dropped outbound edges
+ for _, edge := range edgesToRemoveFromIndexes {
+ g.removeEdgeFromDestinationIndex_locked(edge)
+ }
+}
+
+// must be called under write lock
+// deletes edges from a given vertex type to a specific vertex
+// will delete each orphaned "from" vertex, but will never delete the "to" vertex
+func (g *Graph) deleteEdges_locked(fromType, toType vertexType, toNamespace, toName string) {
+ // get the "to" side
+ toVert, exists := g.getVertex_rlocked(toType, toNamespace, toName)
+ if !exists {
+ return
+ }
+
+ // delete all edges between vertices of fromType and toVert
+ neighborsToRemove := []*namedVertex{}
+ edgesToRemove := []graph.Edge{}
+ g.graph.VisitTo(toVert, func(from graph.Node) bool {
+ fromVert := from.(*namedVertex)
+ if fromVert.vertexType != fromType {
+ return true
+ }
+ // this neighbor has only one edge (which must be to us), so remove them as well
+ if g.graph.Degree(fromVert) == 1 {
+ neighborsToRemove = append(neighborsToRemove, fromVert)
+ } else {
+ edgesToRemove = append(edgesToRemove, g.graph.EdgeBetween(from, toVert))
+ }
+ return true
+ })
+
+ // clean up orphaned verts
+ for _, v := range neighborsToRemove {
+ g.removeVertex_locked(v)
+ }
+
+ // remove edges and decrement destination indexes for neighbors that dropped outbound edges
+ for _, edge := range edgesToRemove {
+ g.graph.RemoveEdge(edge)
+ g.removeEdgeFromDestinationIndex_locked(edge)
+ }
+}
+
+// A fastpath for recomputeDestinationIndex_locked for "removing edge" case.
+func (g *Graph) removeEdgeFromDestinationIndex_locked(e graph.Edge) {
+ n := e.From()
+ // don't maintain indices for nodes with few edges
+ edgeCount := g.graph.Degree(n)
+ if edgeCount < g.destinationEdgeThreshold {
+ delete(g.destinationEdgeIndex, n.ID())
+ return
+ }
+
+ // decrement the nodeID->destinationID refcount in the index, if the index exists
+ index := g.destinationEdgeIndex[n.ID()]
+ if index == nil {
+ return
+ }
+ if destinationEdge, ok := e.(*destinationEdge); ok {
+ index.decrement(destinationEdge.DestinationID())
+ }
+}
+
+// A fastpath for recomputeDestinationIndex_locked for "adding edge case".
+func (g *Graph) addEdgeToDestinationIndex_locked(e graph.Edge) {
+ n := e.From()
+ index := g.destinationEdgeIndex[n.ID()]
+ if index == nil {
+ // There is no index, use the full index computation method
+ g.recomputeDestinationIndex_locked(n)
+ return
+ }
+ // fast-add the new edge to an existing index
+ if destinationEdge, ok := e.(*destinationEdge); ok {
+ index.increment(destinationEdge.DestinationID())
+ }
+}
+
+// must be called under write lock
+// removeVertex_locked removes the specified vertex from the graph and from the maintained indices.
+// It does nothing to indexes of neighbor vertices.
+func (g *Graph) removeVertex_locked(v *namedVertex) {
+ g.graph.RemoveNode(v)
+ delete(g.destinationEdgeIndex, v.ID())
+ delete(g.vertices[v.vertexType][v.namespace], v.name)
+ if len(g.vertices[v.vertexType][v.namespace]) == 0 {
+ delete(g.vertices[v.vertexType], v.namespace)
+ }
+}
+
+// must be called under write lock
+// recomputeDestinationIndex_locked recomputes the index of destination ids for the specified vertex
+func (g *Graph) recomputeDestinationIndex_locked(n graph.Node) {
+ // don't maintain indices for nodes with few edges
+ edgeCount := g.graph.Degree(n)
+ if edgeCount < g.destinationEdgeThreshold {
+ delete(g.destinationEdgeIndex, n.ID())
+ return
+ }
+
+ // get or create the index
+ index := g.destinationEdgeIndex[n.ID()]
+ if index == nil {
+ index = newIntSet()
+ } else {
+ index.reset()
+ }
+
+ // populate the index
+ g.graph.VisitFrom(n, func(dest graph.Node) bool {
+ if destinationEdge, ok := g.graph.EdgeBetween(n, dest).(*destinationEdge); ok {
+ index.increment(destinationEdge.DestinationID())
+ }
+ return true
+ })
+ g.destinationEdgeIndex[n.ID()] = index
+}
+
+// AddPod should only be called once spec.NodeName is populated.
+// It sets up edges for the following relationships (which are immutable for a pod once bound to a node):
+//
+// pod -> node
+// secret -> pod
+// configmap -> pod
+// pvc -> pod
+// svcacct -> pod
+func (g *Graph) AddPod(pod *corev1.Pod) {
+ start := time.Now()
+ defer func() {
+ graphActionsDuration.WithLabelValues("AddPod").Observe(time.Since(start).Seconds())
+ }()
+ g.lock.Lock()
+ defer g.lock.Unlock()
+
+ g.deleteVertex_locked(podVertexType, pod.Namespace, pod.Name)
+ podVertex := g.getOrCreateVertex_locked(podVertexType, pod.Namespace, pod.Name)
+ nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", pod.Spec.NodeName)
+ g.graph.SetEdge(newDestinationEdge(podVertex, nodeVertex, nodeVertex))
+
+ // Short-circuit adding edges to other resources for mirror pods.
+ // A node must never be able to create a pod that grants them permissions on other API objects.
+ // The NodeRestriction admission plugin prevents creation of such pods, but short-circuiting here gives us defense in depth.
+ if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod {
+ return
+ }
+
+ // TODO(mikedanese): If the pod doesn't mount the service account secrets,
+ // should the node still get access to the service account?
+ //
+ // ref https://github.com/kubernetes/kubernetes/issues/58790
+ if len(pod.Spec.ServiceAccountName) > 0 {
+ serviceAccountVertex := g.getOrCreateVertex_locked(serviceAccountVertexType, pod.Namespace, pod.Spec.ServiceAccountName)
+ e := newDestinationEdge(serviceAccountVertex, podVertex, nodeVertex)
+ g.graph.SetEdge(e)
+ g.addEdgeToDestinationIndex_locked(e)
+ }
+
+ podutil.VisitPodSecretNames(pod, func(secret string) bool {
+ secretVertex := g.getOrCreateVertex_locked(secretVertexType, pod.Namespace, secret)
+ e := newDestinationEdge(secretVertex, podVertex, nodeVertex)
+ g.graph.SetEdge(e)
+ g.addEdgeToDestinationIndex_locked(e)
+ return true
+ })
+
+ podutil.VisitPodConfigmapNames(pod, func(configmap string) bool {
+ configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, pod.Namespace, configmap)
+ e := newDestinationEdge(configmapVertex, podVertex, nodeVertex)
+ g.graph.SetEdge(e)
+ g.addEdgeToDestinationIndex_locked(e)
+ return true
+ })
+
+ for _, v := range pod.Spec.Volumes {
+ claimName := ""
+ if v.PersistentVolumeClaim != nil {
+ claimName = v.PersistentVolumeClaim.ClaimName
+ } else if v.Ephemeral != nil {
+ claimName = ephemeral.VolumeClaimName(pod, &v)
+ }
+ if claimName != "" {
+ pvcVertex := g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, claimName)
+ e := newDestinationEdge(pvcVertex, podVertex, nodeVertex)
+ g.graph.SetEdge(e)
+ g.addEdgeToDestinationIndex_locked(e)
+ }
+ }
+
+ for _, podResourceClaim := range pod.Spec.ResourceClaims {
+ claimName, _, err := resourceclaim.Name(pod, &podResourceClaim)
+ // Do we have a valid claim name? If yes, add an edge that grants
+ // kubelet access to that claim. An error indicates that a claim
+ // still needs to be created, nil that intentionally no claim
+ // was created and never will be because it isn't needed.
+ if err == nil && claimName != nil {
+ claimVertex := g.getOrCreateVertex_locked(resourceClaimVertexType, pod.Namespace, *claimName)
+ e := newDestinationEdge(claimVertex, podVertex, nodeVertex)
+ g.graph.SetEdge(e)
+ g.addEdgeToDestinationIndex_locked(e)
+ }
+ }
+}
+func (g *Graph) DeletePod(name, namespace string) {
+ start := time.Now()
+ defer func() {
+ graphActionsDuration.WithLabelValues("DeletePod").Observe(time.Since(start).Seconds())
+ }()
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ g.deleteVertex_locked(podVertexType, namespace, name)
+}
+
+// AddPV sets up edges for the following relationships:
+//
+// secret -> pv
+//
+// pv -> pvc
+func (g *Graph) AddPV(pv *corev1.PersistentVolume) {
+ start := time.Now()
+ defer func() {
+ graphActionsDuration.WithLabelValues("AddPV").Observe(time.Since(start).Seconds())
+ }()
+ g.lock.Lock()
+ defer g.lock.Unlock()
+
+ // clear existing edges
+ g.deleteVertex_locked(pvVertexType, "", pv.Name)
+
+ // if we have a pvc, establish new edges
+ if pv.Spec.ClaimRef != nil {
+ pvVertex := g.getOrCreateVertex_locked(pvVertexType, "", pv.Name)
+
+ // since we don't know the other end of the pvc -> pod -> node chain (or it may not even exist yet), we can't decorate these edges with kubernetes node info
+ g.graph.SetEdge(simple.Edge{F: pvVertex, T: g.getOrCreateVertex_locked(pvcVertexType, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)})
+ pvutil.VisitPVSecretNames(pv, func(namespace, secret string, kubeletVisible bool) bool {
+ // This grants access to the named secret in the same namespace as the bound PVC
+ if kubeletVisible {
+ g.graph.SetEdge(simple.Edge{F: g.getOrCreateVertex_locked(secretVertexType, namespace, secret), T: pvVertex})
+ }
+ return true
+ })
+ }
+}
+func (g *Graph) DeletePV(name string) {
+ start := time.Now()
+ defer func() {
+ graphActionsDuration.WithLabelValues("DeletePV").Observe(time.Since(start).Seconds())
+ }()
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ g.deleteVertex_locked(pvVertexType, "", name)
+}
+
+// AddVolumeAttachment sets up edges for the following relationships:
+//
+// volume attachment -> node
+func (g *Graph) AddVolumeAttachment(attachmentName, nodeName string) {
+ start := time.Now()
+ defer func() {
+ graphActionsDuration.WithLabelValues("AddVolumeAttachment").Observe(time.Since(start).Seconds())
+ }()
+ g.lock.Lock()
+ defer g.lock.Unlock()
+
+ // clear existing edges
+ g.deleteVertex_locked(vaVertexType, "", attachmentName)
+
+ // if we have a node, establish new edges
+ if len(nodeName) > 0 {
+ vaVertex := g.getOrCreateVertex_locked(vaVertexType, "", attachmentName)
+ nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", nodeName)
+ g.graph.SetEdge(newDestinationEdge(vaVertex, nodeVertex, nodeVertex))
+ }
+}
+func (g *Graph) DeleteVolumeAttachment(name string) {
+ start := time.Now()
+ defer func() {
+ graphActionsDuration.WithLabelValues("DeleteVolumeAttachment").Observe(time.Since(start).Seconds())
+ }()
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ g.deleteVertex_locked(vaVertexType, "", name)
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go
new file mode 100644
index 000000000..52a808ef7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+ "time"
+
+ "k8s.io/klog/v2"
+
+ corev1 "k8s.io/api/core/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ corev1informers "k8s.io/client-go/informers/core/v1"
+ storageinformers "k8s.io/client-go/informers/storage/v1"
+ "k8s.io/client-go/tools/cache"
+)
+
+type graphPopulator struct {
+ graph *Graph
+}
+
+func AddGraphEventHandlers(
+ graph *Graph,
+ nodes corev1informers.NodeInformer,
+ pods corev1informers.PodInformer,
+ pvs corev1informers.PersistentVolumeInformer,
+ attachments storageinformers.VolumeAttachmentInformer,
+) {
+ g := &graphPopulator{
+ graph: graph,
+ }
+
+ podHandler, _ := pods.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: g.addPod,
+ UpdateFunc: g.updatePod,
+ DeleteFunc: g.deletePod,
+ })
+
+ pvsHandler, _ := pvs.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: g.addPV,
+ UpdateFunc: g.updatePV,
+ DeleteFunc: g.deletePV,
+ })
+
+ attachHandler, _ := attachments.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: g.addVolumeAttachment,
+ UpdateFunc: g.updateVolumeAttachment,
+ DeleteFunc: g.deleteVolumeAttachment,
+ })
+
+ go cache.WaitForNamedCacheSync("node_authorizer", wait.NeverStop,
+ podHandler.HasSynced, pvsHandler.HasSynced, attachHandler.HasSynced)
+}
+
+func (g *graphPopulator) addPod(obj interface{}) {
+ g.updatePod(nil, obj)
+}
+
+func (g *graphPopulator) updatePod(oldObj, obj interface{}) {
+ pod := obj.(*corev1.Pod)
+ if len(pod.Spec.NodeName) == 0 {
+ // No node assigned
+ klog.V(5).Infof("updatePod %s/%s, no node", pod.Namespace, pod.Name)
+ return
+ }
+ if oldPod, ok := oldObj.(*corev1.Pod); ok && oldPod != nil {
+ if (pod.Spec.NodeName == oldPod.Spec.NodeName) && (pod.UID == oldPod.UID) &&
+ resourceClaimStatusesEqual(oldPod.Status.ResourceClaimStatuses, pod.Status.ResourceClaimStatuses) {
+ // Node and uid are unchanged, all object references in the pod spec are immutable respectively unmodified (claim statuses).
+ klog.V(5).Infof("updatePod %s/%s, node unchanged", pod.Namespace, pod.Name)
+ return
+ }
+ }
+
+ klog.V(4).Infof("updatePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName)
+ startTime := time.Now()
+ g.graph.AddPod(pod)
+ klog.V(5).Infof("updatePod %s/%s for node %s completed in %v", pod.Namespace, pod.Name, pod.Spec.NodeName, time.Since(startTime))
+}
+
+func resourceClaimStatusesEqual(statusA, statusB []corev1.PodResourceClaimStatus) bool {
+ if len(statusA) != len(statusB) {
+ return false
+ }
+ // In most cases, status entries only get added once and not modified.
+ // But this cannot be guaranteed, so for the sake of correctness in all
+ // cases this code here has to check.
+ for i := range statusA {
+ if statusA[i].Name != statusB[i].Name {
+ return false
+ }
+ claimNameA := statusA[i].ResourceClaimName
+ claimNameB := statusB[i].ResourceClaimName
+ if (claimNameA == nil) != (claimNameB == nil) {
+ return false
+ }
+ if claimNameA != nil && *claimNameA != *claimNameB {
+ return false
+ }
+ }
+ return true
+}
+
+func (g *graphPopulator) deletePod(obj interface{}) {
+ if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ obj = tombstone.Obj
+ }
+ pod, ok := obj.(*corev1.Pod)
+ if !ok {
+ klog.Infof("unexpected type %T", obj)
+ return
+ }
+ if len(pod.Spec.NodeName) == 0 {
+ klog.V(5).Infof("deletePod %s/%s, no node", pod.Namespace, pod.Name)
+ return
+ }
+
+ klog.V(4).Infof("deletePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName)
+ startTime := time.Now()
+ g.graph.DeletePod(pod.Name, pod.Namespace)
+ klog.V(5).Infof("deletePod %s/%s for node %s completed in %v", pod.Namespace, pod.Name, pod.Spec.NodeName, time.Since(startTime))
+}
+
+func (g *graphPopulator) addPV(obj interface{}) {
+ g.updatePV(nil, obj)
+}
+
+func (g *graphPopulator) updatePV(oldObj, obj interface{}) {
+ pv := obj.(*corev1.PersistentVolume)
+ // TODO: skip add if uid, pvc, and secrets are all identical between old and new
+ g.graph.AddPV(pv)
+}
+
+func (g *graphPopulator) deletePV(obj interface{}) {
+ if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ obj = tombstone.Obj
+ }
+ pv, ok := obj.(*corev1.PersistentVolume)
+ if !ok {
+ klog.Infof("unexpected type %T", obj)
+ return
+ }
+ g.graph.DeletePV(pv.Name)
+}
+
+func (g *graphPopulator) addVolumeAttachment(obj interface{}) {
+ g.updateVolumeAttachment(nil, obj)
+}
+
+func (g *graphPopulator) updateVolumeAttachment(oldObj, obj interface{}) {
+ attachment := obj.(*storagev1.VolumeAttachment)
+ if oldObj != nil {
+ // skip add if node name is identical
+ oldAttachment := oldObj.(*storagev1.VolumeAttachment)
+ if oldAttachment.Spec.NodeName == attachment.Spec.NodeName {
+ return
+ }
+ }
+ g.graph.AddVolumeAttachment(attachment.Name, attachment.Spec.NodeName)
+}
+
+func (g *graphPopulator) deleteVolumeAttachment(obj interface{}) {
+ if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ obj = tombstone.Obj
+ }
+ attachment, ok := obj.(*storagev1.VolumeAttachment)
+ if !ok {
+ klog.Infof("unexpected type %T", obj)
+ return
+ }
+ g.graph.DeleteVolumeAttachment(attachment.Name)
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset.go
new file mode 100644
index 000000000..57b2305da
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+// intSet maintains a map of id to refcounts
+type intSet struct {
+ // members is a map of id to refcounts
+ members map[int]int
+}
+
+func newIntSet() *intSet {
+ return &intSet{members: map[int]int{}}
+}
+
+// has returns true if the specified id has a positive refcount.
+// it is safe to call concurrently, but must not be called concurrently with any of the other methods.
+func (s *intSet) has(i int) bool {
+ if s == nil {
+ return false
+ }
+ return s.members[i] > 0
+}
+
+// reset removes all ids, effectively setting their refcounts to 0.
+// it is not thread-safe.
+func (s *intSet) reset() {
+ for k := range s.members {
+ delete(s.members, k)
+ }
+}
+
+// increment adds one to the refcount of the specified id.
+// it is not thread-safe.
+func (s *intSet) increment(i int) {
+ s.members[i]++
+}
+
+// decrement removes one from the refcount of the specified id,
+// and removes the id if the resulting refcount is <= 0.
+// it will not track refcounts lower than zero.
+// it is not thread-safe.
+func (s *intSet) decrement(i int) {
+ if s.members[i] <= 1 {
+ delete(s.members, i)
+ } else {
+ s.members[i]--
+ }
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/metrics.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/metrics.go
new file mode 100644
index 000000000..5fbf347f6
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/metrics.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+ "sync"
+
+ "k8s.io/component-base/metrics"
+ "k8s.io/component-base/metrics/legacyregistry"
+)
+
+const nodeAuthorizerSubsystem = "node_authorizer"
+
+var (
+ graphActionsDuration = metrics.NewHistogramVec(
+ &metrics.HistogramOpts{
+ Subsystem: nodeAuthorizerSubsystem,
+ Name: "graph_actions_duration_seconds",
+ Help: "Histogram of duration of graph actions in node authorizer.",
+ StabilityLevel: metrics.ALPHA,
+ // Start with 0.1ms with the last bucket being [~200ms, Inf)
+ Buckets: metrics.ExponentialBuckets(0.0001, 2, 12),
+ },
+ []string{"operation"},
+ )
+)
+
+var registerMetrics sync.Once
+
+// RegisterMetrics registers metrics for node package.
+func RegisterMetrics() {
+ registerMetrics.Do(func() {
+ legacyregistry.MustRegister(graphActionsDuration)
+ })
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go
new file mode 100644
index 000000000..b03467ffd
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go
@@ -0,0 +1,344 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/klog/v2"
+
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ "k8s.io/component-base/featuregate"
+ coordapi "k8s.io/kubernetes/pkg/apis/coordination"
+ api "k8s.io/kubernetes/pkg/apis/core"
+ resourceapi "k8s.io/kubernetes/pkg/apis/resource"
+ storageapi "k8s.io/kubernetes/pkg/apis/storage"
+ "k8s.io/kubernetes/pkg/auth/nodeidentifier"
+ "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+ "k8s.io/kubernetes/third_party/forked/gonum/graph/traverse"
+)
+
+// NodeAuthorizer authorizes requests from kubelets, with the following logic:
+// 1. If a request is not from a node (NodeIdentity() returns isNode=false), reject
+// 2. If a specific node cannot be identified (NodeIdentity() returns nodeName=""), reject
+// 3. If a request is for a secret, configmap, persistent volume, resource claim, or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node:
+// node <- configmap
+// node <- pod
+// node <- pod <- secret
+// node <- pod <- configmap
+// node <- pod <- pvc
+// node <- pod <- pvc <- pv
+// node <- pod <- pvc <- pv <- secret
+// node <- pod <- ResourceClaim
+// 4. For other resources, authorize all nodes uniformly using statically defined rules
+type NodeAuthorizer struct {
+ graph *Graph
+ identifier nodeidentifier.NodeIdentifier
+ nodeRules []rbacv1.PolicyRule
+
+ // allows overriding for testing
+ features featuregate.FeatureGate
+}
+
+var _ = authorizer.Authorizer(&NodeAuthorizer{})
+var _ = authorizer.RuleResolver(&NodeAuthorizer{})
+
+// NewAuthorizer returns a new node authorizer
+func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacv1.PolicyRule) *NodeAuthorizer {
+ return &NodeAuthorizer{
+ graph: graph,
+ identifier: identifier,
+ nodeRules: rules,
+ features: utilfeature.DefaultFeatureGate,
+ }
+}
+
+var (
+ configMapResource = api.Resource("configmaps")
+ secretResource = api.Resource("secrets")
+ pvcResource = api.Resource("persistentvolumeclaims")
+ pvResource = api.Resource("persistentvolumes")
+ resourceClaimResource = resourceapi.Resource("resourceclaims")
+ vaResource = storageapi.Resource("volumeattachments")
+ svcAcctResource = api.Resource("serviceaccounts")
+ leaseResource = coordapi.Resource("leases")
+ csiNodeResource = storageapi.Resource("csinodes")
+)
+
+func (r *NodeAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
+ if _, isNode := r.identifier.NodeIdentity(user); isNode {
+ // indicate nodes do not have fully enumerated permissions
+ return nil, nil, true, fmt.Errorf("node authorizer does not support user rule resolution")
+ }
+ return nil, nil, false, nil
+}
+
+func (r *NodeAuthorizer) Authorize(ctx context.Context, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ nodeName, isNode := r.identifier.NodeIdentity(attrs.GetUser())
+ if !isNode {
+ // reject requests from non-nodes
+ return authorizer.DecisionNoOpinion, "", nil
+ }
+ if len(nodeName) == 0 {
+ // reject requests from unidentifiable nodes
+ klog.V(2).Infof("NODE DENY: unknown node for user %q", attrs.GetUser().GetName())
+ return authorizer.DecisionNoOpinion, fmt.Sprintf("unknown node for user %q", attrs.GetUser().GetName()), nil
+ }
+
+ // subdivide access to specific resources
+ if attrs.IsResourceRequest() {
+ requestResource := schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()}
+ switch requestResource {
+ case secretResource:
+ return r.authorizeReadNamespacedObject(nodeName, secretVertexType, attrs)
+ case configMapResource:
+ return r.authorizeReadNamespacedObject(nodeName, configMapVertexType, attrs)
+ case pvcResource:
+ if attrs.GetSubresource() == "status" {
+ return r.authorizeStatusUpdate(nodeName, pvcVertexType, attrs)
+ }
+ return r.authorizeGet(nodeName, pvcVertexType, attrs)
+ case pvResource:
+ return r.authorizeGet(nodeName, pvVertexType, attrs)
+ case resourceClaimResource:
+ return r.authorizeGet(nodeName, resourceClaimVertexType, attrs)
+ case vaResource:
+ return r.authorizeGet(nodeName, vaVertexType, attrs)
+ case svcAcctResource:
+ return r.authorizeCreateToken(nodeName, serviceAccountVertexType, attrs)
+ case leaseResource:
+ return r.authorizeLease(nodeName, attrs)
+ case csiNodeResource:
+ return r.authorizeCSINode(nodeName, attrs)
+ }
+
+ }
+
+ // Access to other resources is not subdivided, so just evaluate against the statically defined node rules
+ if rbac.RulesAllow(attrs, r.nodeRules...) {
+ return authorizer.DecisionAllow, "", nil
+ }
+ return authorizer.DecisionNoOpinion, "", nil
+}
+
+// authorizeStatusUpdate authorizes get/update/patch requests to status subresources of the specified type if they are related to the specified node
+func (r *NodeAuthorizer) authorizeStatusUpdate(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ switch attrs.GetVerb() {
+ case "update", "patch":
+ // ok
+ default:
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only get/update/patch this type", nil
+ }
+
+ if attrs.GetSubresource() != "status" {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only update status subresource", nil
+ }
+
+ return r.authorize(nodeName, startingType, attrs)
+}
+
+// authorizeGet authorizes "get" requests to objects of the specified type if they are related to the specified node
+func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ if attrs.GetVerb() != "get" {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only get individual resources of this type", nil
+ }
+ if len(attrs.GetSubresource()) > 0 {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "cannot get subresource", nil
+ }
+ return r.authorize(nodeName, startingType, attrs)
+}
+
+// authorizeReadNamespacedObject authorizes "get", "list" and "watch" requests to single objects of a
+// specified types if they are related to the specified node.
+func (r *NodeAuthorizer) authorizeReadNamespacedObject(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ switch attrs.GetVerb() {
+ case "get", "list", "watch":
+ //ok
+ default:
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only read resources of this type", nil
+ }
+
+ if len(attrs.GetSubresource()) > 0 {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "cannot read subresource", nil
+ }
+ if len(attrs.GetNamespace()) == 0 {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only read namespaced object of this type", nil
+ }
+ return r.authorize(nodeName, startingType, attrs)
+}
+
+func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ if len(attrs.GetName()) == 0 {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "No Object name found", nil
+ }
+
+ ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName())
+ if err != nil {
+ klog.V(2).InfoS("NODE DENY", "err", err)
+ return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node '%s' and this object", nodeName), nil
+ }
+ if !ok {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node '%s' and this object", nodeName), nil
+ }
+ return authorizer.DecisionAllow, "", nil
+}
+
+// authorizeCreateToken authorizes "create" requests to serviceaccounts 'token'
+// subresource of pods running on a node
+func (r *NodeAuthorizer) authorizeCreateToken(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ if attrs.GetVerb() != "create" || len(attrs.GetName()) == 0 {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only create tokens for individual service accounts", nil
+ }
+
+ if attrs.GetSubresource() != "token" {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only create token subresource of serviceaccount", nil
+ }
+
+ ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName())
+ if err != nil {
+ klog.V(2).Infof("NODE DENY: %v", err)
+ return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node '%s' and this object", nodeName), nil
+ }
+ if !ok {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, fmt.Sprintf("no relationship found between node '%s' and this object", nodeName), nil
+ }
+ return authorizer.DecisionAllow, "", nil
+}
+
+// authorizeLease authorizes node requests to coordination.k8s.io/leases.
+func (r *NodeAuthorizer) authorizeLease(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ // allowed verbs: get, create, update, patch, delete
+ verb := attrs.GetVerb()
+ switch verb {
+ case "get", "create", "update", "patch", "delete":
+ //ok
+ default:
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only get, create, update, patch, or delete a node lease", nil
+ }
+
+ // the request must be against the system namespace reserved for node leases
+ if attrs.GetNamespace() != api.NamespaceNodeLease {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, fmt.Sprintf("can only access leases in the %q system namespace", api.NamespaceNodeLease), nil
+ }
+
+ // the request must come from a node with the same name as the lease
+ // note we skip this check for create, since the authorizer doesn't know the name on create
+ // the noderestriction admission plugin is capable of performing this check at create time
+ if verb != "create" && attrs.GetName() != nodeName {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only access node lease with the same name as the requesting node", nil
+ }
+
+ return authorizer.DecisionAllow, "", nil
+}
+
+// authorizeCSINode authorizes node requests to CSINode storage.k8s.io/csinodes
+func (r *NodeAuthorizer) authorizeCSINode(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+ // allowed verbs: get, create, update, patch, delete
+ verb := attrs.GetVerb()
+ switch verb {
+ case "get", "create", "update", "patch", "delete":
+ //ok
+ default:
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only get, create, update, patch, or delete a CSINode", nil
+ }
+
+ if len(attrs.GetSubresource()) > 0 {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "cannot authorize CSINode subresources", nil
+ }
+
+ // the request must come from a node with the same name as the CSINode
+ // note we skip this check for create, since the authorizer doesn't know the name on create
+ // the noderestriction admission plugin is capable of performing this check at create time
+ if verb != "create" && attrs.GetName() != nodeName {
+ klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
+ return authorizer.DecisionNoOpinion, "can only access CSINode with the same name as the requesting node", nil
+ }
+
+ return authorizer.DecisionAllow, "", nil
+}
+
+// hasPathFrom returns true if there is a directed path from the specified type/namespace/name to the specified Node
+func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, startingNamespace, startingName string) (bool, error) {
+ r.graph.lock.RLock()
+ defer r.graph.lock.RUnlock()
+
+ nodeVertex, exists := r.graph.getVertex_rlocked(nodeVertexType, "", nodeName)
+ if !exists {
+ return false, fmt.Errorf("unknown node '%s' cannot get %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName)
+ }
+
+ startingVertex, exists := r.graph.getVertex_rlocked(startingType, startingNamespace, startingName)
+ if !exists {
+ return false, fmt.Errorf("node '%s' cannot get unknown %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName)
+ }
+
+ // Fast check to see if we know of a destination edge
+ if r.graph.destinationEdgeIndex[startingVertex.ID()].has(nodeVertex.ID()) {
+ return true, nil
+ }
+
+ found := false
+ traversal := &traverse.VisitingDepthFirst{
+ EdgeFilter: func(edge graph.Edge) bool {
+ if destinationEdge, ok := edge.(*destinationEdge); ok {
+ if destinationEdge.DestinationID() != nodeVertex.ID() {
+ // Don't follow edges leading to other nodes
+ return false
+ }
+ // We found an edge leading to the node we want
+ found = true
+ }
+ // Visit this edge
+ return true
+ },
+ }
+ traversal.Walk(r.graph.graph, startingVertex, func(n graph.Node) bool {
+ if n.ID() == nodeVertex.ID() {
+ // We found the node we want
+ found = true
+ }
+ // Stop visiting if we've found the node we want
+ return found
+ })
+ if !found {
+ return false, fmt.Errorf("node '%s' cannot get %s %s/%s, no relationship to this object was found in the node authorizer graph", nodeName, vertexTypes[startingType], startingNamespace, startingName)
+ }
+ return true, nil
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go
new file mode 100644
index 000000000..a31d1f95a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go
@@ -0,0 +1,505 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bootstrappolicy
+
+import (
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ capi "k8s.io/api/certificates/v1beta1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ genericfeatures "k8s.io/apiserver/pkg/features"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
+ "k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking"
+ "k8s.io/kubernetes/pkg/features"
+)
+
+const saRolePrefix = "system:controller:"
+
+func addControllerRole(controllerRoles *[]rbacv1.ClusterRole, controllerRoleBindings *[]rbacv1.ClusterRoleBinding, role rbacv1.ClusterRole) {
+ if !strings.HasPrefix(role.Name, saRolePrefix) {
+ klog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix)
+ }
+
+ for _, existingRole := range *controllerRoles {
+ if role.Name == existingRole.Name {
+ klog.Fatalf("role %q was already registered", role.Name)
+ }
+ }
+
+ *controllerRoles = append(*controllerRoles, role)
+ addClusterRoleLabel(*controllerRoles)
+
+ *controllerRoleBindings = append(*controllerRoleBindings,
+ rbacv1helpers.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie())
+ addClusterRoleBindingLabel(*controllerRoleBindings)
+}
+
+func eventsRule() rbacv1.PolicyRule {
+ return rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup, eventsGroup).Resources("events").RuleOrDie()
+}
+
+func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) {
+ // controllerRoles is a slice of roles used for controllers
+ controllerRoles := []rbacv1.ClusterRole{}
+ // controllerRoleBindings is a slice of roles used for controllers
+ controllerRoleBindings := []rbacv1.ClusterRoleBinding{}
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
+ role := rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ rbacv1helpers.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(),
+ },
+ }
+
+ role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "watch", "list").Groups("storage.k8s.io").Resources("csidrivers").RuleOrDie())
+ role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "watch", "list").Groups("storage.k8s.io").Resources("csinodes").RuleOrDie())
+
+ return role
+ }())
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "clusterrole-aggregation-controller"},
+ Rules: []rbacv1.PolicyRule{
+ // this controller must have full permissions on clusterroles to allow it to mutate them in any way
+ rbacv1helpers.NewRule("escalate", "get", "list", "watch", "update", "patch").Groups(rbacGroup).Resources("clusterroles").RuleOrDie(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cronjob-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "deployment-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
+ // TODO: remove "update" once
+ // https://github.com/kubernetes/kubernetes/issues/36897 is resolved.
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
+ role := rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "disruption-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(),
+ rbacv1helpers.NewRule("get").Groups("*").Resources("*/scale").RuleOrDie(),
+ eventsRule(),
+ },
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
+ role.Rules = append(role.Rules, rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie())
+ }
+ return role
+ }())
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpoint-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpointslice-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods", "nodes").RuleOrDie(),
+ // The controller needs to be able to set a service's finalizers to be able to create an EndpointSlice
+ // resource that is owned by the service and sets blockOwnerDeletion=true in its ownerRef.
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpointslicemirroring-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
+ // The controller needs to be able to set a service's finalizers to be able to create an EndpointSlice
+ // resource that is owned by the service and sets blockOwnerDeletion=true in its ownerRef.
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/finalizers").RuleOrDie(),
+ // The controller needs to be able to set a service's finalizers to be able to create an EndpointSlice
+ // resource that is owned by the endpoint and sets blockOwnerDeletion=true in its ownerRef.
+ // see https://github.com/openshift/kubernetes/blob/8691466059314c3f7d6dcffcbb76d14596ca716c/pkg/controller/endpointslicemirroring/utils.go#L87-L88
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "expand-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
+ rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ // glusterfs
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ephemeral-volume-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+
+ if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resource-claim-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
+ rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ }
+
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
+ Rules: []rbacv1.PolicyRule{
+ // the GC controller needs to run list/watches, selective gets, and updates against any resource
+ rbacv1helpers.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(),
+ rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ // allow listing resource, custom, and external metrics
+ rbacv1helpers.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list").Groups(externalMetricsGroup).Resources("*").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "job-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "namespace-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
+ role := rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+ // used for pod deletion
+ rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "create", "update").Groups(networkingGroup).Resources("clustercidrs").RuleOrDie(),
+ eventsRule(),
+ },
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
+ role.Rules = append(role.Rules, rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("pods").RuleOrDie())
+ }
+ return role
+ }())
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+
+ // glusterfs
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services").RuleOrDie(),
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
+ // openstack
+ rbacv1helpers.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+
+ // recyclerClient.WatchPod
+ rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
+
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
+ role := rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ },
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
+ role.Rules = append(role.Rules, rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie())
+ }
+ return role
+ }())
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replicaset-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replication-controller"},
+ Rules: []rbacv1.PolicyRule{
+ // 1.0 controllers needed get, update, so without these old controllers break on new servers
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"},
+ Rules: []rbacv1.PolicyRule{
+ // quota can count quota on anything for reconciliation, so it needs full viewing powers
+ rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "route-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-account-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("services/status").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-cidrs-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "patch", "update").Groups(networkingGroup).Resources("servicecidrs").RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "update").Groups(networkingGroup).Resources("servicecidrs/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "update").Groups(networkingGroup).Resources("servicecidrs/status").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(networkingGroup).Resources("ipaddresses").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ }
+ addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
+ role := rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/finalizers").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "create", "delete", "update", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "create", "delete", "update", "patch", "list", "watch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ eventsRule(),
+ },
+ }
+
+ if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
+ role.Rules = append(role.Rules, rbacv1helpers.NewRule("update", "delete").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie())
+ }
+
+ return role
+ }())
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ttl-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("update", "patch", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "certificate-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(),
+ rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeAPIServerClientKubeletSignerName).RuleOrDie(),
+ rbacv1helpers.NewRule("sign").Groups(certificatesGroup).Resources("signers").Names(
+ capi.LegacyUnknownSignerName,
+ capi.KubeAPIServerClientSignerName,
+ capi.KubeAPIServerClientKubeletSignerName,
+ capi.KubeletServingSignerName,
+ ).RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ rbacv1helpers.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pv-protection-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ttl-after-finished-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(batchGroup).Resources("jobs").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "root-ca-cert-publisher"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("create", "update").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidatingAdmissionPolicy) {
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "validatingadmissionpolicy-status-controller"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(admissionRegistrationGroup).
+ Resources("validatingadmissionpolicies").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "patch", "update").Groups(admissionRegistrationGroup).
+ Resources("validatingadmissionpolicies/status").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StorageVersionAPI) &&
+ utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerIdentity) {
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "storage-version-garbage-collector"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "patch", "update", "delete").Groups(internalAPIServerGroup).
+ Resources("storageversions").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "patch", "update").Groups(internalAPIServerGroup).
+ Resources("storageversions/status").RuleOrDie(),
+ },
+ })
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(features.LegacyServiceAccountTokenCleanUp) {
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "legacy-service-account-token-cleaner"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names(legacytokentracking.ConfigMapName).RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
+ },
+ })
+ }
+
+ return controllerRoles, controllerRoleBindings
+}
+
+// ControllerRoles returns the cluster roles used by controllers
+func ControllerRoles() []rbacv1.ClusterRole {
+ controllerRoles, _ := buildControllerRoles()
+ return controllerRoles
+}
+
+// ControllerRoleBindings returns the role bindings used by controllers
+func ControllerRoleBindings() []rbacv1.ClusterRoleBinding {
+ _, controllerRoleBindings := buildControllerRoles()
+ return controllerRoleBindings
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go
new file mode 100644
index 000000000..745b498ad
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bootstrappolicy
+
+import (
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apiserver/pkg/authentication/user"
+ rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
+)
+
+var (
+ // namespaceRoles is a map of namespace to slice of roles to create
+ namespaceRoles = map[string][]rbacv1.Role{}
+
+ // namespaceRoleBindings is a map of namespace to slice of roleBindings to create
+ namespaceRoleBindings = map[string][]rbacv1.RoleBinding{}
+)
+
+func addNamespaceRole(namespace string, role rbacv1.Role) {
+ if !strings.HasPrefix(namespace, "kube-") {
+ klog.Fatalf(`roles can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace)
+ }
+
+ existingRoles := namespaceRoles[namespace]
+ for _, existingRole := range existingRoles {
+ if role.Name == existingRole.Name {
+ klog.Fatalf("role %q was already registered in %q", role.Name, namespace)
+ }
+ }
+
+ role.Namespace = namespace
+ addDefaultMetadata(&role)
+ existingRoles = append(existingRoles, role)
+ namespaceRoles[namespace] = existingRoles
+}
+
+func addNamespaceRoleBinding(namespace string, roleBinding rbacv1.RoleBinding) {
+ if !strings.HasPrefix(namespace, "kube-") {
+ klog.Fatalf(`rolebindings can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace)
+ }
+
+ existingRoleBindings := namespaceRoleBindings[namespace]
+ for _, existingRoleBinding := range existingRoleBindings {
+ if roleBinding.Name == existingRoleBinding.Name {
+ klog.Fatalf("rolebinding %q was already registered in %q", roleBinding.Name, namespace)
+ }
+ }
+
+ roleBinding.Namespace = namespace
+ addDefaultMetadata(&roleBinding)
+ existingRoleBindings = append(existingRoleBindings, roleBinding)
+ namespaceRoleBindings[namespace] = existingRoleBindings
+}
+
+func init() {
+ addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
+ // role for finding authentication config info for starting a server
+ ObjectMeta: metav1.ObjectMeta{Name: "extension-apiserver-authentication-reader"},
+ Rules: []rbacv1.PolicyRule{
+ // this particular config map is exposed and contains authentication configuration information
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
+ },
+ })
+ addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
+ // role for the bootstrap signer to be able to inspect kube-system secrets
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
+ },
+ })
+ addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
+ // role for the cloud providers to access/create kube-system configmaps
+ // Deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy.
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cloud-provider"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
+ },
+ })
+ addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
+ // role for the token-cleaner to be able to remove secrets, but only in kube-system
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "token-cleaner"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ // TODO: Create util on Role+Binding for leader locking if more cases evolve.
+ addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
+ // role for the leader locking on supplied configmap
+ ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-controller-manager"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-controller-manager").RuleOrDie(),
+ },
+ })
+ addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
+ // role for the leader locking on supplied configmap
+ ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-scheduler"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-scheduler").RuleOrDie(),
+ },
+ })
+
+ delegatedAuthBinding := rbacv1helpers.NewRoleBinding("extension-apiserver-authentication-reader", metav1.NamespaceSystem).Users(user.KubeControllerManager, user.KubeScheduler).BindingOrDie()
+ delegatedAuthBinding.Name = "system::extension-apiserver-authentication-reader"
+ addNamespaceRoleBinding(metav1.NamespaceSystem, delegatedAuthBinding)
+
+ addNamespaceRoleBinding(metav1.NamespaceSystem,
+ rbacv1helpers.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).Users(user.KubeControllerManager).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie())
+ addNamespaceRoleBinding(metav1.NamespaceSystem,
+ rbacv1helpers.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).Users(user.KubeScheduler).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie())
+ addNamespaceRoleBinding(metav1.NamespaceSystem,
+ rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
+ // cloud-provider is deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy.
+ addNamespaceRoleBinding(metav1.NamespaceSystem,
+ rbacv1helpers.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie())
+ addNamespaceRoleBinding(metav1.NamespaceSystem,
+ rbacv1helpers.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie())
+
+ addNamespaceRole(metav1.NamespacePublic, rbacv1.Role{
+ // role for the bootstrap signer to be able to write its configmap
+ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("configmaps").Names("cluster-info").RuleOrDie(),
+ eventsRule(),
+ },
+ })
+ addNamespaceRoleBinding(metav1.NamespacePublic,
+ rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
+
+}
+
+// NamespaceRoles returns a map of namespace to slice of roles to create
+func NamespaceRoles() map[string][]rbacv1.Role {
+ return namespaceRoles
+}
+
+// NamespaceRoleBindings returns a map of namespace to slice of roles to create
+func NamespaceRoleBindings() map[string][]rbacv1.RoleBinding {
+ return namespaceRoleBindings
+}
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
new file mode 100644
index 000000000..2e0dbdbad
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
@@ -0,0 +1,667 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bootstrappolicy
+
+import (
+ capi "k8s.io/api/certificates/v1beta1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apiserver/pkg/authentication/serviceaccount"
+ "k8s.io/apiserver/pkg/authentication/user"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
+ "k8s.io/kubernetes/pkg/features"
+)
+
+// Write and other vars are slices of the allowed verbs.
+// Label and Annotation are default maps of bootstrappolicy.
+var (
+ Write = []string{"create", "update", "patch", "delete", "deletecollection"}
+ ReadWrite = []string{"get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"}
+ Read = []string{"get", "list", "watch"}
+ ReadUpdate = []string{"get", "list", "watch", "update", "patch"}
+
+ Label = map[string]string{"kubernetes.io/bootstrapping": "rbac-defaults"}
+ Annotation = map[string]string{rbacv1.AutoUpdateAnnotationKey: "true"}
+)
+
+const (
+ legacyGroup = ""
+ appsGroup = "apps"
+ authenticationGroup = "authentication.k8s.io"
+ authorizationGroup = "authorization.k8s.io"
+ autoscalingGroup = "autoscaling"
+ batchGroup = "batch"
+ certificatesGroup = "certificates.k8s.io"
+ coordinationGroup = "coordination.k8s.io"
+ discoveryGroup = "discovery.k8s.io"
+ extensionsGroup = "extensions"
+ policyGroup = "policy"
+ rbacGroup = "rbac.authorization.k8s.io"
+ resourceGroup = "resource.k8s.io"
+ storageGroup = "storage.k8s.io"
+ resMetricsGroup = "metrics.k8s.io"
+ customMetricsGroup = "custom.metrics.k8s.io"
+ externalMetricsGroup = "external.metrics.k8s.io"
+ networkingGroup = "networking.k8s.io"
+ eventsGroup = "events.k8s.io"
+ internalAPIServerGroup = "internal.apiserver.k8s.io"
+ admissionRegistrationGroup = "admissionregistration.k8s.io"
+)
+
+func addDefaultMetadata(obj runtime.Object) {
+ metadata, err := meta.Accessor(obj)
+ if err != nil {
+ // if this happens, then some static code is broken
+ panic(err)
+ }
+
+ labels := metadata.GetLabels()
+ if labels == nil {
+ labels = map[string]string{}
+ }
+ for k, v := range Label {
+ labels[k] = v
+ }
+ metadata.SetLabels(labels)
+
+ annotations := metadata.GetAnnotations()
+ if annotations == nil {
+ annotations = map[string]string{}
+ }
+ for k, v := range Annotation {
+ annotations[k] = v
+ }
+ metadata.SetAnnotations(annotations)
+}
+
+func addClusterRoleLabel(roles []rbacv1.ClusterRole) {
+ for i := range roles {
+ addDefaultMetadata(&roles[i])
+ }
+ return
+}
+
+func addClusterRoleBindingLabel(rolebindings []rbacv1.ClusterRoleBinding) {
+ for i := range rolebindings {
+ addDefaultMetadata(&rolebindings[i])
+ }
+ return
+}
+
+// NodeRules returns node policy rules, it is slice of rbacv1.PolicyRule.
+func NodeRules() []rbacv1.PolicyRule {
+ nodePolicyRules := []rbacv1.PolicyRule{
+ // Needed to check API access. These creates are non-mutating
+ rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
+
+ // Needed to build serviceLister, to populate env vars for services
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(),
+
+ // Nodes can register Node API objects and report status.
+ // Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object.
+ rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+ rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+
+ // TODO: restrict to the bound node as creator in the NodeRestrictions admission plugin
+ rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
+
+ // TODO: restrict to pods scheduled on the bound node once field selectors are supported by list/watch authorization
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(),
+
+ // Needed for the node to create/delete mirror pods.
+ // Use the NodeRestriction admission plugin to limit a node to creating/deleting mirror pods bound to itself.
+ rbacv1helpers.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ // Needed for the node to report status of pods it is running.
+ // Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself.
+ rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
+ // Needed for the node to create pod evictions.
+ // Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself.
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
+
+ // Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs
+ // Needed for configmap volume and envs
+ // Use the Node authorization mode to limit a node to get secrets/configmaps referenced by pods bound to itself.
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(),
+ // Needed for persistent volumes
+ // Use the Node authorization mode to limit a node to get pv/pvc objects referenced by pods bound to itself.
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
+
+ // TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node
+ // Needed for glusterfs volumes
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
+ // Used to create a certificatesigningrequest for a node-specific client certificate, and watch
+ // for it to be signed. This allows the kubelet to rotate it's own certificate.
+ rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
+
+ // Leases
+ rbacv1helpers.NewRule("get", "create", "update", "patch", "delete").Groups("coordination.k8s.io").Resources("leases").RuleOrDie(),
+
+ // CSI
+ rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(),
+
+ // Use the Node authorization to limit a node to create tokens for service accounts running on that node
+ // Use the NodeRestriction admission plugin to limit a node to create tokens bound to pods on that node
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(),
+ }
+
+ // Use the Node authorization mode to limit a node to update status of pvc objects referenced by pods bound to itself.
+ // Use the NodeRestriction admission plugin to limit a node to just update the status stanza.
+ pvcStatusPolicyRule := rbacv1helpers.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie()
+ nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule)
+
+ // CSI
+ csiDriverRule := rbacv1helpers.NewRule("get", "watch", "list").Groups("storage.k8s.io").Resources("csidrivers").RuleOrDie()
+ nodePolicyRules = append(nodePolicyRules, csiDriverRule)
+ csiNodeInfoRule := rbacv1helpers.NewRule("get", "create", "update", "patch", "delete").Groups("storage.k8s.io").Resources("csinodes").RuleOrDie()
+ nodePolicyRules = append(nodePolicyRules, csiNodeInfoRule)
+
+ // RuntimeClass
+ nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "list", "watch").Groups("node.k8s.io").Resources("runtimeclasses").RuleOrDie())
+
+ // DRA Resource Claims
+ if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
+ nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get").Groups(resourceGroup).Resources("resourceclaims").RuleOrDie())
+ }
+ // Kubelet needs access to ClusterTrustBundles to support the pemTrustAnchors volume type.
+ if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundle) {
+ nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "list", "watch").Groups(certificatesGroup).Resources("clustertrustbundles").RuleOrDie())
+ }
+
+ return nodePolicyRules
+}
+
+// ClusterRoles returns the cluster roles to bootstrap an API server with
+func ClusterRoles() []rbacv1.ClusterRole {
+ roles := []rbacv1.ClusterRole{
+ {
+ // a "root" role which can do absolutely anything
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
+ rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(),
+ },
+ },
+ {
+ // a role which provides just enough power to determine if the server is
+ // ready and discover API versions for negotiation
+ ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get").URLs(
+ "/livez", "/readyz", "/healthz",
+ "/version", "/version/",
+ "/openapi", "/openapi/*",
+ "/api", "/api/*",
+ "/apis", "/apis/*",
+ ).RuleOrDie(),
+ },
+ },
+ {
+ // a role which provides minimal read access to the monitoring endpoints
+ // (i.e. /metrics, /livez/*, /readyz/*, /healthz/*, /livez, /readyz, /healthz)
+ // The splatted health check endpoints allow read access to individual health check
+ // endpoints which may contain more sensitive cluster information information
+ ObjectMeta: metav1.ObjectMeta{Name: "system:monitoring"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get").URLs(
+ "/metrics", "/metrics/slis",
+ "/livez", "/readyz", "/healthz",
+ "/livez/*", "/readyz/*", "/healthz/*",
+ ).RuleOrDie(),
+ },
+ },
+ }
+
+ basicUserRules := []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("selfsubjectreviews").RuleOrDie(),
+ }
+
+ roles = append(roles, []rbacv1.ClusterRole{
+ {
+ // a role which provides minimal resource access to allow a "normal" user to learn information about themselves
+ ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"},
+ Rules: basicUserRules,
+ },
+ {
+ // a role which provides just enough power read insensitive cluster information
+ ObjectMeta: metav1.ObjectMeta{Name: "system:public-info-viewer"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get").URLs(
+ "/livez", "/readyz", "/healthz", "/version", "/version/",
+ ).RuleOrDie(),
+ },
+ },
+ {
+ // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
+ ObjectMeta: metav1.ObjectMeta{Name: "admin"},
+ AggregationRule: &rbacv1.AggregationRule{
+ ClusterRoleSelectors: []metav1.LabelSelector{
+ {MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
+ },
+ },
+ },
+ {
+ // a role for a namespace level editor. It grants access to all user level actions in a namespace.
+ // It does not grant powers for "privileged" resources which are domain of the system: `/status`
+ // subresources or `quota`/`limits` which are used to control namespaces
+ ObjectMeta: metav1.ObjectMeta{Name: "edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
+ AggregationRule: &rbacv1.AggregationRule{
+ ClusterRoleSelectors: []metav1.LabelSelector{
+ {MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
+ },
+ },
+ },
+ {
+ // a role for namespace level viewing. It grants Read-only access to non-escalating resources in
+ // a namespace.
+ ObjectMeta: metav1.ObjectMeta{Name: "view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
+ AggregationRule: &rbacv1.AggregationRule{
+ ClusterRoleSelectors: []metav1.LabelSelector{
+ {MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}},
+ },
+ },
+ },
+ {
+ // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
+ ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
+ Rules: []rbacv1.PolicyRule{
+ // additional admin powers
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(),
+ rbacv1helpers.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(),
+ },
+ },
+ {
+ // a role for a namespace level editor. It grants access to all user level actions in a namespace.
+ // It does not grant powers for "privileged" resources which are domain of the system: `/status`
+ // subresources or `quota`/`limits` which are used to control namespaces
+ ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
+ Rules: []rbacv1.PolicyRule{
+ // Allow read on escalating resources
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods/attach", "pods/proxy", "pods/exec", "pods/portforward", "secrets", "services/proxy").RuleOrDie(),
+ rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
+ rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
+ "services", "services/proxy", "persistentvolumeclaims", "configmaps", "secrets", "events").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(appsGroup).Resources(
+ "statefulsets", "statefulsets/scale",
+ "daemonsets",
+ "deployments", "deployments/scale", "deployments/rollback",
+ "replicasets", "replicasets/scale").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(extensionsGroup).Resources("daemonsets",
+ "deployments", "deployments/scale", "deployments/rollback", "ingresses",
+ "replicasets", "replicasets/scale", "replicationcontrollers/scale",
+ "networkpolicies").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Write...).Groups(networkingGroup).Resources("networkpolicies", "ingresses").RuleOrDie(),
+
+ rbacv1helpers.NewRule(ReadWrite...).Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+ },
+ },
+ {
+ // a role for namespace level viewing. It grants Read-only access to non-escalating resources in
+ // a namespace.
+ ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
+ "services", "services/status", "endpoints", "persistentvolumeclaims", "persistentvolumeclaims/status", "configmaps").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
+ "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
+ // read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
+ // indicator of which namespaces you have access to.
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources(
+ "controllerrevisions",
+ "statefulsets", "statefulsets/status", "statefulsets/scale",
+ "daemonsets", "daemonsets/status",
+ "deployments", "deployments/status", "deployments/scale",
+ "replicasets", "replicasets/status", "replicasets/scale").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers", "horizontalpodautoscalers/status").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "cronjobs/status", "jobs/status").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "daemonsets/status", "deployments", "deployments/scale", "deployments/status",
+ "ingresses", "ingresses/status", "replicasets", "replicasets/scale", "replicasets/status", "replicationcontrollers/scale",
+ "networkpolicies").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets", "poddisruptionbudgets/status").RuleOrDie(),
+
+ rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies", "ingresses", "ingresses/status").RuleOrDie(),
+ },
+ },
+ {
+ // a role to use for heapster's connections back to the API server
+ ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(),
+ },
+ },
+ {
+ // a role for nodes to use to have the access they need for running pods
+ ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName},
+ Rules: NodeRules(),
+ },
+ {
+ // a role to use for node-problem-detector access. It does not get bound to default location since
+ // deployment locations can reasonably vary.
+ ObjectMeta: metav1.ObjectMeta{Name: "system:node-problem-detector"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+ eventsRule(),
+ },
+ },
+ {
+ // a role to use for full access to the kubelet API
+ ObjectMeta: metav1.ObjectMeta{Name: "system:kubelet-api-admin"},
+ Rules: []rbacv1.PolicyRule{
+ // Allow read-only access to the Node API objects
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ // Allow all API calls to the nodes
+ rbacv1helpers.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/stats", "nodes/log").RuleOrDie(),
+ },
+ },
+ {
+ // a role to use for bootstrapping a node's client certificates
+ ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"},
+ Rules: []rbacv1.PolicyRule{
+ // used to create a certificatesigningrequest for a node-specific client certificate, and watch for it to be signed
+ rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
+ },
+ },
+ {
+ // a role to use for allowing authentication and authorization delegation
+ ObjectMeta: metav1.ObjectMeta{Name: "system:auth-delegator"},
+ Rules: []rbacv1.PolicyRule{
+ // These creates are non-mutating
+ rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
+ },
+ },
+ {
+ // a role to use for the API registry, summarization, and proxy handling
+ ObjectMeta: metav1.ObjectMeta{Name: "system:kube-aggregator"},
+ Rules: []rbacv1.PolicyRule{
+ // it needs to see all services so that it knows whether the ones it points to exist or not
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
+ },
+ },
+ {
+ // a role to use for bootstrapping the kube-controller-manager so it can create the shared informers
+ // service accounts, and secrets that we need to create separate identities for other controllers
+ ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"},
+ Rules: []rbacv1.PolicyRule{
+ eventsRule(),
+ // Needed for leader election.
+ rbacv1helpers.NewRule("create").Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "update").Groups(coordinationGroup).Resources("leases").Names("kube-controller-manager").RuleOrDie(),
+ // Fundamental resources.
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("secrets", "serviceaccounts").RuleOrDie(),
+ rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
+ rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("namespaces", "secrets", "serviceaccounts", "configmaps").RuleOrDie(),
+ rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("secrets", "serviceaccounts").RuleOrDie(),
+ // Needed to check API access. These creates are non-mutating
+ rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
+ // Needed for all shared informers
+ rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(),
+ },
+ },
+ {
+ // a role to use for the kube-dns pod
+ ObjectMeta: metav1.ObjectMeta{Name: "system:kube-dns"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(),
+ },
+ },
+ {
+ // a role for an external/out-of-tree persistent volume provisioner
+ ObjectMeta: metav1.ObjectMeta{Name: "system:persistent-volume-provisioner"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
+ // update is needed in addition to read access for setting lock annotations on PVCs
+ rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
+
+ // Needed for watching provisioning success and failure events
+ rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
+
+ eventsRule(),
+ },
+ },
+ {
+ // a role making the csrapprover controller approve a node client CSR
+ ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(),
+ },
+ },
+ {
+ // a role making the csrapprover controller approve a node client CSR requested by the node itself
+ ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(),
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
+ rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:legacy-unknown-approver"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.LegacyUnknownSignerName).RuleOrDie(),
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:kubelet-serving-approver"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeletServingSignerName).RuleOrDie(),
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:kube-apiserver-client-approver"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeAPIServerClientSignerName).RuleOrDie(),
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:kube-apiserver-client-kubelet-approver"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeAPIServerClientKubeletSignerName).RuleOrDie(),
+ },
+ },
+ }...)
+
+ // Add the cluster role for reading the ServiceAccountIssuerDiscovery endpoints
+ // Also allow slash-ended URLs to allow clients generated from published openapi docs prior to fixing the trailing slash to work properly
+ roles = append(roles, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: "system:service-account-issuer-discovery"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("get").URLs(
+ "/.well-known/openid-configuration",
+ "/.well-known/openid-configuration/",
+ "/openid/v1/jwks",
+ "/openid/v1/jwks/",
+ ).RuleOrDie(),
+ },
+ })
+
+ // node-proxier role is used by kube-proxy.
+ nodeProxierRules := []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+
+ eventsRule(),
+ }
+ nodeProxierRules = append(nodeProxierRules, rbacv1helpers.NewRule("list", "watch").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie())
+ roles = append(roles, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"},
+ Rules: nodeProxierRules,
+ })
+
+ kubeSchedulerRules := []rbacv1.PolicyRule{
+ eventsRule(),
+ // This is for leaderlease access
+ // TODO: scope this to the kube-system namespace
+ rbacv1helpers.NewRule("create").Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "update").Groups(coordinationGroup).Resources("leases").Names("kube-scheduler").RuleOrDie(),
+
+ // Fundamental resources
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
+ rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
+ rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
+ // Things that select pods
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
+ // Things that pods use or applies to them
+ rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
+ // Needed to check API access. These creates are non-mutating
+ rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
+ rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
+ // Needed for volume limits
+ rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie(),
+ // Needed for namespaceSelector feature in pod affinity
+ rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csidrivers").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csistoragecapacities").RuleOrDie(),
+ }
+ // Needed for dynamic resource allocation.
+ if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
+ kubeSchedulerRules = append(kubeSchedulerRules,
+ rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclaims", "resourceclasses").RuleOrDie(),
+ rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
+ rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
+ rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(),
+ )
+ }
+ roles = append(roles, rbacv1.ClusterRole{
+ // a role to use for the kube-scheduler
+ ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
+ Rules: kubeSchedulerRules,
+ })
+
+ // Default ClusterRole to allow reading ClusterTrustBundle objects
+ if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundle) {
+ roles = append(roles, rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{Name: "system:cluster-trust-bundle-discovery"},
+ Rules: []rbacv1.PolicyRule{
+ rbacv1helpers.NewRule(Read...).Groups(certificatesGroup).Resources("clustertrustbundles").RuleOrDie(),
+ },
+ })
+ }
+
+ addClusterRoleLabel(roles)
+ return roles
+}
+
// systemNodeRoleName is the name of the ClusterRole (and matching ClusterRoleBinding)
// granting node permissions; the binding is kept but emptied (see below).
const systemNodeRoleName = "system:node"

// ClusterRoleBindings returns the default ClusterRoleBindings that attach the
// default bootstrap ClusterRoles to their well-known users, groups, and
// service accounts.
func ClusterRoleBindings() []rbacv1.ClusterRoleBinding {
	rolebindings := []rbacv1.ClusterRoleBinding{
		rbacv1helpers.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:monitoring").Groups(user.MonitoringGroup).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:public-info-viewer").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
		rbacv1helpers.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
		// The scheduler also binds volumes, so it gets the volume-scheduler role too.
		rbacv1helpers.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie(),

		// This default binding of the system:node role to the system:nodes group is deprecated in 1.7 with the availability of the Node authorizer.
		// This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject.
		{
			ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName},
			RoleRef:    rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName},
		},
	}

	// Allow all in-cluster workloads (via their service accounts) to read the OIDC discovery endpoints.
	// Users with certain forms of write access (create pods, create secrets, create service accounts, etc)
	// can gain access to a service account identity which would allow them to access this information.
	// This includes the issuer URL, which is already present in the SA token JWT. Similarly, SAs can
	// already gain this same info via introspection of their own token. Since this discovery endpoint
	// points to what issued all service account tokens, it seems fitting for SAs to have this access.
	// Defer to the cluster admin with regard to binding directly to all authenticated and/or
	// unauthenticated users.
	rolebindings = append(rolebindings,
		rbacv1helpers.NewClusterBinding("system:service-account-issuer-discovery").Groups(serviceaccount.AllServiceAccountsGroup).BindingOrDie(),
	)

	// Service accounts can read ClusterTrustBundle objects.
	if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundle) {
		rolebindings = append(rolebindings, rbacv1helpers.NewClusterBinding("system:cluster-trust-bundle-discovery").Groups(serviceaccount.AllServiceAccountsGroup).BindingOrDie())
	}

	addClusterRoleBindingLabel(rolebindings)

	return rolebindings
}
+
// ClusterRolesToAggregate maps from previous clusterrole name to the new clusterrole name
// that aggregates into it (e.g. "admin" -> "system:aggregate-to-admin").
func ClusterRolesToAggregate() map[string]string {
	aggregates := make(map[string]string, 3)
	for _, role := range []string{"admin", "edit", "view"} {
		aggregates[role] = "system:aggregate-to-" + role
	}
	return aggregates
}
+
+// ClusterRoleBindingsToSplit returns a map of Names of source ClusterRoleBindings
+// to copy Subjects, Annotations, and Labels to destination ClusterRoleBinding templates.
+func ClusterRoleBindingsToSplit() map[string]rbacv1.ClusterRoleBinding {
+ bindingsToSplit := map[string]rbacv1.ClusterRoleBinding{}
+ for _, defaultClusterRoleBinding := range ClusterRoleBindings() {
+ switch defaultClusterRoleBinding.Name {
+ case "system:public-info-viewer":
+ bindingsToSplit["system:discovery"] = defaultClusterRoleBinding
+ }
+ }
+ return bindingsToSplit
+}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/LICENSE
new file mode 100644
index 000000000..76edf5ef7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/LICENSE
@@ -0,0 +1,23 @@
+Copyright ©2013 The gonum Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the gonum project nor the names of its authors and
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/README.md b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/README.md
new file mode 100644
index 000000000..3b51e664a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/README.md
@@ -0,0 +1 @@
+Forked from gonum/graph@50b27dea7ebbfb052dfaf91681afc6fde28d8796 to support memory-use improvements to the simple graph
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/graph.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/graph.go
new file mode 100644
index 000000000..adade5d79
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/graph.go
@@ -0,0 +1,153 @@
+// Copyright ©2014 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package graph
+
// Node is a graph node. It returns a graph-unique integer ID.
type Node interface {
	ID() int
}

// Edge is a graph edge. In directed graphs, the direction of the
// edge is given from -> to, otherwise the edge is semantically
// unordered.
type Edge interface {
	From() Node
	To() Node
	Weight() float64
}

// Graph is a generalized graph.
type Graph interface {
	// Has returns whether the node exists within the graph.
	Has(Node) bool

	// Nodes returns all the nodes in the graph.
	Nodes() []Node

	// From returns all nodes that can be reached directly
	// from the given node.
	From(Node) []Node

	// HasEdgeBetween returns whether an edge exists between
	// nodes x and y without considering direction.
	HasEdgeBetween(x, y Node) bool

	// Edge returns the edge from u to v if such an edge
	// exists and nil otherwise. The node v must be directly
	// reachable from u as defined by the From method.
	Edge(u, v Node) Edge
}
+
// Undirected is an undirected graph.
type Undirected interface {
	Graph

	// EdgeBetween returns the edge between nodes x and y.
	EdgeBetween(x, y Node) Edge
}

// Directed is a directed graph.
type Directed interface {
	Graph

	// HasEdgeFromTo returns whether an edge exists
	// in the graph from u to v.
	HasEdgeFromTo(u, v Node) bool

	// To returns all nodes that can reach directly
	// to the given node.
	To(Node) []Node
}

// Weighter defines graphs that can report edge weights.
type Weighter interface {
	// Weight returns the weight for the edge between
	// x and y if Edge(x, y) returns a non-nil Edge.
	// If x and y are the same node or there is no
	// joining edge between the two nodes the weight
	// value returned is implementation dependent.
	// Weight returns true if an edge exists between
	// x and y or if x and y have the same ID, false
	// otherwise.
	Weight(x, y Node) (w float64, ok bool)
}

// NodeAdder is an interface for adding arbitrary nodes to a graph.
type NodeAdder interface {
	// NewNodeID returns a new unique arbitrary ID.
	NewNodeID() int

	// AddNode adds a node to the graph. AddNode panics if
	// the added node ID matches an existing node ID.
	AddNode(Node)
}

// NodeRemover is an interface for removing nodes from a graph.
type NodeRemover interface {
	// RemoveNode removes a node from the graph, as
	// well as any edges attached to it. If the node
	// is not in the graph it is a no-op.
	RemoveNode(Node)
}

// EdgeSetter is an interface for adding edges to a graph.
type EdgeSetter interface {
	// SetEdge adds an edge from one node to another.
	// If the graph supports node addition the nodes
	// will be added if they do not exist, otherwise
	// SetEdge will panic.
	// If the IDs returned by e.From and e.To are
	// equal, SetEdge will panic.
	SetEdge(e Edge)
}

// EdgeRemover is an interface for removing edges from a graph.
type EdgeRemover interface {
	// RemoveEdge removes the given edge, leaving the
	// terminal nodes. If the edge does not exist it
	// is a no-op.
	RemoveEdge(Edge)
}

// Builder is a graph that can have nodes and edges added.
type Builder interface {
	NodeAdder
	EdgeSetter
}

// UndirectedBuilder is an undirected graph builder.
type UndirectedBuilder interface {
	Undirected
	Builder
}

// DirectedBuilder is a directed graph builder.
type DirectedBuilder interface {
	Directed
	Builder
}
+
+// Copy copies nodes and edges as undirected edges from the source to the destination
+// without first clearing the destination. Copy will panic if a node ID in the source
+// graph matches a node ID in the destination.
+//
+// If the source is undirected and the destination is directed both directions will
+// be present in the destination after the copy is complete.
+//
+// If the source is a directed graph, the destination is undirected, and a fundamental
+// cycle exists with two nodes where the edge weights differ, the resulting destination
+// graph's edge weight between those nodes is undefined. If there is a defined function
+// to resolve such conflicts, an Undirect may be used to do this.
+func Copy(dst Builder, src Graph) {
+ nodes := src.Nodes()
+ for _, n := range nodes {
+ dst.AddNode(n)
+ }
+ for _, u := range nodes {
+ for _, v := range src.From(u) {
+ dst.SetEdge(src.Edge(u, v))
+ }
+ }
+}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/linear.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/linear.go
new file mode 100644
index 000000000..ce7c6cfff
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/linear.go
@@ -0,0 +1,74 @@
+// Copyright ©2015 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package linear provides common linear data structures.
+package linear
+
+import (
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+)
+
+// NodeStack implements a LIFO stack of graph.Node.
+type NodeStack []graph.Node
+
+// Len returns the number of graph.Nodes on the stack.
+func (s *NodeStack) Len() int { return len(*s) }
+
+// Pop returns the last graph.Node on the stack and removes it
+// from the stack.
+func (s *NodeStack) Pop() graph.Node {
+ v := *s
+ v, n := v[:len(v)-1], v[len(v)-1]
+ *s = v
+ return n
+}
+
+// Push adds the node n to the stack at the last position.
+func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) }
+
// NodeQueue implements a FIFO queue.
type NodeQueue struct {
	head int          // index of the front element within data
	data []graph.Node // backing storage; data[head:] are the live elements
}

// Len returns the number of graph.Nodes in the queue.
func (q *NodeQueue) Len() int { return len(q.data) - q.head }

// Enqueue adds the node n to the back of the queue.
func (q *NodeQueue) Enqueue(n graph.Node) {
	// If appending would grow the backing array but dead space exists at the
	// front (head > 0), compact in place first to reuse the existing capacity.
	if len(q.data) == cap(q.data) && q.head > 0 {
		l := q.Len()
		copy(q.data, q.data[q.head:])
		q.head = 0
		q.data = append(q.data[:l], n)
	} else {
		q.data = append(q.data, n)
	}
}

// Dequeue returns the graph.Node at the front of the queue and
// removes it from the queue. It panics if the queue is empty.
func (q *NodeQueue) Dequeue() graph.Node {
	if q.Len() == 0 {
		panic("queue: empty queue")
	}

	var n graph.Node
	// Nil the vacated slot so the node is not retained by the backing array.
	n, q.data[q.head] = q.data[q.head], nil
	q.head++

	// Once drained, rewind so future Enqueues reuse the storage from index 0.
	if q.Len() == 0 {
		q.head = 0
		q.data = q.data[:0]
	}

	return n
}

// Reset clears the queue for reuse, keeping the backing storage.
func (q *NodeQueue) Reset() {
	q.head = 0
	q.data = q.data[:0]
}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/directed_acyclic.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/directed_acyclic.go
new file mode 100644
index 000000000..ac930feb1
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/directed_acyclic.go
@@ -0,0 +1,83 @@
+package simple
+
+import (
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+)
+
+// DirectedAcyclicGraph implements graph.Directed using UndirectedGraph,
+// which only stores one edge for any node pair.
+type DirectedAcyclicGraph struct {
+ *UndirectedGraph
+}
+
+func NewDirectedAcyclicGraph(self, absent float64) *DirectedAcyclicGraph {
+ return &DirectedAcyclicGraph{
+ UndirectedGraph: NewUndirectedGraph(self, absent),
+ }
+}
+
+func (g *DirectedAcyclicGraph) HasEdgeFromTo(u, v graph.Node) bool {
+ edge := g.UndirectedGraph.EdgeBetween(u, v)
+ if edge == nil {
+ return false
+ }
+ return (edge.From().ID() == u.ID())
+}
+
// From returns all nodes reachable directly from n, i.e. the targets of the
// stored edges whose From endpoint is n.
func (g *DirectedAcyclicGraph) From(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}

	fid := n.ID()
	// Capacity is an upper bound: the holder also contains incoming edges.
	nodes := make([]graph.Node, 0, g.UndirectedGraph.edges[n.ID()].Len())
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.From().ID() == fid {
			nodes = append(nodes, g.UndirectedGraph.nodes[edge.To().ID()])
		}
	})
	return nodes
}

// VisitFrom calls visitor for each node directly reachable from n.
// NOTE(review): returning false from visitor only returns from the per-edge
// callback; the underlying Visit loop continues and later successors are still
// passed to visitor. Confirm callers do not rely on a hard stop.
func (g *DirectedAcyclicGraph) VisitFrom(n graph.Node, visitor func(neighbor graph.Node) (shouldContinue bool)) {
	if !g.Has(n) {
		return
	}
	fid := n.ID()
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.From().ID() == fid {
			if !visitor(g.UndirectedGraph.nodes[edge.To().ID()]) {
				return
			}
		}
	})
}
+
// To returns all nodes with an edge directed into n, i.e. the sources of the
// stored edges whose To endpoint is n.
func (g *DirectedAcyclicGraph) To(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}

	tid := n.ID()
	// Capacity is an upper bound: the holder also contains outgoing edges.
	nodes := make([]graph.Node, 0, g.UndirectedGraph.edges[n.ID()].Len())
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.To().ID() == tid {
			nodes = append(nodes, g.UndirectedGraph.nodes[edge.From().ID()])
		}
	})
	return nodes
}

// VisitTo calls visitor for each node with an edge directed into n.
// NOTE(review): returning false from visitor only returns from the per-edge
// callback; the underlying Visit loop continues and later predecessors are
// still passed to visitor. Confirm callers do not rely on a hard stop.
func (g *DirectedAcyclicGraph) VisitTo(n graph.Node, visitor func(neighbor graph.Node) (shouldContinue bool)) {
	if !g.Has(n) {
		return
	}
	tid := n.ID()
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.To().ID() == tid {
			if !visitor(g.UndirectedGraph.nodes[edge.From().ID()]) {
				return
			}
		}
	})
}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/edgeholder.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/edgeholder.go
new file mode 100644
index 000000000..f2248ab7d
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/edgeholder.go
@@ -0,0 +1,122 @@
+package simple
+
+import "k8s.io/kubernetes/third_party/forked/gonum/graph"
+
// edgeHolder represents a set of edges, with no more than one edge to or from a particular neighbor node.
// Delete and Set return the holder to use afterward, which may be a different
// implementation (see sliceEdgeHolder.Set's promotion to mapEdgeHolder).
type edgeHolder interface {
	// Visit invokes visitor with each edge and the id of the neighbor node in the edge
	Visit(visitor func(neighbor int, edge graph.Edge))
	// Delete removes edges to or from the specified neighbor
	Delete(neighbor int) edgeHolder
	// Set stores the edge to or from the specified neighbor
	Set(neighbor int, edge graph.Edge) edgeHolder
	// Get returns the edge to or from the specified neighbor
	Get(neighbor int) (graph.Edge, bool)
	// Len returns the number of edges
	Len() int
}

// sliceEdgeHolder holds a list of edges to or from self.
// It is the compact representation used while a node has few edges.
type sliceEdgeHolder struct {
	self  int          // ID of the node this holder belongs to
	edges []graph.Edge // each edge has self as either its From or To endpoint
}
+
// Visit invokes visitor with each edge, passing the ID of the endpoint that is
// not self as the neighbor.
func (e *sliceEdgeHolder) Visit(visitor func(neighbor int, edge graph.Edge)) {
	for _, edge := range e.edges {
		if edge.From().ID() == e.self {
			visitor(edge.To().ID(), edge)
		} else {
			visitor(edge.From().ID(), edge)
		}
	}
}

// Delete removes the edge to or from the specified neighbor by filtering the
// slice in place.
// NOTE(review): the freed tail slots of the backing array are not nilled, so
// a deleted edge may stay reachable from the array until overwritten — confirm
// this retention is acceptable.
func (e *sliceEdgeHolder) Delete(neighbor int) edgeHolder {
	edges := e.edges[:0]
	for i, edge := range e.edges {
		if edge.From().ID() == e.self {
			if edge.To().ID() == neighbor {
				continue
			}
		} else {
			if edge.From().ID() == neighbor {
				continue
			}
		}
		edges = append(edges, e.edges[i])
	}
	e.edges = edges
	return e
}

// Set stores the edge for the specified neighbor, replacing any existing edge
// for that neighbor. Once a fifth distinct neighbor is added the holder is
// promoted to a mapEdgeHolder for O(1) lookup.
func (e *sliceEdgeHolder) Set(neighbor int, newEdge graph.Edge) edgeHolder {
	// Replace in place if an edge for this neighbor already exists.
	for i, edge := range e.edges {
		if edge.From().ID() == e.self {
			if edge.To().ID() == neighbor {
				e.edges[i] = newEdge
				return e
			}
		} else {
			if edge.From().ID() == neighbor {
				e.edges[i] = newEdge
				return e
			}
		}
	}

	// Small holders just append.
	if len(e.edges) < 4 {
		e.edges = append(e.edges, newEdge)
		return e
	}

	// Promote to a map keyed by neighbor ID.
	h := mapEdgeHolder(make(map[int]graph.Edge, len(e.edges)+1))
	for i, edge := range e.edges {
		if edge.From().ID() == e.self {
			h[edge.To().ID()] = e.edges[i]
		} else {
			h[edge.From().ID()] = e.edges[i]
		}
	}
	h[neighbor] = newEdge
	return h
}

// Get returns the edge to or from the specified neighbor by linear scan.
func (e *sliceEdgeHolder) Get(neighbor int) (graph.Edge, bool) {
	for _, edge := range e.edges {
		if edge.From().ID() == e.self {
			if edge.To().ID() == neighbor {
				return edge, true
			}
		} else {
			if edge.From().ID() == neighbor {
				return edge, true
			}
		}
	}
	return nil, false
}

// Len returns the number of edges held.
func (e *sliceEdgeHolder) Len() int {
	return len(e.edges)
}
+
// mapEdgeHolder holds a map of neighbors to edges. It is the representation
// used once a node accumulates more than a handful of edges.
type mapEdgeHolder map[int]graph.Edge

// Visit invokes visitor with each neighbor ID and its edge, in map order.
func (e mapEdgeHolder) Visit(visitor func(neighbor int, edge graph.Edge)) {
	for neighbor, edge := range e {
		visitor(neighbor, edge)
	}
}

// Delete removes the edge for the specified neighbor; no-op if absent.
func (e mapEdgeHolder) Delete(neighbor int) edgeHolder {
	delete(e, neighbor)
	return e
}

// Set stores the edge for the specified neighbor, replacing any existing one.
func (e mapEdgeHolder) Set(neighbor int, edge graph.Edge) edgeHolder {
	e[neighbor] = edge
	return e
}

// Get returns the edge for the specified neighbor.
func (e mapEdgeHolder) Get(neighbor int) (graph.Edge, bool) {
	edge, ok := e[neighbor]
	return edge, ok
}

// Len returns the number of edges held.
func (e mapEdgeHolder) Len() int {
	return len(e)
}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/simple.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/simple.go
new file mode 100644
index 000000000..9bc56b8be
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/simple.go
@@ -0,0 +1,45 @@
+// Copyright ©2014 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package simple provides a suite of simple graph implementations satisfying
+// the gonum/graph interfaces.
+package simple
+
+import (
+ "math"
+
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+)
+
// Node is a simple graph node: its value is its ID.
type Node int

// ID returns the ID number of the node.
func (n Node) ID() int {
	return int(n)
}

// Edge is a simple graph edge joining F and T with weight W.
type Edge struct {
	F, T graph.Node
	W    float64
}

// From returns the from-node of the edge.
func (e Edge) From() graph.Node { return e.F }

// To returns the to-node of the edge.
func (e Edge) To() graph.Node { return e.T }

// Weight returns the weight of the edge.
func (e Edge) Weight() float64 { return e.W }
+
// maxInt is the maximum value of the machine-dependent int type.
const maxInt int = int(^uint(0) >> 1)

// isSame reports whether two float64 values are the same, treating
// NaN as equal to NaN (unlike the == operator).
func isSame(a, b float64) bool {
	if a == b {
		return true
	}
	return math.IsNaN(a) && math.IsNaN(b)
}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/undirected.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/undirected.go
new file mode 100644
index 000000000..231fa3ded
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/undirected.go
@@ -0,0 +1,242 @@
+// Copyright ©2014 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package simple
+
+import (
+ "fmt"
+
+ "golang.org/x/tools/container/intsets"
+
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+)
+
// UndirectedGraph implements a generalized undirected graph.
type UndirectedGraph struct {
	nodes map[int]graph.Node  // node ID -> node
	edges map[int]edgeHolder  // node ID -> edges incident to that node

	self, absent float64 // weights reported by Weight for self-loops and missing edges

	freeIDs intsets.Sparse // IDs released by RemoveNode, available for reuse
	usedIDs intsets.Sparse // IDs currently present in the graph
}

// NewUndirectedGraph returns an UndirectedGraph with the specified self and absent
// edge weight values.
func NewUndirectedGraph(self, absent float64) *UndirectedGraph {
	return &UndirectedGraph{
		nodes: make(map[int]graph.Node),
		edges: make(map[int]edgeHolder),

		self:   self,
		absent: absent,
	}
}
+
+// NewNodeID returns a new unique ID for a node to be added to g. The returned ID does
+// not become a valid ID in g until it is added to g.
+func (g *UndirectedGraph) NewNodeID() int {
+ if len(g.nodes) == 0 {
+ return 0
+ }
+ if len(g.nodes) == maxInt {
+ panic(fmt.Sprintf("simple: cannot allocate node: no slot"))
+ }
+
+ var id int
+ if g.freeIDs.Len() != 0 && g.freeIDs.TakeMin(&id) {
+ return id
+ }
+ if id = g.usedIDs.Max(); id < maxInt {
+ return id + 1
+ }
+ for id = 0; id < maxInt; id++ {
+ if !g.usedIDs.Has(id) {
+ return id
+ }
+ }
+ panic("unreachable")
+}
+
// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID.
func (g *UndirectedGraph) AddNode(n graph.Node) {
	if _, exists := g.nodes[n.ID()]; exists {
		panic(fmt.Sprintf("simple: node ID collision: %d", n.ID()))
	}
	g.nodes[n.ID()] = n
	// Start with the compact slice-backed holder; Set promotes it to a map
	// once the node accumulates enough edges.
	g.edges[n.ID()] = &sliceEdgeHolder{self: n.ID()}

	// Keep the ID bookkeeping used by NewNodeID in sync.
	g.freeIDs.Remove(n.ID())
	g.usedIDs.Insert(n.ID())
}

// RemoveNode removes n from the graph, as well as any edges attached to it. If the node
// is not in the graph it is a no-op.
func (g *UndirectedGraph) RemoveNode(n graph.Node) {
	if _, ok := g.nodes[n.ID()]; !ok {
		return
	}
	delete(g.nodes, n.ID())

	// Detach the mirrored edge entry held by each neighbor before dropping
	// this node's own holder.
	g.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		g.edges[neighbor] = g.edges[neighbor].Delete(n.ID())
	})
	delete(g.edges, n.ID())

	// The freed ID becomes available for reuse by NewNodeID.
	g.freeIDs.Insert(n.ID())
	g.usedIDs.Remove(n.ID())

}
+
// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added.
// It will panic if the IDs of the e.From and e.To are equal.
func (g *UndirectedGraph) SetEdge(e graph.Edge) {
	var (
		from = e.From()
		fid  = from.ID()
		to   = e.To()
		tid  = to.ID()
	)

	if fid == tid {
		panic("simple: adding self edge")
	}

	if !g.Has(from) {
		g.AddNode(from)
	}
	if !g.Has(to) {
		g.AddNode(to)
	}

	// Store the edge under both endpoints so either side can enumerate it.
	g.edges[fid] = g.edges[fid].Set(tid, e)
	g.edges[tid] = g.edges[tid].Set(fid, e)
}

// RemoveEdge removes e from the graph, leaving the terminal nodes. If the edge does not exist
// it is a no-op.
func (g *UndirectedGraph) RemoveEdge(e graph.Edge) {
	from, to := e.From(), e.To()
	if _, ok := g.nodes[from.ID()]; !ok {
		return
	}
	if _, ok := g.nodes[to.ID()]; !ok {
		return
	}

	// Remove the mirrored entries from both endpoints' holders.
	g.edges[from.ID()] = g.edges[from.ID()].Delete(to.ID())
	g.edges[to.ID()] = g.edges[to.ID()].Delete(from.ID())
}
+
+// Node returns the node in the graph with the given ID.
+func (g *UndirectedGraph) Node(id int) graph.Node {
+ return g.nodes[id]
+}
+
+// Has returns whether the node exists within the graph.
+func (g *UndirectedGraph) Has(n graph.Node) bool {
+ _, ok := g.nodes[n.ID()]
+ return ok
+}
+
+// Nodes returns all the nodes in the graph.
+func (g *UndirectedGraph) Nodes() []graph.Node {
+ nodes := make([]graph.Node, len(g.nodes))
+ i := 0
+ for _, n := range g.nodes {
+ nodes[i] = n
+ i++
+ }
+
+ return nodes
+}
+
// Edges returns all the edges in the graph. Each edge is reported once even
// though it is stored under both of its endpoints.
func (g *UndirectedGraph) Edges() []graph.Edge {
	var edges []graph.Edge

	// Track both orientations of each visited pair so the mirrored entry
	// under the other endpoint is skipped.
	seen := make(map[[2]int]struct{})
	for _, u := range g.edges {
		u.Visit(func(neighbor int, e graph.Edge) {
			uid := e.From().ID()
			vid := e.To().ID()
			if _, ok := seen[[2]int{uid, vid}]; ok {
				return
			}
			seen[[2]int{uid, vid}] = struct{}{}
			seen[[2]int{vid, uid}] = struct{}{}
			edges = append(edges, e)
		})
	}

	return edges
}

// From returns all nodes in g that can be reached directly from n.
func (g *UndirectedGraph) From(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}

	// Every entry in the holder corresponds to exactly one neighbor, so the
	// result can be sized exactly and filled by index.
	nodes := make([]graph.Node, g.edges[n.ID()].Len())
	i := 0
	g.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		nodes[i] = g.nodes[neighbor]
		i++
	})

	return nodes
}
+
+// HasEdgeBetween returns whether an edge exists between nodes x and y.
+func (g *UndirectedGraph) HasEdgeBetween(x, y graph.Node) bool {
+ _, ok := g.edges[x.ID()].Get(y.ID())
+ return ok
+}
+
// Edge returns the edge from u to v if such an edge exists and nil otherwise.
// The node v must be directly reachable from u as defined by the From method.
// Since the graph is undirected this is simply EdgeBetween.
func (g *UndirectedGraph) Edge(u, v graph.Node) graph.Edge {
	return g.EdgeBetween(u, v)
}

// EdgeBetween returns the edge between nodes x and y, or nil if none exists.
func (g *UndirectedGraph) EdgeBetween(x, y graph.Node) graph.Edge {
	// We don't need to check if neigh exists because
	// it's implicit in the edges access.
	if !g.Has(x) {
		return nil
	}

	edge, _ := g.edges[x.ID()].Get(y.ID())
	return edge
}
+
// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge.
// If x and y are the same node or there is no joining edge between the two nodes the weight
// value returned is either the graph's absent or self value. Weight returns true if an edge
// exists between x and y or if x and y have the same ID, false otherwise.
func (g *UndirectedGraph) Weight(x, y graph.Node) (w float64, ok bool) {
	xid := x.ID()
	yid := y.ID()
	if xid == yid {
		// Self-loops are not stored; report the configured self weight.
		return g.self, true
	}
	if n, ok := g.edges[xid]; ok {
		if e, ok := n.Get(yid); ok {
			return e.Weight(), true
		}
	}
	return g.absent, false
}

// Degree returns the degree of n in g, i.e. the number of edges incident to n.
// It returns 0 for a node not present in the graph.
func (g *UndirectedGraph) Degree(n graph.Node) int {
	if _, ok := g.nodes[n.ID()]; !ok {
		return 0
	}

	return g.edges[n.ID()].Len()
}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go
new file mode 100644
index 000000000..105c8f6e1
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go
@@ -0,0 +1,186 @@
+// Copyright ©2015 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package traverse provides basic graph traversal primitives.
+package traverse
+
+import (
+ "golang.org/x/tools/container/intsets"
+
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+ "k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear"
+)
+
+// BreadthFirst implements stateful breadth-first graph traversal.
+//
+// EdgeFilter, if non-nil, restricts the traversal to edges for which it
+// returns true. Visit, if non-nil, is called with the endpoints of each
+// followed edge. The internal visited set is allocated lazily by Walk,
+// so the zero value is ready to use.
+type BreadthFirst struct {
+	EdgeFilter func(graph.Edge) bool
+	Visit      func(u, v graph.Node)
+	queue      linear.NodeQueue
+	visited    *intsets.Sparse
+}
+
+// Walk performs a breadth-first traversal of the graph g starting from the given node,
+// depending on the EdgeFilter field and the until parameter if they are non-nil. The
+// traversal follows edges for which EdgeFilter(edge) is true and returns the first node
+// for which until(node, depth) is true. During the traversal, if the Visit field is
+// non-nil, it is called with the nodes joined by each followed edge.
+// Walk returns nil if until never fires. Visited state accumulates across
+// calls until Reset is called.
+func (b *BreadthFirst) Walk(g graph.Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node {
+	// Lazy allocation keeps the zero value of BreadthFirst usable.
+	if b.visited == nil {
+		b.visited = &intsets.Sparse{}
+	}
+	b.queue.Enqueue(from)
+	b.visited.Insert(from.ID())
+
+	var (
+		depth     int
+		children  int
+		untilNext = 1
+	)
+	for b.queue.Len() > 0 {
+		t := b.queue.Dequeue()
+		// until is tested at dequeue time, so depth is the BFS level of t.
+		if until != nil && until(t, depth) {
+			return t
+		}
+		for _, n := range g.From(t) {
+			// Skip edges rejected by the filter and nodes already seen.
+			if b.EdgeFilter != nil && !b.EdgeFilter(g.Edge(t, n)) {
+				continue
+			}
+			if b.visited.Has(n.ID()) {
+				continue
+			}
+			if b.Visit != nil {
+				b.Visit(t, n)
+			}
+			b.visited.Insert(n.ID())
+			children++
+			b.queue.Enqueue(n)
+		}
+		// untilNext counts the nodes remaining in the current BFS level;
+		// when it hits zero the frontier advances one level and the number
+		// of children enqueued becomes the size of the next level.
+		if untilNext--; untilNext == 0 {
+			depth++
+			untilNext = children
+			children = 0
+		}
+	}
+
+	return nil
+}
+
+// WalkAll calls Walk for each unvisited node of the graph g using edges independent
+// of their direction. The functions before and after are called prior to commencing
+// and after completing each walk if they are non-nil respectively. The function
+// during is called on each node as it is traversed.
+// Each connected component of g is therefore walked exactly once.
+func (b *BreadthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) {
+	// Discard any state left over from a previous traversal.
+	b.Reset()
+	for _, from := range g.Nodes() {
+		if b.Visited(from) {
+			continue
+		}
+		if before != nil {
+			before()
+		}
+		// The until callback never returns true: it is used only to feed
+		// every traversed node to during, so Walk runs the full component.
+		b.Walk(g, from, func(n graph.Node, _ int) bool {
+			if during != nil {
+				during(n)
+			}
+			return false
+		})
+		if after != nil {
+			after()
+		}
+	}
+}
+
+// Visited returns whether the node n was visited during a traverse.
+func (b *BreadthFirst) Visited(n graph.Node) bool {
+	return b.visited != nil && b.visited.Has(n.ID())
+}
+
+// Reset resets the state of the traverser for reuse.
+func (b *BreadthFirst) Reset() {
+	b.queue.Reset()
+	// Clear rather than nil out the visited set so its storage is reused.
+	if b.visited != nil {
+		b.visited.Clear()
+	}
+}
+
+// DepthFirst implements stateful depth-first graph traversal.
+//
+// EdgeFilter, if non-nil, restricts the traversal to edges for which it
+// returns true. Visit, if non-nil, is called with the endpoints of each
+// followed edge. The internal visited set is allocated lazily by Walk,
+// so the zero value is ready to use.
+type DepthFirst struct {
+	EdgeFilter func(graph.Edge) bool
+	Visit      func(u, v graph.Node)
+	stack      linear.NodeStack
+	visited    *intsets.Sparse
+}
+
+// Walk performs a depth-first traversal of the graph g starting from the given node,
+// depending on the EdgeFilter field and the until parameter if they are non-nil. The
+// traversal follows edges for which EdgeFilter(edge) is true and returns the first node
+// for which until(node) is true. During the traversal, if the Visit field is non-nil, it
+// is called with the nodes joined by each followed edge.
+// Walk returns nil if until never fires. Visited state accumulates across
+// calls until Reset is called.
+func (d *DepthFirst) Walk(g graph.Graph, from graph.Node, until func(graph.Node) bool) graph.Node {
+	// Lazy allocation keeps the zero value of DepthFirst usable.
+	if d.visited == nil {
+		d.visited = &intsets.Sparse{}
+	}
+	d.stack.Push(from)
+	d.visited.Insert(from.ID())
+
+	for d.stack.Len() > 0 {
+		// until is tested at pop time, on every node exactly once.
+		t := d.stack.Pop()
+		if until != nil && until(t) {
+			return t
+		}
+		for _, n := range g.From(t) {
+			// Skip edges rejected by the filter and nodes already seen.
+			if d.EdgeFilter != nil && !d.EdgeFilter(g.Edge(t, n)) {
+				continue
+			}
+			if d.visited.Has(n.ID()) {
+				continue
+			}
+			if d.Visit != nil {
+				d.Visit(t, n)
+			}
+			// Marking at push time prevents the same node being stacked twice.
+			d.visited.Insert(n.ID())
+			d.stack.Push(n)
+		}
+	}
+
+	return nil
+}
+
+// WalkAll calls Walk for each unvisited node of the graph g using edges independent
+// of their direction. The functions before and after are called prior to commencing
+// and after completing each walk if they are non-nil respectively. The function
+// during is called on each node as it is traversed.
+// Each connected component of g is therefore walked exactly once.
+func (d *DepthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) {
+	// Discard any state left over from a previous traversal.
+	d.Reset()
+	for _, from := range g.Nodes() {
+		if d.Visited(from) {
+			continue
+		}
+		if before != nil {
+			before()
+		}
+		// The until callback never returns true: it is used only to feed
+		// every traversed node to during, so Walk runs the full component.
+		d.Walk(g, from, func(n graph.Node) bool {
+			if during != nil {
+				during(n)
+			}
+			return false
+		})
+		if after != nil {
+			after()
+		}
+	}
+}
+
+// Visited returns whether the node n was visited during a traverse.
+func (d *DepthFirst) Visited(n graph.Node) bool {
+	return d.visited != nil && d.visited.Has(n.ID())
+}
+
+// Reset resets the state of the traverser for reuse.
+func (d *DepthFirst) Reset() {
+	// Truncate in place so the stack's backing array is reused.
+	d.stack = d.stack[:0]
+	// Clear rather than nil out the visited set so its storage is reused.
+	if d.visited != nil {
+		d.visited.Clear()
+	}
+}
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go
new file mode 100644
index 000000000..89df3c690
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go
@@ -0,0 +1,86 @@
+// Copyright ©2015 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package traverse provides basic graph traversal primitives.
+package traverse
+
+import (
+ "golang.org/x/tools/container/intsets"
+
+ "k8s.io/kubernetes/third_party/forked/gonum/graph"
+ "k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear"
+)
+
+// VisitableGraph is a graph that can enumerate the nodes directly reachable
+// from a given node through a visitor callback, rather than returning them
+// as a slice the way graph.Graph's From does.
+type VisitableGraph interface {
+	graph.Graph
+
+	// VisitFrom invokes visitor with all nodes that can be reached directly from the given node.
+	// If visitor returns false, visiting is short-circuited.
+	VisitFrom(from graph.Node, visitor func(graph.Node) (shouldContinue bool))
+}
+
+// VisitingDepthFirst implements stateful depth-first graph traversal on a visitable graph.
+//
+// EdgeFilter, if non-nil, restricts the traversal to edges for which it
+// returns true. Visit, if non-nil, is called with the endpoints of each
+// followed edge. The internal visited set is allocated lazily by Walk,
+// so the zero value is ready to use.
+type VisitingDepthFirst struct {
+	EdgeFilter func(graph.Edge) bool
+	Visit      func(u, v graph.Node)
+	stack      linear.NodeStack
+	visited    *intsets.Sparse
+}
+
+// Walk performs a depth-first traversal of the graph g starting from the given node,
+// depending on the EdgeFilter field and the until parameter if they are non-nil. The
+// traversal follows edges for which EdgeFilter(edge) is true and returns the first node
+// for which until(node) is true. During the traversal, if the Visit field is non-nil, it
+// is called with the nodes joined by each followed edge.
+func (d *VisitingDepthFirst) Walk(g VisitableGraph, from graph.Node, until func(graph.Node) bool) graph.Node {
+	// Lazy allocation keeps the zero value of VisitingDepthFirst usable.
+	if d.visited == nil {
+		d.visited = &intsets.Sparse{}
+	}
+	d.stack.Push(from)
+	d.visited.Insert(from.ID())
+	// Unlike DepthFirst.Walk, until is evaluated at push time inside the
+	// visitor below, so the start node needs this explicit check.
+	if until != nil && until(from) {
+		return from
+	}
+
+	// found carries a match out of the visitor: returning false only
+	// short-circuits VisitFrom, not the outer stack loop.
+	var found graph.Node
+	for d.stack.Len() > 0 {
+		t := d.stack.Pop()
+		g.VisitFrom(t, func(n graph.Node) (shouldContinue bool) {
+			// Skip edges rejected by the filter and nodes already seen.
+			if d.EdgeFilter != nil && !d.EdgeFilter(g.Edge(t, n)) {
+				return true
+			}
+			if d.visited.Has(n.ID()) {
+				return true
+			}
+			if d.Visit != nil {
+				d.Visit(t, n)
+			}
+			d.visited.Insert(n.ID())
+			d.stack.Push(n)
+			if until != nil && until(n) {
+				found = n
+				return false
+			}
+			return true
+		})
+		if found != nil {
+			return found
+		}
+	}
+	return nil
+}
+
+// Visited returns whether the node n was visited during a traverse.
+func (d *VisitingDepthFirst) Visited(n graph.Node) bool {
+	return d.visited != nil && d.visited.Has(n.ID())
+}
+
+// Reset resets the state of the traverser for reuse.
+func (d *VisitingDepthFirst) Reset() {
+	// Truncate in place so the stack's backing array is reused.
+	d.stack = d.stack[:0]
+	// Clear rather than nil out the visited set so its storage is reused.
+	if d.visited != nil {
+		d.visited.Clear()
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 811317da1..d18004919 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1200,6 +1200,7 @@ golang.org/x/time/rate
# golang.org/x/tools v0.16.1
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
+golang.org/x/tools/container/intsets
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/ast/inspector
golang.org/x/tools/go/gcexportdata
@@ -2293,9 +2294,14 @@ k8s.io/kubernetes/pkg/api/node
k8s.io/kubernetes/pkg/api/persistentvolumeclaim
k8s.io/kubernetes/pkg/api/pod
k8s.io/kubernetes/pkg/api/service
+k8s.io/kubernetes/pkg/api/v1/persistentvolume
k8s.io/kubernetes/pkg/api/v1/pod
k8s.io/kubernetes/pkg/api/v1/resource
k8s.io/kubernetes/pkg/api/v1/service
+k8s.io/kubernetes/pkg/apis/abac
+k8s.io/kubernetes/pkg/apis/abac/latest
+k8s.io/kubernetes/pkg/apis/abac/v0
+k8s.io/kubernetes/pkg/apis/abac/v1beta1
k8s.io/kubernetes/pkg/apis/admissionregistration
k8s.io/kubernetes/pkg/apis/apiserverinternal
k8s.io/kubernetes/pkg/apis/apps
@@ -2381,6 +2387,8 @@ k8s.io/kubernetes/pkg/apis/storage/util
k8s.io/kubernetes/pkg/apis/storage/v1
k8s.io/kubernetes/pkg/apis/storage/v1alpha1
k8s.io/kubernetes/pkg/apis/storage/v1beta1
+k8s.io/kubernetes/pkg/auth/authorizer/abac
+k8s.io/kubernetes/pkg/auth/nodeidentifier
k8s.io/kubernetes/pkg/capabilities
k8s.io/kubernetes/pkg/client/conditions
k8s.io/kubernetes/pkg/cluster/ports
@@ -2394,12 +2402,15 @@ k8s.io/kubernetes/pkg/controller/replicaset
k8s.io/kubernetes/pkg/controller/replicaset/metrics
k8s.io/kubernetes/pkg/controller/replication
k8s.io/kubernetes/pkg/controller/volume/events
+k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking
k8s.io/kubernetes/pkg/credentialprovider
k8s.io/kubernetes/pkg/credentialprovider/azure
k8s.io/kubernetes/pkg/credentialprovider/gcp
k8s.io/kubernetes/pkg/credentialprovider/secrets
k8s.io/kubernetes/pkg/features
k8s.io/kubernetes/pkg/fieldpath
+k8s.io/kubernetes/pkg/kubeapiserver/authorizer
+k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes
k8s.io/kubernetes/pkg/kubelet
k8s.io/kubernetes/pkg/kubelet/apis/config
k8s.io/kubernetes/pkg/kubelet/apis/config/scheme
@@ -2506,6 +2517,7 @@ k8s.io/kubernetes/pkg/probe/exec
k8s.io/kubernetes/pkg/probe/grpc
k8s.io/kubernetes/pkg/probe/http
k8s.io/kubernetes/pkg/probe/tcp
+k8s.io/kubernetes/pkg/registry/authorization/util
k8s.io/kubernetes/pkg/registry/core/service/allocator
k8s.io/kubernetes/pkg/registry/rbac/validation
k8s.io/kubernetes/pkg/scheduler
@@ -2595,7 +2607,9 @@ k8s.io/kubernetes/pkg/volume/util/volumepathhandler
k8s.io/kubernetes/pkg/volume/validation
k8s.io/kubernetes/pkg/windows/service
k8s.io/kubernetes/plugin/pkg/admission/serviceaccount
+k8s.io/kubernetes/plugin/pkg/auth/authorizer/node
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac
+k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy
k8s.io/kubernetes/test/e2e/apps
k8s.io/kubernetes/test/e2e/auth
k8s.io/kubernetes/test/e2e/common
@@ -2658,6 +2672,10 @@ k8s.io/kubernetes/test/utils/format
k8s.io/kubernetes/test/utils/image
k8s.io/kubernetes/test/utils/kubeconfig
k8s.io/kubernetes/third_party/forked/golang/expansion
+k8s.io/kubernetes/third_party/forked/gonum/graph
+k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
+k8s.io/kubernetes/third_party/forked/gonum/graph/simple
+k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/kubeedge/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.29.6-kubeedge1
## explicit; go 1.21
k8s.io/legacy-cloud-providers/azure/auth