From bbd9b018ddce477a2f6574a1f2570fc2b84475df Mon Sep 17 00:00:00 2001
From: Kostiantyn Kalynovskyi
Date: Mon, 17 May 2021 17:16:35 +0000
Subject: [PATCH] Use site-wide kubeconfig only on demand

This patchset introduces a new field, SiteWideKubeconfig, to the
Phase.Config API object. If it is set to true, the phase will have
access to the kubeconfigs of all clusters defined in the ClusterMap.
By default, only the kubeconfig of the cluster specified in
Phase.Metadata.ClusterName will be available to the phase executor.

This approach speeds up the deployment process because airshipctl
will not have to look up multiple kubeconfigs when running each phase.

Closes: #547
Change-Id: Ic92027ba88d3ce8cb769c254968530037540b8fd
---
 manifests/phases/phases.yaml       |  1 +
 pkg/api/v1alpha1/phase_types.go    |  1 +
 pkg/cluster/command.go             |  6 ++++++
 pkg/k8s/kubeconfig/builder.go      | 17 ++++++++++++++---
 pkg/k8s/kubeconfig/builder_test.go |  5 +++++
 pkg/phase/client.go                |  2 ++
 pkg/phase/command_test.go          |  9 +++++++--
 pkg/phase/executors/container.go   |  1 +
 8 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/manifests/phases/phases.yaml b/manifests/phases/phases.yaml
index e2c4dfda5..f2549b5eb 100644
--- a/manifests/phases/phases.yaml
+++ b/manifests/phases/phases.yaml
@@ -126,6 +126,7 @@ metadata:
   name: clusterctl-move
   clusterName: target-cluster
 config:
+  siteWideKubeconfig: true
   executorRef:
     apiVersion: airshipit.org/v1alpha1
     kind: Clusterctl
diff --git a/pkg/api/v1alpha1/phase_types.go b/pkg/api/v1alpha1/phase_types.go
index 9f79373db..f2307806f 100644
--- a/pkg/api/v1alpha1/phase_types.go
+++ b/pkg/api/v1alpha1/phase_types.go
@@ -32,6 +32,7 @@ type Phase struct {
 // phase runner object which should contain runner configuration
 type PhaseConfig struct {
 	ExecutorRef        *corev1.ObjectReference `json:"executorRef"`
+	SiteWideKubeconfig bool                    `json:"siteWideKubeconfig,omitempty"`
 	DocumentEntryPoint string                  `json:"documentEntryPoint"`
 }
 
diff --git a/pkg/cluster/command.go b/pkg/cluster/command.go
index 6a618355c..b3e152da9 100755
--- a/pkg/cluster/command.go
+++ b/pkg/cluster/command.go
@@ -83,12 +83,18 @@ func (cmd *GetKubeconfigCommand) RunE(cfgFactory config.Factory, writer io.Writer) error {
 		return err
 	}
 
+	var siteWide bool
+	if cmd.ClusterName == "" {
+		siteWide = true
+	}
+
 	kubeconf := kubeconfig.NewBuilder().
 		WithBundle(helper.PhaseConfigBundle()).
 		WithClusterctlClient(client).
 		WithClusterMap(cMap).
 		WithClusterName(cmd.ClusterName).
 		WithTempRoot(helper.WorkDir()).
+		SiteWide(siteWide).
 		Build()
 
 	return kubeconf.Write(writer)
diff --git a/pkg/k8s/kubeconfig/builder.go b/pkg/k8s/kubeconfig/builder.go
index 12e1fec67..4001577ff 100644
--- a/pkg/k8s/kubeconfig/builder.go
+++ b/pkg/k8s/kubeconfig/builder.go
@@ -41,6 +41,7 @@ func NewBuilder() *Builder {
 // Builder is an object that allows to build a kubeconfig based on various provided sources
 // such as path to kubeconfig, path to bundle that should contain kubeconfig and parent cluster
 type Builder struct {
+	siteWide    bool
 	clusterName string
 	root        string
 
@@ -88,11 +89,21 @@ func (b *Builder) WithFilesystem(fs fs.FileSystem) *Builder {
 	return b
 }
 
+// SiteWide allows building a kubeconfig for the entire site.
+// If set to true, ClusterName will be ignored, since all clusters are requested.
+func (b *Builder) SiteWide(t bool) *Builder {
+	b.siteWide = t
+	return b
+}
+
 // Build site kubeconfig, ignores, but logs, errors that happen when building individual
 // kubeconfigs. We need this behavior because, some clusters may not yet be deployed
 // and their kubeconfig is inaccessible yet, but will be accessible at later phases
 // If builder can't build kubeconfig for specific cluster, its context will not be present
 // in final kubeconfig. User of kubeconfig, will receive error stating that context doesn't exist
+// To request a site-wide kubeconfig, use the builder method SiteWide(true).
+// To request a kubeconfig for a single cluster, use WithClusterName("my-cluster").SiteWide(false);
+// ClusterName is ignored if SiteWide(true) is used.
 func (b *Builder) Build() Interface {
 	return NewKubeConfig(b.build, InjectFileSystem(b.fs), InjectTempRoot(b.root))
 }
@@ -101,19 +112,19 @@ func (b *Builder) build() ([]byte, error) {
 	// Set current context to clustername if it was provided
 	var result *api.Config
 	var err error
-	var kubeContext string
-	if b.clusterName != "" {
+	if !b.siteWide {
+		var kubeContext string
 		kubeContext, result, err = b.buildOne(b.clusterName)
 		if err != nil {
 			return nil, err
 		}
+		b.siteKubeconf.CurrentContext = kubeContext
 	} else {
 		result, err = b.builtSiteKubeconf()
 		if err != nil {
 			return nil, err
 		}
 	}
-	b.siteKubeconf.CurrentContext = kubeContext
 
 	return clientcmd.Write(*result)
 }
diff --git a/pkg/k8s/kubeconfig/builder_test.go b/pkg/k8s/kubeconfig/builder_test.go
index 4df7fdf2e..7bbd8e257 100644
--- a/pkg/k8s/kubeconfig/builder_test.go
+++ b/pkg/k8s/kubeconfig/builder_test.go
@@ -95,6 +95,7 @@ func TestBuilderClusterctl(t *testing.T) {
 		errString            string
 		requestedClusterName string
 		tempRoot             string
+		siteWide             bool
 
 		expectedContexts, expectedClusters, expectedAuthInfos []string
 		clusterMap                                            clustermap.ClusterMap
@@ -106,6 +107,7 @@
 			expectedContexts:  []string{parentClusterID},
 			expectedClusters:  []string{parentParentCluster},
 			expectedAuthInfos: []string{parentParentUser},
+			siteWide:          true,
 			clusterMap: clustermap.NewClusterMap(&v1alpha1.ClusterMap{
 				Map: map[string]*v1alpha1.Cluster{
 					childClusterID: {
@@ -134,6 +136,7 @@
 			expectedContexts:  []string{parentClusterID, parentParentClusterID},
 			expectedClusters:  []string{"dummycluster_ephemeral", parentParentCluster},
 			expectedAuthInfos: []string{"kubernetes-admin", parentParentUser},
+			siteWide:          true,
 			clusterMap: clustermap.NewClusterMap(&v1alpha1.ClusterMap{
 				Map: map[string]*v1alpha1.Cluster{
 					parentParentClusterID: {
@@ -165,6 +168,7 @@
 			expectedContexts:  []string{parentClusterID, childClusterID, parentParentClusterID},
 			expectedClusters:  []string{parentCluster, parentParentCluster, childCluster},
 			expectedAuthInfos: []string{parentUser, parentParentUser, childUser},
+			siteWide:          true,
 			clusterMap: clustermap.NewClusterMap(&v1alpha1.ClusterMap{
 				Map: map[string]*v1alpha1.Cluster{
 					childClusterID: {
@@ -265,6 +269,7 @@
 				WithTempRoot(tt.tempRoot).
 				WithClusterctlClient(tt.clusterctlClient).
 				WithFilesystem(tt.fs).
+				SiteWide(tt.siteWide).
 				Build()
 			require.NotNil(t, kube)
 			filePath, cleanup, err := kube.GetFile()
diff --git a/pkg/phase/client.go b/pkg/phase/client.go
index 3749557b2..6c0dbf8d3 100644
--- a/pkg/phase/client.go
+++ b/pkg/phase/client.go
@@ -114,6 +114,8 @@ func (p *phase) executor(docFactory document.DocFactoryFunc,
 		WithClusterMap(cMap).
 		WithTempRoot(p.helper.WorkDir()).
 		WithClusterctlClient(cctlClient).
+		WithClusterName(p.apiObj.ClusterName).
+		SiteWide(p.apiObj.Config.SiteWideKubeconfig).
 		Build()
 
 	return executorFactory(
diff --git a/pkg/phase/command_test.go b/pkg/phase/command_test.go
index 2aaa58757..c6aae9cc3 100644
--- a/pkg/phase/command_test.go
+++ b/pkg/phase/command_test.go
@@ -437,6 +437,7 @@ func TestPlanRunCommand(t *testing.T) {
 		name        string
 		factory     config.Factory
 		expectedErr string
+		planID      ifc.ID
 	}{
 		{
 			name: "Error config factory",
@@ -456,7 +457,10 @@
 			expectedErr: "missing configuration: context with name 'does not exist'",
 		},
 		{
-			name: "Error phase by id",
+			name: "Error plan by id",
+			planID: ifc.ID{
+				Name: "doesn't exist",
+			},
 			factory: func() (*config.Config, error) {
 				conf := config.NewConfig()
 				conf.Manifests = map[string]*config.Manifest{
@@ -479,7 +483,7 @@
 				}
 				return conf, nil
 			},
-			expectedErr: `context "ephemeral-cluster" does not exist`,
+			expectedErr: `found no documents`,
 		},
 	}
 	for _, tc := range testCases {
@@ -490,6 +494,7 @@
 				GenericRunFlags: phase.GenericRunFlags{
 					DryRun: true,
 				},
+				PlanID: tt.planID,
 			},
 			Factory: tt.factory,
 		}
diff --git a/pkg/phase/executors/container.go b/pkg/phase/executors/container.go
index 3e8782121..3947bfd44 100644
--- a/pkg/phase/executors/container.go
+++ b/pkg/phase/executors/container.go
@@ -94,6 +94,7 @@ func (c *ContainerExecutor) Run(evtCh chan events.Event, opts ifc.RunOptions) {
 		cleanup, err := c.SetKubeConfig()
 		if err != nil {
 			handleError(evtCh, err)
+			return
 		}
 		defer cleanup()
 	}
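
Reviewer note (not part of the patch): the sketch below shows how the new SiteWide option is meant to combine with the existing builder methods. It is a minimal sketch only; helper, cMap and cctlClient stand in for the objects that pkg/phase/client.go already constructs, and phaseObj is a placeholder for the Phase API object.

    // Usage sketch, assuming the same setup that pkg/phase/client.go performs.
    kubeconf := kubeconfig.NewBuilder().
        WithBundle(helper.PhaseConfigBundle()).
        WithClusterMap(cMap).
        WithClusterctlClient(cctlClient).
        WithTempRoot(helper.WorkDir()).
        WithClusterName(phaseObj.ClusterName).        // honored only when SiteWide(false)
        SiteWide(phaseObj.Config.SiteWideKubeconfig). // true builds contexts for every cluster in the ClusterMap
        Build()

    // kubeconf.Write(...) or kubeconf.GetFile() then exposes either the single
    // cluster context or the aggregated site-wide kubeconfig to the executor.

With the pkg/cluster/command.go change above, GetKubeconfigCommand behaves consistently: when cmd.ClusterName is empty it requests the site-wide kubeconfig, otherwise it builds only the named cluster's context.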