Use site-wide kubeconfig only on demand
This patchset introduces a new field in the Phase.Config API object, SiteWideKubeconfig: if set to true, the phase will have access to the kubeconfigs of all clusters defined in the ClusterMap. By default, only the kubeconfig of the cluster specified in Phase.Metadata.ClusterName is available to the phase executor. This approach speeds up the deployment process because airshipctl will not have to look up multiple kubeconfigs when running each phase.

Closes: #547
Change-Id: Ic92027ba88d3ce8cb769c254968530037540b8fd
commit bbd9b018dd
parent 7fd779ae0b
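Concretely, a site-wide kubeconfig is a single file whose Contexts map carries one entry per cluster in the ClusterMap, instead of only the phase's own context. A minimal illustration using the client-go kubeconfig API (the context names below are placeholders, not taken from this patch):

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/clientcmd/api"
    )

    func main() {
        // api.NewConfig returns a Config with initialized (empty) maps.
        merged := api.NewConfig()
        // One context per cluster in the site; a single-cluster kubeconfig
        // would contain only one of these entries.
        merged.Contexts["ephemeral-cluster"] = api.NewContext()
        merged.Contexts["target-cluster"] = api.NewContext()
        fmt.Println(len(merged.Contexts)) // 2
    }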
@@ -126,6 +126,7 @@ metadata:
   name: clusterctl-move
   clusterName: target-cluster
 config:
+  siteWideKubeconfig: true
   executorRef:
     apiVersion: airshipit.org/v1alpha1
     kind: Clusterctl
@@ -32,6 +32,7 @@ type Phase struct {
 // phase runner object which should contain runner configuration
 type PhaseConfig struct {
 	ExecutorRef *corev1.ObjectReference `json:"executorRef"`
+	SiteWideKubeconfig bool `json:"siteWideKubeconfig,omitempty"`
 	DocumentEntryPoint string `json:"documentEntryPoint"`
 }
 
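Because the new field carries the omitempty JSON tag and Go's zero value for bool is false, existing phase documents that never mention siteWideKubeconfig keep the old single-cluster behavior. A standalone sketch of that default (the trimmed struct below is a copy made for illustration, not the patched type):

    package main

    import (
        "fmt"

        "sigs.k8s.io/yaml"
    )

    // Trimmed copy of PhaseConfig, reproduced here only to demonstrate decoding.
    type phaseConfig struct {
        SiteWideKubeconfig bool   `json:"siteWideKubeconfig,omitempty"`
        DocumentEntryPoint string `json:"documentEntryPoint"`
    }

    func main() {
        doc := []byte("documentEntryPoint: manifests/phases")
        var cfg phaseConfig
        if err := yaml.Unmarshal(doc, &cfg); err != nil {
            panic(err)
        }
        // The field was omitted, so the executor sees false and receives
        // only its own cluster's kubeconfig.
        fmt.Println(cfg.SiteWideKubeconfig) // false
    }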
@@ -83,12 +83,18 @@ func (cmd *GetKubeconfigCommand) RunE(cfgFactory config.Factory, writer io.Write
 		return err
 	}
 
+	var siteWide bool
+	if cmd.ClusterName == "" {
+		siteWide = true
+	}
+
 	kubeconf := kubeconfig.NewBuilder().
 		WithBundle(helper.PhaseConfigBundle()).
 		WithClusterctlClient(client).
 		WithClusterMap(cMap).
 		WithClusterName(cmd.ClusterName).
 		WithTempRoot(helper.WorkDir()).
+		SiteWide(siteWide).
 		Build()
 
 	return kubeconf.Write(writer)
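The var/if pair above boils down to a single boolean assignment; an equivalent one-liner, shown only to make the rule explicit (not part of the patch):

    // An empty ClusterName means the caller wants the site-wide kubeconfig.
    siteWide := cmd.ClusterName == ""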
@@ -41,6 +41,7 @@ func NewBuilder() *Builder {
 // Builder is an object that allows to build a kubeconfig based on various provided sources
 // such as path to kubeconfig, path to bundle that should contain kubeconfig and parent cluster
 type Builder struct {
+	siteWide    bool
 	clusterName string
 	root        string
@@ -88,11 +89,21 @@ func (b *Builder) WithFilesystem(fs fs.FileSystem) *Builder {
 	return b
 }
 
+// SiteWide allows to build kubeconfig for the entire site.
+// If set to true ClusterName will be ignored, since all clusters are requested.
+func (b *Builder) SiteWide(t bool) *Builder {
+	b.siteWide = t
+	return b
+}
+
 // Build site kubeconfig, ignores, but logs, errors that happen when building individual
 // kubeconfigs. We need this behavior because, some clusters may not yet be deployed
 // and their kubeconfig is inaccessible yet, but will be accessible at later phases
 // If builder can't build kubeconfig for specific cluster, its context will not be present
 // in final kubeconfig. User of kubeconfig, will receive error stating that context doesn't exist
+// To request site-wide kubeconfig use builder method SiteWide(true).
+// To request a single cluster kubeconfig use methods WithClusterName("my-cluster").SiteWide(false)
+// ClusterName is ignored if SiteWide(true) is used.
 func (b *Builder) Build() Interface {
 	return NewKubeConfig(b.build, InjectFileSystem(b.fs), InjectTempRoot(b.root))
 }
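Taken together, the builder now supports two request shapes. A usage sketch under assumed import paths (the airshipctl kubeconfig and clustermap packages shown in the hunks above; cMap, workDir, and the cluster name are placeholders supplied by the caller):

    package example

    import (
        "opendev.org/airship/airshipctl/pkg/cluster/clustermap"
        "opendev.org/airship/airshipctl/pkg/k8s/kubeconfig"
    )

    func buildBoth(cMap clustermap.ClusterMap, workDir string) (site, one kubeconfig.Interface) {
        // Site-wide: one context per cluster in the map; ClusterName is ignored.
        site = kubeconfig.NewBuilder().
            WithClusterMap(cMap).
            WithTempRoot(workDir).
            SiteWide(true).
            Build()
        // Single cluster: only the named cluster's context, pinned as current.
        one = kubeconfig.NewBuilder().
            WithClusterMap(cMap).
            WithClusterName("target-cluster").
            WithTempRoot(workDir).
            SiteWide(false).
            Build()
        return site, one
    }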
@@ -101,19 +112,19 @@ func (b *Builder) build() ([]byte, error) {
 	// Set current context to clustername if it was provided
 	var result *api.Config
 	var err error
-	var kubeContext string
-	if b.clusterName != "" {
+	if !b.siteWide {
+		var kubeContext string
 		kubeContext, result, err = b.buildOne(b.clusterName)
 		if err != nil {
 			return nil, err
 		}
+		b.siteKubeconf.CurrentContext = kubeContext
 	} else {
 		result, err = b.builtSiteKubeconf()
 		if err != nil {
 			return nil, err
 		}
 	}
-	b.siteKubeconf.CurrentContext = kubeContext
 	return clientcmd.Write(*result)
 }
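One consequence of the logged-and-ignored errors described in the Build comment above: a cluster whose kubeconfig could not be built simply has no context in the output, and consumers discover this lazily. A hypothetical consumer-side check (kubeconfigPath and the context name are placeholders):

    package main

    import (
        "fmt"
        "os"

        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        kubeconfigPath := os.Args[1] // placeholder: path to the file the builder wrote
        cfg, err := clientcmd.LoadFromFile(kubeconfigPath)
        if err != nil {
            panic(err)
        }
        if _, ok := cfg.Contexts["target-cluster"]; !ok {
            // Mirrors the failure mode described above: the context is
            // absent rather than the whole build failing.
            fmt.Println(`context "target-cluster" does not exist`)
        }
    }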
@@ -95,6 +95,7 @@ func TestBuilderClusterctl(t *testing.T) {
 		errString            string
 		requestedClusterName string
 		tempRoot             string
+		siteWide             bool
 
 		expectedContexts, expectedClusters, expectedAuthInfos []string
 		clusterMap                                            clustermap.ClusterMap
@@ -106,6 +107,7 @@ func TestBuilderClusterctl(t *testing.T) {
 			expectedContexts:  []string{parentClusterID},
 			expectedClusters:  []string{parentParentCluster},
 			expectedAuthInfos: []string{parentParentUser},
+			siteWide:          true,
 			clusterMap: clustermap.NewClusterMap(&v1alpha1.ClusterMap{
 				Map: map[string]*v1alpha1.Cluster{
 					childClusterID: {
@@ -134,6 +136,7 @@ func TestBuilderClusterctl(t *testing.T) {
 			expectedContexts:  []string{parentClusterID, parentParentClusterID},
 			expectedClusters:  []string{"dummycluster_ephemeral", parentParentCluster},
 			expectedAuthInfos: []string{"kubernetes-admin", parentParentUser},
+			siteWide:          true,
 			clusterMap: clustermap.NewClusterMap(&v1alpha1.ClusterMap{
 				Map: map[string]*v1alpha1.Cluster{
 					parentParentClusterID: {
@@ -165,6 +168,7 @@ func TestBuilderClusterctl(t *testing.T) {
 			expectedContexts:  []string{parentClusterID, childClusterID, parentParentClusterID},
 			expectedClusters:  []string{parentCluster, parentParentCluster, childCluster},
 			expectedAuthInfos: []string{parentUser, parentParentUser, childUser},
+			siteWide:          true,
 			clusterMap: clustermap.NewClusterMap(&v1alpha1.ClusterMap{
 				Map: map[string]*v1alpha1.Cluster{
 					childClusterID: {
@@ -265,6 +269,7 @@ func TestBuilderClusterctl(t *testing.T) {
 				WithTempRoot(tt.tempRoot).
 				WithClusterctlClient(tt.clusterctlClient).
 				WithFilesystem(tt.fs).
+				SiteWide(tt.siteWide).
 				Build()
 			require.NotNil(t, kube)
 			filePath, cleanup, err := kube.GetFile()
@@ -114,6 +114,8 @@ func (p *phase) executor(docFactory document.DocFactoryFunc,
 		WithClusterMap(cMap).
 		WithTempRoot(p.helper.WorkDir()).
 		WithClusterctlClient(cctlClient).
+		WithClusterName(p.apiObj.ClusterName).
+		SiteWide(p.apiObj.Config.SiteWideKubeconfig).
 		Build()
 
 	return executorFactory(
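The net effect of the two added builder calls: kubeconfig scope is now decided per phase document rather than globally, so only phases that opt in pay the cost of aggregating every cluster's kubeconfig. A paraphrase of the selection as a fragment (p stands in for the phase object above):

    // A phase opts in with siteWideKubeconfig: true in its config; every
    // other phase resolves only the cluster named in its own metadata.
    siteWide := p.apiObj.Config.SiteWideKubeconfig
    clusterName := p.apiObj.ClusterName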
@@ -437,6 +437,7 @@ func TestPlanRunCommand(t *testing.T) {
 		name        string
 		factory     config.Factory
 		expectedErr string
+		planID      ifc.ID
 	}{
 		{
 			name: "Error config factory",
@@ -456,7 +457,10 @@ func TestPlanRunCommand(t *testing.T) {
 			expectedErr: "missing configuration: context with name 'does not exist'",
 		},
 		{
-			name: "Error phase by id",
+			name: "Error plan by id",
+			planID: ifc.ID{
+				Name: "doesn't exist",
+			},
 			factory: func() (*config.Config, error) {
 				conf := config.NewConfig()
 				conf.Manifests = map[string]*config.Manifest{
@@ -479,7 +483,7 @@ func TestPlanRunCommand(t *testing.T) {
 				}
 				return conf, nil
 			},
-			expectedErr: `context "ephemeral-cluster" does not exist`,
+			expectedErr: `found no documents`,
 		},
 	}
 	for _, tc := range testCases {
@@ -490,6 +494,7 @@ func TestPlanRunCommand(t *testing.T) {
 				GenericRunFlags: phase.GenericRunFlags{
 					DryRun: true,
 				},
+				PlanID: tt.planID,
 			},
 			Factory: tt.factory,
 		}
|
@ -94,6 +94,7 @@ func (c *ContainerExecutor) Run(evtCh chan events.Event, opts ifc.RunOptions) {
|
||||
cleanup, err := c.SetKubeConfig()
|
||||
if err != nil {
|
||||
handleError(evtCh, err)
|
||||
return
|
||||
}
|
||||
defer cleanup()
|
||||
}
|
||||
|