diff --git a/phase/initialize_k0s.go b/phase/initialize_k0s.go
index 821891ff..34ee2bf4 100644
--- a/phase/initialize_k0s.go
+++ b/phase/initialize_k0s.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
@@ -115,11 +116,22 @@ func (p *InitializeK0s) Run() error {
 			return err
 		}
 
-		log.Infof("%s: waiting for kubernetes api to respond", h)
-		if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.KubeAPIReadyFunc(h, p.Config)); err != nil {
+		log.Infof("%s: wait for kubernetes to reach ready state", h)
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
+		err := retry.Context(ctx, func(_ context.Context) error {
+			out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz'"), exec.Sudo(h))
+			if out != "ok" {
+				return fmt.Errorf("kubernetes api /readyz responded with %q", out)
+			}
 			return err
+		})
+		if err != nil {
+			return fmt.Errorf("kubernetes not ready: %w", err)
 		}
 
+		h.Metadata.Ready = true
+
 		return nil
 	})
 	if err != nil {
diff --git a/phase/install_controllers.go b/phase/install_controllers.go
index dd91b631..fd11c0ed 100644
--- a/phase/install_controllers.go
+++ b/phase/install_controllers.go
@@ -33,7 +33,6 @@ func (p *InstallControllers) Prepare(config *v1beta1.Cluster) error {
 	p.hosts = p.Config.Spec.Hosts.Controllers().Filter(func(h *cluster.Host) bool {
 		return !h.Reset && !h.Metadata.NeedsUpgrade && (h != p.leader && h.Metadata.K0sRunningVersion == nil)
 	})
-
 	return nil
 }
 
@@ -65,13 +64,13 @@ func (p *InstallControllers) CleanUp() {
 
 func (p *InstallControllers) After() error {
 	for i, h := range p.hosts {
-		if h.Metadata.K0sJoinTokenID == "" {
+		if h.Metadata.K0sTokenData.Token == "" {
 			continue
 		}
-		h.Metadata.K0sJoinToken = ""
+		h.Metadata.K0sTokenData.Token = ""
 		err := p.Wet(p.leader, fmt.Sprintf("invalidate k0s join token for controller %s", h), func() error {
 			log.Debugf("%s: invalidating join token for controller %d", p.leader, i+1)
-			return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sJoinTokenID), exec.Sudo(p.leader))
+			return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sTokenData.ID), exec.Sudo(p.leader))
 		})
 		if err != nil {
 			log.Warnf("%s: failed to invalidate worker join token: %v", p.leader, err)
@@ -88,33 +87,10 @@ func (p *InstallControllers) After() error {
 
 // Run the phase
 func (p *InstallControllers) Run() error {
-	url := p.Config.Spec.InternalKubeAPIURL()
-	healthz := fmt.Sprintf("%s/healthz", url)
-
-	err := p.parallelDo(p.hosts, func(h *cluster.Host) error {
-		if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader {
-			log.Infof("%s: validating api connection to %s", h, url)
-			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-			defer cancel()
-			if err := retry.Context(ctx, node.HTTPStatusFunc(h, healthz, 200, 401)); err != nil {
-				return fmt.Errorf("failed to connect from controller to kubernetes api at %s - check networking", url)
-			}
-		} else {
-			log.Warnf("%s: dry-run: skipping api connection validation to %s because cluster is not running", h, url)
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
 	for _, h := range p.hosts {
-		var token string
-		var tokenID string
-
 		if p.IsWet() {
-			log.Infof("%s: generating token", p.leader)
-			token, err = p.Config.Spec.K0s.GenerateToken(
+			log.Infof("%s: generate join token for %s", p.leader, h)
+			token, err := p.Config.Spec.K0s.GenerateToken(
 				p.leader,
 				"controller",
 				time.Duration(10)*time.Minute,
@@ -122,20 +98,40 @@ func (p *InstallControllers) Run() error {
 			if err != nil {
 				return err
 			}
-			h.Metadata.K0sJoinToken = token
-			tokenID, err = cluster.TokenID(token)
+			tokenData, err := cluster.ParseToken(token)
 			if err != nil {
 				return err
 			}
-			log.Debugf("%s: join token ID: %s", p.leader, tokenID)
-			h.Metadata.K0sJoinTokenID = tokenID
+			h.Metadata.K0sTokenData = tokenData
 		} else {
 			p.DryMsgf(p.leader, "generate a k0s join token for controller %s", h)
-			h.Metadata.K0sJoinTokenID = "dry-run"
+			h.Metadata.K0sTokenData.ID = "dry-run"
+			h.Metadata.K0sTokenData.URL = p.Config.Spec.KubeAPIURL()
 		}
-
-		log.Infof("%s: writing join token", h)
-		if err := h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), h.Metadata.K0sJoinToken, "0640"); err != nil {
+	}
+
+	err := p.parallelDo(p.hosts, func(h *cluster.Host) error {
+		if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader {
+			log.Infof("%s: validating api connection to %s", h, h.Metadata.K0sTokenData.URL)
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			defer cancel()
+			if err := retry.Context(ctx, node.HTTPStatusFunc(h, h.Metadata.K0sTokenData.URL, 200, 401, 404)); err != nil {
+				return fmt.Errorf("failed to connect from controller to kubernetes api - check networking: %w", err)
+			}
+		} else {
+			log.Warnf("%s: dry-run: skipping api connection validation because cluster is not actually running", h)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	return p.parallelDo(p.hosts, func(h *cluster.Host) error {
+		tokenPath := h.K0sJoinTokenPath()
+		log.Infof("%s: writing join token to %s", h, tokenPath)
+		err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", tokenPath), func() error {
+			return h.Configurer.WriteFile(h, tokenPath, h.Metadata.K0sTokenData.Token, "0600")
+		})
+		if err != nil {
 			return err
 		}
@@ -180,17 +176,22 @@ func (p *InstallControllers) Run() error {
 				return err
 			}
 
-			if err := p.waitJoined(h); err != nil {
-				return err
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			defer cancel()
+			err := retry.Context(ctx, func(_ context.Context) error {
+				out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz?verbose=true'"), exec.Sudo(h))
+				if err != nil {
+					return fmt.Errorf("readiness endpoint reports %q: %w", out, err)
+				}
+				return nil
+			})
+			if err != nil {
+				return fmt.Errorf("controller did not reach ready state: %w", err)
 			}
-		}
 
-		h.Metadata.Ready = true
-	}
-
-	return nil
-}
+			h.Metadata.Ready = true
+		}
 
-func (p *InstallControllers) waitJoined(h *cluster.Host) error {
-	log.Infof("%s: waiting for kubernetes api to respond", h)
-	return retry.Timeout(context.TODO(), retry.DefaultTimeout, node.KubeAPIReadyFunc(h, p.Config))
+		return nil
+	})
 }
diff --git a/phase/install_workers.go b/phase/install_workers.go
index ebdc1fbe..8f4b2b1d 100644
--- a/phase/install_workers.go
+++ b/phase/install_workers.go
@@ -66,7 +66,7 @@ func (p *InstallWorkers) CleanUp() {
 func (p *InstallWorkers) After() error {
 	if NoWait {
 		for _, h := range p.hosts {
-			if h.Metadata.K0sJoinToken != "" {
+			if h.Metadata.K0sTokenData.Token != "" {
 				log.Warnf("%s: --no-wait given, created join tokens will remain valid for 10 minutes", p.leader)
 				break
 			}
@@ -74,19 +74,18 @@ func (p *InstallWorkers) After() error {
 		return nil
 	}
 	for i, h := range p.hosts {
-		if h.Metadata.K0sJoinTokenID == "" {
+		h.Metadata.K0sTokenData.Token = ""
+		if h.Metadata.K0sTokenData.ID == "" {
 			continue
 		}
-		h.Metadata.K0sJoinToken = ""
 		err := p.Wet(p.leader, fmt.Sprintf("invalidate k0s join token for worker %s", h), func() error {
 			log.Debugf("%s: invalidating join token for worker %d", p.leader, i+1)
-			return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sJoinTokenID), exec.Sudo(p.leader))
+			return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sTokenData.ID), exec.Sudo(p.leader))
 		})
 		if err != nil {
 			log.Warnf("%s: failed to invalidate worker join token: %v", p.leader, err)
 		}
 		_ = p.Wet(h, "overwrite k0s join token file", func() error {
-
 			if err := h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), "# overwritten by k0sctl after join\n", "0600"); err != nil {
 				log.Warnf("%s: failed to overwrite the join token file at %s", h, h.K0sJoinTokenPath())
 			}
@@ -98,30 +97,9 @@ func (p *InstallWorkers) After() error {
 
 // Run the phase
 func (p *InstallWorkers) Run() error {
-	url := p.Config.Spec.InternalKubeAPIURL()
-	healthz := fmt.Sprintf("%s/healthz", url)
-
-	err := p.parallelDo(p.hosts, func(h *cluster.Host) error {
-		if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader {
-			log.Infof("%s: validating api connection to %s", h, url)
-			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-			defer cancel()
-			if err := retry.Context(ctx, node.HTTPStatusFunc(h, healthz, 200, 401)); err != nil {
-				return fmt.Errorf("failed to connect from worker to kubernetes api at %s - check networking", url)
-			}
-		} else {
-			log.Warnf("%s: dry-run: skipping api connection validation to %s because cluster is not running", h, url)
-		}
-		return nil
-	})
-
-	if err != nil {
-		return err
-	}
-
 	for i, h := range p.hosts {
 		log.Infof("%s: generating a join token for worker %d", p.leader, i+1)
-		err = p.Wet(p.leader, fmt.Sprintf("generate a k0s join token for worker %s", h), func() error {
+		err := p.Wet(p.leader, fmt.Sprintf("generate a k0s join token for worker %s", h), func() error {
 			t, err := p.Config.Spec.K0s.GenerateToken(
 				p.leader,
 				"worker",
@@ -130,18 +108,18 @@ func (p *InstallWorkers) Run() error {
 			if err != nil {
 				return err
 			}
-			h.Metadata.K0sJoinToken = t
-			ti, err := cluster.TokenID(t)
+			td, err := cluster.ParseToken(t)
 			if err != nil {
-				return err
+				return fmt.Errorf("parse k0s token: %w", err)
 			}
-			h.Metadata.K0sJoinTokenID = ti
-			log.Debugf("%s: join token ID: %s", h, ti)
+			h.Metadata.K0sTokenData = td
+
 			return nil
 		}, func() error {
-			h.Metadata.K0sJoinTokenID = "dry-run"
+			h.Metadata.K0sTokenData.ID = "dry-run"
+			h.Metadata.K0sTokenData.URL = p.Config.Spec.KubeAPIURL()
 			return nil
 		})
 		if err != nil {
@@ -149,10 +127,35 @@ func (p *InstallWorkers) Run() error {
 		}
 	}
 
+	err := p.parallelDo(p.hosts, func(h *cluster.Host) error {
+		if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader {
+			log.Infof("%s: validating api connection to %s using join token", h, h.Metadata.K0sTokenData.URL)
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			defer cancel()
+			err := retry.Context(ctx, func(_ context.Context) error {
+				err := h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/version' --kubeconfig=/dev/stdin"), exec.Sudo(h), exec.Stdin(string(h.Metadata.K0sTokenData.Kubeconfig)))
+				if err != nil {
+					return fmt.Errorf("failed to connect to kubernetes api using the join token - check networking: %w", err)
+				}
+				return nil
+			})
+			if err != nil {
+				return fmt.Errorf("connectivity check failed: %w", err)
+			}
+		} else {
+			log.Warnf("%s: dry-run: skipping api connection validation because cluster is not actually running", h)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
 	return p.parallelDo(p.hosts, func(h *cluster.Host) error {
-		err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", h.K0sJoinTokenPath()), func() error {
-			log.Infof("%s: writing join token", h)
-			return h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), h.Metadata.K0sJoinToken, "0640")
+		tokenPath := h.K0sJoinTokenPath()
+		err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", tokenPath), func() error {
+			log.Infof("%s: writing join token to %s", h, tokenPath)
+			return h.Configurer.WriteFile(h, tokenPath, h.Metadata.K0sTokenData.Token, "0600")
 		})
 		if err != nil {
 			return err
diff --git a/phase/reinstall.go b/phase/reinstall.go
index fe1aa739..59b32458 100644
--- a/phase/reinstall.go
+++ b/phase/reinstall.go
@@ -107,9 +107,5 @@ func (p *Reinstall) reinstall(h *cluster.Host) error {
 		return fmt.Errorf("restart after reinstall: %w", err)
 	}
 
-	if h != p.Config.Spec.K0sLeader() {
-		return nil
-	}
-
 	return nil
 }
diff --git a/phase/upgrade_controllers.go b/phase/upgrade_controllers.go
index 6cd7e69f..b1bc0ab4 100644
--- a/phase/upgrade_controllers.go
+++ b/phase/upgrade_controllers.go
@@ -133,8 +133,17 @@ func (p *UpgradeControllers) Run() error {
 		}
 
 		if p.IsWet() {
-			if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.KubeAPIReadyFunc(h, p.Config)); err != nil {
-				return fmt.Errorf("kube api did not become ready: %w", err)
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			defer cancel()
+			err := retry.Context(ctx, func(_ context.Context) error {
+				out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz?verbose=true'"), exec.Sudo(h))
+				if err != nil {
+					return fmt.Errorf("readiness endpoint reports %q: %w", out, err)
+				}
+				return nil
+			})
+			if err != nil {
+				return fmt.Errorf("controller did not reach ready state: %w", err)
 			}
 		}
 
@@ -147,13 +156,5 @@ func (p *UpgradeControllers) Run() error {
 		return nil
 	}
 
-	log.Infof("%s: waiting for the scheduler to become ready", leader)
-	if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.ScheduledEventsAfterFunc(leader, time.Now())); err != nil {
-		if !Force {
-			return fmt.Errorf("failed to observe scheduling events after api start-up, you can ignore this check by using --force: %w", err)
-		}
-		log.Warnf("%s: failed to observe scheduling events after api start-up: %s", leader, err)
-	}
-
 	return nil
 }
diff --git a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go
index 8efa709c..057d76cf 100644
--- a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go
+++ b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go
@@ -179,8 +179,7 @@ type HostMetadata struct {
 	K0sInstalled      bool
 	K0sExistingConfig string
 	K0sNewConfig      string
-	K0sJoinToken      string
-	K0sJoinTokenID    string
+	K0sTokenData      TokenData
 	K0sStatusArgs     Flags
 	Arch              string
 	IsK0sLeader       bool
diff --git a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go
index 453bd80b..8c924372 100644
--- a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go
+++ b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go
@@ -174,50 +174,78 @@ func (k *K0s) GetClusterID(h *Host) (string, error) {
 	return h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get -n kube-system namespace kube-system -o template={{.metadata.uid}}"), exec.Sudo(h))
 }
 
-// TokenID returns a token id from a token string that can be used to invalidate the token
-func TokenID(s string) (string, error) {
+// TokenData is data collected from a decoded k0s token
+type TokenData struct {
+	ID         string
+	URL        string
+	Token      string
+	Kubeconfig []byte
+}
+
+// ParseToken returns TokenData for a token string
+func ParseToken(s string) (TokenData, error) {
+	data := TokenData{Token: s}
+
 	b64 := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
 	_, err := base64.StdEncoding.Decode(b64, []byte(s))
 	if err != nil {
-		return "", fmt.Errorf("failed to decode token: %w", err)
+		return data, fmt.Errorf("failed to decode token: %w", err)
 	}
 
 	sr := strings.NewReader(s)
 	b64r := base64.NewDecoder(base64.StdEncoding, sr)
 	gzr, err := gzip.NewReader(b64r)
 	if err != nil {
-		return "", fmt.Errorf("failed to create a reader for token: %w", err)
+		return data, fmt.Errorf("failed to create a reader for token: %w", err)
 	}
 	defer gzr.Close()
 
 	c, err := io.ReadAll(gzr)
 	if err != nil {
-		return "", fmt.Errorf("failed to uncompress token: %w", err)
+		return data, fmt.Errorf("failed to uncompress token: %w", err)
 	}
 
+	data.Kubeconfig = c
+
 	cfg := dig.Mapping{}
 	err = yaml.Unmarshal(c, &cfg)
 	if err != nil {
-		return "", fmt.Errorf("failed to unmarshal token: %w", err)
+		return data, fmt.Errorf("failed to unmarshal token: %w", err)
 	}
 
 	users, ok := cfg.Dig("users").([]interface{})
 	if !ok || len(users) < 1 {
-		return "", fmt.Errorf("failed to find users in token")
+		return data, fmt.Errorf("failed to find users in token")
 	}
 
 	user, ok := users[0].(dig.Mapping)
 	if !ok {
-		return "", fmt.Errorf("failed to find user in token")
+		return data, fmt.Errorf("failed to find user in token")
 	}
 
 	token, ok := user.Dig("user", "token").(string)
 	if !ok {
-		return "", fmt.Errorf("failed to find user token in token")
+		return data, fmt.Errorf("failed to find user token in token")
 	}
 
 	idx := strings.IndexRune(token, '.')
 	if idx < 0 {
-		return "", fmt.Errorf("failed to find separator in token")
+		return data, fmt.Errorf("failed to find separator in token")
+	}
+
+	data.ID = token[0:idx]
+
+	clusters, ok := cfg.Dig("clusters").([]interface{})
+	if !ok || len(clusters) < 1 {
+		return data, fmt.Errorf("failed to find clusters in token")
+	}
+
+	cluster, ok := clusters[0].(dig.Mapping)
+	if !ok {
+		return data, fmt.Errorf("failed to find cluster in token")
 	}
 
-	return token[0:idx], nil
+	url := cluster.DigString("cluster", "server")
+	if url == "" {
+		return data, fmt.Errorf("failed to find cluster url in token")
+	}
+	data.URL = url
+
+	return data, nil
 }
diff --git a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s_test.go b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s_test.go
index 5808f35c..ed4b2af6 100644
--- a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s_test.go
+++ b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s_test.go
@@ -9,12 +9,13 @@ import (
 	"gopkg.in/yaml.v2"
 )
 
-func TestTokenID(t *testing.T) {
+func TestParseToken(t *testing.T) {
 	token := "H4sIAAAAAAAC/2xVXY/iOBZ9r1/BH6geO4GeAWkfKiEmGGLKjn1N/BbidAFOgjuk+Frtf18V3SPtSvN2fc/ROdaVfc9L6Q9Q9+fDqZuNLvilaj7PQ92fZy+vo9/17GU0Go3OdX+p+9loPwz+PPvjD/xn8A3/+Q19C2bfx+Pwyanqfjj8OFTlUL+Wn8P+1B+G+6sth3I2WudoWOc4FspSeYjmAqjKlaEcESWeGBpih2muRCQSNucavEEkzBWNDGoApDV1t19W6uNSbJsyRzS1mPc7TVdiDknV0qNFQmjl1zvsaZmao3RECHVd8YZEFtlEgGW8ISmXBIQiY6km+wwbr5v9yoIvVHs71pL81CAio0yYpQ2DJMFSe1InWHEZMZHQveiqa/3hf2Eg+v/FpKJdnZifHCA2aKK5IwwSsbVzYnZgJkWLdUZ8IbfCZA5CE1hSKhxliZ2rkKRxw2hxZIlSEHMgwFWCckUTi8iTmyNy+ZqJUtktO2Y9C8Wpuk8DsTUT7ehnjt9uBTQ0T7yDB9nyw+A4Tlb5wt2NbHgB5LSJpwvR2Ytpp6oKm/lG2ZvUZoDERjs9vubzamxJcZEaX6vDwLKWFeUWIoOqi7z/hWx7c2q77DfcJ5BkQQFAyxYw6xix8BZILAar8Ha3GM7l420ssZ/UZE/rrQtUytSus4ssXGKOissKkdgiOskw1fowPKRqxnFLPy0hj1pPvV6IC0t4AOhGgZDlZjFdGYdXLBVZBozKrUccW6Ra2mQNm5sF9bsHXRVqv8lB7E3XmNyZjKHTSm7Jp82HyxoJDom56HY8zgFa6/xCoOtdIL8qF8t71rDUYBZAI247ZHnpiluZn+9WNu8GsvEusFuOpvNS20J/+GUN1aN2U2kfpFQouVaBj3PsW6VgXwXVeJfSd4DlLdN2JR+gqoAed8hEBcB7OXc4J3Dl2jLuSCQCL0pHo9jhiCU2ygCcSC3hh2moFEQWNTFvfaQS2snGLJXDMdfFWCiquBKRUh8XqZZXgZIbaJEYTLbcUQnBtLDkY8VbWuzmMAhH97ka1tWWKN1lvQFLICEb3tq+0vu+VNXEPqKvN/gQjkQSsejLv3BsUjTRNk8mpNbMF46d1Ju/SURPRWihBOJtS5eVwp9ZQhvIB8+UCo1ksSXg7IPcS2wNc35cphHKVKNE4rebbSR2ODpxd5uYAA/VfH+JW9Jt1GRv231eJ9mj1uao2+Z7pRrB2ulP4+xF5kOxDtUF3PLKJXmXCb4XgQmzuRFVmmGZnCaA/nrIBdCvuRduvMpVs8lcNi7UcDVhRG0A93JLYpP66yqYgJoLoZumlQ9x2xFD8znIkux77oacdWqSdZSVyjCWnkKmb+9WDz/Nh5+b9O1SIDIUHaC6bW5V4qFsYSnSRmUIloXCuV1MaE7IsQAxBkR5ndqASRZtFDVGm7VszHGzwEfhJqzUzTV2tMi1iG369dfsmjVvkxKKfhMPgjsccEUPLMmCTcJCsTDrfGHGdXsOJcBpo4ezQd7sQroC3EQrdLtVD+Z16lZCY58rEO8SrX7vZiId/+AIckiaRa5YBIl67uU1P/3rZTTqyraejRw6v1Snbqhvw6+U+FX/Som/I+PJ+mp8np+nz13d1MPr7nQazkNf+v9X++z7uhte/1Z6Nt2hs7NRfOp+HD5efF//qPu6q+rzbPTv/7x8qT7Nf4v8g/zT+HmF4eTqbjY6fD+E949vVzeZ7vHx8mM6uPCATi//DQAA//+MVAsnAgcAAA=="
-	id, err := TokenID(token)
+	tokendata, err := ParseToken(token)
 	require.NoError(t, err)
-	require.Equal(t, "i6i3yg", id)
+	require.Equal(t, "i6i3yg", tokendata.ID)
+	require.Equal(t, "https://172.17.0.2:6443", tokendata.URL)
 }
 
 func TestUnmarshal(t *testing.T) {
diff --git a/pkg/node/statusfunc.go b/pkg/node/statusfunc.go
index a280e618..8db17102 100644
--- a/pkg/node/statusfunc.go
+++ b/pkg/node/statusfunc.go
@@ -7,7 +7,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
 	"github.com/k0sproject/rig/exec"
 
@@ -137,10 +136,3 @@ func ServiceStoppedFunc(h *cluster.Host, service string) retryFunc {
 		return nil
 	}
 }
-
-// KubeAPIReadyFunc returns a function that returns an error unless the host's local kube api responds to /version
-func KubeAPIReadyFunc(h *cluster.Host, config *v1beta1.Cluster) retryFunc {
-	// If the anon-auth is disabled on kube api the version endpoint will give 401
-	// thus we need to accept both 200 and 401 as valid statuses when checking kube api
-	return HTTPStatusFunc(h, fmt.Sprintf("%s/version", config.Spec.NodeInternalKubeAPIURL(h)), 200, 401)
-}
diff --git a/smoke-test/k0sctl-controller-swap.yaml b/smoke-test/k0sctl-controller-swap.yaml
index 5930468e..7372938d 100644
--- a/smoke-test/k0sctl-controller-swap.yaml
+++ b/smoke-test/k0sctl-controller-swap.yaml
@@ -14,7 +14,7 @@ spec:
         address: "127.0.0.1"
         port: 9023
         keyPath: ./id_rsa_k0s
-    - role: controller
+    - role: controller+worker
       uploadBinary: true
       ssh:
         address: "127.0.0.1"