From cfcd1cd439d8f1ba0d0598edd400993ba0efa981 Mon Sep 17 00:00:00 2001
From: h3nryc0ding
Date: Sat, 12 Apr 2025 18:20:01 +0200
Subject: [PATCH 1/3] refactor: `flux check` to return non-zero exit code on
failure
Signed-off-by: h3nryc0ding
---
cmd/flux/check.go | 321 ++++++++++++++++++++++++++++++-------------
cmd/flux/install.go | 2 +-
pkg/status/status.go | 19 ++-
3 files changed, 237 insertions(+), 105 deletions(-)
diff --git a/cmd/flux/check.go b/cmd/flux/check.go
index 0ac0859b..3d19ca13 100644
--- a/cmd/flux/check.go
+++ b/cmd/flux/check.go
@@ -18,8 +18,8 @@ package main
import (
"context"
+ "errors"
"fmt"
- "os"
"time"
"github.com/Masterminds/semver/v3"
@@ -59,6 +59,28 @@ type checkFlags struct {
pollInterval time.Duration
}
+type checkResult struct {
+ Title string
+ Entries []checkEntry
+}
+
+type checkEntry struct {
+ Name string
+ Failed bool
+}
+
+func (cr *checkResult) Failed() bool {
+ if cr == nil {
+ return false
+ }
+ for _, entry := range cr.Entries {
+ if entry.Failed {
+ return true
+ }
+ }
+ return false
+}
+
var kubernetesConstraints = []string{
">=1.30.0-0",
}
@@ -77,187 +99,298 @@ func init() {
rootCmd.AddCommand(checkCmd)
}
-func runCheckCmd(cmd *cobra.Command, args []string) error {
- logger.Actionf("checking prerequisites")
- checkFailed := false
-
- fluxCheck()
-
+func runCheckCmd(_ *cobra.Command, _ []string) error {
ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout)
defer cancel()
cfg, err := utils.KubeConfig(kubeconfigArgs, kubeclientOptions)
if err != nil {
- return fmt.Errorf("Kubernetes client initialization failed: %s", err.Error())
+ return fmt.Errorf("kubernetes client initialization failed: %w", err)
}
kubeClient, err := client.New(cfg, client.Options{Scheme: utils.NewScheme()})
if err != nil {
- return err
+ return fmt.Errorf("error creating kubernetes client: %w", err)
}
- if !kubernetesCheck(cfg, kubernetesConstraints) {
- checkFailed = true
+ if !runPreChecks(ctx, cfg) {
+ return errors.New("pre-installation checks failed")
}
if checkArgs.pre {
- if checkFailed {
- os.Exit(1)
- }
- logger.Successf("prerequisites checks passed")
+ logger.Actionf("All pre-installation checks passed")
return nil
}
- logger.Actionf("checking version in cluster")
- if !fluxClusterVersionCheck(ctx, kubeClient) {
- checkFailed = true
+ if !runChecks(ctx, kubeClient) {
+ return errors.New("checks failed")
}
- logger.Actionf("checking controllers")
- if !componentsCheck(ctx, kubeClient) {
- checkFailed = true
+ logger.Actionf("All checks passed")
+ return nil
+}
+
+func runPreChecks(_ context.Context, kConfig *rest.Config) bool {
+ checks := []func() checkResult{
+ fluxCheck,
+ func() checkResult {
+ return kubernetesCheck(kConfig, kubernetesConstraints)
+ },
}
- logger.Actionf("checking crds")
- if !crdsCheck(ctx, kubeClient) {
- checkFailed = true
+ return runGenericChecks(checks)
+}
+
+func runChecks(ctx context.Context, kClient client.Client) bool {
+ checks := []func() checkResult{
+ func() checkResult { return fluxClusterVersionCheck(ctx, kClient) },
+ func() checkResult { return controllersCheck(ctx, kClient) },
+ func() checkResult { return crdsCheck(ctx, kClient) },
}
- if checkFailed {
- logger.Failuref("check failed")
- os.Exit(1)
+ return runGenericChecks(checks)
+}
+
+func runGenericChecks(checks []func() checkResult) bool {
+ success := true
+ for _, check := range checks {
+ result := check()
+ logCheckResult(result)
+ if result.Failed() {
+ success = false
+ }
}
+ return success
+}
- logger.Successf("all checks passed")
- return nil
+func logCheckResult(res checkResult) {
+ logger.Actionf("Checking %s", res.Title)
+ for _, entry := range res.Entries {
+ if entry.Failed {
+ logger.Failuref("%s", entry.Name)
+ } else {
+ logger.Successf("%s", entry.Name)
+ }
+ }
}
-func fluxCheck() {
+func fluxCheck() checkResult {
+ res := checkResult{Title: "flux pre-requisites"}
+
curSv, err := version.ParseVersion(VERSION)
if err != nil {
- return
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error parsing current version: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
+
// Exclude development builds.
if curSv.Prerelease() != "" {
- return
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("flux %s is a development build", curSv),
+ })
+ return res
}
+
latest, err := install.GetLatestVersion()
if err != nil {
- return
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error getting latest version: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
+
latestSv, err := version.ParseVersion(latest)
if err != nil {
- return
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error parsing latest version: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
+
if latestSv.GreaterThan(curSv) {
- logger.Failuref("flux %s <%s (new CLI version is available, please upgrade)", curSv, latestSv)
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("flux %s <%s (new CLI version is available, please upgrade)", curSv, latestSv),
+ Failed: true,
+ })
+ } else {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("flux %s >=%s (latest CLI version)", curSv, latestSv),
+ })
}
+
+ return res
}
-func kubernetesCheck(cfg *rest.Config, constraints []string) bool {
+func kubernetesCheck(cfg *rest.Config, constraints []string) checkResult {
+ res := checkResult{Title: "kubernetes pre-requisites"}
+
clientSet, err := kubernetes.NewForConfig(cfg)
if err != nil {
- logger.Failuref("Kubernetes client initialization failed: %s", err.Error())
- return false
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error creating kubernetes client: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
kv, err := clientSet.Discovery().ServerVersion()
if err != nil {
- logger.Failuref("Kubernetes API call failed: %s", err.Error())
- return false
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error getting kubernetes version: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
v, err := version.ParseVersion(kv.String())
if err != nil {
- logger.Failuref("Kubernetes version can't be determined")
- return false
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error parsing kubernetes version: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
- var valid bool
- var vrange string
for _, constraint := range constraints {
c, _ := semver.NewConstraint(constraint)
- if c.Check(v) {
- valid = true
- vrange = constraint
- break
+ entry := checkEntry{
+ Name: fmt.Sprintf("kubernetes %s%s", v.String(), constraint),
}
+ if !c.Check(v) {
+ entry.Failed = true
+ }
+ res.Entries = append(res.Entries, entry)
}
- if !valid {
- logger.Failuref("Kubernetes version %s does not match %s", v.Original(), constraints[0])
- return false
- }
-
- logger.Successf("Kubernetes %s %s", v.String(), vrange)
- return true
+ return res
}
-func componentsCheck(ctx context.Context, kubeClient client.Client) bool {
+func controllersCheck(ctx context.Context, kubeClient client.Client) checkResult {
+ res := checkResult{Title: "flux controllers"}
+
statusChecker, err := status.NewStatusCheckerWithClient(kubeClient, checkArgs.pollInterval, rootArgs.timeout, logger)
if err != nil {
- return false
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error creating status checker: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
- ok := true
selector := client.MatchingLabels{manifestgen.PartOfLabelKey: manifestgen.PartOfLabelValue}
var list v1.DeploymentList
ns := *kubeconfigArgs.Namespace
- if err := kubeClient.List(ctx, &list, client.InNamespace(ns), selector); err == nil {
- if len(list.Items) == 0 {
- logger.Failuref("no controllers found in the '%s' namespace with the label selector '%s=%s'",
- ns, manifestgen.PartOfLabelKey, manifestgen.PartOfLabelValue)
- return false
+
+ if err := kubeClient.List(ctx, &list, client.InNamespace(ns), selector); err != nil {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error listing deployments: %s", err.Error()),
+ Failed: true,
+ })
+ return res
+ }
+
+ if len(list.Items) == 0 {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("no controllers found in the '%s' namespace with the label selector '%s=%s'",
+ ns, manifestgen.PartOfLabelKey, manifestgen.PartOfLabelValue),
+ Failed: true,
+ })
+ return res
+ }
+
+ for _, d := range list.Items {
+ ref, err := buildComponentObjectRefs(d.Name)
+ if err != nil {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error building component object refs for %s: %s", d.Name, err.Error()),
+ Failed: true,
+ })
+ continue
}
- for _, d := range list.Items {
- if ref, err := buildComponentObjectRefs(d.Name); err == nil {
- if err := statusChecker.Assess(ref...); err != nil {
- ok = false
- }
- }
+ if err := statusChecker.Assess(ref...); err != nil {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error checking status of %s: %s", d.Name, err.Error()),
+ Failed: true,
+ })
+ } else {
for _, c := range d.Spec.Template.Spec.Containers {
- logger.Actionf(c.Image)
+ res.Entries = append(res.Entries, checkEntry{
+ Name: c.Image,
+ })
}
}
}
- return ok
+
+ return res
}
-func crdsCheck(ctx context.Context, kubeClient client.Client) bool {
- ok := true
+func crdsCheck(ctx context.Context, kubeClient client.Client) checkResult {
+ res := checkResult{Title: "flux crds"}
+
selector := client.MatchingLabels{manifestgen.PartOfLabelKey: manifestgen.PartOfLabelValue}
var list apiextensionsv1.CustomResourceDefinitionList
- if err := kubeClient.List(ctx, &list, client.InNamespace(*kubeconfigArgs.Namespace), selector); err == nil {
- if len(list.Items) == 0 {
- logger.Failuref("no crds found with the label selector '%s=%s'",
- manifestgen.PartOfLabelKey, manifestgen.PartOfLabelValue)
- return false
- }
- for _, crd := range list.Items {
- versions := crd.Status.StoredVersions
- if len(versions) > 0 {
- logger.Successf(crd.Name + "/" + versions[len(versions)-1])
- } else {
- ok = false
- logger.Failuref("no stored versions for %s", crd.Name)
- }
+ if err := kubeClient.List(ctx, &list, client.InNamespace(*kubeconfigArgs.Namespace), selector); err != nil {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error listing CRDs: %s", err.Error()),
+ Failed: true,
+ })
+ return res
+ }
+
+ if len(list.Items) == 0 {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("no crds found with the label selector '%s=%s'",
+ manifestgen.PartOfLabelKey, manifestgen.PartOfLabelValue),
+ Failed: true,
+ })
+ return res
+ }
+
+ for _, crd := range list.Items {
+ versions := crd.Status.StoredVersions
+ if len(versions) > 0 {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("%s/%s", crd.Name, versions[len(versions)-1]),
+ })
+ } else {
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("no stored versions for %s", crd.Name),
+ Failed: true,
+ })
}
}
- return ok
+
+ return res
}
-func fluxClusterVersionCheck(ctx context.Context, kubeClient client.Client) bool {
+func fluxClusterVersionCheck(ctx context.Context, kubeClient client.Client) checkResult {
+ res := checkResult{Title: "flux installation status"}
+
clusterInfo, err := getFluxClusterInfo(ctx, kubeClient)
if err != nil {
- logger.Failuref("checking failed: %s", err.Error())
- return false
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("error getting cluster info: %s", err.Error()),
+ Failed: true,
+ })
+ return res
}
if clusterInfo.distribution() != "" {
- logger.Successf("distribution: %s", clusterInfo.distribution())
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("distribution: %s", clusterInfo.distribution()),
+ })
}
- logger.Successf("bootstrapped: %t", clusterInfo.bootstrapped)
- return true
+
+ res.Entries = append(res.Entries, checkEntry{
+ Name: fmt.Sprintf("bootstrapped: %t", clusterInfo.bootstrapped),
+ })
+
+ return res
}
diff --git a/cmd/flux/install.go b/cmd/flux/install.go
index 390f8616..bc5bc00d 100644
--- a/cmd/flux/install.go
+++ b/cmd/flux/install.go
@@ -277,7 +277,7 @@ func installCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("verifying installation")
if err := statusChecker.Assess(componentRefs...); err != nil {
- return fmt.Errorf("install failed")
+ return fmt.Errorf("install failed: %w", err)
}
logger.Successf("install finished")
diff --git a/pkg/status/status.go b/pkg/status/status.go
index 5f20e90f..f5040084 100644
--- a/pkg/status/status.go
+++ b/pkg/status/status.go
@@ -18,6 +18,7 @@ package status
import (
"context"
+ "errors"
"fmt"
"sort"
"strings"
@@ -85,22 +86,20 @@ func (sc *StatusChecker) Assess(identifiers ...object.ObjMetadata) error {
sort.SliceStable(identifiers, func(i, j int) bool {
return strings.Compare(identifiers[i].Name, identifiers[j].Name) < 0
})
+ var errs []error
for _, id := range identifiers {
rs := coll.ResourceStatuses[id]
- switch rs.Status {
- case status.CurrentStatus:
- sc.logger.Successf("%s: %s ready", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind))
- case status.NotFoundStatus:
- sc.logger.Failuref("%s: %s not found", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind))
- default:
- sc.logger.Failuref("%s: %s not ready", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind))
+ if rs.Status == status.NotFoundStatus {
+ errs = append(errs, fmt.Errorf("%s: %s not found", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind)))
+ } else if rs.Status != status.CurrentStatus {
+ errs = append(errs, fmt.Errorf("%s: %s not ready", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind)))
}
}
- if coll.Error != nil || ctx.Err() == context.DeadlineExceeded {
- return fmt.Errorf("timed out waiting for all resources to be ready")
+ if coll.Error != nil || errors.Is(ctx.Err(), context.DeadlineExceeded) {
+ errs = append(errs, fmt.Errorf("timeout waiting for resources to be ready"))
}
- return nil
+ return errors.Join(errs...)
}
// desiredStatusNotifierFunc returns an Observer function for the
From 05d46113451ac30018c753fc79430e6b310c81af Mon Sep 17 00:00:00 2001
From: h3nryc0ding
Date: Sat, 12 Apr 2025 18:32:55 +0200
Subject: [PATCH 2/3] fix tests
Signed-off-by: h3nryc0ding
---
cmd/flux/check.go | 4 ++--
cmd/flux/testdata/check/check_pre.golden | 8 +++++---
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/cmd/flux/check.go b/cmd/flux/check.go
index 3d19ca13..26e42354 100644
--- a/cmd/flux/check.go
+++ b/cmd/flux/check.go
@@ -114,11 +114,11 @@ func runCheckCmd(_ *cobra.Command, _ []string) error {
}
if !runPreChecks(ctx, cfg) {
- return errors.New("pre-installation checks failed")
+ return errors.New("pre-requisites checks failed")
}
if checkArgs.pre {
- logger.Actionf("All pre-installation checks passed")
+ logger.Actionf("All pre-requisites checks passed")
return nil
}
diff --git a/cmd/flux/testdata/check/check_pre.golden b/cmd/flux/testdata/check/check_pre.golden
index cb83e9dc..fd6e8b0d 100644
--- a/cmd/flux/testdata/check/check_pre.golden
+++ b/cmd/flux/testdata/check/check_pre.golden
@@ -1,3 +1,5 @@
-► checking prerequisites
-✔ Kubernetes {{ .serverVersion }} >=1.30.0-0
-✔ prerequisites checks passed
+► Checking flux pre-requisites
+✔ flux 0.0.0-dev.0 is a development build
+► Checking kubernetes pre-requisites
+✔ kubernetes {{ .serverVersion }}>=1.30.0-0
+► All pre-requisites checks passed
From 63b52cdbb77fafced60799e17533254799403038 Mon Sep 17 00:00:00 2001
From: h3nryc0ding
Date: Sat, 12 Apr 2025 18:35:37 +0200
Subject: [PATCH 3/3] rename pre-requisites to pre-installation to align with
flag description
Signed-off-by: h3nryc0ding
---
cmd/flux/check.go | 8 ++++----
cmd/flux/testdata/check/check_pre.golden | 6 +++---
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/cmd/flux/check.go b/cmd/flux/check.go
index 26e42354..8ef0e811 100644
--- a/cmd/flux/check.go
+++ b/cmd/flux/check.go
@@ -114,11 +114,11 @@ func runCheckCmd(_ *cobra.Command, _ []string) error {
}
if !runPreChecks(ctx, cfg) {
- return errors.New("pre-requisites checks failed")
+ return errors.New("pre-installation checks failed")
}
if checkArgs.pre {
- logger.Actionf("All pre-requisites checks passed")
+ logger.Actionf("All pre-installation checks passed")
return nil
}
@@ -175,7 +175,7 @@ func logCheckResult(res checkResult) {
}
func fluxCheck() checkResult {
- res := checkResult{Title: "flux pre-requisites"}
+ res := checkResult{Title: "flux pre-installation"}
curSv, err := version.ParseVersion(VERSION)
if err != nil {
@@ -227,7 +227,7 @@ func fluxCheck() checkResult {
}
func kubernetesCheck(cfg *rest.Config, constraints []string) checkResult {
- res := checkResult{Title: "kubernetes pre-requisites"}
+ res := checkResult{Title: "kubernetes pre-installation"}
clientSet, err := kubernetes.NewForConfig(cfg)
if err != nil {
diff --git a/cmd/flux/testdata/check/check_pre.golden b/cmd/flux/testdata/check/check_pre.golden
index fd6e8b0d..352523b8 100644
--- a/cmd/flux/testdata/check/check_pre.golden
+++ b/cmd/flux/testdata/check/check_pre.golden
@@ -1,5 +1,5 @@
-► Checking flux pre-requisites
+► Checking flux pre-installation
✔ flux 0.0.0-dev.0 is a development build
-► Checking kubernetes pre-requisites
+► Checking kubernetes pre-installation
✔ kubernetes {{ .serverVersion }}>=1.30.0-0
-► All pre-requisites checks passed
+► All pre-installation checks passed