Add kstatus to install and check commands
Signed-off-by: jonathan-innis <jonathan.innis.ji@gmail.com>
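Note on the change: the install and check paths previously verified components by shelling out to kubectl `rollout status` per deployment. This commit routes that verification through a kstatus-based StatusChecker instead: construct it once with a poll interval and an overall timeout, then assess one or more components in a single polling pass. A minimal sketch of the resulting call pattern, assuming it sits in the same package as the StatusChecker defined further down (verifyComponents itself is illustrative, not part of the commit):

func verifyComponents(components []string, timeout time.Duration) error {
	statusChecker := StatusChecker{}
	// Fixed one-second poll interval; the overall deadline comes from the caller.
	if err := statusChecker.New(time.Second, timeout); err != nil {
		return fmt.Errorf("install failed with: %v", err)
	}
	// Variadic Assess: one kstatus polling pass over all components at once.
	if err := statusChecker.Assess(components...); err != nil {
		return fmt.Errorf("install failed with: %v", err)
	}
	return nil
}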
@@ -162,24 +162,14 @@ func applyInstallManifests(ctx context.Context, manifestPath string, components
 		return fmt.Errorf("install failed")
 	}
 
-	kubeConfig, err := utils.KubeConfig(rootArgs.kubeconfig, rootArgs.kubecontext)
-	if err != nil {
-		return fmt.Errorf("install failed")
-	}
-	timeout, err := time.ParseDuration(rootArgs.timeout.String())
-	if err != nil {
-		return fmt.Errorf("install failed")
-	}
-
 	statusChecker := StatusChecker{}
-	if err = statusChecker.New(kubeConfig, timeout); err != nil {
-		return fmt.Errorf("install failed")
+	err := statusChecker.New(time.Second, rootArgs.timeout)
+	if err != nil {
+		return fmt.Errorf("install failed with: %v", err)
 	}
-	if err = statusChecker.AddChecks(components); err != nil {
-		return fmt.Errorf("install failed")
-	}
-	if err = statusChecker.Assess(time.Second); err != nil {
-		return fmt.Errorf("install failed")
+	err = statusChecker.Assess(components...)
+	if err != nil {
+		return fmt.Errorf("install failed with: %v", err)
 	}
 
 	return nil
@@ -22,6 +22,7 @@ import (
 	"os"
 	"os/exec"
 	"strings"
+	"time"
 
 	"github.com/blang/semver/v4"
 	"github.com/fluxcd/flux2/internal/utils"
@@ -172,16 +173,22 @@ func componentsCheck() bool {
 	ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout)
 	defer cancel()
 
+	statusChecker := StatusChecker{}
+	err := statusChecker.New(time.Second, rootArgs.timeout)
+	if err != nil {
+		return false
+	}
+
 	ok := true
 	for _, deployment := range checkArgs.components {
-		kubectlArgs := []string{"-n", rootArgs.namespace, "rollout", "status", "deployment", deployment, "--timeout", rootArgs.timeout.String()}
-		if output, err := utils.ExecKubectlCommand(ctx, utils.ModeCapture, rootArgs.kubeconfig, rootArgs.kubecontext, kubectlArgs...); err != nil {
-			logger.Failuref("%s: %s", deployment, strings.TrimSuffix(output, "\n"))
+		err = statusChecker.Assess(deployment)
+		if err != nil {
+			logger.Failuref("%s: %s", deployment, err)
 			ok = false
 		} else {
 			logger.Successf("%s is healthy", deployment)
 		}
-		kubectlArgs = []string{"-n", rootArgs.namespace, "get", "deployment", deployment, "-o", "jsonpath=\"{..image}\""}
+		kubectlArgs := []string{"-n", rootArgs.namespace, "get", "deployment", deployment, "-o", "jsonpath=\"{..image}\""}
 		if output, err := utils.ExecKubectlCommand(ctx, utils.ModeCapture, rootArgs.kubeconfig, rootArgs.kubecontext, kubectlArgs...); err == nil {
 			logger.Actionf(strings.TrimPrefix(strings.TrimSuffix(output, "\""), "\""))
 		}
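The image listing after the health check is unchanged in substance: because the jsonpath expression is passed as jsonpath=\"{..image}\", kubectl's captured output arrives wrapped in literal double quotes, which the TrimPrefix/TrimSuffix pair strips. A self-contained illustration (the image reference is an example value):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// What utils.ExecKubectlCommand would capture for one deployment.
	output := `"ghcr.io/fluxcd/source-controller:v0.2.1"`
	fmt.Println(strings.TrimPrefix(strings.TrimSuffix(output, `"`), `"`))
	// Prints: ghcr.io/fluxcd/source-controller:v0.2.1
}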
@@ -23,6 +23,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 
 	"github.com/spf13/cobra"
 
@@ -178,14 +179,19 @@ func installCmdRun(cmd *cobra.Command, args []string) error {
 		logger.Successf("install completed")
 	}
 
+	statusChecker := StatusChecker{}
+	err = statusChecker.New(time.Second, rootArgs.timeout)
+	if err != nil {
+		return fmt.Errorf("install failed with: %v", err)
+	}
+
 	logger.Waitingf("verifying installation")
 	for _, deployment := range components {
-		kubectlArgs = []string{"-n", rootArgs.namespace, "rollout", "status", "deployment", deployment, "--timeout", rootArgs.timeout.String()}
-		if _, err := utils.ExecKubectlCommand(ctx, applyOutput, rootArgs.kubeconfig, rootArgs.kubecontext, kubectlArgs...); err != nil {
+		err := statusChecker.Assess(deployment)
+		if err != nil {
 			return fmt.Errorf("install failed")
-		} else {
-			logger.Successf("%s ready", deployment)
 		}
+		logger.Successf("%s ready", deployment)
 	}
 
 	logger.Successf("install finished")
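Unlike applyInstallManifests above, which assesses all components in one variadic call, installCmdRun loops so that each deployment gets its own "%s ready" line. The same loop as a sketch in the package's own terms (assessEach and report are illustrative names; report stands in for logger.Successf):

func assessEach(sc *StatusChecker, components []string, report func(format string, a ...interface{})) error {
	for _, deployment := range components {
		// One polling pass per deployment, so progress is reported incrementally.
		if err := sc.Assess(deployment); err != nil {
			return fmt.Errorf("install failed")
		}
		report("%s ready", deployment)
	}
	return nil
}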
@@ -19,13 +19,24 @@ package main
 import (
 	"context"
 	"fmt"
 	"strings"
 	"time"
 
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
 	"sigs.k8s.io/cli-utils/pkg/object"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 
+	"github.com/fluxcd/flux2/internal/utils"
 	"github.com/fluxcd/pkg/apis/meta"
 )
@@ -40,9 +51,8 @@ type statusable interface {
 }
 
 type StatusChecker struct {
 	client       client.Client
-	timeout      time.Duration
-	objRefs      []object.ObjMetadata
+	pollInterval time.Duration
+	timeout      time.Duration
 	statusPoller *polling.StatusPoller
 }
@@ -71,7 +81,11 @@ func isReady(ctx context.Context, kubeClient client.Client,
 	}
 }
 
-func (sc *StatusChecker) New(kubeConfig *rest.Config, timeout time.Duration) error {
+func (sc *StatusChecker) New(pollInterval time.Duration, timeout time.Duration) error {
+	kubeConfig, err := utils.KubeConfig(rootArgs.kubeconfig, rootArgs.kubecontext)
+	if err != nil {
+		return err
+	}
 	restMapper, err := apiutil.NewDynamicRESTMapper(kubeConfig)
 	if err != nil {
 		return err
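New now builds the whole polling stack itself from the CLI's kubeconfig flags rather than receiving a *rest.Config from the caller. The construction chain, pulled out as a sketch: newStatusPoller is an illustrative name, and the client.New options are an assumption, since that call sits outside the displayed hunk.

package statuscheck

import (
	"github.com/fluxcd/flux2/internal/utils"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func newStatusPoller(kubeconfig, kubecontext string) (*polling.StatusPoller, error) {
	// Resolve a *rest.Config from the CLI flags, as the new New does.
	kubeConfig, err := utils.KubeConfig(kubeconfig, kubecontext)
	if err != nil {
		return nil, err
	}
	// Dynamic RESTMapper so the poller can resolve kinds lazily.
	restMapper, err := apiutil.NewDynamicRESTMapper(kubeConfig)
	if err != nil {
		return nil, err
	}
	c, err := client.New(kubeConfig, client.Options{Mapper: restMapper})
	if err != nil {
		return nil, err
	}
	return polling.NewStatusPoller(c, restMapper), nil
}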
@@ -81,32 +95,25 @@ func (sc *StatusChecker) New(kubeConfig *rest.Config, timeout time.Duration) err
 		return err
 	}
 	statusPoller := polling.NewStatusPoller(client, restMapper)
 	sc.client = client
 	sc.statusPoller = statusPoller
+	sc.pollInterval = pollInterval
 	sc.timeout = timeout
 	return err
 }
 
-func (sc *StatusChecker) AddChecks(components []string) error {
-	var componentRefs []object.ObjMetadata
-	for _, deployment := range components {
-		objMeta, err := object.CreateObjMetadata(rootArgs.namespace, deployment, schema.GroupKind{Group: "apps", Kind: "Deployment"})
-		if err != nil {
-			return err
-		}
-		componentRefs = append(componentRefs, objMeta)
-	}
-	sc.objRefs = componentRefs
-	return nil
-}
-
-func (sc *StatusChecker) Assess(pollInterval time.Duration) error {
+func (sc *StatusChecker) Assess(components ...string) error {
 	ctx, cancel := context.WithTimeout(context.Background(), sc.timeout)
 	defer cancel()
 
-	opts := polling.Options{PollInterval: pollInterval, UseCache: true}
-	eventsChan := sc.statusPoller.Poll(ctx, sc.objRefs, opts)
-	coll := collector.NewResourceStatusCollector(sc.objRefs)
+	objRefs, err := sc.getObjectRefs(components)
+	if err != nil {
+		return err
+	}
+
+	opts := polling.Options{PollInterval: sc.pollInterval, UseCache: true}
+	eventsChan := sc.statusPoller.Poll(ctx, objRefs, opts)
+	coll := collector.NewResourceStatusCollector(objRefs)
 	done := coll.ListenWithObserver(eventsChan, collector.ObserverFunc(
 		func(statusCollector *collector.ResourceStatusCollector, e event.Event) {
 			var rss []*event.ResourceStatus
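For reference, the kstatus machinery Assess drives: Poll emits events on a channel, a ResourceStatusCollector accumulates the latest status per object, and the observer callback decides when to stop listening. A standalone sketch of that loop against the same cli-utils APIs; where the real code aggregates statuses (the aggregator import above), this sketch checks each collected status directly:

package statuscheck

import (
	"context"
	"fmt"
	"time"

	"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
	"sigs.k8s.io/cli-utils/pkg/object"
)

func pollUntilCurrent(poller *polling.StatusPoller, objRefs []object.ObjMetadata, pollInterval, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	opts := polling.Options{PollInterval: pollInterval, UseCache: true}
	eventsChan := poller.Poll(ctx, objRefs, opts)

	coll := collector.NewResourceStatusCollector(objRefs)
	done := coll.ListenWithObserver(eventsChan, collector.ObserverFunc(
		func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
			// Cancel the poll as soon as every tracked resource is Current.
			for _, rs := range statusCollector.ResourceStatuses {
				if rs == nil || rs.Status != status.CurrentStatus {
					return
				}
			}
			cancel()
		},
	))
	<-done // Poll's channel closes on cancellation or deadline.

	if coll.Error != nil {
		return coll.Error
	}
	if ctx.Err() == context.DeadlineExceeded {
		return fmt.Errorf("status check timed out")
	}
	return nil
}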
@@ -123,7 +130,6 @@ func (sc *StatusChecker) Assess(pollInterval time.Duration) error {
 	)
 	<-done
 
-
 	if coll.Error != nil {
 		return coll.Error
 	}
@@ -136,35 +142,23 @@ func (sc *StatusChecker) Assess(pollInterval time.Duration) error {
 				ids = append(ids, id)
 			}
 		}
-		return fmt.Errorf("Health check timed out for [%v]", strings.Join(ids, ", "))
+		return fmt.Errorf("Status check timed out for component(s): [%v]", strings.Join(ids, ", "))
 	}
 	return nil
 }
 
-func (sc *StatusChecker) toObjMetadata(cr []meta.NamespacedObjectKindReference) ([]object.ObjMetadata, error) {
-	oo := []object.ObjMetadata{}
-	for _, c := range cr {
-		// For backwards compatibility
-		if c.APIVersion == "" {
-			c.APIVersion = "apps/v1"
-		}
-
-		gv, err := schema.ParseGroupVersion(c.APIVersion)
+func (sc *StatusChecker) getObjectRefs(components []string) ([]object.ObjMetadata, error) {
+	var objRefs []object.ObjMetadata
+	for _, deployment := range components {
+		objMeta, err := object.CreateObjMetadata(rootArgs.namespace, deployment, schema.GroupKind{Group: "apps", Kind: "Deployment"})
 		if err != nil {
-			return []object.ObjMetadata{}, err
+			return nil, err
 		}
-
-		gk := schema.GroupKind{Group: gv.Group, Kind: c.Kind}
-		o, err := object.CreateObjMetadata(c.Namespace, c.Name, gk)
-		if err != nil {
-			return []object.ObjMetadata{}, err
-		}
-
-		oo = append(oo, o)
+		objRefs = append(objRefs, objMeta)
 	}
-	return oo, nil
+	return objRefs, nil
 }
 
 func (sc *StatusChecker) objMetadataToString(om object.ObjMetadata) string {
 	return fmt.Sprintf("%s '%s/%s'", om.GroupKind.Kind, om.Namespace, om.Name)
 }
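getObjectRefs is the one place component names become kstatus object references; every component is assumed to be an apps/v1 Deployment in the CLI's namespace. A runnable fragment using the same object.CreateObjMetadata call (the namespace and component names are example values):

package statuscheck

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/cli-utils/pkg/object"
)

func exampleRefs() ([]object.ObjMetadata, error) {
	var objRefs []object.ObjMetadata
	for _, name := range []string{"source-controller", "kustomize-controller"} {
		// One reference per component deployment.
		ref, err := object.CreateObjMetadata("flux-system", name,
			schema.GroupKind{Group: "apps", Kind: "Deployment"})
		if err != nil {
			return nil, err
		}
		objRefs = append(objRefs, ref)
	}
	return objRefs, nil
}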