Reuse isReady from create_image commands

I implemented the isReady procedure for adapters as part of the resume
commands -- use it in the create commands too.

Signed-off-by: Michael Bridgen <michael@weave.works>
pull/538/head
Michael Bridgen 4 years ago
parent 45240bdb71
commit 3b9b2cbe9f
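Pieced together, the wait step in each create command now reads as below; ImagePolicy is shown, ImageRepository and ImageUpdateAutomation are analogous. This only restates the hunks that follow in one place; pollInterval, timeout, kubeClient, namespacedName and policy all come from the surrounding command code.

// Wait step inside createImagePolicyRun after this change: wrap the typed
// object in its adapter and hand it to the shared isReady condition.
logger.Waitingf("waiting for ImagePolicy reconciliation")
if err := wait.PollImmediate(pollInterval, timeout,
	isReady(ctx, kubeClient, namespacedName, imagePolicyAdapter{&policy})); err != nil {
	return err
}
logger.Successf("ImagePolicy reconciliation completed")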

@@ -22,7 +22,6 @@ import (
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@@ -31,7 +30,6 @@ import (
"github.com/fluxcd/flux2/internal/utils"
imagev1 "github.com/fluxcd/image-reflector-controller/api/v1alpha1"
"github.com/fluxcd/pkg/apis/meta"
)
var createImagePolicyCmd = &cobra.Command{
@@ -60,6 +58,12 @@ func init() {
createImageCmd.AddCommand(createImagePolicyCmd)
}
// getObservedGeneration is implemented here, since it's not
// (presently) needed elsewhere.
func (obj imagePolicyAdapter) getObservedGeneration() int64 {
return obj.ImagePolicy.Status.ObservedGeneration
}
func createImagePolicyRun(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("ImagePolicy name is required")
@@ -121,7 +125,7 @@ func createImagePolicyRun(cmd *cobra.Command, args []string) error {
logger.Waitingf("waiting for ImagePolicy reconciliation")
if err := wait.PollImmediate(pollInterval, timeout,
isImagePolicyReady(ctx, kubeClient, namespacedName, &policy)); err != nil {
isReady(ctx, kubeClient, namespacedName, imagePolicyAdapter{&policy})); err != nil {
return err
}
logger.Successf("ImagePolicy reconciliation completed")
@@ -155,28 +159,3 @@ func upsertImagePolicy(ctx context.Context, kubeClient client.Client, policy *im
}
return nsname, nil
}
func isImagePolicyReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, policy *imagev1.ImagePolicy) wait.ConditionFunc {
return func() (bool, error) {
err := kubeClient.Get(ctx, namespacedName, policy)
if err != nil {
return false, err
}
// Confirm the state we are observing is for the current generation
if policy.Generation != policy.Status.ObservedGeneration {
return false, nil
}
if c := apimeta.FindStatusCondition(policy.Status.Conditions, meta.ReadyCondition); c != nil {
switch c.Status {
case metav1.ConditionTrue:
return true, nil
case metav1.ConditionFalse:
return false, fmt.Errorf(c.Message)
}
}
return false, nil
}
}

@@ -24,7 +24,6 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@@ -33,7 +32,6 @@ import (
"github.com/fluxcd/flux2/internal/utils"
imagev1 "github.com/fluxcd/image-reflector-controller/api/v1alpha1"
"github.com/fluxcd/pkg/apis/meta"
)
var createImageRepositoryCmd = &cobra.Command{
@@ -126,7 +124,7 @@ func createImageRepositoryRun(cmd *cobra.Command, args []string) error {
logger.Waitingf("waiting for ImageRepository reconciliation")
if err := wait.PollImmediate(pollInterval, timeout,
isImageRepositoryReady(ctx, kubeClient, namespacedName, &repo)); err != nil {
isReady(ctx, kubeClient, namespacedName, imageRepositoryAdapter{&repo})); err != nil {
return err
}
logger.Successf("ImageRepository reconciliation completed")
@@ -160,28 +158,3 @@ func upsertImageRepository(ctx context.Context, kubeClient client.Client, repo *
}
return nsname, nil
}
func isImageRepositoryReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, imageRepository *imagev1.ImageRepository) wait.ConditionFunc {
return func() (bool, error) {
err := kubeClient.Get(ctx, namespacedName, imageRepository)
if err != nil {
return false, err
}
// Confirm the state we are observing is for the current generation
if imageRepository.Generation != imageRepository.Status.ObservedGeneration {
return false, nil
}
if c := apimeta.FindStatusCondition(imageRepository.Status.Conditions, meta.ReadyCondition); c != nil {
switch c.Status {
case metav1.ConditionTrue:
return true, nil
case metav1.ConditionFalse:
return false, fmt.Errorf(c.Message)
}
}
return false, nil
}
}

@@ -22,7 +22,6 @@ import (
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@@ -31,7 +30,6 @@ import (
"github.com/fluxcd/flux2/internal/utils"
autov1 "github.com/fluxcd/image-automation-controller/api/v1alpha1"
"github.com/fluxcd/pkg/apis/meta"
)
var createImageUpdateCmd = &cobra.Command{
@@ -130,7 +128,7 @@ func createImageUpdateRun(cmd *cobra.Command, args []string) error {
logger.Waitingf("waiting for ImageUpdateAutomation reconciliation")
if err := wait.PollImmediate(pollInterval, timeout,
isImageUpdateAutomationReady(ctx, kubeClient, namespacedName, &update)); err != nil {
isReady(ctx, kubeClient, namespacedName, imageUpdateAutomationAdapter{&update})); err != nil {
return err
}
logger.Successf("ImageUpdateAutomation reconciliation completed")
@@ -164,28 +162,3 @@ func upsertImageUpdateAutomation(ctx context.Context, kubeClient client.Client,
}
return nsname, nil
}
func isImageUpdateAutomationReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, update *autov1.ImageUpdateAutomation) wait.ConditionFunc {
return func() (bool, error) {
err := kubeClient.Get(ctx, namespacedName, update)
if err != nil {
return false, err
}
// Confirm the state we are observing is for the current generation
if update.Generation != update.Status.ObservedGeneration {
return false, nil
}
if c := apimeta.FindStatusCondition(update.Status.Conditions, meta.ReadyCondition); c != nil {
switch c.Status {
case metav1.ConditionTrue:
return true, nil
case metav1.ConditionFalse:
return false, fmt.Errorf(c.Message)
}
}
return false, nil
}
}

@@ -41,6 +41,7 @@ type resumable interface {
adapter
statusable
setUnsuspended()
successMessage() string
}
type resumeCommand struct {
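With successMessage moved onto resumable, every resumable adapter has to provide it itself; a hypothetical example (the receiver and message below are illustrative only, not taken from the tree):

// Hypothetical: what a resumable adapter's successMessage might look like
// now that the method belongs to resumable rather than statusable.
func (obj imageRepositoryAdapter) successMessage() string {
	return "ImageRepository reconciliation completed"
}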

@@ -37,8 +37,6 @@ type statusable interface {
getObservedGeneration() int64
// this is usually implemented by GOTK API objects because it's used by pkg/apis/meta
GetStatusConditions() *[]metav1.Condition
// successMessage gives a short summary of the successful reconciliation
successMessage() string
}
func isReady(ctx context.Context, kubeClient client.Client,
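The shared isReady only appears truncated above; presumably it generalizes the three per-type helpers deleted in this commit over the statusable interface, along these lines. The parameter list is inferred from the call sites, and asClientObject is a guess at how isReady gets at the underlying client.Object (perhaps via an embedded adapter interface); both are assumptions.

// A sketch, not the exact code in the tree: one polling condition for any
// statusable object, mirroring the bodies of the deleted helpers.
func isReady(ctx context.Context, kubeClient client.Client,
	namespacedName types.NamespacedName, object statusable) wait.ConditionFunc {
	return func() (bool, error) {
		if err := kubeClient.Get(ctx, namespacedName, object.asClientObject()); err != nil {
			return false, err
		}
		// Confirm the state we are observing is for the current generation
		if object.asClientObject().GetGeneration() != object.getObservedGeneration() {
			return false, nil
		}
		if c := apimeta.FindStatusCondition(*object.GetStatusConditions(), meta.ReadyCondition); c != nil {
			switch c.Status {
			case metav1.ConditionTrue:
				return true, nil
			case metav1.ConditionFalse:
				return false, fmt.Errorf(c.Message)
			}
		}
		return false, nil
	}
}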
