Merge pull request #4329 from fluxcd/tidy-nits

Address various issues throughout code base
Hidde Beydals authored 1 year ago, committed by GitHub
commit 5f1fe306bb

@ -131,8 +131,8 @@ func (names apiType) upsertAndWait(object upsertWaitable, mutate func() error) e
}
logger.Waitingf("waiting for %s reconciliation", names.kind)
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isReady(ctx, kubeClient, namespacedName, object)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isReady(kubeClient, namespacedName, object)); err != nil {
return err
}
logger.Successf("%s reconciliation completed", names.kind)
@ -165,6 +165,6 @@ func parseLabels() (map[string]string, error) {
}
func validateObjectName(name string) bool {
r := regexp.MustCompile("^[a-z0-9]([a-z0-9\\-]){0,61}[a-z0-9]$")
r := regexp.MustCompile(`^[a-z0-9]([a-z0-9\-]){0,61}[a-z0-9]$`)
return r.MatchString(name)
}
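
The validateObjectName pattern is now written as a raw string literal, so the backslash in the character class no longer has to be doubled for Go string syntax; both literals compile to the same regular expression. A tiny stand-alone check of that equivalence (not part of this commit):

package regexexample

import "regexp"

// In the interpreted literal the backslash is written twice (once for Go,
// once for the regexp); in the raw literal it is written once. The compiled
// patterns are identical.
var (
	interpreted = regexp.MustCompile("^[a-z0-9]([a-z0-9\\-]){0,61}[a-z0-9]$")
	raw         = regexp.MustCompile(`^[a-z0-9]([a-z0-9\-]){0,61}[a-z0-9]$`)
)

// sameBehaviour reports whether both patterns agree on a name; it is true
// for every input.
func sameBehaviour(name string) bool {
	return interpreted.MatchString(name) == raw.MatchString(name)
}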

@ -132,8 +132,8 @@ func createAlertCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for Alert reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isAlertReady(ctx, kubeClient, namespacedName, &alert)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isAlertReady(kubeClient, namespacedName, &alert)); err != nil {
return err
}
logger.Successf("Alert %s is ready", name)
@ -171,9 +171,8 @@ func upsertAlert(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isAlertReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, alert *notificationv1b2.Alert) wait.ConditionFunc {
return func() (bool, error) {
func isAlertReady(kubeClient client.Client, namespacedName types.NamespacedName, alert *notificationv1b2.Alert) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, alert)
if err != nil {
return false, err
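
The same polling migration repeats across the create, reconcile, resume, and status commands: the deprecated wait.PollImmediate takes a context-free wait.ConditionFunc, while wait.PollUntilContextTimeout threads a context into a wait.ConditionWithContextFunc and takes an explicit immediate flag. A minimal, self-contained sketch of the before/after shape (the ConfigMap target and helper names are hypothetical, not from this change):

package waitexample

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// isConfigMapPresent no longer closes over a context; the poller supplies
// a fresh one to every attempt via wait.ConditionWithContextFunc.
func isConfigMapPresent(kubeClient client.Client, name types.NamespacedName, cm *corev1.ConfigMap) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		if err := kubeClient.Get(ctx, name, cm); err != nil {
			// Keep polling while the object does not exist yet; fail on anything else.
			return false, client.IgnoreNotFound(err)
		}
		return true, nil
	}
}

func waitForConfigMap(ctx context.Context, kubeClient client.Client, name types.NamespacedName) error {
	var cm corev1.ConfigMap
	// Before: wait.PollImmediate(interval, timeout, isConfigMapPresent(ctx, ...))
	// After: the context is passed to the poller and `true` requests an immediate first poll.
	if err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true,
		isConfigMapPresent(kubeClient, name, &cm)); err != nil {
		return fmt.Errorf("waiting for ConfigMap %s: %w", name, err)
	}
	return nil
}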

@ -127,8 +127,8 @@ func createAlertProviderCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for Provider reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isAlertProviderReady(ctx, kubeClient, namespacedName, &provider)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isAlertProviderReady(kubeClient, namespacedName, &provider)); err != nil {
return err
}
@ -168,9 +168,8 @@ func upsertAlertProvider(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isAlertProviderReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, provider *notificationv1.Provider) wait.ConditionFunc {
return func() (bool, error) {
func isAlertProviderReady(kubeClient client.Client, namespacedName types.NamespacedName, provider *notificationv1.Provider) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, provider)
if err != nil {
return false, err

@ -303,8 +303,8 @@ func createHelmReleaseCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for HelmRelease reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isHelmReleaseReady(ctx, kubeClient, namespacedName, &helmRelease)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isHelmReleaseReady(kubeClient, namespacedName, &helmRelease)); err != nil {
return err
}
logger.Successf("HelmRelease %s is ready", name)
@ -344,9 +344,8 @@ func upsertHelmRelease(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isHelmReleaseReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, helmRelease *helmv2.HelmRelease) wait.ConditionFunc {
return func() (bool, error) {
func isHelmReleaseReady(kubeClient client.Client, namespacedName types.NamespacedName, helmRelease *helmv2.HelmRelease) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, helmRelease)
if err != nil {
return false, err

@ -54,13 +54,12 @@ the status of the object.`),
RunE: createImagePolicyRun}
type imagePolicyFlags struct {
imageRef string
semver string
alpha string
numeric string
filterRegex string
filterExtract string
filterNumerical string
imageRef string
semver string
alpha string
numeric string
filterRegex string
filterExtract string
}
var imagePolicyArgs = imagePolicyFlags{}
@ -183,7 +182,6 @@ func validateExtractStr(template string, capNames []string) error {
name, num, rest, ok := extract(template)
if !ok {
// Malformed extract string, assume user didn't want this
template = template[1:]
return fmt.Errorf("--filter-extract is malformed")
}
template = rest

@ -263,8 +263,8 @@ func createKsCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for Kustomization reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isKustomizationReady(ctx, kubeClient, namespacedName, &kustomization)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isKustomizationReady(kubeClient, namespacedName, &kustomization)); err != nil {
return err
}
logger.Successf("Kustomization %s is ready", name)
@ -304,9 +304,8 @@ func upsertKustomization(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isKustomizationReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, kustomization *kustomizev1.Kustomization) wait.ConditionFunc {
return func() (bool, error) {
func isKustomizationReady(kubeClient client.Client, namespacedName types.NamespacedName, kustomization *kustomizev1.Kustomization) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, kustomization)
if err != nil {
return false, err

@ -139,8 +139,8 @@ func createReceiverCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for Receiver reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isReceiverReady(ctx, kubeClient, namespacedName, &receiver)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isReceiverReady(kubeClient, namespacedName, &receiver)); err != nil {
return err
}
logger.Successf("Receiver %s is ready", name)
@ -180,9 +180,8 @@ func upsertReceiver(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isReceiverReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, receiver *notificationv1.Receiver) wait.ConditionFunc {
return func() (bool, error) {
func isReceiverReady(kubeClient client.Client, namespacedName types.NamespacedName, receiver *notificationv1.Receiver) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, receiver)
if err != nil {
return false, err

@ -204,8 +204,8 @@ func createSourceBucketCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for Bucket source reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isBucketReady(ctx, kubeClient, namespacedName, bucket)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isBucketReady(kubeClient, namespacedName, bucket)); err != nil {
return err
}
logger.Successf("Bucket source reconciliation completed")
@ -248,9 +248,8 @@ func upsertBucket(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isBucketReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, bucket *sourcev1.Bucket) wait.ConditionFunc {
return func() (bool, error) {
func isBucketReady(kubeClient client.Client, namespacedName types.NamespacedName, bucket *sourcev1.Bucket) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, bucket)
if err != nil {
return false, err

@ -325,8 +325,8 @@ func createSourceGitCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for GitRepository source reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isGitRepositoryReady(ctx, kubeClient, namespacedName, &gitRepository)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isGitRepositoryReady(kubeClient, namespacedName, &gitRepository)); err != nil {
return err
}
logger.Successf("GitRepository source reconciliation completed")
@ -369,9 +369,8 @@ func upsertGitRepository(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isGitRepositoryReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, gitRepository *sourcev1.GitRepository) wait.ConditionFunc {
return func() (bool, error) {
func isGitRepositoryReady(kubeClient client.Client, namespacedName types.NamespacedName, gitRepository *sourcev1.GitRepository) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, gitRepository)
if err != nil {
return false, err

@ -231,8 +231,8 @@ func createSourceHelmCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for HelmRepository source reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isHelmRepositoryReady(ctx, kubeClient, namespacedName, helmRepository)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isHelmRepositoryReady(kubeClient, namespacedName, helmRepository)); err != nil {
return err
}
logger.Successf("HelmRepository source reconciliation completed")
@ -280,9 +280,8 @@ func upsertHelmRepository(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isHelmRepositoryReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, helmRepository *sourcev1.HelmRepository) wait.ConditionFunc {
return func() (bool, error) {
func isHelmRepositoryReady(kubeClient client.Client, namespacedName types.NamespacedName, helmRepository *sourcev1.HelmRepository) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, helmRepository)
if err != nil {
return false, err

@ -192,8 +192,8 @@ func createSourceOCIRepositoryCmdRun(cmd *cobra.Command, args []string) error {
}
logger.Waitingf("waiting for OCIRepository reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isOCIRepositoryReady(ctx, kubeClient, namespacedName, repository)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isOCIRepositoryReady(kubeClient, namespacedName, repository)); err != nil {
return err
}
logger.Successf("OCIRepository reconciliation completed")
@ -236,9 +236,8 @@ func upsertOCIRepository(ctx context.Context, kubeClient client.Client,
return namespacedName, nil
}
func isOCIRepositoryReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, ociRepository *sourcev1.OCIRepository) wait.ConditionFunc {
return func() (bool, error) {
func isOCIRepositoryReady(kubeClient client.Client, namespacedName types.NamespacedName, ociRepository *sourcev1.OCIRepository) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, ociRepository)
if err != nil {
return false, err

@ -46,7 +46,6 @@ type exportableWithSecretList interface {
}
type exportWithSecretCommand struct {
apiType
object exportableWithSecret
list exportableWithSecretList
}

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
notificationv1 "github.com/fluxcd/notification-controller/api/v1beta2"
@ -77,7 +78,8 @@ func init() {
func (s alertListAdapter) summariseItem(i int, includeNamespace bool, includeKind bool) []string {
item := s.Items[i]
status, msg := statusAndMessage(item.Status.Conditions)
return append(nameColumns(&item, includeNamespace, includeKind), strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
return append(nameColumns(&item, includeNamespace, includeKind),
cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (s alertListAdapter) headers(includeNamespace bool) []string {
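
strings.Title has been deprecated since Go 1.18 because it titles words byte by byte and mishandles Unicode; the get commands therefore build a golang.org/x/text cases.Caser instead. A small stand-alone sketch of the replacement (the helper name is illustrative only):

package titleexample

import (
	"strconv"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

// titleBool renders a bool as "True"/"False" for table output, mirroring
// the strings.Title(strconv.FormatBool(b)) calls it replaces.
func titleBool(b bool) string {
	return cases.Title(language.English).String(strconv.FormatBool(b))
}

Building the Caser at each call site keeps the change mechanical; a cases.Caser can be stateful, so it should not be shared across goroutines without care.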

@ -19,11 +19,13 @@ package main
import (
"fmt"
"strconv"
"strings"
helmv2 "github.com/fluxcd/helm-controller/api/v2beta1"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
helmv2 "github.com/fluxcd/helm-controller/api/v2beta1"
)
var getHelmReleaseCmd = &cobra.Command{
@ -75,7 +77,7 @@ func (a helmReleaseListAdapter) summariseItem(i int, includeNamespace bool, incl
revision := item.Status.LastAppliedRevision
status, msg := statusAndMessage(item.Status.Conditions)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a helmReleaseListAdapter) headers(includeNamespace bool) []string {

@ -19,10 +19,11 @@ package main
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
imagev1 "github.com/fluxcd/image-reflector-controller/api/v1beta2"
@ -82,7 +83,7 @@ func (s imageRepositoryListAdapter) summariseItem(i int, includeNamespace bool,
lastScan = item.Status.LastScanResult.ScanTime.Time.Format(time.RFC3339)
}
return append(nameColumns(&item, includeNamespace, includeKind),
lastScan, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
lastScan, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (s imageRepositoryListAdapter) headers(includeNamespace bool) []string {

@ -19,10 +19,11 @@ package main
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
autov1 "github.com/fluxcd/image-automation-controller/api/v1beta1"
@ -81,7 +82,8 @@ func (s imageUpdateAutomationListAdapter) summariseItem(i int, includeNamespace
if item.Status.LastAutomationRunTime != nil {
lastRun = item.Status.LastAutomationRunTime.Time.Format(time.RFC3339)
}
return append(nameColumns(&item, includeNamespace, includeKind), lastRun, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
return append(nameColumns(&item, includeNamespace, includeKind), lastRun,
cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (s imageUpdateAutomationListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1"
@ -83,7 +84,7 @@ func (a kustomizationListAdapter) summariseItem(i int, includeNamespace bool, in
revision = utils.TruncateHex(revision)
msg = utils.TruncateHex(msg)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a kustomizationListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
notificationv1 "github.com/fluxcd/notification-controller/api/v1"
@ -74,7 +75,8 @@ func init() {
func (s receiverListAdapter) summariseItem(i int, includeNamespace bool, includeKind bool) []string {
item := s.Items[i]
status, msg := statusAndMessage(item.Status.Conditions)
return append(nameColumns(&item, includeNamespace, includeKind), strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
return append(nameColumns(&item, includeNamespace, includeKind),
cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (s receiverListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
@ -85,7 +86,7 @@ func (a *bucketListAdapter) summariseItem(i int, includeNamespace bool, includeK
revision = utils.TruncateHex(revision)
msg = utils.TruncateHex(msg)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a bucketListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
@ -86,7 +87,7 @@ func (a *helmChartListAdapter) summariseItem(i int, includeNamespace bool, inclu
// Message may still contain reference of e.g. commit chart was built from
msg = utils.TruncateHex(msg)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a helmChartListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
@ -85,7 +86,7 @@ func (a *gitRepositoryListAdapter) summariseItem(i int, includeNamespace bool, i
revision = utils.TruncateHex(revision)
msg = utils.TruncateHex(msg)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a gitRepositoryListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
@ -85,7 +86,7 @@ func (a *helmRepositoryListAdapter) summariseItem(i int, includeNamespace bool,
revision = utils.TruncateHex(revision)
msg = utils.TruncateHex(msg)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a helmRepositoryListAdapter) headers(includeNamespace bool) []string {

@ -19,9 +19,10 @@ package main
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"k8s.io/apimachinery/pkg/runtime"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
@ -85,7 +86,7 @@ func (a *ociRepositoryListAdapter) summariseItem(i int, includeNamespace bool, i
revision = utils.TruncateHex(revision)
msg = utils.TruncateHex(msg)
return append(nameColumns(&item, includeNamespace, includeKind),
revision, strings.Title(strconv.FormatBool(item.Spec.Suspend)), status, msg)
revision, cases.Title(language.English).String(strconv.FormatBool(item.Spec.Suspend)), status, msg)
}
func (a ociRepositoryListAdapter) headers(includeNamespace bool) []string {

@ -74,7 +74,7 @@ type logsFlags struct {
fluxNamespace string
allNamespaces bool
sinceTime string
sinceSeconds time.Duration
sinceDuration time.Duration
}
var logsArgs = logsFlags{
@ -91,7 +91,7 @@ func init() {
logsCmd.Flags().Int64VarP(&logsArgs.tail, "tail", "", logsArgs.tail, "lines of recent log file to display")
logsCmd.Flags().StringVarP(&logsArgs.fluxNamespace, "flux-namespace", "", rootArgs.defaults.Namespace, "the namespace where the Flux components are running")
logsCmd.Flags().BoolVarP(&logsArgs.allNamespaces, "all-namespaces", "A", false, "displays logs for objects across all namespaces")
logsCmd.Flags().DurationVar(&logsArgs.sinceSeconds, "since", logsArgs.sinceSeconds, "Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.")
logsCmd.Flags().DurationVar(&logsArgs.sinceDuration, "since", logsArgs.sinceDuration, "Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.")
logsCmd.Flags().StringVar(&logsArgs.sinceTime, "since-time", logsArgs.sinceTime, "Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.")
rootCmd.AddCommand(logsCmd)
}
@ -129,8 +129,8 @@ func logsCmdRun(cmd *cobra.Command, args []string) error {
logOpts.TailLines = &logsArgs.tail
}
if len(logsArgs.sinceTime) > 0 && logsArgs.sinceSeconds != 0 {
return fmt.Errorf("at most one of `sinceTime` or `sinceSeconds` may be specified")
if len(logsArgs.sinceTime) > 0 && logsArgs.sinceDuration != 0 {
return fmt.Errorf("at most one of `sinceTime` or `sinceDuration` may be specified")
}
if len(logsArgs.sinceTime) > 0 {
@ -141,9 +141,9 @@ func logsCmdRun(cmd *cobra.Command, args []string) error {
logOpts.SinceTime = &t
}
if logsArgs.sinceSeconds != 0 {
if logsArgs.sinceDuration != 0 {
// round up to the nearest second
sec := int64(logsArgs.sinceSeconds.Round(time.Second).Seconds())
sec := int64(logsArgs.sinceDuration.Round(time.Second).Seconds())
logOpts.SinceSeconds = &sec
}

@ -82,7 +82,7 @@ func TestLogsSinceTimeInvalid(t *testing.T) {
func TestLogsSinceOnlyOneAllowed(t *testing.T) {
cmd := cmdTestCase{
args: "logs --since=2m --since-time=2021-08-06T14:26:25.546Z",
assert: assertError("at most one of `sinceTime` or `sinceSeconds` may be specified"),
assert: assertError("at most one of `sinceTime` or `sinceDuration` may be specified"),
}
cmd.runTestCmd(t)
}

@ -203,6 +203,9 @@ func NewTestEnvKubeManager(testClusterMode TestClusterMode) (*testEnvKubeManager
useExistingCluster := true
config, err := clientcmd.BuildConfigFromFlags("", testKubeConfig)
if err != nil {
return nil, err
}
testEnv := &envtest.Environment{
UseExistingCluster: &useExistingCluster,
Config: config,
@ -337,8 +340,6 @@ type cmdTestCase struct {
// Tests use assertFunc to assert on an output, success or failure. This
// can be a function defined by the test or existing function above.
assert assertFunc
// Filename that contains yaml objects to load into Kubernetes
objectFile string
}
func (cmd *cmdTestCase) runTestCmd(t *testing.T) {

@ -28,7 +28,6 @@ import (
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
reg "github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/spf13/cobra"
@ -266,12 +265,12 @@ func pushArtifactCmdRun(cmd *cobra.Command, args []string) error {
return fmt.Errorf("pushing artifact failed: %w", err)
}
digest, err := reg.NewDigest(digestURL)
digest, err := name.NewDigest(digestURL)
if err != nil {
return fmt.Errorf("artifact digest parsing failed: %w", err)
}
tag, err := reg.NewTag(url)
tag, err := name.NewTag(url)
if err != nil {
return fmt.Errorf("artifact tag parsing failed: %w", err)
}
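
The push artifact command imported github.com/google/go-containerregistry/pkg/name twice, once under the alias reg; the duplicate alias is dropped and both call sites use the name package directly. For context, a brief sketch of what the two parsers do (the references below are made up for illustration):

package refexample

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

// parseRefs shows the two helpers used when pushing an artifact:
// NewTag validates a tagged reference, NewDigest a digest-pinned one.
func parseRefs() error {
	tag, err := name.NewTag("ghcr.io/example/app:v1.0.0")
	if err != nil {
		return fmt.Errorf("artifact tag parsing failed: %w", err)
	}
	digest, err := name.NewDigest("ghcr.io/example/app@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		return fmt.Errorf("artifact digest parsing failed: %w", err)
	}
	fmt.Println(tag.String(), digest.DigestStr())
	return nil
}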

@ -113,8 +113,8 @@ func (reconcile reconcileCommand) run(cmd *cobra.Command, args []string) error {
logger.Successf("%s annotated", reconcile.kind)
if reconcile.kind == notificationv1b2.AlertKind || reconcile.kind == notificationv1.ReceiverKind {
if err = wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isReconcileReady(ctx, kubeClient, namespacedName, reconcile.object)); err != nil {
if err = wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isReconcileReady(kubeClient, namespacedName, reconcile.object)); err != nil {
return err
}
@ -124,8 +124,8 @@ func (reconcile reconcileCommand) run(cmd *cobra.Command, args []string) error {
lastHandledReconcileAt := reconcile.object.lastHandledReconcileRequest()
logger.Waitingf("waiting for %s reconciliation", reconcile.kind)
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
reconciliationHandled(ctx, kubeClient, namespacedName, reconcile.object, lastHandledReconcileAt)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
reconciliationHandled(kubeClient, namespacedName, reconcile.object, lastHandledReconcileAt)); err != nil {
return err
}
readyCond := apimeta.FindStatusCondition(reconcilableConditions(reconcile.object), meta.ReadyCondition)
@ -140,9 +140,8 @@ func (reconcile reconcileCommand) run(cmd *cobra.Command, args []string) error {
return nil
}
func reconciliationHandled(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, obj reconcilable, lastHandledReconcileAt string) wait.ConditionFunc {
return func() (bool, error) {
func reconciliationHandled(kubeClient client.Client, namespacedName types.NamespacedName, obj reconcilable, lastHandledReconcileAt string) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, obj.asClientObject())
if err != nil {
return false, err
@ -176,9 +175,8 @@ func requestReconciliation(ctx context.Context, kubeClient client.Client,
})
}
func isReconcileReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, obj reconcilable) wait.ConditionFunc {
return func() (bool, error) {
func isReconcileReady(kubeClient client.Client, namespacedName types.NamespacedName, obj reconcilable) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, obj.asClientObject())
if err != nil {
return false, err

@ -84,8 +84,8 @@ func reconcileAlertProviderCmdRun(cmd *cobra.Command, args []string) error {
logger.Successf("Provider annotated")
logger.Waitingf("waiting for reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isAlertProviderReady(ctx, kubeClient, namespacedName, &alertProvider)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isAlertProviderReady(kubeClient, namespacedName, &alertProvider)); err != nil {
return err
}
logger.Successf("Provider reconciliation completed")

@ -43,10 +43,6 @@ func init() {
reconcileImageCmd.AddCommand(reconcileImageUpdateCmd)
}
func (obj imageUpdateAutomationAdapter) suspended() bool {
return obj.ImageUpdateAutomation.Spec.Suspend
}
func (obj imageUpdateAutomationAdapter) lastHandledReconcileRequest() string {
return obj.Status.GetLastHandledReconcileRequest()
}

@ -88,8 +88,8 @@ func reconcileReceiverCmdRun(cmd *cobra.Command, args []string) error {
logger.Successf("Receiver annotated")
logger.Waitingf("waiting for Receiver reconciliation")
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isReceiverReady(ctx, kubeClient, namespacedName, &receiver)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isReceiverReady(kubeClient, namespacedName, &receiver)); err != nil {
return err
}

@ -82,8 +82,8 @@ func (reconcile reconcileWithSourceCommand) run(cmd *cobra.Command, args []strin
logger.Successf("%s annotated", reconcile.kind)
logger.Waitingf("waiting for %s reconciliation", reconcile.kind)
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
reconciliationHandled(ctx, kubeClient, namespacedName, reconcile.object, lastHandledReconcileAt)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
reconciliationHandled(kubeClient, namespacedName, reconcile.object, lastHandledReconcileAt)); err != nil {
return err
}

@ -212,8 +212,8 @@ func (resume resumeCommand) reconcile(ctx context.Context, res resumable) reconc
logger.Waitingf("waiting for %s reconciliation", resume.kind)
if err := wait.PollImmediate(rootArgs.pollInterval, rootArgs.timeout,
isReady(ctx, resume.client, namespacedName, res)); err != nil {
if err := wait.PollUntilContextTimeout(ctx, rootArgs.pollInterval, rootArgs.timeout, true,
isReady(resume.client, namespacedName, res)); err != nil {
return reconcileResponse{
resumable: res,
err: err,

@ -56,9 +56,8 @@ func statusableConditions(object statusable) []metav1.Condition {
return []metav1.Condition{}
}
func isReady(ctx context.Context, kubeClient client.Client,
namespacedName types.NamespacedName, object statusable) wait.ConditionFunc {
return func() (bool, error) {
func isReady(kubeClient client.Client, namespacedName types.NamespacedName, object statusable) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
err := kubeClient.Get(ctx, namespacedName, object.asClientObject())
if err != nil {
return false, err

@ -48,6 +48,7 @@ require (
github.com/theckman/yacspin v0.13.12
golang.org/x/crypto v0.14.0
golang.org/x/term v0.13.0
golang.org/x/text v0.13.0
k8s.io/api v0.27.4
k8s.io/apiextensions-apiserver v0.27.4
k8s.io/apimachinery v0.27.4
@ -195,7 +196,6 @@ require (
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.13.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect

@ -539,10 +539,8 @@ func maskDockerconfigjsonSopsData(dataMap map[string]string, encode bool) error
func maskBase64EncryptedSopsData(dataMap map[string]string, mask string) error {
for k, v := range dataMap {
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
if _, ok := err.(base64.CorruptInputError); ok {
return err
}
if corruptErr := base64.CorruptInputError(0); errors.As(err, &corruptErr) {
return corruptErr
}
if bytes.Contains(data, []byte("sops")) && bytes.Contains(data, []byte("ENC[")) {
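
The SOPS masking helper previously matched the corrupt-input case with a direct type assertion on the error; errors.As performs the same match but also inspects wrapped errors, which is the usual rationale for this rewrite. A minimal sketch of the idiom outside the Flux code (the helper name is hypothetical):

package maskexample

import (
	"encoding/base64"
	"errors"
	"fmt"
)

// decodeOrSkip decodes a possibly base64-encoded value. A corrupt-input
// error is surfaced to the caller; base64.CorruptInputError carries the
// byte offset of the first illegal character.
func decodeOrSkip(v string) ([]byte, error) {
	data, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		var corruptErr base64.CorruptInputError
		if errors.As(err, &corruptErr) {
			return nil, fmt.Errorf("invalid base64 at offset %d: %w", int64(corruptErr), corruptErr)
		}
		// Any other failure: treat the value as plain text and skip it.
		return nil, nil
	}
	return data, nil
}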

@ -116,13 +116,14 @@ func (b *Builder) Diff() (string, bool, error) {
if err != nil {
return "", createdOrDrifted, err
}
defer cleanupDir(tmpDir)
err = diff(liveFile, mergedFile, &output)
if err != nil {
cleanupDir(tmpDir)
return "", createdOrDrifted, err
}
cleanupDir(tmpDir)
createdOrDrifted = true
}
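
Builder.Diff previously called cleanupDir on every exit path once the temporary directory existed; registering the cleanup once with defer covers the current returns and any added later. A compact sketch of the pattern with hypothetical helper names:

package diffexample

import (
	"fmt"
	"os"
)

// withTempDir runs fn against a fresh temporary directory and always
// removes it, whether fn succeeds, fails, or panics.
func withTempDir(fn func(dir string) error) error {
	tmpDir, err := os.MkdirTemp("", "flux-diff-")
	if err != nil {
		return fmt.Errorf("creating temp dir: %w", err)
	}
	defer os.RemoveAll(tmpDir) // replaces the per-branch cleanup calls

	return fn(tmpDir)
}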

@ -54,5 +54,5 @@ func (p *SafeRelativePath) Type() string {
}
func (p *SafeRelativePath) Description() string {
return fmt.Sprintf("secure relative path")
return "secure relative path"
}

@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"k8s.io/apimachinery/pkg/util/wait"
"strings"
"time"
@ -172,10 +173,8 @@ func kustomizationPathDiffers(ctx context.Context, kube client.Client, objKey cl
return k.Spec.Path, nil
}
func kustomizationReconciled(ctx context.Context, kube client.Client, objKey client.ObjectKey,
kustomization *kustomizev1.Kustomization, expectRevision string) func() (bool, error) {
return func() (bool, error) {
func kustomizationReconciled(kube client.Client, objKey client.ObjectKey, kustomization *kustomizev1.Kustomization, expectRevision string) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
if err := kube.Get(ctx, objKey, kustomization); err != nil {
return false, err
}

@ -325,7 +325,7 @@ func (b *PlainGitBootstrapper) ReconcileSyncConfig(ctx context.Context, options
return fmt.Errorf("failed to generate OpenPGP entity: %w", err)
}
}
commitMsg := fmt.Sprintf("Add Flux sync manifests")
commitMsg := "Add Flux sync manifests"
if b.commitMessageAppendix != "" {
commitMsg = commitMsg + "\n\n" + b.commitMessageAppendix
}
@ -401,9 +401,8 @@ func (b *PlainGitBootstrapper) ReportKustomizationHealth(ctx context.Context, op
expectRevision := fmt.Sprintf("%s@%s", options.Branch, git.Hash(head).Digest())
var k kustomizev1.Kustomization
if err := wait.PollImmediate(pollInterval, timeout, kustomizationReconciled(
ctx, b.kube, objKey, &k, expectRevision),
); err != nil {
if err := wait.PollUntilContextTimeout(ctx, pollInterval, timeout, true,
kustomizationReconciled(b.kube, objKey, &k, expectRevision)); err != nil {
b.logger.Failuref(err.Error())
return err
}
@ -465,9 +464,7 @@ func getOpenPgpEntity(keyRing openpgp.EntityList, passphrase, keyID string) (*op
var entity *openpgp.Entity
if keyID != "" {
if strings.HasPrefix(keyID, "0x") {
keyID = strings.TrimPrefix(keyID, "0x")
}
keyID = strings.TrimPrefix(keyID, "0x")
if len(keyID) != 16 {
return nil, fmt.Errorf("invalid GPG key id length; expected %d, got %d", 16, len(keyID))
}
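
getOpenPgpEntity guarded strings.TrimPrefix with a strings.HasPrefix check; TrimPrefix already returns the string unchanged when the prefix is absent, so the guard is redundant. For illustration (the key IDs are made up):

package gpgexample

import "strings"

// normalizeKeyID strips an optional "0x" prefix from a GPG key ID.
// strings.TrimPrefix is a no-op when the prefix is not present.
func normalizeKeyID(keyID string) string {
	return strings.TrimPrefix(keyID, "0x")
}

// normalizeKeyID("0xDEADBEEFDEADBEEF") and normalizeKeyID("DEADBEEFDEADBEEF")
// both return "DEADBEEFDEADBEEF".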

@ -16,7 +16,7 @@ limitations under the License.
package kustomization
import "sigs.k8s.io/kustomize/api/filesys"
import "sigs.k8s.io/kustomize/kyaml/filesys"
type Options struct {
FileSystem filesys.FileSystem

@ -23,7 +23,7 @@ import (
"github.com/olekukonko/tablewriter"
)
// TablePrinter is a printer that prints Flux cmd outputs.
// TablePrinter is a printer that prints Flux cmd outputs.
func TablePrinter(header []string) PrinterFunc {
return func(w io.Writer, args ...interface{}) error {
var rows [][]string
@ -35,9 +35,7 @@ func TablePrinter(header []string) PrinterFunc {
if !ok {
return fmt.Errorf("unsupported type %T", v)
}
for i := range s {
rows = append(rows, s[i])
}
rows = append(rows, s...)
}
default:
return fmt.Errorf("unsupported type %T", arg)

@ -48,7 +48,8 @@ func Components(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list appsv1.DeploymentList
if err := kubeClient.List(ctx, &list, client.InNamespace(namespace), selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("Deployment/%s/%s deletion failed: %s", r.Namespace, r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
@ -61,7 +62,8 @@ func Components(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list corev1.ServiceList
if err := kubeClient.List(ctx, &list, client.InNamespace(namespace), selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("Service/%s/%s deletion failed: %s", r.Namespace, r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
@ -74,7 +76,8 @@ func Components(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list networkingv1.NetworkPolicyList
if err := kubeClient.List(ctx, &list, client.InNamespace(namespace), selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("NetworkPolicy/%s/%s deletion failed: %s", r.Namespace, r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
@ -87,7 +90,8 @@ func Components(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list corev1.ServiceAccountList
if err := kubeClient.List(ctx, &list, client.InNamespace(namespace), selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("ServiceAccount/%s/%s deletion failed: %s", r.Namespace, r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
@ -100,7 +104,8 @@ func Components(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list rbacv1.ClusterRoleList
if err := kubeClient.List(ctx, &list, selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("ClusterRole/%s deletion failed: %s", r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
@ -113,7 +118,8 @@ func Components(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list rbacv1.ClusterRoleBindingList
if err := kubeClient.List(ctx, &list, selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("ClusterRoleBinding/%s deletion failed: %s", r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
@ -134,7 +140,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list sourcev1.GitRepositoryList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -148,7 +155,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list sourcev1b2.OCIRepositoryList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -162,7 +170,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list sourcev1b2.HelmRepositoryList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -176,7 +185,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list sourcev1b2.HelmChartList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -190,7 +200,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list sourcev1b2.BucketList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -204,7 +215,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list kustomizev1.KustomizationList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -218,7 +230,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list helmv2.HelmReleaseList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -232,7 +245,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list notificationv1b2.AlertList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -246,7 +260,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list notificationv1b2.ProviderList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -260,7 +275,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list notificationv1.ReceiverList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -274,7 +290,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list imagev1.ImagePolicyList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -288,7 +305,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list imagev1.ImageRepositoryList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -302,7 +320,8 @@ func Finalizers(ctx context.Context, logger log.Logger, kubeClient client.Client
{
var list autov1.ImageUpdateAutomationList
if err := kubeClient.List(ctx, &list, client.InNamespace("")); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
r.Finalizers = []string{}
if err := kubeClient.Update(ctx, &r, opts); err != nil {
logger.Failuref("%s/%s/%s removing finalizers failed: %s", r.Kind, r.Namespace, r.Name, err.Error())
@ -324,7 +343,8 @@ func CustomResourceDefinitions(ctx context.Context, logger log.Logger, kubeClien
{
var list apiextensionsv1.CustomResourceDefinitionList
if err := kubeClient.List(ctx, &list, selector); err == nil {
for _, r := range list.Items {
for i := range list.Items {
r := list.Items[i]
if err := kubeClient.Delete(ctx, &r, opts); err != nil {
logger.Failuref("CustomResourceDefinition/%s deletion failed: %s", r.Name, err.Error())
aggregateErr = append(aggregateErr, err)
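
The uninstall loops now index the slice and re-bind each element before taking its address. Before Go 1.22 the range variable is a single reused copy, so &r aliasing it is flagged by linters (gosec G601) and becomes a real bug whenever the pointer outlives its iteration. A minimal illustration, independent of the Flux types:

package loopexample

// collectPointers shows why the uninstall loops re-bind the element
// before taking its address.
func collectPointers(items []string) []*string {
	var bad, good []*string

	for _, r := range items {
		bad = append(bad, &r) // pre-Go 1.22: every entry aliases the same variable
	}
	for i := range items {
		r := items[i]
		good = append(good, &r) // each entry points at its own copy
	}

	_ = bad
	return good
}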
