diff --git a/docs/_files/cluster-dashboard.png b/docs/_files/cluster-dashboard.png deleted file mode 100644 index c7c4ba0e..00000000 Binary files a/docs/_files/cluster-dashboard.png and /dev/null differ diff --git a/docs/_files/commit-status-flow.png b/docs/_files/commit-status-flow.png deleted file mode 100644 index e30fc6fc..00000000 Binary files a/docs/_files/commit-status-flow.png and /dev/null differ diff --git a/docs/_files/commit-status-github-failure.png b/docs/_files/commit-status-github-failure.png deleted file mode 100644 index 09e92464..00000000 Binary files a/docs/_files/commit-status-github-failure.png and /dev/null differ diff --git a/docs/_files/commit-status-github-overview.png b/docs/_files/commit-status-github-overview.png deleted file mode 100644 index 898d86b5..00000000 Binary files a/docs/_files/commit-status-github-overview.png and /dev/null differ diff --git a/docs/_files/commit-status-github-success.png b/docs/_files/commit-status-github-success.png deleted file mode 100644 index 17cf5fd1..00000000 Binary files a/docs/_files/commit-status-github-success.png and /dev/null differ diff --git a/docs/_files/commit-status-gitlab-failure.png b/docs/_files/commit-status-gitlab-failure.png deleted file mode 100644 index a0ba2f99..00000000 Binary files a/docs/_files/commit-status-gitlab-failure.png and /dev/null differ diff --git a/docs/_files/commit-status-gitlab-success.png b/docs/_files/commit-status-gitlab-success.png deleted file mode 100644 index dcbb844a..00000000 Binary files a/docs/_files/commit-status-gitlab-success.png and /dev/null differ diff --git a/docs/_files/cp-dashboard-p1.png b/docs/_files/cp-dashboard-p1.png deleted file mode 100644 index 67dbaacf..00000000 Binary files a/docs/_files/cp-dashboard-p1.png and /dev/null differ diff --git a/docs/_files/cp-dashboard-p2.png b/docs/_files/cp-dashboard-p2.png deleted file mode 100644 index ddb63b90..00000000 Binary files a/docs/_files/cp-dashboard-p2.png and /dev/null differ diff --git a/docs/_files/flux-icon.svg b/docs/_files/flux-icon.svg deleted file mode 100644 index 36ad500e..00000000 --- a/docs/_files/flux-icon.svg +++ /dev/null @@ -1,22 +0,0 @@ - - - - flux-icon - Created with Sketch. 
- - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_files/flux-icon@2x.png b/docs/_files/flux-icon@2x.png deleted file mode 100644 index c6293d30..00000000 Binary files a/docs/_files/flux-icon@2x.png and /dev/null differ diff --git a/docs/_files/helm-controller-alerts.png b/docs/_files/helm-controller-alerts.png deleted file mode 100644 index 25f36fa0..00000000 Binary files a/docs/_files/helm-controller-alerts.png and /dev/null differ diff --git a/docs/_files/helm-controller.png b/docs/_files/helm-controller.png deleted file mode 100644 index 5777ef4e..00000000 Binary files a/docs/_files/helm-controller.png and /dev/null differ diff --git a/docs/_files/image-update-automation.png b/docs/_files/image-update-automation.png deleted file mode 100644 index ab2bb179..00000000 Binary files a/docs/_files/image-update-automation.png and /dev/null differ diff --git a/docs/_files/kustomize-controller.png b/docs/_files/kustomize-controller.png deleted file mode 100644 index 1ac9cc70..00000000 Binary files a/docs/_files/kustomize-controller.png and /dev/null differ diff --git a/docs/_files/notification-controller.png b/docs/_files/notification-controller.png deleted file mode 100644 index f1a0f5c5..00000000 Binary files a/docs/_files/notification-controller.png and /dev/null differ diff --git a/docs/_files/slack-error-alert.png b/docs/_files/slack-error-alert.png deleted file mode 100644 index 71bde57e..00000000 Binary files a/docs/_files/slack-error-alert.png and /dev/null differ diff --git a/docs/_files/slack-info-alert.png b/docs/_files/slack-info-alert.png deleted file mode 100644 index d6191008..00000000 Binary files a/docs/_files/slack-info-alert.png and /dev/null differ diff --git a/docs/_files/source-controller.png b/docs/_files/source-controller.png deleted file mode 100644 index 554c0054..00000000 Binary files a/docs/_files/source-controller.png and /dev/null differ diff --git a/docs/_files/toolkit-icon.png b/docs/_files/toolkit-icon.png deleted file mode 100644 index 1c68f060..00000000 Binary files a/docs/_files/toolkit-icon.png and /dev/null differ diff --git a/docs/_static/custom.css b/docs/_static/custom.css deleted file mode 100644 index e17c215b..00000000 --- a/docs/_static/custom.css +++ /dev/null @@ -1,122 +0,0 @@ -@import url("https://fonts.googleapis.com/css?family=Montserrat&display=swap"); - -body { - font-family: "Montserrat", sans-serif; -} - -.md-logo { - width: 40px; - height: 40px; - padding-bottom: 2px; - padding-top: 2px; -} -.md-logo img { - width: 40px; - height: 40px; -} - -.md-header, .md-footer-nav { - background-image: linear-gradient(45deg, rgb(0, 150, 225) 0%, rgb(27, 141, 226) 24%, rgb(42, 125, 227) 53%, rgb(53, 112, 227) 78%, rgb(53, 112, 227) 100%); -} - -.md-header-nav__title { - font-size: .85rem; -} - -.check-bullet { - color:#07bfa5; - background-color: white; - margin-left:-22px; -} - -/* Progress bar styling */ - -.progress-label { - position: absolute; - text-align: center; - font-weight: 700; - width: 100%; - /* remove original styling for thin styling - margin: 0 ! important; */ - margin-top: -0.4rem ! 
important; - line-height: 1.2rem; - white-space: nowrap; - overflow: hidden; - } - - .progress-bar { - /*remove original styling for thin styling - height: 1.2rem; */ - height: 0.4rem; - float: left; - background: repeating-linear-gradient( - 45deg, - rgba(255, 255, 255, 0.2), - rgba(255, 255, 255, 0.2) 10px, - rgba(255, 255, 255, 0.3) 10px, - rgba(255, 255, 255, 0.3) 20px - ) #2979ff; - border-radius: 2px; - } - - .progress { - display: block; - width: 100%; - /* remove original styling for thin styling - margin: 0.5rem 0; - height: 1.2rem; */ - margin-top: 0.9rem; - height: 0.4rem; - background-color: #eeeeee; - position: relative; - border-radius: 2px; - } - - .progress-100plus .progress-bar { - background-color: #00c853; - } - - .progress-80plus .progress-bar { - background-color: #64dd17; - } - - .progress-60plus .progress-bar { - background-color: #fbc02d; - } - - .progress-40plus .progress-bar { - background-color: #ff9100; - } - - .progress-20plus .progress-bar { - background-color: #ff5252; - } - - .progress-0plus .progress-bar { - background-color: #ff1744; - } - -/* Custom admonitions */ -/* See https://squidfunk.github.io/mkdocs-material/reference/admonitions */ -:root { - --md-admonition-icon--heart: url('data:image/svg+xml;charset=utf-8,') -} -.md-typeset .admonition.heart, -.md-typeset details.heart { - border-color: rgb(233, 30, 99); -} -.md-typeset .heart > .admonition-title, -.md-typeset .heart > summary { - background-color: rgba(233, 30, 99, 0.1); -} -.md-typeset .heart > .admonition-title::before, -.md-typeset .heart > summary::before { - background-color: rgb(233, 30, 99); - -webkit-mask-image: var(--md-admonition-icon--heart); - mask-image: var(--md-admonition-icon--heart); -} - -.timetable-explicit-col-widths th:nth-child(1) { width: 4%; } -.timetable-explicit-col-widths th:nth-child(2) { width: 32%; } -.timetable-explicit-col-widths th:nth-child(3) { width: 32%; } -.timetable-explicit-col-widths th:nth-child(4) { width: 32%; } diff --git a/docs/components/helm/controller.md b/docs/components/helm/controller.md deleted file mode 100644 index e35b7a5e..00000000 --- a/docs/components/helm/controller.md +++ /dev/null @@ -1,28 +0,0 @@ -# Helm Controller - -The Helm Controller is a Kubernetes operator, allowing one to declaratively manage Helm chart -releases with Kubernetes manifests. - -![](../../_files/helm-controller.png) - -The desired state of a Helm release is described through a Kubernetes Custom Resource named `HelmRelease`. -Based on the creation, mutation or removal of a `HelmRelease` resource in the cluster, -Helm actions are performed by the controller. 
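For illustration, a minimal `HelmRelease` could look like the sketch below. The chart name, version range and the referenced `podinfo` `HelmRepository` are assumptions for the example; the full schema is in the specification docs linked at the end of this page:

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: podinfo
  namespace: default
spec:
  # how often the controller reconciles the release
  interval: 5m
  chart:
    spec:
      # chart name as published in the referenced HelmRepository
      chart: podinfo
      # SemVer range; the controller picks the latest matching version
      version: "5.x"
      sourceRef:
        kind: HelmRepository
        name: podinfo
  # values passed to the chart on install/upgrade
  values:
    replicaCount: 2
```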
- -Features: - -- Watches for `HelmRelease` objects and generates `HelmChart` objects -- Supports `HelmChart` artifacts produced from `HelmRepository` and `GitRepository` sources -- Fetches artifacts produced by [source-controller](../source/controller.md) from `HelmChart` objects -- Watches `HelmChart` objects for revision changes (including semver ranges for charts from `HelmRepository` sources) -- Performs automated Helm actions, including Helm tests, rollbacks and uninstalls -- Offers extensive configuration options for automated remediation (rollback, uninstall, retry) on failed Helm install, upgrade or test actions -- Runs Helm install/upgrade in a specific order, taking into account the depends-on relationship defined in a set of `HelmRelease` objects -- Prunes Helm releases removed from the cluster (garbage collection) -- Reports Helm release statuses (alerting provided by [notification-controller](../notification/controller.md)) -- Built-in Kustomize-compatible Helm post-renderer, providing support for strategic merge, JSON 6902 and image patches - -Links: - -- Source code [fluxcd/helm-controller](https://github.com/fluxcd/helm-controller) -- Specification [docs](https://github.com/fluxcd/helm-controller/tree/main/docs/spec) diff --git a/docs/components/image/controller.md b/docs/components/image/controller.md deleted file mode 100644 index 8829afad..00000000 --- a/docs/components/image/controller.md +++ /dev/null @@ -1,18 +0,0 @@ -# Image reflector and automation controllers - -The image-reflector-controller and image-automation-controller work together to update a Git -repository when new container images are available. - -- The image-reflector-controller scans image repositories and reflects the image metadata in - Kubernetes resources. -- The image-automation-controller updates YAML files based on the latest images scanned, and commits - the changes to a given Git repository. - -![](../../_files/image-update-automation.png) - -Links: - -- Source code [fluxcd/image-reflector-controller](https://github.com/fluxcd/image-reflector-controller) -- Reflector [specification docs](https://github.com/fluxcd/image-reflector-controller/tree/main/docs/spec) -- Source code [fluxcd/image-automation-controller](https://github.com/fluxcd/image-automation-controller) -- Automation [specification docs](https://github.com/fluxcd/image-automation-controller/tree/main/docs/spec) diff --git a/docs/components/index.md b/docs/components/index.md deleted file mode 100644 index ba58e794..00000000 --- a/docs/components/index.md +++ /dev/null @@ -1,30 +0,0 @@ -# GitOps Toolkit components - -The GitOps Toolkit is the set of APIs and controllers that make up the -runtime for Flux v2. The APIs comprise Kubernetes custom resources, -which can be created and updated by a cluster user, or by other -automation tooling. - -You can use the toolkit to extend Flux, and to build your own systems -for continuous delivery. The [source-watcher -guide](../dev-guides/source-watcher/) is a good place to start. - -A reference for each component and API type is linked below.
- -- [Source Controller](source/controller.md) - - [GitRepository CRD](source/gitrepositories.md) - - [HelmRepository CRD](source/helmrepositories.md) - - [HelmChart CRD](source/helmcharts.md) - - [Bucket CRD](source/buckets.md) -- [Kustomize Controller](kustomize/controller.md) - - [Kustomization CRD](kustomize/kustomization.md) -- [Helm Controller](helm/controller.md) - - [HelmRelease CRD](helm/helmreleases.md) -- [Notification Controller](notification/controller.md) - - [Provider CRD](notification/provider.md) - - [Alert CRD](notification/alert.md) - - [Receiver CRD](notification/receiver.md) -- [Image automation controllers](image/controller.md) - - [ImageRepository CRD](image/imagerepositories.md) - - [ImagePolicy CRD](image/imagepolicies.md) - - [ImageUpdateAutomation CRD](image/imageupdateautomations.md) diff --git a/docs/components/kustomize/controller.md b/docs/components/kustomize/controller.md deleted file mode 100644 index 3d0bafb9..00000000 --- a/docs/components/kustomize/controller.md +++ /dev/null @@ -1,23 +0,0 @@ -# Kustomize Controller - -The kustomize-controller is a Kubernetes operator, -specialized in running continuous delivery pipelines for infrastructure and -workloads defined with Kubernetes manifests and assembled with Kustomize. - -![](../../_files/kustomize-controller.png) - -Features: - -- Reconciles the cluster state from multiple sources (provided by source-controller) -- Generates manifests with Kustomize (from plain Kubernetes YAMLs or Kustomize overlays) -- Validates manifests against the Kubernetes API -- Impersonates service accounts (multi-tenancy RBAC) -- Health assessment of the deployed workloads -- Runs pipelines in a specific order (depends-on relationship) -- Prunes objects removed from source (garbage collection) -- Reports cluster state changes (alerting provided by notification-controller) - -Links: - -- Source code [fluxcd/kustomize-controller](https://github.com/fluxcd/kustomize-controller) -- Specification [docs](https://github.com/fluxcd/kustomize-controller/tree/main/docs/spec) diff --git a/docs/components/notification/controller.md b/docs/components/notification/controller.md deleted file mode 100644 index e3bc97f6..00000000 --- a/docs/components/notification/controller.md +++ /dev/null @@ -1,17 +0,0 @@ -# Notification Controller - -The Notification Controller is a Kubernetes operator, specialized in handling inbound and outbound events. - -![](../../_files/notification-controller.png) - -The controller handles events coming from external systems (GitHub, GitLab, Bitbucket, Harbor, Jenkins, etc.) -and notifies the GitOps toolkit controllers about source changes. - -The controller handles events emitted by the GitOps toolkit controllers (source, kustomize, helm) -and dispatches them to external systems (Slack, Microsoft Teams, Discord, Rocket.Chat) -based on event severity and involved objects. - -Links: - -- Source code [fluxcd/notification-controller](https://github.com/fluxcd/notification-controller) -- Specification [docs](https://github.com/fluxcd/notification-controller/tree/main/docs/spec) diff --git a/docs/components/source/controller.md b/docs/components/source/controller.md deleted file mode 100644 index a590e454..00000000 --- a/docs/components/source/controller.md +++ /dev/null @@ -1,24 +0,0 @@ -# Source Controller - -The main role of the source management component is to provide a common interface for artifact acquisition.
-The source API defines a set of Kubernetes objects that cluster admins and various automated operators can -interact with to offload the Git and Helm repository operations to a dedicated controller. - -![](../../_files/source-controller.png) - -Features: - -- Validate source definitions -- Authenticate to sources (SSH, user/password, API token) -- Validate source authenticity (PGP) -- Detect source changes based on update policies (semver) -- Fetch resources on demand and on a schedule -- Package the fetched resources into a well-known format (tar.gz, yaml) -- Make the artifacts addressable by their source identifier (sha, version, ts) -- Make the artifacts available in-cluster to interested 3rd parties -- Notify interested 3rd parties of source changes and availability (status conditions, events, hooks) - -Links: - -- Source code [fluxcd/source-controller](https://github.com/fluxcd/source-controller) -- Specification [docs](https://github.com/fluxcd/source-controller/tree/main/docs/spec) \ No newline at end of file diff --git a/docs/contributing/index.md b/docs/contributing/index.md deleted file mode 120000 index f939e75f..00000000 --- a/docs/contributing/index.md +++ /dev/null @@ -1 +0,0 @@ -../../CONTRIBUTING.md \ No newline at end of file diff --git a/docs/core-concepts/index.md b/docs/core-concepts/index.md deleted file mode 100644 index eae7e56a..00000000 --- a/docs/core-concepts/index.md +++ /dev/null @@ -1,52 +0,0 @@ -# Core Concepts - -!!! note "Work in progress" - This document is a work in progress. - -These are some core concepts in Flux. - -## GitOps - -GitOps is a way of managing your infrastructure and applications so that the whole system is described declaratively and version controlled (most likely in a Git repository), with an automated process that ensures that the deployed environment matches the state specified in the repository. - -For more information, take a look at ["What is GitOps?"](https://www.gitops.tech/#what-is-gitops). - -## Sources - -A *Source* defines the origin of a repository and the requirements to obtain -it (e.g. credentials, version selectors). For example, the latest `1.x` tag -available from a Git repository over SSH. - -Sources produce an artifact that is consumed by other Flux elements to perform -actions, like applying the contents of the artifact on the cluster. A source -may be shared by multiple consumers to deduplicate configuration and/or storage. - -The origin of the source is checked for changes on a defined interval; if -there is a newer version available that matches the criteria, a new artifact -is produced. - -All sources are specified as Custom Resources in a Kubernetes cluster; examples -of sources are `GitRepository`, `HelmRepository` and `Bucket` resources. - -For more information, take a look at [the source controller documentation](../components/source/controller.md). - -## Reconciliation - -Reconciliation refers to ensuring that a given state (e.g. applications running in the cluster, infrastructure) matches a desired state declaratively defined somewhere (e.g. a Git repository). There are various examples of these in Flux, e.g.:
-- Bucket reconciliation: downloads and archives the contents of the declared bucket on a given interval and stores this as an artifact, records the observed revision of the artifact and the artifact itself in the status of resource. -- [Kustomization](#kustomization) reconciliation: ensures the state of the application deployed on a cluster matches resources contained in a git repository. - -## Kustomization - -The kustomization represents a local set of Kubernetes resources that Flux is supposed to reconcile in the cluster. The reconciliation runs every one minute by default but this can be specified in the kustomization. If you make any changes to the cluster using `kubectl edit` or `kubectl patch`, it will be promptly reverted. You either suspend the reconciliation or push your changes to a Git repository. - -For more information, take a look at [this documentation](../components/kustomize/kustomization.md). - -## Bootstrap - -The process of installing the Flux components in a complete GitOps way is called a bootstrap. The manifests are applied to the cluster, a `GitRepository` and `Kustomization` are created for the Flux components, and the manifests are pushed to an existing Git repository (or a new one is created). Flux can manage itself just as it manages other resources. -The bootstrap is done using the `flux` CLI `flux bootstrap`. - -For more information, take a look at [the documentation for the bootstrap command](../cmd/flux_bootstrap.md). diff --git a/docs/dev-guides/debugging.md b/docs/dev-guides/debugging.md deleted file mode 100644 index b0b125cc..00000000 --- a/docs/dev-guides/debugging.md +++ /dev/null @@ -1,42 +0,0 @@ -# Advanced debugging - -This guide covers more advanced debugging topics such as collecting -runtime profiling data from GitOps Toolkit components. - -As a user, this page normally should be a last resort, but you may -be asked by a maintainer to share a [collected profile](#collecting-a-profile) -to debug e.g. performance issues. - -## Pprof - -The [GitOps Toolkit components](../components/index.md) serve [`pprof`](https://golang.org/pkg/net/http/pprof/) -runtime profiling data on their metrics HTTP server (default `:8080`). - -### Endpoints - -| Endpoint | Path | -|-------------|------------------------| -| Index | `/debug/pprof/` | -| CPU profile | `/debug/pprof/profile` | -| Symbol | `/debug/pprof/symbol` | -| Trace | `/debug/pprof/trace` | - -### Collecting a profile - -To collect a profile, port-forward to the component's metrics endpoint -and collect the data from the [endpoint](#endpoints) of choice: - -```console -$ kubectl port-forward -n deploy/ 8080 -$ curl -Sk -v http://localhost:8080/debug/pprof/heap > heap.out -``` - -The collected profile [can be analyzed using `go`](https://blog.golang.org/pprof), -or shared with one of the maintainers. - -## Resource usage - -As `kubectl top` gives a limited (and at times inaccurate) overview of -resource usage, it is often better to make use of the Grafana metrics -to gather insights. See [monitoring](../guides/monitoring.md) for a -guide on how to visualize this data with a Grafana dashboard. 
diff --git a/docs/dev-guides/source-watcher.md b/docs/dev-guides/source-watcher.md deleted file mode 100644 index 4353d444..00000000 --- a/docs/dev-guides/source-watcher.md +++ /dev/null @@ -1,230 +0,0 @@ -# Watching for source changes - -In this guide you'll be developing a Kubernetes controller with -[Kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) -that subscribes to [GitRepository](../components/source/gitrepositories.md) -events and reacts to revision changes by downloading the artifact produced by -[source-controller](../components/source/controller.md). - -## Prerequisites - -On your dev machine install the following tools: - -* go >= 1.15 -* kubebuilder >= 2.3 -* kind >= 0.8 -* kubectl >= 1.18 -* kustomize >= 3.5 -* docker >= 19.03 - -## Install Flux - -Create a cluster for testing: - -```sh -kind create cluster --name dev -``` - -Install the Flux CLI: - -```sh -curl -s https://fluxcd.io/install.sh | sudo bash -``` - -Verify that your dev machine satisfies the prerequisites with: - -```sh -flux check --pre -``` - -Install source-controller on the dev cluster: - -```sh -flux install \ ---namespace=flux-system \ ---network-policy=false \ ---components=source-controller -``` - -## Clone the sample controller - -You'll be using [fluxcd/source-watcher](https://github.com/fluxcd/source-watcher) as -a template for developing your own controller. The source-watcher was scaffolded with `kubebuilder init`. - -Clone the source-watcher repository: - -```sh -git clone https://github.com/fluxcd/source-watcher -cd source-watcher -``` - -Build the controller: - -```sh -make -``` - -## Run the controller - -Port forward to source-controller artifacts server: - -```sh -kubectl -n flux-system port-forward svc/source-controller 8181:80 -``` - -Export the local address as `SOURCE_HOST`: - -```sh -export SOURCE_HOST=localhost:8181 -``` - -Run source-watcher locally: - -```sh -make run -``` - -Create a Git source: - -```sh -flux create source git test \ ---url=https://github.com/stefanprodan/podinfo \ ---tag=4.0.0 -``` - -The source-watcher should log the revision: - -```console -New revision detected {"gitrepository": "flux-system/test", "revision": "4.0.0/ab953493ee14c3c9800bda0251e0c507f9741408"} -Extracted tarball into /var/folders/77/3y6x_p2j2g9fspdkzjbm5_s40000gn/T/test292235827: 123 files, 29 dirs (32.603415ms) -Processing files... -``` - -Change the Git tag: - -```sh -flux create source git test \ ---url=https://github.com/stefanprodan/podinfo \ ---tag=4.0.1 -``` - -The source-watcher should log the new revision: - -```console -New revision detected {"gitrepository": "flux-system/test", "revision": "4.0.1/113360052b3153e439a0cf8de76b8e3d2a7bdf27"} -``` - -The source-controller reports the revision under `GitRepository.Status.Artifact.Revision` in the format: `/`. 
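That is, the Git reference name and the commit SHA joined by a slash, as seen in the log output above (e.g. `4.0.1/113360052b3153e439a0cf8de76b8e3d2a7bdf27`). As a sketch, assuming the `test` source created earlier and the default `flux-system` namespace, you can read the reported revision directly from the object's status:

```sh
kubectl -n flux-system get gitrepository test \
  -o jsonpath='{.status.artifact.revision}'
```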
- -## How it works - -The [GitRepositoryWatcher](https://github.com/fluxcd/source-watcher/blob/main/controllers/gitrepository_watcher.go) -controller does the following: - -* subscribes to `GitRepository` events -* detects when the Git revision changes -* downloads and extracts the source artifact -* write to stdout the extracted file names - -```go -// GitRepositoryWatcher watches GitRepository objects for revision changes -type GitRepositoryWatcher struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get -func (r *GitRepositoryWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := logr.FromContext(ctx) - - // get source object - var repository sourcev1.GitRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - log.Info("New revision detected", "revision", repository.Status.Artifact.Revision) - - // create tmp dir - tmpDir, err := ioutil.TempDir("", repository.Name) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create temp dir, error: %w", err) - } - defer os.RemoveAll(tmpDir) - - // download and extract artifact - summary, err := r.fetchArtifact(ctx, repository, tmpDir) - if err != nil { - log.Error(err, "unable to fetch artifact") - return ctrl.Result{}, err - } - log.Info(summary) - - // list artifact content - files, err := ioutil.ReadDir(tmpDir) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to list files, error: %w", err) - } - - // do something with the artifact content - for _, f := range files { - log.Info("Processing " + f.Name()) - } - - return ctrl.Result{}, nil -} - -func (r *GitRepositoryWatcher) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.GitRepository{}, builder.WithPredicates(GitRepositoryRevisionChangePredicate{})). - Complete(r) -} -``` - -To add the watcher to an existing project, copy the controller and the revision change predicate to your `controllers` dir: - -* [gitrepository_watcher.go](https://github.com/fluxcd/source-watcher/blob/main/controllers/gitrepository_watcher.go) -* [gitrepository_predicate.go](https://github.com/fluxcd/source-watcher/blob/main/controllers/gitrepository_predicate.go) - -In your `main.go` init function, register the Source API schema: - -```go -import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - -func init() { - _ = clientgoscheme.AddToScheme(scheme) - _ = sourcev1.AddToScheme(scheme) - - // +kubebuilder:scaffold:scheme -} -``` - -Start the controller in the main function: - -```go -func main() { - - if err = (&controllers.GitRepositoryWatcher{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "GitRepositoryWatcher") - os.Exit(1) - } - -} -``` - -Note that the watcher controller depends on Kubernetes client-go >= 1.20. -Your `go.mod` should require controller-runtime v0.8 or newer: - -```go -require ( - k8s.io/apimachinery v0.20.2 - k8s.io/client-go v0.20.2 - sigs.k8s.io/controller-runtime v0.8.3 -) -``` - -That's it! Happy hacking! 
diff --git a/docs/faq/index.md b/docs/faq/index.md deleted file mode 100644 index e4384476..00000000 --- a/docs/faq/index.md +++ /dev/null @@ -1,273 +0,0 @@ -# Frequently asked questions - -## Kustomize questions - -### Are there two Kustomization types? - -Yes, the `kustomization.kustomize.toolkit.fluxcd.io` is a Kubernetes -[custom resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -while `kustomization.kustomize.config.k8s.io` is the type used to configure a -[Kustomize overlay](https://kubectl.docs.kubernetes.io/references/kustomize/). - -The `kustomization.kustomize.toolkit.fluxcd.io` object refers to a `kustomization.yaml` -file path inside a Git repository or Bucket source. - -### How do I use them together? - -Assuming an app repository with `./deploy/prod/kustomization.yaml`: - -```yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - deployment.yaml - - service.yaml - - ingress.yaml -``` - -Define a source of type `gitrepository.source.toolkit.fluxcd.io` -that pulls changes from the app repository every 5 minutes inside the cluster: - -```yaml -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: GitRepository -metadata: - name: my-app - namespace: default -spec: - interval: 5m - url: https://github.com/my-org/my-app - ref: - branch: main -``` - -Then define a `kustomization.kustomize.toolkit.fluxcd.io` that uses the `kustomization.yaml` -from `./deploy/prod` to determine which resources to create, update or delete: - -```yaml -apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 -kind: Kustomization -metadata: - name: my-app - namespace: default -spec: - interval: 15m - path: "./deploy/prod" - prune: true - sourceRef: - kind: GitRepository - name: my-app -``` - -### What is a Kustomization reconciliation? - -In the above example, we pull changes from Git every 5 minutes, -and a new commit will trigger a reconciliation of -all the `Kustomization` objects using that source. - -Depending on your configuration, a reconciliation can mean: - -* generating a kustomization.yaml file in the specified path -* building the kustomize overlay -* decrypting secrets -* validating the manifests with client or server-side dry-run -* applying changes on the cluster -* health checking of deployed workloads -* garbage collection of resources removed from Git -* issuing events about the reconciliation result -* recording metrics about the reconciliation process - -The 15-minute reconciliation interval is the interval at which manual changes, -e.g. `kubectl set image deployment/my-app`, are undone by reapplying the latest commit on the cluster. - -Note that a reconciliation will override all fields of a Kubernetes object that diverge from Git. -For example, you'll have to omit the `spec.replicas` field from your `Deployments` YAMLs if you -are using a `HorizontalPodAutoscaler` that changes the replicas in-cluster. - -### Can I use repositories with plain YAMLs? - -Yes, you can specify the path where the Kubernetes manifests are, -and kustomize-controller will generate a `kustomization.yaml` if one doesn't exist.
- -Assuming an app repository with the following structure: - -``` -├── deploy -│   └── prod -│   ├── .yamllint.yaml -│   ├── deployment.yaml -│   ├── service.yaml -│   └── ingress.yaml -└── src -``` - -Create a `GitRepository` definition and exclude all the files that are not Kubernetes manifests: - -```yaml -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: GitRepository -metadata: - name: my-app - namespace: default -spec: - interval: 5m - url: https://github.com/my-org/my-app - ref: - branch: main - ignore: | - # exclude all - /* - # include deploy dir - !/deploy - # exclude non-Kubernetes YAMLs - /deploy/**/.yamllint.yaml -``` - -Then create a `Kustomization` definition to reconcile the `./deploy/prod` dir: - -```yaml -apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 -kind: Kustomization -metadata: - name: my-app - namespace: default -spec: - interval: 15m - path: "./deploy/prod" - prune: true - sourceRef: - kind: GitRepository - name: my-app -``` - -With the above configuration, source-controller will pull the Kubernetes manifests -from the app repository and kustomize-controller will generate a -`kustomization.yaml` including all the resources found with `./deploy/prod/**/*.yaml`. - -The kustomize-controller creates `kustomization.yaml` files similar to: - -```sh -cd ./deploy/prod && kustomize create --autodetect --recursive -``` - -### What is the behavior of Kustomize used by Flux? - -We refer to the Kustomize CLI flags here, so that you can replicate the same behavior using the CLI. -The behavior of Kustomize used by the controller is currently configured as follows: - -- `--allow_id_changes` is set to false, so it does not change any resource IDs. -- `--enable_kyaml` is disabled by default, so it currently uses `k8sdeps` to process YAMLs. -- `--enable_alpha_plugins` is disabled by default, so it uses only the built-in plugins. -- `--load_restrictor` is set to `LoadRestrictionsNone`, so it allows loading files outside the dir containing `kustomization.yaml`. -- `--reorder` of resources is done in the `legacy` mode, so the output will have namespaces and cluster roles/role bindings first, CRDs before CRs, and webhooks last. - -!!! hint "`kustomization.yaml` validation" - To validate changes before committing and/or merging, [a validation - utility script is available](https://github.com/fluxcd/flux2-kustomize-helm-example/blob/main/scripts/validate.sh); - it runs `kustomize` locally or in CI with the same set of flags as - the controller and validates the output using `kubeval`. - -## Helm questions - -### How to debug "not ready" errors? - -Misconfiguring the `HelmRelease.spec.chart`, like a typo in the chart name, version or chart source URL -would result in a "HelmChart is not ready" error displayed by: - -```console -$ flux get helmreleases --all-namespaces -NAMESPACE NAME READY MESSAGE -default podinfo False HelmChart 'default/default-podinfo' is not ready -``` - -In order to get to the root cause, first make sure the source, e.g.
the `HelmRepository` -is configured properly and has access to the remote `index.yaml`: - -```console -$ flux get sources helm --all-namespaces -NAMESPACE NAME READY MESSAGE -default podinfo False failed to fetch https://stefanprodan.github.io/podinfo2/index.yaml : 404 Not Found -``` - -If the source is `Ready`, then the error must be caused by the chart, -for example due to an invalid chart name or non-existing version: - -```console -$ flux get sources chart --all-namespaces -NAMESPACE NAME READY MESSAGE -default default-podinfo False no chart version found for podinfo-9.0.0 -``` - -### Can I use Flux HelmReleases without GitOps? - -Yes, you can install the Flux components directly on a cluster -and manage Helm releases with `kubectl`. - -Install the controllers needed for Helm operations with `flux`: - -```sh -flux install \ ---namespace=flux-system \ ---network-policy=false \ ---components=source-controller,helm-controller -``` - -Create a Helm release with `kubectl`: - -```sh -cat << EOF | kubectl apply -f - ---- -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: HelmRepository -metadata: - name: bitnami - namespace: flux-system -spec: - interval: 30m - url: https://charts.bitnami.com/bitnami ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: metrics-server - namespace: kube-system -spec: - interval: 60m - releaseName: metrics-server - chart: - spec: - chart: metrics-server - version: "^5.x" - sourceRef: - kind: HelmRepository - name: bitnami - namespace: flux-system - values: - apiService: - create: true -EOF -``` - -Based on the above definition, Flux will upgrade the release automatically -when Bitnami publishes a new version of the metrics-server chart. - -## Flux v1 vs v2 questions - -### What are the differences between v1 and v2? - -Flux v1 is a monolithic do-it-all operator; -Flux v2 separates the functionalities into specialized controllers, collectively called the GitOps Toolkit. - -You can find a detailed comparison of Flux v1 and v2 features in the [migration FAQ](../guides/faq-migration.md). - -### How can I migrate from v1 to v2? - -The Flux community has created guides and example repositories -to help you migrate to Flux v2: - -- [Migrate from Flux v1](https://toolkit.fluxcd.io/guides/flux-v1-migration/) -- [Migrate from `.flux.yaml` and kustomize](https://toolkit.fluxcd.io/guides/flux-v1-migration/#flux-with-kustomize) -- [Migrate from Flux v1 automated container image updates](https://toolkit.fluxcd.io/guides/flux-v1-automation-migration/) -- [How to manage multi-tenant clusters with Flux v2](https://github.com/fluxcd/flux2-multi-tenancy) -- [Migrate from Helm Operator to Flux v2](https://toolkit.fluxcd.io/guides/helm-operator-migration/) -- [How to structure your HelmReleases](https://github.com/fluxcd/flux2-kustomize-helm-example) diff --git a/docs/get-started/index.md b/docs/get-started/index.md deleted file mode 100644 index 64826125..00000000 --- a/docs/get-started/index.md +++ /dev/null @@ -1,299 +0,0 @@ -# Get started with Flux v2 - -!!! note "Basic knowledge" - This guide assumes you have some understanding of the core concepts and have read the introduction to Flux. - The core concepts used in this guide are [GitOps](../core-concepts/index.md#gitops), - [Sources](../core-concepts/index.md#sources), [Kustomization](../core-concepts/index.md#kustomization). - -In this tutorial, you will deploy an application to a kubernetes cluster with Flux -and manage the cluster in a complete GitOps manner. 
-You'll be using a dedicated Git repository, e.g. `fleet-infra`, to manage your Kubernetes clusters. - -## Prerequisites - -In order to follow the guide, you will need a Kubernetes cluster version 1.16 or newer and kubectl version 1.18 or newer. -For a quick local test, you can use [Kubernetes kind](https://kind.sigs.k8s.io/docs/user/quick-start/). -Any other Kubernetes setup will work as well though. - -Flux is installed in a GitOps way and its manifests will be pushed to the repository, -so you will also need a GitHub account and a -[personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line) -that can create repositories (check all permissions under `repo`) to enable Flux to do this. - -Export your GitHub personal access token and username: - -```sh -export GITHUB_TOKEN=<your-token> -export GITHUB_USER=<your-username> -``` - -## Install the Flux CLI - -To install the latest `flux` release on macOS and Linux using -[Homebrew](https://brew.sh/) run: - -```sh -brew install fluxcd/tap/flux -``` - -Or install `flux` by downloading precompiled binaries using a Bash script: - -```sh -curl -s https://fluxcd.io/install.sh | sudo bash -``` - -The install script downloads the flux binary to `/usr/local/bin`. - -If using Arch Linux, install the latest stable version from **AUR** using -either [flux-bin](https://aur.archlinux.org/packages/flux-bin) (pre-built -binary) or [flux-go](https://aur.archlinux.org/packages/flux-go) (locally built -binary). - -Binaries for **macOS**, **Windows** and **Linux** AMD64/ARM are available for download on the -[release page](https://github.com/fluxcd/flux2/releases). - -To configure your shell to load `flux` [bash completions](../cmd/flux_completion_bash.md), add to your profile: - -```sh -# ~/.bashrc or ~/.bash_profile -. <(flux completion bash) -``` - -[`zsh`](../cmd/flux_completion_zsh.md), [`fish`](../cmd/flux_completion_fish.md), and [`powershell`](../cmd/flux_completion_powershell.md) are also supported with their own sub-commands. - -## Install Flux components - -Create the cluster using Kubernetes kind or set the kubectl context to an existing cluster: - -```sh -kind create cluster -kubectl cluster-info -``` - -Verify that your cluster satisfies the prerequisites with: - -```console -$ flux check --pre -► checking prerequisites -✔ kubectl 1.18.3 >=1.18.0 -✔ kubernetes 1.18.2 >=1.16.0 -✔ prerequisites checks passed -``` - -Run the bootstrap command: - -```sh -flux bootstrap github \ - --owner=$GITHUB_USER \ - --repository=fleet-infra \ - --branch=main \ - --path=./clusters/my-cluster \ - --personal -``` - -!!! hint "Multi-arch images" - The component images are published as [multi-arch container images](https://docs.docker.com/docker-for-mac/multi-arch/) - with support for Linux `amd64`, `arm64` and `armv7` (e.g. 32bit Raspberry Pi) - architectures. - -The bootstrap command creates a repository if one doesn't exist, -commits the manifests for the Flux components to the default branch at the specified path, -and installs the Flux components. -Then it configures the target cluster to synchronize with the specified path inside the repository.
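To verify the result (a sketch, assuming the default `flux-system` namespace), check the cluster-side components:

```sh
# validate the installed components and their versions
flux check

# the controllers should be running in the flux-system namespace
kubectl -n flux-system get deployments
```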
- -If you wish to create the repository under a GitHub organization: - -```sh -flux bootstrap github \ - --owner=<organization> \ - --repository=<repository-name> \ - --branch=<default-branch> \ - --team=<team1-slug> \ - --team=<team2-slug> \ - --path=./clusters/my-cluster -``` - -Example output: - -```console -$ flux bootstrap github --owner=gitopsrun --team=devs --repository=fleet-infra --path=./clusters/my-cluster -► connecting to github.com -✔ repository created -✔ devs team access granted -✔ repository cloned -✚ generating manifests -✔ components manifests pushed -► installing components in flux-system namespace -deployment "source-controller" successfully rolled out -deployment "kustomize-controller" successfully rolled out -deployment "helm-controller" successfully rolled out -deployment "notification-controller" successfully rolled out -✔ install completed -► configuring deploy key -✔ deploy key configured -► generating sync manifests -✔ sync manifests pushed -► applying sync manifests -◎ waiting for cluster sync -✔ bootstrap finished -``` - -If you prefer GitLab, export `GITLAB_TOKEN` env var and -use the command [flux bootstrap gitlab](../guides/installation.md#gitlab-and-gitlab-enterprise). - -!!! hint "Idempotency" - It is safe to run the bootstrap command as many times as you want. - If the Flux components are present on the cluster, - the bootstrap command will perform an upgrade if needed. - You can target a specific Flux [version](https://github.com/fluxcd/flux2/releases) - with `flux bootstrap --version=<version>`. - -## Clone the Git repository - -We are going to drive app deployments in a GitOps manner, -using the Git repository as the desired state for our cluster. -Instead of applying the manifests directly to the cluster, -Flux will apply them for us. - -Therefore, we need to clone the repository to our local machine: - -```sh -git clone https://github.com/$GITHUB_USER/fleet-infra -cd fleet-infra -``` - -## Add podinfo repository to Flux - -We will be using a public repository [github.com/stefanprodan/podinfo](https://github.com/stefanprodan/podinfo); -podinfo is a tiny web application made with Go. - -Create a [GitRepository](../components/source/gitrepositories/) -manifest pointing to the podinfo repository's master branch: - -```sh -flux create source git podinfo \ - --url=https://github.com/stefanprodan/podinfo \ - --branch=master \ - --interval=30s \ - --export > ./clusters/my-cluster/podinfo-source.yaml -``` - -The above command generates the following manifest: - -```yaml -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: GitRepository -metadata: - name: podinfo - namespace: flux-system -spec: - interval: 30s - ref: - branch: master - url: https://github.com/stefanprodan/podinfo -``` - -Commit and push it to the `fleet-infra` repository: - -```sh -git add -A && git commit -m "Add podinfo GitRepository" -git push -``` - -## Deploy podinfo application - -We will create a Flux [Kustomization](../components/kustomize/kustomization/) manifest for podinfo. -This configures Flux to build and apply the [kustomize](https://github.com/stefanprodan/podinfo/tree/master/kustomize) -directory located in the podinfo repository.
- -```sh -flux create kustomization podinfo \ - --source=podinfo \ - --path="./kustomize" \ - --prune=true \ - --validation=client \ - --interval=5m \ - --export > ./clusters/my-cluster/podinfo-kustomization.yaml -``` - -The above command generates the following manifest: - -```yaml -apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 -kind: Kustomization -metadata: - name: podinfo - namespace: flux-system -spec: - interval: 5m0s - path: ./kustomize - prune: true - sourceRef: - kind: GitRepository - name: podinfo - validation: client -``` - -Commit and push the `Kustomization` manifest to the repository: - -```sh -git add -A && git commit -m "Add podinfo Kustomization" -git push -``` - -The structure of your repository should look like this: - -``` -fleet-infra -└── clusters/ - └── my-cluster/ - ├── flux-system/ - │ ├── gotk-components.yaml - │ ├── gotk-sync.yaml - │ └── kustomization.yaml - ├── podinfo-kustomization.yaml - └── podinfo-source.yaml -``` - -## Watch Flux sync the application - -In about 30s the synchronization should start: - -```console -$ watch flux get kustomizations -NAME READY MESSAGE -flux-system True Applied revision: main/fc07af652d3168be329539b30a4c3943a7d12dd8 -podinfo True Applied revision: master/855f7724be13f6146f61a893851522837ad5b634 -``` - -When the synchronization finishes you can check that podinfo has been deployed on your cluster: - -```console -$ kubectl -n default get deployments,services -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/podinfo 2/2 2 2 108s - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/podinfo ClusterIP 10.100.149.126 9898/TCP,9999/TCP 108s -``` - -!!! tip - From this moment forward, any changes made to the podinfo - Kubernetes manifests in the master branch will be synchronised with your cluster. - -If a Kubernetes manifest is removed from the podinfo repository, Flux will remove it from your cluster. -If you delete a `Kustomization` from the fleet-infra repository, Flux will remove all Kubernetes objects that -were previously applied from that `Kustomization`. - -If you alter the podinfo deployment using `kubectl edit`, the changes will be reverted to match -the state described in Git. When dealing with an incident, you can pause the reconciliation of a -kustomization with `flux suspend kustomization `. Once the debugging session -is over, you can re-enable the reconciliation with `flux resume kustomization `. - -## Multi-cluster Setup - -To use Flux to manage more than one cluster or promote deployments from staging to production, take a look at the -two approaches in the repositories listed below. - -1. [https://github.com/fluxcd/flux2-kustomize-helm-example](https://github.com/fluxcd/flux2-kustomize-helm-example) -2. [https://github.com/fluxcd/flux2-multi-tenancy](https://github.com/fluxcd/flux2-multi-tenancy) \ No newline at end of file diff --git a/docs/guides/faq-migration.md b/docs/guides/faq-migration.md deleted file mode 100644 index 2570be5f..00000000 --- a/docs/guides/faq-migration.md +++ /dev/null @@ -1,92 +0,0 @@ -## Flux v1 vs v2 questions - -### What does Flux v2 mean for Flux? - -Flux v1 is a monolithic do-it-all operator; Flux v2 separates the functionalities into specialized controllers, collectively called the GitOps Toolkit. - -You can install and operate Flux v2 simply using the `flux` command. You can easily pick and choose the functionality you need and extend it to serve your own purposes. - -The timeline we are looking at right now is: - -1. 
Put Flux v1 into maintenance mode (no new features being added; bugfixes and CVEs patched only). -1. Continue work on the [Flux v2 roadmap](https://toolkit.fluxcd.io/roadmap/). -1. We will provide transition guides for specific user groups, e.g. users of Flux v1 in read-only mode, or of Helm Operator v1, etc. once the functionality is integrated into Flux v2 and it's deemed "ready". -1. Once the use-cases of Flux v1 are covered, we will continue supporting Flux v1 for 6 months. This will be the transition period before it's considered unsupported. - -### Why did you rewrite Flux? - -Flux v2 implements its functionality in individual controllers, which allowed us to address long-standing feature requests much more easily. - -By basing these controllers on modern Kubernetes tooling (`controller-runtime` libraries), they can be dynamically configured with Kubernetes custom resources either by cluster admins or by other automated tools -- and you get greatly increased observability. - -This gave us the opportunity to build Flux v2 with the top Flux v1 feature requests in mind: - -- Supporting multiple source Git repositories -- Operational insight through health checks, events and alerts -- Multi-tenancy capabilities, like applying each source repository with its own set of permissions - -On top of that, testing the individual components and understanding the codebase becomes a lot easier. - -### What are significant new differences between Flux v1 and Flux v2? - -#### Reconciliation - -Flux v1 | Flux v2 ----------------------------------- | ---------------------------------- -Limited to a single Git repository | Multiple Git repositories -Declarative config via arguments in the Flux deployment | `GitRepository` custom resource, which produces an artifact which can be reconciled by other controllers -Follow `HEAD` of Git branches | Supports Git branches, pinning on commits and tags, follow SemVer tag ranges -Suspending of reconciliation by downscaling Flux deployment | Reconciliation can be paused per resource by suspending the `GitRepository` -Credentials config via Arguments and/or Secret volume mounts in the Flux pod | Credentials config per `GitRepository` resource: SSH private key, HTTP/S username/password/token, OpenPGP public keys - -#### `kustomize` support - -Flux v1 | Flux v2 ----------------------------------- | ---------------------------------- -Declarative config through `.flux.yaml` files in the Git repository | Declarative config through a `Kustomization` custom resource, consuming the artifact from the GitRepository -Manifests are generated via shell exec and then reconciled by `fluxd` | Generation, server-side validation, and reconciliation is handled by a specialised `kustomize-controller` -Reconciliation using the service account of the Flux deployment | Support for service account impersonation -Garbage collection needs cluster role binding for Flux to query the Kubernetes discovery API | Garbage collection needs no cluster role binding or access to Kubernetes discovery API -Support for custom commands and generators executed by fluxd in a POSIX shell | No support for custom commands - -#### Helm integration - -Flux v1 | Flux v2 ----------------------------------- | ---------------------------------- -Declarative config in a single Helm custom resource | Declarative config through `HelmRepository`, `GitRepository`, `Bucket`, `HelmChart` and `HelmRelease` custom resources -Chart synchronisation embedded in the operator | Extensive release configuration options, and a 
reconciliation interval per source -Support for fixed SemVer versions from Helm repositories | Support for SemVer ranges for `HelmChart` resources -Git repository synchronisation on a global interval | Planned support for charts from GitRepository sources -Limited observability via the status object of the HelmRelease resource | Better observability via the HelmRelease status object, Kubernetes events, and notifications -Resource heavy, relatively slow | Better performance -Chart changes from Git sources are determined from Git metadata | Chart changes must be accompanied by a version bump in `Chart.yaml` to produce a new artifact - -#### Notifications, webhooks, observability - -Flux v1 | Flux v2 ----------------------------------- | ---------------------------------- -Emits "custom Flux events" to a webhook endpoint | Emits Kubernetes events for included custom resources -RPC endpoint can be configured to a 3rd party solution like FluxCloud to be forwarded as notifications to e.g. Slack | Flux v2 components can be configured to POST the events to a `notification-controller` endpoint. Selective forwarding of POSTed events as notifications using `Provider` and `Alert` custom resources. -Webhook receiver is a side-project | Webhook receiver, handling a wide range of platforms, is included -Unstructured logging | Structured logging for all components -Custom Prometheus metrics | Generic / common `controller-runtime` Prometheus metrics - -### Are there any breaking changes? - -- In Flux v1 Kustomize support was implemented through `.flux.yaml` files in the Git repository. As indicated in the comparison table above, while this approach worked, we found it to be error-prone and hard to debug. The new [Kustomization CR](https://github.com/fluxcd/kustomize-controller/blob/master/docs/spec/v1alpha1/kustomization.md) should make troubleshooting much easier. Unfortunately we needed to drop the support for custom commands as running arbitrary shell scripts in-cluster poses serious security concerns. -- Helm users: we redesigned the `HelmRelease` API and the automation will work quite differently, so upgrading to `HelmRelease` v2 will require a little work from you, but you will gain more flexibility, better observability and performance. - -### Is the GitOps Toolkit related to the GitOps Engine? - -In an announcement in August 2019, the expectation was set that the Flux project would integrate the GitOps Engine, then being factored out of ArgoCD. Since the result would be backward-incompatible, it would require a major version bump: Flux v2. - -After experimentation and considerable thought, we (the maintainers) have found a path to Flux v2 that we think better serves our vision of GitOps: the GitOps Toolkit. In consequence, we do not now plan to integrate GitOps Engine into Flux. - -### How can I get involved? 
- -There are a variety of ways and we look forward to having you on board building the future of GitOps together: - -- [Discuss the direction](https://github.com/fluxcd/flux2/discussions) of Flux v2 with us -- Join us in #flux-dev on the [CNCF Slack](https://slack.cncf.io) -- Check out our [contributor docs](https://toolkit.fluxcd.io/contributing/) -- Take a look at the [roadmap for Flux v2](https://toolkit.fluxcd.io/roadmap/) diff --git a/docs/guides/flux-v1-automation-migration.md b/docs/guides/flux-v1-automation-migration.md deleted file mode 100644 index 7a8d6200..00000000 --- a/docs/guides/flux-v1-automation-migration.md +++ /dev/null @@ -1,751 +0,0 @@ - -# Migrating image update automation to Flux v2 - -"Image Update Automation" is a process in which Flux makes commits to your Git repository when it -detects that there is a new image to be used in a workload (e.g., a Deployment). In Flux v2 this -works quite differently to how it worked in Flux v1. This guide explains the differences and how to -port your cluster configuration from v1 to v2. There is also a [tutorial for using image update -automation with a new cluster][image-update-tute]. - -## Overview of changes between v1 and v2 - -In Flux v1, image update automation (from here, just "automation") was built into the Flux daemon, -which scanned everything it found in the cluster and updated the Git repository it was syncing. - -In Flux v2, - - - automation is controlled with custom resources, not annotations - - ordering images by build time is not supported (there is [a section - below](#how-to-migrate-annotations-to-image-policies) explaining what to do instead) - - the fields to update in files are marked explicitly, rather than inferred from annotations. - -#### Automation is now controlled by custom resources - -Flux v2 breaks down the functions in Flux v1's daemon into controllers, with each having a specific -area of concern. Automation is now done by two controllers: one which scans image repositories to -find the latest images, and one which uses that information to commit changes to git -repositories. These are in turn separate to the syncing controllers. - -This means that automation in Flux v2 is governed by custom resources. In Flux v1 the daemon scanned -everything, and looked at annotations on the resources to determine what to update. Automation in v2 -is more explicit than in v1 -- you have to mention exactly which images you want to be scanned, and -which fields you want to be updated. - -A consequence of using custom resources is that with Flux v2 you can have an arbitrary number of -automations, targeting different Git repositories if you wish, and updating different sets of -images. If you run a multitenant cluster, the tenants can define automation in their own namespaces, -for their own Git repositories. - -#### Selecting an image is more flexible - -The ways in which you choose to select an image have changed. In Flux v1, you generally supply a -filter pattern, and the latest image is the image with the most recent build time out of those -filtered. In Flux v2, you choose an ordering, and separately specify a filter for the tags to -consider. These are dealt with in detail below. - -Selecting an image by build time is no longer supported. This is the implicit default in Flux v1. In -Flux v2, you will need to tag images so that they sort in the order you would like -- [see -below](#how-to-use-sortable-image-tags) for how to do this conveniently. 
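As an illustration of specifying the ordering and the tag filter separately, an `ImagePolicy` might look like the following sketch. The `podinfo` repository name and the SemVer range are assumptions for the example, and the exact API version depends on the controllers you have installed:

```yaml
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImagePolicy
metadata:
  name: podinfo
  namespace: flux-system
spec:
  # the ImageRepository (scanned by image-reflector-controller) to select from
  imageRepositoryRef:
    name: podinfo
  policy:
    # order candidate tags by SemVer and pick the highest version in range
    semver:
      range: 5.0.x
```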
- -#### Fields to update are explicitly marked - -Lastly, in Flux v2 the fields to update in files are marked explicitly. In Flux v1 they are inferred -from the type of the resource, along with the annotations given. The approach in Flux v1 was limited -to the types that had been programmed in, whereas Flux v2 can update any Kubernetes object (and some -files that aren't Kubernetes objects, like `kustomization.yaml`). - -## Preparing for migration - -It is best to complete migration of your system to _Flux v2 syncing_ first, using the [Flux v1 -migration guide][flux-v1-migration]. This will remove Flux v1 from the system, along with its image -automation. You can then reintroduce automation with Flux v2 by following the instructions in this -guide. - -It is safe to leave the annotations for Flux v1 in files while you reintroduce automation, because -Flux v2 will ignore them. - -To migrate to Flux v2 automation, you will need to do three things: - - - make sure you are running the automation controllers; then, - - declare the automation with an `ImageUpdateAutomation` object; and, - - migrate each manifest by translating Flux v1 annotations to Flux v2 `ImageRepository` and - `ImagePolicy` objects, and putting update markers in the manifest file. - -### Where to keep `ImageRepository`, `ImagePolicy` and `ImageUpdateAutomation` manifests - -This guide assumes you want to manage automation itself via Flux. In the following sections, -manifests for the objects controlling automation are saved in files, committed to Git, and applied -in the cluster with Flux. - -A Flux v2 installation will typically have a Git repository structured like this: - -``` -<...>/flux-system/ - gotk-components.yaml - gotk-sync.yaml -<...>/app/ - # deployments etc. -``` - -The `<...>` is the path to a particular cluster's definitions -- this may be simply `.`, or -something like `clusters/my-cluster`. To get the files in the right place, set a variable for this -path: - -```bash -$ CLUSTER_PATH=<...> # e.g., "." or "clusters/my-cluster", or ... -$ AUTO_PATH=$CLUSTER_PATH/automation -$ mkdir ./$AUTO_PATH -``` - -The file `$CLUSTER_PATH/flux-system/gotk-components.yaml` has definitions of all the Flux v2 -controllers and custom resource definitions. The file `gotk-sync.yaml` defines a `GitRepository` and -a `Kustomization` which will sync manifests under `$CLUSTER_PATH/`. - -To these will be added definitions for automation objects. This guide puts manifest files for -automation in `$CLUSTER_PATH/automation/`, but there is no particular structure required -by Flux. The automation objects do not have to be in the same namespace as the objects to be -updated. - -#### Migration on a branch - -This guide assumes you will commit changes to the branch that is synced by Flux, as this is the -simplest way to follow. - -It may be less disruptive to put migration changes on a branch and merge them when you have completed -the migration. You would need to either change the `GitRepository` to point at the migration branch, -or have separate `GitRepository` and `Kustomization` objects for the migrated parts of your Git -repository. The main thing to avoid is syncing the same objects in two different places; e.g., avoid -having Kustomizations that sync both the unmigrated and migrated application configuration. - -### Installing the command-line tool `flux` - -The command-line tool `flux` will be used below; see [these instructions][install-cli] for how to -install it.
## Running the automation controllers

The first thing to do is to deploy the automation controllers to your cluster. The best way to
proceed will depend on the approach you took when following the [Flux read-only migration
guide][flux-v1-migration].

 - If you used `flux bootstrap` to create a new Git repository, then ported your cluster
   configuration to that repository, use [After `flux bootstrap`](#after-flux-bootstrap);
 - If you used `flux install` to install the controllers directly, use [After migrating Flux v1 in
   place](#after-migrating-flux-v1-in-place);
 - If you used `flux install` and exported the configuration to a file, use [After committing a Flux
   v2 configuration to Git](#after-committing-a-flux-v2-configuration-to-git).

### After `flux bootstrap`

When starting from scratch, you are likely to have used `flux bootstrap`. Rerun the command, and
include the image automation controllers in your starting configuration with the flag
`--components-extra`, [as shown in the installation guide][flux-bootstrap].

This will commit changes to your Git repository and sync them in the cluster. Verify that the
automation controllers are now running with:

```bash
flux check --components-extra=image-reflector-controller,image-automation-controller
```

Now jump to the section [Migrating each manifest to Flux v2](#migrating-each-manifest-to-flux-v2).

### After migrating Flux v1 in place

If you followed the [Flux v1 migration guide][flux-v1-migration], you will already be running some
Flux v2 controllers. The automation controllers are currently considered an optional extra to those,
but are installed and run in much the same way. You may or may not have committed the Flux v2
configuration to your Git repository. If you did, go to the section [After committing a Flux v2
configuration to Git](#after-committing-a-flux-v2-configuration-to-git).

If _not_, you will be installing directly to the cluster:

```bash
$ flux install --components-extra=image-reflector-controller,image-automation-controller
```

It is safe to repeat the installation command, or to run it after using `flux bootstrap`, so long as
you repeat any arguments you supplied the first time.

Now jump ahead to [Migrating each manifest to Flux v2](#migrating-each-manifest-to-flux-v2).

### After committing a Flux v2 configuration to Git

If you added the Flux v2 configuration to your Git repository, assuming it's in the file
`$CLUSTER_PATH/flux-system/gotk-components.yaml` as used in the guide, use `flux install` and write
it back to that file:

```bash
$ flux install \
    --components-extra=image-reflector-controller,image-automation-controller \
    --export > "$CLUSTER_PATH/flux-system/gotk-components.yaml"
```

Commit changes to the `$CLUSTER_PATH/flux-system/gotk-components.yaml` file and sync the cluster:

```bash
$ git add $CLUSTER_PATH/flux-system/gotk-components.yaml
$ git commit -s -m "Add image automation controllers to Flux config"
$ git push
$ flux reconcile kustomization --with-source flux-system
```

## Controlling automation with an `ImageUpdateAutomation` object

In Flux v1, automation was run by default. With Flux v2, you have to explicitly tell the controller
which Git repository to update and how to do so. These are defined in an `ImageUpdateAutomation`
object; but first, you need a `GitRepository` with write access, for the automation to use.
If you followed the [Flux v1 read-only migration guide][flux-v1-migration], you will have a
`GitRepository` defined in the namespace `flux-system`, for syncing to use. This `GitRepository`
will have _read_ access to the Git repository by default, and automation needs _write_ access to
push commits.

To give it write access, you can replace the secret it refers to. How to do this will depend on what
kind of authentication you used to install Flux v2.

### Replacing the Git credentials secret

The secret with Git credentials will be named in the `.spec.secretRef.name` field of the
`GitRepository` object. Say your `GitRepository` is in the _namespace_ `flux-system` and _named_
`flux-system` (these are the defaults if you used `flux bootstrap`); you can retrieve the secret
name and Git URL with:

```bash
$ FLUX_NS=flux-system
$ GIT_NAME=flux-system
$ SECRET_NAME=$(kubectl -n $FLUX_NS get gitrepository $GIT_NAME -o jsonpath={.spec.secretRef.name})
$ GIT_URL=$(kubectl -n $FLUX_NS get gitrepository $GIT_NAME -o jsonpath='{.spec.url}')
$ echo $SECRET_NAME $GIT_URL # make sure they have values
```

If you're not sure which kind of credentials you're using, look at the secret:

```bash
$ kubectl -n $FLUX_NS describe secret $SECRET_NAME
```

An entry at `.data.identity` indicates that you are using an SSH key (the [first
section](#replacing-an-ssh-key-secret) below); an entry at `.data.username` indicates you are using
a username and password or token (the [second section](#replacing-a-usernamepassword-secret)
below).

#### Replacing an SSH key secret

When using an SSH (deploy) key, create a new key:

```bash
$ flux create secret git -n $FLUX_NS $SECRET_NAME --url=$GIT_URL
```

You will need to copy the public key that's printed out, and install that as a deploy key for your
Git repo, **making sure to check the 'Allow write access' box** (or otherwise give the key write
permissions). Remove the old deploy key.

#### Replacing a username/password secret

When you're using a username and password to authenticate, you may be able to change the permissions
associated with that account.

If not, you will need to create a new access token (e.g., ["Personal Access Token"][github-pat] in
GitHub). In this case, once you have the new token you can replace the secret with the following:

```bash
$ flux create secret git -n $FLUX_NS $SECRET_NAME \
    --username <username> --password <token> --url $GIT_URL
```

#### Checking the new credentials

To check if your replaced credentials still work, try syncing the `GitRepository` object:

```bash
$ flux reconcile source git -n $FLUX_NS $GIT_NAME
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/d537304e8f5f41f1584ca1e807df5b5752b2577e
```

When this is successful, it tells you the new credentials have at least read access.

### Making an automation object

To set automation running, you create an [`ImageUpdateAutomation`][auto-ref] object. Each object
will update a Git repository, according to the image policies in the namespace.
Here is an `ImageUpdateAutomation` manifest for the example (note: you will have to supply your own
value for at least the host part of the email address):

```bash
$ # the environment variables $AUTO_PATH and $GIT_NAME are set above
$ FLUXBOT_EMAIL=fluxbot@example.com # supply your own host or address here
$ flux create image update my-app-auto \
    --author-name FluxBot --author-email "$FLUXBOT_EMAIL" \
    --git-repo-ref $GIT_NAME --branch main \
    --interval 5m \
    --export > ./$AUTO_PATH/my-app-auto.yaml
$ cat ./$AUTO_PATH/my-app-auto.yaml
---
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageUpdateAutomation
metadata:
  name: my-app-auto
  namespace: flux-system
spec:
  interval: 5m0s
  sourceRef:
    kind: GitRepository
    name: flux-system
  git:
    checkout:
      ref:
        branch: main
    commit:
      author:
        email: fluxbot@example.com
        name: FluxBot
```

#### Commit and check that the automation object works

Commit the manifest file and push:

```bash
$ git add ./$AUTO_PATH/my-app-auto.yaml
$ git commit -s -m "Add image update automation"
$ git push
# ...
```

Then sync and check the object status:

```bash
$ flux reconcile kustomization --with-source flux-system
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/401dd3b550f82581c7d12bb79ade389089c6422f
► annotating Kustomization flux-system in flux-system namespace
✔ Kustomization annotated
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ reconciled revision main/401dd3b550f82581c7d12bb79ade389089c6422f
$ flux get image update
NAME        READY  MESSAGE          LAST RUN              SUSPENDED
my-app-auto True   no updates made  2021-02-08T14:53:43Z  False
```

Read on to the next section to see how to change each manifest file to work with Flux v2.

## Migrating each manifest to Flux v2

In Flux v1, the annotation

    fluxcd.io/automated: "true"

switches automation on for a manifest (a description of a Kubernetes object). For each manifest that
has that annotation, you will need to create custom resources to scan for the latest image, and to
replace the annotations with field markers.

The following sections explain these steps, using this example Deployment manifest which is
initially annotated to work with Flux v1:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
  namespace: default
  annotations:
    fluxcd.io/automated: "true"
    fluxcd.io/tag.app: semver:^5.0
spec:
  template:
    spec:
      containers:
      - name: app
        image: ghcr.io/stefanprodan/podinfo:5.0.0
```

!!! warning
    A YAML file may have more than one manifest in it, separated with
    `---`. Be careful to account for each manifest in a file.

You may wish to try migrating the automation of just one file or manifest and follow it through to
the end of the guide, before returning here to do the remainder.

### How to migrate annotations to image policies

For each image repository that is the subject of automation you will need to create an
`ImageRepository` object, so that the image repository is scanned for tags.
The image repository in
the example deployment is `ghcr.io/stefanprodan/podinfo`, which is the image reference minus its
tag:

```bash
$ cat $CLUSTER_PATH/app/my-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
  namespace: default
  annotations:
    fluxcd.io/automated: "true"
    fluxcd.io/tag.app: semver:^5.0
spec:
  template:
    spec:
      containers:
      - name: app
        image: ghcr.io/stefanprodan/podinfo:5.0.0 # <-- image reference
```

The command-line tool `flux` will help create a manifest for you. Note that the output is redirected
to a file under `$AUTO_PATH`, so it can be added to the Git repository and synced to the cluster.

```bash
$ # the environment variable $AUTO_PATH was set earlier
$ flux create image repository podinfo-image \
    --image ghcr.io/stefanprodan/podinfo \
    --interval 5m \
    --export > ./$AUTO_PATH/podinfo-image.yaml
$ cat ./$AUTO_PATH/podinfo-image.yaml
---
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageRepository
metadata:
  name: podinfo-image
  namespace: flux-system
spec:
  image: ghcr.io/stefanprodan/podinfo
  interval: 5m0s
```

!!! hint
    If you are using the same image repository in several manifests, you only need one
    `ImageRepository` object for it.

#### Using image registry credentials for scanning

When your image repositories are private, you supply Kubernetes with "image pull secrets" with
credentials for accessing the image registry (e.g., DockerHub). The image reflector controller needs
the same kind of credentials to scan image repositories.

There are several ways that image pull secrets can be made available for the image reflector
controller. The [image update tutorial][image-update-tute-creds] describes how to create or arrange
secrets for scanning to use. Also see later in the tutorial for [instructions specific to some cloud
platforms][image-update-tute-clouds].

#### Committing and checking the ImageRepository

Add the `ImageRepository` manifest to the Git index and commit it:

```bash
$ git add ./$AUTO_PATH/podinfo-image.yaml
$ git commit -s -m "Add image repository object for podinfo"
$ git push
# ...
```

Now you can sync the new commit, and check that the object is working:

```bash
$ flux reconcile kustomization --with-source flux-system
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/fd2fe8a61d4537bcfa349e4d1dbc480ea699ba8a
► annotating Kustomization flux-system in flux-system namespace
✔ Kustomization annotated
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ reconciled revision main/fd2fe8a61d4537bcfa349e4d1dbc480ea699ba8a
$ flux get image repository podinfo-image
NAME           READY  MESSAGE                         LAST SCAN             SUSPENDED
podinfo-image  True   successful scan, found 16 tags  2021-02-08T14:31:38Z  False
```

#### Replacing automation annotations

For each _field_ that's being updated by automation, you'll need an `ImagePolicy` object to describe
how to select an image for the field value. In the example, the field `.image` in the container
named `"app"` is the field being updated.

In Flux v1, annotations describe how to select the image to update to, using a prefix.
In the
example, the prefix is `semver:`:

```yaml
  annotations:
    fluxcd.io/automated: "true"
    fluxcd.io/tag.app: semver:^5.0
```

These are the prefixes supported in Flux v1, and what to use in Flux v2:

| Flux v1 prefix | Meaning | Flux v2 equivalent |
|----------------|---------|--------------------|
| `glob:` | Filter for tags matching the glob pattern, then select the newest by build time | [Use sortable tags](#how-to-use-sortable-image-tags) |
| `regex:` | Filter for tags matching the regular expression, then select the newest by build time | [Use sortable tags](#how-to-use-sortable-image-tags) |
| `semver:` | Filter for tags that represent versions, and select the highest version in the given range | [Use semver ordering](#how-to-use-semver-image-tags) |

### How to use sortable image tags

To give image tags a useful ordering, you can use a timestamp or serial number as part of each
image's tag, then sort either alphabetically or numerically.

This is a change from Flux v1, in which the build time was fetched from each image's config, and
didn't need to be included in the image tag. Therefore, this is likely to require a change to your
build process.

The guide [How to make sortable image tags][image-tags-guide] explains how to change your build
process to tag images with a timestamp. This will mean Flux v2 can sort the tags to find the most
recently built image.

#### Filtering the tags in an `ImagePolicy`

The recommended format for image tags using a timestamp is:

    <branch>-<sha1>-<timestamp>

The timestamp (or serial number) is the part of the tag that you want to order on. The SHA1 is there
so you can trace an image back to the commit from which it was built. You don't need the branch for
sorting, but you may want to include only builds from a specific branch.

Say you want to filter for only images that are from the `main` branch, and pick the most recent. Your
`ImagePolicy` would look like this:

```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
  name: my-app-policy
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: podinfo-image
  filterTags:
    pattern: '^main-[a-f0-9]+-(?P<ts>[0-9]+)'
    extract: '$ts'
  policy:
    numerical:
      order: asc
```

The `.spec.filterTags.pattern` field gives a regular expression that a tag must match to be included. The
`.spec.filterTags.extract` field gives a replacement pattern that can refer back to capture groups in the
filter pattern. The extracted values are sorted to find the selected image tag. In this case, the
timestamp part of the tag will be extracted and sorted numerically in ascending order. See [the
reference docs][imagepolicy-ref] for more examples.

Once you have made sure you have image tags and an `ImagePolicy`, jump ahead to [Checking
that the ImagePolicy works](#checking-that-the-imagepolicy-works).

### How to use SemVer image tags

The other kind of sorting is by [SemVer][semver], picking the highest version from among those
included by the filter. A semver range will also filter for tags that fit in the range. For example,

```yaml
  semver:
    range: ^5.0
```

includes only tags that have a major version of `5`, and selects whichever is the highest.

This can be combined with a regular expression pattern, to filter on other parts of the tags. For
example, you might put a target environment as well as the version in your image tags, like
`dev-v1.0.3`.
Then you would use an `ImagePolicy` similar to this one:

```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
  name: my-app-policy
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: podinfo-image
  filterTags:
    pattern: '^dev-v(?P<version>.*)'
    extract: '$version'
  policy:
    semver:
      range: '^1.0'
```

Continue on to the next sections to see an example, and how to check that your `ImagePolicy` works.

#### An `ImagePolicy` for the example

The example Deployment has annotations using `semver:` as a prefix, so the policy object also uses
semver:

```bash
$ # the environment variable $AUTO_PATH was set earlier
$ flux create image policy my-app-policy \
    --image-ref podinfo-image \
    --semver '^5.0' \
    --export > ./$AUTO_PATH/my-app-policy.yaml
$ cat ./$AUTO_PATH/my-app-policy.yaml
---
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
  name: my-app-policy
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: podinfo-image
  policy:
    semver:
      range: ^5.0
```

#### Checking that the `ImagePolicy` works

Commit the manifest file, and push:

```bash
$ git add ./$AUTO_PATH/my-app-policy.yaml
$ git commit -s -m "Add image policy for my-app"
$ git push
# ...
```

Then you can reconcile and check that the image policy works:

```bash
$ flux reconcile kustomization --with-source flux-system
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/7dcf50222499be8c97e22cd37e26bbcda8f70b95
► annotating Kustomization flux-system in flux-system namespace
✔ Kustomization annotated
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ reconciled revision main/7dcf50222499be8c97e22cd37e26bbcda8f70b95
$ flux get image policy my-app-policy
NAME           READY  MESSAGE                                                                 LATEST IMAGE
my-app-policy  True   Latest image tag for 'ghcr.io/stefanprodan/podinfo' resolved to: 5.1.4  ghcr.io/stefanprodan/podinfo:5.1.4
```

### How to mark up files for update

The last thing to do in each manifest is to mark the fields that you want to be updated.

In Flux v1, the annotations in a manifest determine the fields to be updated. In the example, the
annotations target the image used by the container `app`:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
  namespace: default
  annotations:
    fluxcd.io/automated: "true"
    fluxcd.io/tag.app: semver:^5.0 # <-- `.app` here
spec:
  template:
    spec:
      containers:
      - name: app # <-- targets `app` here
        image: ghcr.io/stefanprodan/podinfo:5.0.0
```

This works straightforwardly for Deployment manifests, but when it comes to `HelmRelease`
manifests, it [gets complicated][helm-auto], and it doesn't work at all for many kinds of resources.

For Flux v2, you mark the field you want to be updated directly, with the namespaced name of the
image policy to apply. This is the example Deployment, marked up for Flux v2:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: default
  name: my-app
spec:
  template:
    spec:
      containers:
      - name: app
        image: ghcr.io/stefanprodan/podinfo:5.0.0 # {"$imagepolicy": "flux-system:my-app-policy"}
```

The value `flux-system:my-app-policy` names the policy that selects the desired image.

This works in the same way for `DaemonSet` and `CronJob` manifests, as the sketch below shows.
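For example, here is the same kind of marker in a hypothetical `CronJob` manifest (a sketch; only the image line and its marker matter here) -- the marker again sits on the line with the image reference:

```yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: my-job
  namespace: default
spec:
  schedule: "0 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: job
            image: ghcr.io/stefanprodan/podinfo:5.0.0 # {"$imagepolicy": "flux-system:my-app-policy"}
```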
For `HelmRelease` manifests, put
the marker alongside the part of the `values` that has the image tag. If the image tag is a separate
field, you can put `:tag` on the end of the name, to replace the value with just the selected
image's tag. The [image automation guide][image-update-tute-custom] has examples for `HelmRelease`
and other custom resources.

### Committing the marker change and checking that automation works

Referring to the image policy created earlier, you can see the example Deployment does not use the
most recent image. When you commit the manifest file with the update marker added, you would expect
automation to update the file.

Commit the change that adds an update marker:

```bash
$ git add app/my-app.yaml # the filename of the example
$ git commit -s -m "Add update marker to my-app manifest"
$ git push
# ...
```

Now to check that the automation makes a change:

```bash
$ flux reconcile image update my-app-auto
► annotating ImageUpdateAutomation my-app-auto in flux-system namespace
✔ ImageUpdateAutomation annotated
◎ waiting for ImageUpdateAutomation reconciliation
✔ ImageUpdateAutomation reconciliation completed
✔ committed and pushed a92a4b654f520c00cb6c46b2d5e4fb4861aa58fc
```

## Troubleshooting

If a change was not pushed by the image automation, there are several things you can check:

 - it's possible it made a change that is not reported in the latest status -- pull from the origin
   and check the commit log
 - check that the name used in the marker corresponds to the namespace and name of an `ImagePolicy`
 - check that the `ImageUpdateAutomation` is in the same namespace as the `ImagePolicy` objects
   named in markers
 - check that the image policy and the image repository are both reported as `Ready`
 - check that the credentials referenced by the `GitRepository` object have write permission, and
   create new credentials if necessary.

As a fallback, you can scan the logs of the automation controller to see if it logged errors:

```bash
$ kubectl logs -n flux-system deploy/image-automation-controller
```

Once you are satisfied that it is working, you can migrate the rest of the manifests using the steps
from ["Migrating each manifest to Flux v2"](#migrating-each-manifest-to-flux-v2) above.
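As you repeat the steps for each manifest, the same status checks used above can confirm that every object involved reports `Ready`; for example (object names from this guide's examples):

```bash
$ flux get image repository podinfo-image
$ flux get image policy my-app-policy
$ flux get image update my-app-auto
```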
[image-update-tute]: https://toolkit.fluxcd.io/guides/image-update/
[imagepolicy-ref]: https://toolkit.fluxcd.io/components/image/imagepolicies/
[helm-auto]: https://docs.fluxcd.io/en/1.21.1/references/helm-operator-integration/#automated-image-detection
[image-update-tute-custom]: https://toolkit.fluxcd.io/guides/image-update/#configure-image-update-for-custom-resources
[flux-v1-migration]: https://toolkit.fluxcd.io/guides/flux-v1-migration/
[install-cli]: https://toolkit.fluxcd.io/get-started/#install-the-flux-cli
[flux-bootstrap]: https://toolkit.fluxcd.io/guides/installation/#bootstrap
[github-pat]: https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token
[auto-object-ref]: https://toolkit.fluxcd.io/components/image/imageupdateautomations/
[image-update-tute-creds]: https://toolkit.fluxcd.io/guides/image-update/#configure-image-scanning
[image-update-tute-clouds]: https://toolkit.fluxcd.io/guides/image-update/#imagerepository-cloud-providers-authentication
[image-tags-guide]: https://toolkit.fluxcd.io/guides/sortable-image-tags/
[auto-ref]: https://toolkit.fluxcd.io/components/image/imageupdateautomations/
[semver]: https://semver.org

diff --git a/docs/guides/flux-v1-migration.md b/docs/guides/flux-v1-migration.md
deleted file mode 100644
index 470c94ed..00000000
--- a/docs/guides/flux-v1-migration.md
+++ /dev/null
@@ -1,330 +0,0 @@

# Migrate from Flux v1 to v2

This guide walks you through migrating from Flux v1 to v2.
Read the [FAQ](faq-migration.md) to find out what the differences between v1 and v2 are.

!!! info "Automated image updates"
    The image automation feature is under development in Flux v2.
    Please consult the [roadmap](../roadmap/index.md) for more details.

!!! info "Feature parity"
    "Feature parity" does not mean Flux v2 works exactly the same as v1 (or is
    backward-compatible); it means you can accomplish the same results, while
    accounting for the fact that it's a system with a substantially different
    design.
    This may at times mean that you have to make adjustments to the way your
    current cluster configuration is structured. If you are in this situation
    and need help, please refer to the [support page](https://fluxcd.io/support/).

## Prerequisites

You will need a Kubernetes cluster version **1.16** or newer
and kubectl version **1.18** or newer.

### Install Flux v2 CLI

With Homebrew:

```sh
brew install fluxcd/tap/flux
```

With Bash:

```sh
curl -s https://fluxcd.io/install.sh | sudo bash

# enable completions in ~/.bash_profile
. <(flux completion bash)
```

Command-line completion for `zsh`, `fish`, and `powershell`
is also supported, each with its own sub-command.

Binaries for macOS, Windows and Linux AMD64/ARM are available for download on the
[release page](https://github.com/fluxcd/flux2/releases).

Verify that your cluster satisfies the prerequisites with:

```sh
flux check --pre
```

## GitOps migration

Flux v2 offers an installation procedure that is declarative-first
and disaster-resilient.

Using the `flux bootstrap` command you can install Flux on a
Kubernetes cluster and configure it to manage itself from a Git
repository. The Git repository created during bootstrap can be used
to define the state of your fleet of Kubernetes clusters.

For a detailed walk-through of the bootstrap procedure, please see the [installation guide](installation.md).
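For example, a bootstrap run against a fresh repository and path might look like this (a sketch for GitHub; the owner, repository name, and path are placeholders to adjust):

```sh
flux bootstrap github \
  --owner=my-org \
  --repository=fleet-infra \
  --branch=main \
  --path=clusters/my-cluster
```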
!!! warning "`flux bootstrap` target"
    `flux bootstrap` should not be run against a Git branch or path
    that is already being synchronized by Flux v1, as this will make
    them fight over the resources. Instead, bootstrap to a **new Git
    repository, branch or path**, and continue with moving the
    manifests.

After you've installed Flux v2 on your cluster using bootstrap,
you can delete Flux v1 from your clusters and move the manifests from the
Flux v1 repository to the bootstrap one.

## In-place migration

!!! warning
    For production use we recommend using the **bootstrap** procedure (see the [GitOps migration](#gitops-migration) section above),
    but if you wish to install Flux v2 in the
    same way as Flux v1 then follow along.

### Flux read-only mode

Assuming you've installed Flux v1 to sync a directory with plain YAMLs from a private Git repo:

```sh
# create namespace
kubectl create ns flux

# deploy Flux v1
fluxctl install \
--git-url=git@github.com:org/app \
--git-branch=main \
--git-path=./deploy \
--git-readonly \
--namespace=flux | kubectl apply -f -

# print deploy key
fluxctl identity --k8s-fwd-ns flux

# trigger sync
fluxctl sync --k8s-fwd-ns flux
```

!!! hint "Uninstall Flux v1"
    Before you proceed, scale the Flux v1 deployment to zero
    or delete its namespace and RBAC.

If there are YAML files in your `deploy` dir that are not meant to be
applied on the cluster, you can exclude them by placing a `.sourceignore` in your repo root:

```console
$ cat .sourceignore
# exclude all
/*
# include deploy dir
!/deploy
# exclude files from deploy dir
/deploy/**/eksctl.yaml
/deploy/**/charts
```

Install Flux v2 in the `flux-system` namespace:

```console
$ flux install \
  --network-policy=true \
  --watch-all-namespaces=true \
  --namespace=flux-system
✚ generating manifests
✔ manifests build completed
► installing components in flux-system namespace
✔ install completed
◎ verifying installation
✔ source-controller ready
✔ kustomize-controller ready
✔ helm-controller ready
✔ notification-controller ready
✔ install finished
```

Register your Git repository and add the deploy key with read-only access:

```console
$ flux create source git app \
  --url=ssh://git@github.com/org/app \
  --branch=main \
  --interval=1m
► generating deploy key pair
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp2x9ghVmv1zD...
Have you added the deploy key to your repository: y
► collecting preferred public key from SSH server
✔ collected public key from SSH server:
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A...
► applying secret with keys
✔ authentication configured
✚ generating GitRepository source
► applying GitRepository source
✔ GitRepository source created
◎ waiting for GitRepository source reconciliation
✔ GitRepository source reconciliation completed
✔ fetched revision: main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
```

Configure the reconciliation of the `deploy` dir on your cluster:

```console
$ flux create kustomization app \
  --source=app \
  --path="./deploy" \
  --prune=true \
  --interval=10m
✚ generating Kustomization
► applying Kustomization
✔ Kustomization created
◎ waiting for Kustomization reconciliation
✔ Kustomization app is ready
✔ applied revision main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
```

If your repository contains secrets encrypted with Mozilla SOPS, please read this [guide](mozilla-sops.md).
- -Pull changes from Git and apply them immediately: - -```sh -flux reconcile kustomization app --with-source -``` - -List all Kubernetes objects reconciled by `app`: - -```sh -kubectl get all --all-namespaces \ --l=kustomize.toolkit.fluxcd.io/name=app \ --l=kustomize.toolkit.fluxcd.io/namespace=flux-system -``` - -### Flux with Kustomize - -Assuming you've installed Flux v1 to sync a Kustomize overlay from an HTTPS Git repository: - -```sh -fluxctl install \ ---git-url=https://github.com/org/app \ ---git-branch=main \ ---manifest-generation \ ---namespace=flux | kubectl apply -f - -``` - -With the following `.flux.yaml` in the root dir: - -```yaml -version: 1 -patchUpdated: - generators: - - command: kustomize build ./overlays/prod - patchFile: flux-patch.yaml -``` - -!!! hint "Uninstall Flux v1" - Before you proceed, delete the Flux v1 namespace - and remove the `.flux.yaml` from your repo. - -Install Flux v2 in the `flux-system` namespace: - -```sh -flux install -``` - -Register the Git repository using a personal access token: - -```sh -flux create source git app \ - --url=https://github.com/org/app \ - --branch=main \ - --username=git \ - --password=token \ - --interval=1m -``` - -Configure the reconciliation of the `prod` overlay on your cluster: - -```sh -flux create kustomization app \ - --source=GitRepository/app \ - --path="./overlays/prod" \ - --prune=true \ - --interval=10m -``` - -Check the status of the Kustomization reconciliation: - -```console -$ flux get kustomizations app -NAME REVISION SUSPENDED READY -app main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b False True -``` - -### Flux with Slack notifications - -Assuming you've configured Flux v1 to send notifications to Slack with FluxCloud. - -With Flux v2, create an alert provider for a Slack channel: - -```sh -flux create alert-provider slack \ - --type=slack \ - --channel=general \ - --address=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK -``` - -And configure notifications for the `app` reconciliation events: - -```sh -flux create alert app \ - --provider-ref=slack \ - --event-severity=info \ - --event-source=GitRepository/app \ - --event-source=Kustomization/app -``` - -For more details, read the guides on how to configure -[notifications](notifications.md) and [webhooks](webhook-receivers.md). 
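For reference, the declarative objects behind these two commands would look roughly like this (a sketch based on the `notification.toolkit.fluxcd.io/v1beta1` API; in a real setup the webhook address is better kept in a referenced `Secret`):

```yaml
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
  name: slack
  namespace: flux-system
spec:
  type: slack
  channel: general
  address: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
  name: app
  namespace: flux-system
spec:
  providerRef:
    name: slack
  eventSeverity: info
  eventSources:
    - kind: GitRepository
      name: app
    - kind: Kustomization
      name: app
```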
### Flux debugging

Check the status of Git operations:

```console
$ kubectl -n flux-system get gitrepositories
NAME   READY   MESSAGE
app    True    Fetched revision: main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
test   False   SSH handshake failed: unable to authenticate, attempted methods [none publickey]
```

Check the status of the cluster reconciliation with kubectl:

```console
$ kubectl -n flux-system get kustomizations
NAME   READY   STATUS
app    True    Applied revision: main/5302d04c2ab8f0579500747efa0fe7abc72c8f9
test   False   The Service 'backend' is invalid: spec.type: Unsupported value: 'Ingress'
```

Suspend a reconciliation:

```console
$ flux suspend kustomization app
► suspending kustomization app in flux-system namespace
✔ kustomization suspended
```

Check the status with kubectl:

```console
$ kubectl -n flux-system get kustomization app
NAME   READY   STATUS
app    False   Kustomization is suspended, skipping reconciliation
```

Resume a reconciliation:

```console
$ flux resume kustomization app
► resuming Kustomization app in flux-system namespace
✔ Kustomization resumed
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ applied revision main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
```

diff --git a/docs/guides/helm-operator-migration.md b/docs/guides/helm-operator-migration.md
deleted file mode 100644
index 253f0e2c..00000000
--- a/docs/guides/helm-operator-migration.md
+++ /dev/null
@@ -1,861 +0,0 @@

# Migrate to the Helm Controller

This guide will teach you everything you need to know to be able to migrate from the [Helm Operator](https://github.com/fluxcd/helm-operator) to the [Helm Controller](https://github.com/fluxcd/helm-controller).

## Overview of changes

### Support for Helm v2 dropped

The Helm Operator offered support for both Helm v2 and v3, due to Kubernetes client incompatibility issues between the versions. This has blocked the Helm Operator from being able to upgrade to a Helm v3 version newer than `3.2.0`.

In combination with the fact that [Helm v2 reaches end of life after November 13, 2020](https://helm.sh/blog/helm-v2-deprecation-timeline/), support for Helm v2 has been dropped.

### Helm and Git repositories, and even Helm charts are now Custom Resources

When working with the Helm Operator, you had to mount various files to either make it recognize new (private) Helm repositories or make it gain access to Helm and/or Git repositories. While this approach was declarative, it did not provide a great user experience and was at times hard to set up.

By moving this configuration to [`HelmRepository`](../components/source/helmrepositories.md), [`GitRepository`](../components/source/gitrepositories.md), [`Bucket`](../components/source/buckets.md) and [`HelmChart`](../components/source/helmcharts.md) Custom Resources, they can now be declaratively described (including their credentials using references to `Secret` resources), and applied to the cluster.

The reconciliation of these resources has been offloaded to a dedicated [Source Controller](../components/source/controller.md), specialized in the acquisition of artifacts from external sources.

The result of all this is an easier and more flexible configuration, with much better observability. Failures are traceable to the level of the resource that led to the failure, and are easier to resolve.
As polling intervals can now be configured per resource, you can customize your repository and/or chart configuration to a much finer grain.

From a technical perspective, this also means less overhead, as the resources managed by the Source Controller can be shared between multiple `HelmRelease` resources, or even reused by other controllers like the [Kustomize Controller](../components/kustomize/controller.md).

### The `HelmRelease` Custom Resource group domain changed

Due to the Helm Controller becoming part of the extensive set of controller components Flux now has, the Custom Resource group domain has changed from `helm.fluxcd.io` to `helm.toolkit.fluxcd.io`.

Together with the new API version (`v2beta1` at the time of writing), the full `apiVersion` you use in your YAML document becomes `helm.toolkit.fluxcd.io/v2beta1`.

### The API specification changed (quite a lot), for the better

While developing the Helm Controller, we were given the chance to rethink what a declarative API for driving automated Helm releases would look like. This has, in short, resulted in the following changes:

- Extensive configuration options per Helm action (install, upgrade, test, rollback); this includes things like timeouts, disabling hooks, and ignoring failures for tests.
- Strategy-based remediation on failures. This makes it possible, for example, to uninstall a release instead of rolling it back after a failed upgrade. Both the number of retries and whether to keep the last failed state when the retries are exhausted are now configurable options.
- Better observability. The `Status` field in the `HelmRelease` provides a much better view of the current state of the release, including dedicated `Ready`, `Released`, `TestSuccess`, and `Remediated` conditions.

For a comprehensive overview, see the [API spec changes](#api-spec-changes).

### Helm storage drift detection no longer relies on dry-runs

The Helm Controller no longer uses dry-runs as a way to detect mutations to the Helm storage. Instead, it uses a simpler model of bookkeeping based on the observed state and revisions. This has resulted in much better performance, a lower memory and CPU footprint, and more reliable drift detection.

### No longer supports [Helm downloader plugins](https://helm.sh/docs/topics/plugins/#downloader-plugins)

We have reduced our usage of Helm packages to a bare minimum (that being: as much as we need to be able to work with chart repositories and charts), and are avoiding shell-outs as much as we can.

Given the latter, and the fact that Helm (downloader) plugins work based on shelling out to another command and/or binary, support for this had to be dropped.

We are aware some of our users are using this functionality to be able to retrieve charts from S3 or GCS. The Source Controller already has support for S3 storage compatible buckets ([this includes GCS](https://cloud.google.com/storage/docs/interoperability)), and we hope to extend this support in the foreseeable future to be on par with the plugins that offered support for these Helm repository types.

### Values from `ConfigMap` and `Secret` resources in other namespaces are no longer supported

Support for values references to `ConfigMap` and `Secret` resources in namespaces other than the namespace of the `HelmRelease` has been dropped, as this allowed information from other namespaces to leak into the composed values for the Helm release.
### Values from external source references (URLs) are no longer supported

We initially introduced this feature to support alternative (production focused) `values.yaml` files that sometimes come with charts. Users also relied on it to pull in generic and/or dynamic `values.yaml` files for their `HelmRelease` resources.

The former can now be achieved by defining a [`ValuesFiles` overwrite in the `HelmChartTemplateSpec`](#chart-file-references), which will make the Source Controller look for the referenced file in the chart, and overwrite the default values with the contents from that file.

Support for the latter use has been dropped, as it goes against the principles of GitOps and declarative configuration. You cannot reliably restore the cluster state from a Git repository if the configuration of a service relies on some URL being available.

Getting similar behavior is still possible [using a workaround that makes use of a `CronJob` to download the contents of the external URL on an interval](#external-source-references).

### You can now merge single values at a given path

There was a long outstanding request for the Helm Operator to support merging single values at a given path.

With the Helm Controller this is now possible by defining a [`targetPath` in the `ValuesReference`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.ValuesReference), which supports the same formatting as you would supply as an argument to the `helm` binary using `--set [path]=[value]`. In addition to this, the referred value can contain the same value formats (e.g. `{a,b,c}` for a list). You can read more about the available formats and limitations in the [Helm documentation](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set).

### Support added for depends-on relationships

We have added support for depends-on relationships to install `HelmRelease` resources in a given order; for example, because a chart relies on the presence of a Custom Resource Definition installed by another `HelmRelease` resource.

Entries defined in the `spec.dependsOn` list of the `HelmRelease` must be in a `Ready` state before the Helm Controller proceeds with installation and/or upgrade actions.

Note that this does not account for upgrade ordering. Kubernetes only allows applying one resource (`HelmRelease` in this case) at a time, so there is no way for the controller to know when a dependency `HelmRelease` may be updated.

Also, circular dependencies between `HelmRelease` resources must be avoided, otherwise the interdependent `HelmRelease` resources will never be reconciled.
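For example (a minimal sketch, with a hypothetical `cert-manager` release in the same namespace as the dependency):

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-app
  namespace: default
spec:
  # my-app is only installed or upgraded once the cert-manager
  # HelmRelease reports a Ready state
  dependsOn:
    - name: cert-manager
  # ...omitted for brevity
```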
### You can now suspend a HelmRelease

There is a new `spec.suspend` field, which, if set to `true`, causes the Helm Controller to skip reconciliation for the resource. This can be used to, for example, temporarily ignore chart changes, and prevent a Helm release from getting upgraded.

### Helm releases can target another cluster

We have added support for making Helm releases to other clusters. If the `spec.kubeConfig` field in the `HelmRelease` is set, Helm actions will run against the default cluster specified in that KubeConfig instead of the local cluster that is responsible for the reconciliation of the `HelmRelease`.

The Helm storage is stored on the remote cluster in a namespace that equals the namespace of the `HelmRelease`, or the configured `spec.storageNamespace`. The release itself is made in a namespace that equals the namespace of the `HelmRelease`, or the configured `spec.targetNamespace`. The namespaces are expected to exist, and can for example be created using the [Kustomize Controller](https://toolkit.fluxcd.io/components/kustomize/controller/) which has the same cross-cluster support.

Other references to Kubernetes resources in the `HelmRelease`, like `ValuesReference` resources, are expected to exist on the reconciling cluster.

### Added support for notifications and webhooks

Sending notifications and/or alerts to Slack, Microsoft Teams, Discord, or Rocket is now possible using the [Notification Controller](../components/notification/controller.md), [`Provider` Custom Resources](../components/notification/provider.md) and [`Alert` Custom Resources](../components/notification/alert.md).

It does not stop there: using [`Receiver` Custom Resources](../components/notification/receiver.md) you can trigger **push-based** reconciliations from Harbor, GitHub, GitLab, BitBucket or your CI system by making use of the webhook endpoint the resource creates.

### Introduction of the `flux` CLI to create and/or generate Custom Resources

With the new [`flux` CLI](../cmd/flux.md) it is now possible to create and/or generate the Custom Resources mentioned earlier. To generate the YAML for a `HelmRepository` and `HelmRelease` resource, you can for example run:

```console
$ flux create source helm podinfo \
    --url=https://stefanprodan.github.io/podinfo \
    --interval=10m \
    --export
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 10m0s
  url: https://stefanprodan.github.io/podinfo

$ flux create helmrelease podinfo \
    --interval=10m \
    --source=HelmRepository/podinfo \
    --chart=podinfo \
    --chart-version=">4.0.0" \
    --export
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: podinfo
  namespace: flux-system
spec:
  chart:
    spec:
      chart: podinfo
      sourceRef:
        kind: HelmRepository
        name: podinfo
      version: '>4.0.0'
  interval: 10m0s
```

## API spec changes

The following is an overview of changes to the API spec, including behavioral changes compared to how the Helm Operator performs actions. For a full overview of the new API spec, consult the [API spec documentation](../components/helm/helmreleases.md#specification).
### Defining the Helm chart

#### Helm repository

For the Helm Operator, you used to configure a chart from a Helm repository as follows:

```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  chart:
    # The repository URL
    repository: https://charts.example.com
    # The name of the chart (without an alias)
    name: my-chart
    # The SemVer version of the chart
    version: 1.2.3
```

With the Helm Controller, you now create a `HelmRepository` resource in addition to the `HelmRelease` you would normally create (for all available fields, consult the [Source API reference](../components/source/api.md#source.toolkit.fluxcd.io/v1beta1.HelmRepository)):

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: my-repository
  namespace: default
spec:
  # The interval at which to check the upstream for updates
  interval: 10m
  # The repository URL; a valid URL contains at least a protocol and host
  url: https://charts.example.com
```

If you make use of a private Helm repository, instead of configuring the credentials by mounting a `repositories.yaml` file, you can now configure the HTTP/S basic auth and/or TLS credentials by referring to a `Secret` in the same namespace as the `HelmRepository`:

```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: my-repository-creds
  namespace: default
data:
  # HTTP/S basic auth credentials
  username: <BASE64>
  password: <BASE64>
  # TLS credentials (certFile and keyFile, and/or caCert)
  certFile: <BASE64>
  keyFile: <BASE64>
  caCert: <BASE64>
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: my-repository
  namespace: default
spec:
  # ...omitted for brevity
  secretRef:
    name: my-repository-creds
```

In the `HelmRelease`, you then use a reference to the `HelmRepository` resource in the `spec.chart.spec` (for all available fields, consult the [Helm API reference](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.HelmChartTemplate)):

```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # The interval at which to reconcile the Helm release
  interval: 10m
  chart:
    spec:
      # The name of the chart as made available by the HelmRepository
      # (without any aliases)
      chart: my-chart
      # A fixed SemVer, or any SemVer range
      # (i.e. >=4.0.0 <5.0.0)
      version: 1.2.3
      # The reference to the HelmRepository
      sourceRef:
        kind: HelmRepository
        name: my-repository
        # Optional, defaults to the namespace of the HelmRelease
        namespace: default
```

The `spec.chart.spec` values are used by the Helm Controller as a template to create a new `HelmChart` resource in the same namespace as the `sourceRef`, to be reconciled by the Source Controller. The Helm Controller watches `HelmChart` resources for (revision) changes, and performs an installation or upgrade when it notices a change.
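For illustration, the `HelmChart` object templated from the example above would look roughly like this (a sketch -- the controller creates and manages this object itself, naming it after the namespace and name of the `HelmRelease`):

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmChart
metadata:
  # <HelmRelease namespace>-<HelmRelease name>
  name: default-my-release
  namespace: default
spec:
  chart: my-chart
  version: 1.2.3
  sourceRef:
    kind: HelmRepository
    name: my-repository
  interval: 10m
```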
#### Git repository

For the Helm Operator, you used to configure a chart from a Git repository as follows:

```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  chart:
    # The URL of the Git repository
    git: https://example.com/org/repo
    # The Git branch (or other Git reference)
    ref: master
    # The path of the chart relative to the repository root
    path: ./charts/my-chart
```

With the Helm Controller, you create a `GitRepository` resource in addition to the `HelmRelease` you would normally create (for all available fields, consult the [Source API reference](../components/source/api.md#source.toolkit.fluxcd.io/v1beta1.GitRepository)):

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: my-repository
  namespace: default
spec:
  # The interval at which to check the upstream for updates
  interval: 10m
  # The repository URL, can be a HTTP/S or SSH address
  url: https://example.com/org/repo
  # The Git reference to checkout and monitor for changes
  # (defaults to master)
  # For all available options, see:
  # https://toolkit.fluxcd.io/components/source/api/#source.toolkit.fluxcd.io/v1beta1.GitRepositoryRef
  ref:
    branch: master
```

If you make use of a private Git repository, instead of configuring the credentials by mounting a private key and making changes to the `known_hosts` file, you can now configure the credentials for both HTTP/S and SSH by referring to a `Secret` in the same namespace as the `GitRepository`:

```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: my-repository-creds
  namespace: default
data:
  # HTTP/S basic auth credentials
  username: <BASE64>
  password: <BASE64>
  # SSH credentials
  identity: <BASE64>
  identity.pub: <BASE64>
  known_hosts: <BASE64>
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: my-repository
  namespace: default
spec:
  # ...omitted for brevity
  secretRef:
    name: my-repository-creds
```

In the `HelmRelease`, you then use a reference to the `GitRepository` resource in the `spec.chart.spec` (for all available fields, consult the [Helm API reference](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.HelmChartTemplate)):

```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # The interval at which to reconcile the Helm release
  interval: 10m
  chart:
    spec:
      # The path of the chart relative to the repository root
      chart: ./charts/my-chart
      # The reference to the GitRepository
      sourceRef:
        kind: GitRepository
        name: my-repository
        # Optional, defaults to the namespace of the HelmRelease
        namespace: default
```

The `spec.chart.spec` values are used by the Helm Controller as a template to create a new `HelmChart` resource in the same namespace as the `sourceRef`, to be reconciled by the Source Controller. The Helm Controller watches `HelmChart` resources for (revision) changes, and performs an installation or upgrade when it notices a change.

### Defining values

#### Inlined values

Inlined values (defined in the `spec.values` of the `HelmRelease`) still work as they did with the Helm Operator.
The field represents a YAML map as you would put in a file and supply to `helm` with `-f values.yaml`, but inlined into the `HelmRelease` manifest:

```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  values:
    foo: value1
    bar:
      baz: value2
    oof:
    - item1
    - item2
```

#### Values from sources

As described in the [overview of changes](#overview-of-changes), there have been multiple changes to the way you can refer to values from sources (like `ConfigMap` and `Secret` references), including the [drop of support for external source (URL) references](#values-from-external-source-references-urls-are-no-longer-supported) and [added support for merging single values at a specific path](#you-can-now-merge-single-values-at-a-given-path).

Values are still merged in the order given, with later values overwriting earlier. The values from sources always have a lower priority than the values inlined in the `HelmRelease` via the `spec.values` key.

##### `ConfigMap` and `Secret` references

`ConfigMap` and `Secret` references used to be defined as follows:

```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  valuesFrom:
  - configMapKeyRef:
      name: my-config-values
      namespace: my-ns
      key: values.yaml
      optional: false
  - secretKeyRef:
      name: my-secret-values
      namespace: my-ns
      key: values.yaml
      optional: true
```

In the new API spec the individual `configMapKeyRef` and `secretKeyRef` objects are bundled into a single [`ValuesReference`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.ValuesReference) which [no longer allows referring to resources in other namespaces](#values-from-configmap-and-secret-resources-in-other-namespaces-are-no-longer-supported):

```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  valuesFrom:
  - kind: ConfigMap
    name: my-config-values
    valuesKey: values.yaml
    optional: false
  - kind: Secret
    name: my-secret-values
    valuesKey: values.yaml
    optional: true
```

Another thing to take note of is that the behavior for values references marked as `optional` has changed. When set, a "not found" error for the values reference is ignored, but an invalid `valuesKey` or `targetPath`, or a transient error, will still result in a reconciliation failure.
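For example, merging a single value from a `Secret` at a given path (a sketch; the `db-password` Secret and the `db.password` chart value are hypothetical):

```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  valuesFrom:
  - kind: Secret
    name: db-password
    valuesKey: password
    # Equivalent to `helm --set db.password=<value of the "password" key>`
    targetPath: db.password
```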
##### Chart file references

With the Helm Operator it was possible to refer to an alternative values file (e.g. for
production usage) in the directory of a chart from a Git repository:

```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  valuesFrom:
  # Values file to merge in,
  # expected to be a relative path in the chart directory
  - chartFileRef:
      path: values-prod.yaml
```

With the Helm Controller, this declaration has moved to the `spec.chart.spec`, and the feature is no longer limited to charts from a Git repository:

```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  chart:
    spec:
      chart: my-chart
      version: 1.2.3
      # Alternative values file to use as the default values,
      # expected to be a relative path in the sourceRef
      valuesFiles:
      - values.yaml
      - values-prod.yaml
      sourceRef:
        kind: HelmRepository
        name: my-repository
```

When `valuesFiles` is defined, the chart will be (re)packaged with the values from the referenced files as the default values, merged in the order they appear. Note that this behavior is different from the Helm Operator, as the default values (`values.yaml`) are not merged by default and must be explicitly added to the list.

##### External source references

While [the support for external source references has been dropped](#values-from-external-source-references-urls-are-no-longer-supported), it is possible to work around this limitation by creating a `CronJob` that periodically fetches the values from an external URL and saves them to a `ConfigMap` or `Secret` resource.

First, create a `ServiceAccount`, `Role` and `RoleBinding` capable of updating a limited set of `ConfigMap` resources:

```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: values-fetcher
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: configmap-updater
  namespace: default
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  # ResourceNames limits the access of the role to
  # a defined set of ConfigMap resources
  resourceNames: ["my-external-values"]
  verbs: ["patch", "get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: update-values-configmaps
  namespace: default
subjects:
- kind: ServiceAccount
  name: values-fetcher
  namespace: default
roleRef:
  kind: Role
  name: configmap-updater
  apiGroup: rbac.authorization.k8s.io
```

As `resourceNames` scoping in the `Role` [does not allow restricting `create` requests](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources), we need to create empty placeholder(s) for the `ConfigMap` resource(s) that will hold the fetched values:

```yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-external-values
  namespace: default
data: {}
```

Lastly, create a `CronJob` that uses the `ServiceAccount` defined above, fetches the external values on an interval, and applies them to the `ConfigMap`:

```yaml
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: fetch-external-values
spec:
  concurrencyPolicy: Forbid
  schedule: "*/5 * * * *"
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: values-fetcher
          containers:
          - name: kubectl
            image: bitnami/kubectl:1.19
            volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
            command:
            - sh
            - -c
https://example.com/path/to/values.yaml -o /tmp/values.yaml && - kubectl create configmap my-external-values --from-file=/tmp/values.yaml -oyaml --dry-run=client | - kubectl apply -f - - volumes: - - name: tmp-volume - emptyDir: - medium: Memory - restartPolicy: OnFailure -``` - -You can now refer to the `my-external-values` `ConfigMap` resource in your `HelmRelease`: - -```yaml ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: my-release - namespace: default -spec: - # ...omitted for brevity - valuesFrom: - - kind: ConfigMap - name: my-external-values -``` - -### Defining release options - -With the Helm Operator the release options used to be configured in the `spec` of the `HelmRelease` and applied to both Helm install and upgrade actions. - -This has changed for the Helm Controller, where some defaults can be defined in the [`spec`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.HelmReleaseSpec), but specific action configurations and overwrites for the defaults can be defined in the [`spec.install`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Install), [`spec.upgrade`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Upgrade) and [`spec.test`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Test) sections of the `HelmRelease`. - -### Defining a rollback / uninstall configuration - -With the Helm Operator, uninstalling a release after an installation failure was done automatically, and rolling back from a faulty upgrade and configuring options like retries was done as follows: - -```yaml ---- -apiVersion: helm.fluxcd.io/v1 -kind: HelmRelease -metadata: - name: my-release - namespace: default -spec: - # ...omitted for brevity - rollback: - enable: true - retries: true - maxRetries: 5 - disableHooks: false - force: false - recreate: false - timeout: 300 -``` - -The Helm Controller offers an extensive set of configuration options to remediate when a Helm release fails, using [`spec.install.remediate`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.InstallRemediation), [`spec.upgrade.remediate`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation), [`spec.rollback`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Rollback) and [`spec.uninstall`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Uninstall). Some of the new features include the option to remediate with an uninstall after an upgrade failure, and the option to keep a failed release for debugging purposes when it has run out of retries. 
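-
-For example, the sketch below configures an upgrade remediation that falls back to a Helm uninstall instead of a rollback, and keeps the last failed release around for debugging; the `strategy` and `remediateLastFailure` fields are taken from the [`UpgradeRemediation`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation) API reference, but verify them against the controller version you run:
-
-```yaml
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: my-release
-  namespace: default
-spec:
-  # ...omitted for brevity
-  upgrade:
-    remediation:
-      retries: 3
-      # Remediate a failed upgrade with a Helm uninstall
-      # action instead of a rollback
-      strategy: uninstall
-      # Keep the last failed release for debugging once
-      # the retries are exhausted
-      remediateLastFailure: true
-```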
-
-#### Automated uninstalls
-
-The configuration below mimics the uninstall behavior of the Helm Operator (for all available fields, consult the [`InstallRemediation`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.InstallRemediation) and [`Uninstall`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Uninstall) API references):
-
-```yaml
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: my-release
-  namespace: default
-spec:
-  # ...omitted for brevity
-  install:
-    # Remediation configuration for when the Helm install
-    # (or subsequent Helm test) action fails
-    remediation:
-      # Number of retries that should be attempted on failures before
-      # bailing; a negative integer means unlimited retries
-      retries: -1
-  # Configuration options for the Helm uninstall action
-  uninstall:
-    timeout: 5m
-    disableHooks: false
-    keepHistory: false
-```
-
-#### Automated rollbacks
-
-The configuration below shows an automated rollback configuration that matches [the Helm Operator configuration shown above](#defining-a-rollback-uninstall-configuration) (for all available fields, consult the [`UpgradeRemediation`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation) and [`Rollback`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Rollback) API references):
-
-```yaml
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: my-release
-  namespace: default
-spec:
-  # ...omitted for brevity
-  upgrade:
-    # Remediation configuration for when a Helm upgrade action fails
-    remediation:
-      # Number of retries to attempt after a failure;
-      # setting this to 0 means no remediation will be
-      # attempted
-      retries: 5
-  # Configuration options for the Helm rollback action
-  rollback:
-    timeout: 5m
-    disableWait: false
-    disableHooks: false
-    recreate: false
-    force: false
-    cleanupOnFail: false
-```
-
-## Migration strategy
-
-Due to the high number of changes to the API spec, there are no detailed instructions available to provide a simple migration path. There is, however, a [simple procedure to follow](#steps), which, combined with the detailed list of [API spec changes](#api-spec-changes), should make the migration relatively easy.
-
-Here are some things to know:
-
-* The Helm Controller will ignore the old custom resources (and the Helm Operator will ignore the new resources).
-* Deleting a resource while the corresponding controller is running will result in the Helm release also being deleted.
-* Deleting a `CustomResourceDefinition` will also delete all custom resources of that kind.
-* If both the Helm Controller and the Helm Operator are running, and both an old and a new custom resource define the same release, the two controllers will fight over it.
-* The Helm Controller will always perform an upgrade the first time it encounters a new `HelmRelease` for an existing release; this is [due to the changes to release mechanics and bookkeeping](#helm-storage-drift-detection-no-longer-relies-on-dry-runs).
-
-The safest way to upgrade is to avoid deletions and fights by stopping the Helm Operator. Once the operator is not running, it is safe to deploy the Helm Controller (e.g., by following the [Get Started guide](../get-started/index.md), [utilizing `flux install`](../cmd/flux_install.md), or using the manifests from the [release page](https://github.com/fluxcd/helm-controller/releases)), and start replacing the old resources with new resources.
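-
-Stopping the operator can be as simple as scaling it down to zero replicas; a sketch, assuming the Helm Operator runs as a `Deployment` named `helm-operator` in the `flux` namespace:
-
-```sh
-kubectl -n flux scale deployment/helm-operator --replicas=0
-```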
-You can keep the old resources around during this process, since the Helm Controller will ignore them.
-
-### Steps
-
-The recommended migration steps for a single `HelmRelease` are as follows:
-
-1. Ensure the Helm Operator is not running, as otherwise the Helm Controller and Helm Operator will fight over the release.
-1. Create a [`GitRepository` or `HelmRepository` resource for the `HelmRelease`](#defining-the-helm-chart), including any `Secret` that may be required to access the source. Note that it is possible for multiple `HelmRelease` resources to share a `GitRepository` or `HelmRepository` resource.
-1. Create a new `HelmRelease` resource ([with the `helm.toolkit.fluxcd.io` group domain](#the-helmrelease-custom-resource-group-domain-changed)), define the `spec.releaseName` (plus the `spec.targetNamespace` and `spec.storageNamespace` if applicable) to match that of the existing release, and rewrite the configuration to adhere to the [API spec changes](#api-spec-changes).
-1. Confirm the Helm Controller successfully upgrades the release.
-
-### Example
-
-As a full example, this is an old resource:
-
-```yaml
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
-  name: podinfo
-  namespace: default
-spec:
-  chart:
-    repository: https://stefanprodan.github.io/podinfo
-    name: podinfo
-    version: 5.0.3
-  values:
-    replicaCount: 1
-```
-
-The custom resources for the Helm Controller would be:
-
-```yaml
----
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: HelmRepository
-metadata:
-  name: podinfo
-  namespace: default
-spec:
-  interval: 10m
-  url: https://stefanprodan.github.io/podinfo
----
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: podinfo
-  namespace: default
-spec:
-  interval: 5m
-  releaseName: default-podinfo
-  chart:
-    spec:
-      chart: podinfo
-      version: 5.0.3
-      sourceRef:
-        kind: HelmRepository
-        name: podinfo
-      interval: 10m
-  values:
-    replicaCount: 1
-```
-
-### Migrating gradually
-
-Gradually migrating to the Helm Controller is possible by scaling down the Helm Operator while you move over resources, and scaling it up again once you have migrated some of the releases to the Helm Controller.
-
-While doing this, make sure that once you scale up the Helm Operator again, there are no old and new `HelmRelease` resources pointing towards the same release, as they will fight over the release.
-
-Alternatively, you can gradually migrate per namespace without ever needing to shut the Helm Operator down, so that continuous delivery is never interrupted in the namespaces that are not being migrated. To do so, you can customize the Helm Operator roles associated with its `ServiceAccount` to prevent it from interfering with the Helm Controller in the namespaces you are migrating. First, create a new `ClusterRole` for the Helm Operator to operate in "read-only" mode cluster-wide:
-
-```yaml
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: helm-operator-ro
-rules:
-  - apiGroups: ['*']
-    resources: ['*']
-    verbs:
-      - get
-      - watch
-      - list
-  - nonResourceURLs: ['*']
-    verbs: ['*']
-```
-
-By default, [the `helm-operator` `ServiceAccount` is bound to a `ClusterRole` that allows it to create, patch and delete resources in all namespaces](https://github.com/fluxcd/helm-operator/blob/1baacd6dee865b57da80e0e767286ed68d578246/deploy/rbac.yaml#L9-L36).
-Bind the `ServiceAccount` to the new `helm-operator-ro` `ClusterRole`:
-
-```diff
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: helm-operator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-- name: helm-operator
-+ name: helm-operator-ro
-subjects:
-  - kind: ServiceAccount
-    name: helm-operator
-    namespace: flux
-```
-
-Finally, create `RoleBindings` for every namespace except the one you are currently migrating:
-
-```yaml
-# Create a `RoleBinding` for each namespace the Helm Operator is allowed to process `HelmReleases` in
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: helm-operator
-  namespace: helm-operator-watched-namespace
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: helm-operator
-subjects:
-  - name: helm-operator
-    namespace: flux
-    kind: ServiceAccount
-# Do not create the following to prevent the Helm Operator from watching `HelmReleases` in `helm-controller-watched-namespace`
-# ---
-# apiVersion: rbac.authorization.k8s.io/v1
-# kind: RoleBinding
-# metadata:
-#   name: helm-operator
-#   namespace: helm-controller-watched-namespace
-# roleRef:
-#   apiGroup: rbac.authorization.k8s.io
-#   kind: ClusterRole
-#   name: helm-operator
-# subjects:
-#   - name: helm-operator
-#     namespace: flux
-#     kind: ServiceAccount
-```
-
-If you are using [the Helm Operator chart](https://github.com/fluxcd/helm-operator/tree/master/chart/helm-operator), make sure to set `rbac.create` to `false` so that you can manage the `ClusterRoleBinding` and `RoleBindings` yourself.
-
-### Deleting old resources
-
-Once you have migrated all your `HelmRelease` resources to the Helm Controller, you can remove all of the old resources by deleting the old Custom Resource Definition:
-
-```sh
-kubectl delete crd helmreleases.helm.fluxcd.io
-```
-
-## Frequently Asked Questions
-
-### Are automated image updates supported?
-
-Not yet, but the feature is under active development. See the [image update feature parity section on the roadmap](https://toolkit.fluxcd.io/roadmap/#flux-image-update-feature-parity) for updates on this topic.
-
-### How do I automatically apply my `HelmRelease` resources to the cluster?
-
-If you are currently a Flux v1 user, you can commit the `HelmRelease` resources to Git, and Flux will automatically apply them to the cluster like any other resource. It does not, however, support automated image updates for Helm Controller resources.
-
-If you are not a Flux v1 user or want to fully migrate to Flux v2, the [Kustomize Controller](https://toolkit.fluxcd.io/components/kustomize/controller/) will serve your needs.
-
-### I am still running Helm v2, what is the right upgrade path for me?
-
-Migrate your Helm v2 releases to v3 using [the Helm Operator's migration feature](https://docs.fluxcd.io/projects/helm-operator/en/stable/helmrelease-guide/release-configuration/#migrating-from-helm-v2-to-v3), or make use of the [`helm-2to3`](https://github.com/helm/helm-2to3) plugin directly, before continuing with the [migration steps](#steps).
-
-### Is the Helm Controller ready for production?
-
-Probably, but with some side notes:
-
-1. It is still under active development, and while our focus has been to stabilize the API as much as we can during the first development phase, we do not guarantee there will not be any breaking changes before we reach General Availability.
-   We are, however, committed to providing [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) for upcoming API versions.
-1. There may be (internal) behavioral changes in upcoming releases, but these should be aimed at further stabilizing the Helm Controller itself, solving edge case issues, and providing better logging, observability and other improvements.
-
-### Can I use Helm Controller standalone?
-
-Helm Controller depends on [Source Controller](../components/source/controller.md); you can install both controllers
-and manage Helm releases in a declarative way without GitOps.
-For more details please see this [answer](../faq/index.md#can-i-use-flux-helmreleases-without-gitops).
-
-### I have another question
-
-Given the amount of changes, it is quite possible that this document did not provide you with a clear answer for your specific setup. If this applies to you, do not hesitate to ask for help in the [GitHub Discussions](https://github.com/fluxcd/flux2/discussions/new?category_id=31999889) or on the [`#flux` CNCF Slack channel](https://slack.cncf.io)!
diff --git a/docs/guides/helmreleases.md b/docs/guides/helmreleases.md
deleted file mode 100644
index 96e8a0f2..00000000
--- a/docs/guides/helmreleases.md
+++ /dev/null
@@ -1,493 +0,0 @@
-# Manage Helm Releases
-
-The [helm-controller](../components/helm/controller.md) allows you to
-declaratively manage Helm chart releases with Kubernetes manifests.
-It makes use of the artifacts produced by the
-[source-controller](../components/source/controller.md) from
-`HelmRepository`, `GitRepository`, `Bucket` and `HelmChart` resources.
-The helm-controller is part of the default toolkit installation.
-
-## Prerequisites
-
-To follow this guide you'll need a Kubernetes cluster with the GitOps
-toolkit controllers installed on it.
-Please see the [get started guide](../get-started/index.md)
-or the [installation guide](installation.md).
-
-## Define a chart source
-
-To be able to release a Helm chart, the source that contains the chart
-(either a `HelmRepository`, `GitRepository`, or `Bucket`) first has to be
-known to the source-controller, so that the `HelmRelease` can reference it.
-
-### Helm repository
-
-Helm repositories are the recommended source to retrieve Helm charts
-from, as they are lightweight in processing and make it possible to
-configure a semantic version selector for the chart version that should
-be released.
-
-They can be declared by creating a `HelmRepository` resource; the
-source-controller will fetch the Helm repository index for this
-resource on an interval and expose it as an artifact:
-
-```yaml
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: HelmRepository
-metadata:
-  name: podinfo
-  namespace: flux-system
-spec:
-  interval: 1m
-  url: https://stefanprodan.github.io/podinfo
-```
-
-The `interval` defines how often the Helm repository index
-is fetched, and should be at least `1m`. Setting this to a higher
-value means newer chart versions will be detected at a slower pace;
-a push-based fetch can be introduced using [webhook receivers](webhook-receivers.md).
-
-The `url` can be any HTTP/S Helm repository URL.
-
-!!! hint "Authentication"
-    HTTP/S basic and TLS authentication can be configured for private
-    Helm repositories. See the [`HelmRepository` CRD docs](../components/source/helmrepositories.md)
-    for more details.
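-
-As a sketch of what such a configuration could look like (the Secret name `repo-credentials` and the example URL are illustrative; the `username`/`password` keys are the convention described in the CRD docs):
-
-```yaml
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: repo-credentials
-  namespace: flux-system
-stringData:
-  username: my-username
-  password: my-password
----
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: HelmRepository
-metadata:
-  name: my-private-repo
-  namespace: flux-system
-spec:
-  interval: 1m
-  url: https://charts.example.com
-  secretRef:
-    name: repo-credentials
-```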
-
-### Git repository
-
-Charts from Git repositories can be released by declaring a
-`GitRepository`; the source-controller will fetch the contents of the
-repository on an interval and expose it as an artifact.
-
-The source-controller can build and expose Helm charts as artifacts
-from the contents of the `GitRepository` artifact (more about this
-later on in the guide).
-
-**There is one caveat you should be aware of:** to make the
-source-controller produce a new chart artifact, the `version` in the
-`Chart.yaml` of the chart must be bumped.
-
-An example `GitRepository`:
-
-```yaml
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: GitRepository
-metadata:
-  name: podinfo
-  namespace: flux-system
-spec:
-  interval: 1m
-  url: https://github.com/stefanprodan/podinfo
-  ref:
-    branch: master
-  ignore: |
-    # exclude all
-    /*
-    # include charts directory
-    !/charts/
-```
-
-The `interval` defines how often the Git repository contents
-are fetched, and should be at least `1m`. Setting this to a higher
-value means newer chart versions will be detected at a slower pace;
-a push-based fetch can be introduced using [webhook receivers](webhook-receivers.md).
-
-The `url` can be any HTTP/S or SSH address (the latter requiring
-authentication).
-
-The `ref` defines the checkout strategy, and is set to follow the
-`master` branch in the above example. For other strategies like
-tags or commits, see the [`GitRepository` CRD docs](../components/source/gitrepositories.md).
-
-The `ignore` defines file and folder exclusion for the
-artifact produced, and follows the [`.gitignore` pattern
-format](https://git-scm.com/docs/gitignore#_pattern_format).
-The above example only includes the `charts` directory of the
-repository and omits all other files.
-
-!!! hint "Authentication"
-    HTTP/S basic and SSH authentication can be configured for private
-    Git repositories. See the [`GitRepository` CRD docs](../components/source/gitrepositories.md)
-    for more details.
-
-### Cloud Storage
-
-While still possible, it is inadvisable to use a `Bucket` as a source for a `HelmRelease`,
-as the source-controller will download the whole storage bucket at each sync. The
-bucket can easily become very large if there are frequent releases of multiple charts
-that are stored in the same bucket.
-
-A better option is to use [Chartmuseum](https://github.com/helm/chartmuseum) and run a
-cluster-local Helm repository that can be used by the source-controller. Chartmuseum
-supports multiple cloud storage solutions such as S3, GCS, and Azure Blob Storage,
-meaning that you are not limited to storage providers that support the S3 protocol.
-
-You can deploy a Chartmuseum instance with a `HelmRelease` that exposes a Helm repository stored
-in an S3 bucket. Please refer to [Chartmuseum's "How to Run" documentation](https://chartmuseum.com/docs/#how-to-run)
-for details about how to use other storage backends.
-
-```yaml
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: HelmRepository
-metadata:
-  name: chartmuseum
-  namespace: flux-system
-spec:
-  url: https://chartmuseum.github.io/charts
-  interval: 10m
----
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: chartmuseum
-  namespace: flux-system
-spec:
-  interval: 5m
-  chart:
-    spec:
-      chart: chartmuseum
-      version: "2.14.2"
-      sourceRef:
-        kind: HelmRepository
-        name: chartmuseum
-        namespace: flux-system
-      interval: 1m
-  values:
-    env:
-      open:
-        AWS_SDK_LOAD_CONFIG: true
-        STORAGE: amazon
-        STORAGE_AMAZON_BUCKET: "bucket-name"
-        STORAGE_AMAZON_PREFIX: ""
-        STORAGE_AMAZON_REGION: "region-name"
-    serviceAccount:
-      create: true
-      annotations:
-        eks.amazonaws.com/role-arn: "role-arn"
-    securityContext:
-      enabled: true
-      fsGroup: 65534
-```
-
-After Chartmuseum is up and running, it should be possible to use the accompanying
-service as the URL for the `HelmRepository`:
-
-```yaml
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: HelmRepository
-metadata:
-  name: helm-charts
-  namespace: flux-system
-spec:
-  interval: 1m
-  url: http://chartmuseum-chartmuseum:8080
-```
-
-## Define a Helm release
-
-With the chart source created, define a new `HelmRelease` to release
-the Helm chart:
-
-```yaml
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: podinfo
-  namespace: default
-spec:
-  interval: 5m
-  chart:
-    spec:
-      chart: podinfo
-      version: '4.0.x'
-      sourceRef:
-        kind: HelmRepository
-        name: podinfo
-        namespace: flux-system
-      interval: 1m
-  values:
-    replicaCount: 2
-```
-
-The `chart.spec` values are used by the helm-controller as a template
-to create a new `HelmChart` resource in the same namespace as the
-`sourceRef`. The source-controller will then look up the chart in the
-artifact of the referenced source, and either fetch the chart for a
-`HelmRepository`, or build it from a `GitRepository` or `Bucket`.
-It will then make it available as a `HelmChart` artifact to be used by
-the helm-controller.
-
-The `chart.spec.chart` can either contain:
-
-* The name of the chart as made available by the `HelmRepository`
-  (without any aliases), for example: `podinfo`
-* The relative path the chart can be found at in the `GitRepository`
-  or `Bucket`, for example: `./charts/podinfo`
-* The relative path the chart package can be found at in the
-  `GitRepository` or `Bucket`, for example: `./charts/podinfo-1.2.3.tgz`
-
-The `chart.spec.version` can be a fixed semver, or any semver range
-(e.g. `>=4.0.0 <5.0.0`). It is only taken into account for `HelmRelease`
-resources that reference a `HelmRepository` source.
-
-!!! hint "Advanced configuration"
-    The `HelmRelease` offers an extensive set of configurable flags
-    for finer-grained control over how Helm actions are performed.
-    See the [`HelmRelease` CRD docs](../components/helm/helmreleases.md)
-    for more details.
-
-## Refer to values in `ConfigMap` and `Secret` resources
-
-It is possible to define a list of `ConfigMap` and `Secret` resources
-from which to take values. The values are merged in the order given,
-with later values overwriting earlier ones. These values always have a
-lower priority than the values inlined in the `HelmRelease` via the
-`spec.values` parameter.
- -```yaml -spec: - valuesFrom: - - kind: ConfigMap - name: prod-env-values - valuesKey: values-prod.yaml - - kind: Secret - name: prod-tls-values - valuesKey: crt - targetPath: tls.crt -``` - -The definition of the listed keys is as follows: - -- `kind`: Kind of the values referent (`ConfigMap` or `Secret`). -- `name`: Name of the values referent, in the same namespace as the - `HelmRelease`. -- `valuesKey` _(Optional)_: The data key where the values.yaml or a - specific value can be found. Defaults to `values.yaml` when omitted. -- `targetPath` _(Optional)_: The YAML dot notation path at which the - value should be merged. When set, the `valuesKey` is expected to be - a single flat value. Defaults to `None` when omitted, which results - in the values getting merged at the root. - -!!! hint "Note" - The `targetPath` supports the same formatting as you would supply - as an argument to the `helm` binary using `--set [path]=[value]`. - In addition to this, the referred value can contain the same - value formats (e.g. `{a,b,c}` for a list). - You can read more about the available formats and limitations in - the [Helm documentation](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set). - -!!! warning "`TargetPath` and JSON values" - When using `TargetPath` in combination with a JSON string, the - [limitations are the same as while using `helm`](https://github.com/helm/helm/issues/5618), - and require you to escape the full JSON string (including `=`, `[`, `,`, `.`). - -## Refer to values in `ConfigMaps` generated with Kustomize - -It is possible to use Kustomize [ConfigMap generator](https://kubectl.docs.kubernetes.io/references/kustomize/configmapgenerator/) -to trigger a Helm release upgrade every time the encoded values change. - -First create a `kustomizeconfig.yaml` for Kustomize to be able to patch -`ConfigMaps` referenced in `HelmRelease` manifests: - -```yaml -nameReference: -- kind: ConfigMap - version: v1 - fieldSpecs: - - path: spec/valuesFrom/name - kind: HelmRelease -``` - -Create a `HelmRelease` definition that references a `ConfigMap`: - -```yaml -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: podinfo - namespace: podinfo -spec: - interval: 5m - releaseName: podinfo - chart: - spec: - chart: podinfo - sourceRef: - kind: HelmRepository - name: podinfo - valuesFrom: - - kind: ConfigMap - name: podinfo-values -``` - -Create a `kustomization.yaml` that generates the `ConfigMap` using our kustomize config: - -```yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: podinfo -resources: - - namespace.yaml - - repository.yaml - - release.yaml -configMapGenerator: - - name: podinfo-values - files: - - values.yaml=my-values.yaml -configurations: - - kustomizeconfig.yaml -``` - -When [kustomize-controller](../components/kustomize/controller.md) reconciles the above manifests, it will generate -a unique name of the `ConfigMap` every time `my-values.yaml` content is updated in Git: - -```yaml -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: podinfo - namespace: podinfo -spec: - valuesFrom: - - kind: ConfigMap - name: podinfo-values-2mh2t8m94h -``` - -!!! hint "Note" - Stale `ConfigMaps`, previously generated by Kustomize, will be - removed from the cluster by kustomize-controller if - [pruning](../components/kustomize/kustomization/#garbage-collection) is enabled. 
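-
-For reference, the `my-values.yaml` file consumed by the `configMapGenerator` above is a plain Helm values file; an illustrative example:
-
-```yaml
-# my-values.yaml (illustrative values for the podinfo chart)
-replicaCount: 2
-```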
-
-## Refer to values inside the chart
-
-It is possible to replace the `values.yaml` with a different file present inside the Helm chart.
-
-```yaml
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: mongodb
-  namespace: mongodb
-spec:
-  interval: 5m
-  chart:
-    spec:
-      chart: mongodb
-      sourceRef:
-        kind: HelmRepository
-        name: bitnami
-      valuesFiles:
-        - values.yaml
-        - values-production.yaml
-  values:
-    replicaCount: 5
-```
-
-If a file listed in `spec.chart.spec.valuesFiles` does not exist inside the chart, the chart artifact cannot be built. To determine why the `HelmChart` fails to produce an artifact, you can inspect the status with:
-
-```console
-$ kubectl get helmcharts --all-namespaces
-NAME     READY   STATUS
-mongodb  False   failed to locate override values file: values-production.yaml
-```
-
-## Configure notifications
-
-The default toolkit installation configures the helm-controller to
-broadcast events to the [notification-controller](../components/notification/controller.md).
-
-To receive the events as notifications, a `Provider` needs to be set up
-first as described in the [notifications guide](notifications.md#define-a-provider).
-Once you have set up the `Provider`, create a new `Alert` resource in
-the `flux-system` namespace to start receiving notifications about the Helm
-release:
-
-```yaml
-apiVersion: notification.toolkit.fluxcd.io/v1beta1
-kind: Alert
-metadata:
-  name: helm-podinfo
-  namespace: flux-system
-spec:
-  providerRef:
-    name: slack
-  eventSeverity: info
-  eventSources:
-    - kind: HelmRepository
-      name: podinfo
-    - kind: HelmChart
-      name: default-podinfo
-    - kind: HelmRelease
-      name: podinfo
-      namespace: default
-```
-
-![helm-controller alerts](../_files/helm-controller-alerts.png)
-
-## Configure webhook receivers
-
-When using semver ranges for Helm releases, you may want to trigger an update
-as soon as a new chart version is published to your Helm repository.
-In order to notify source-controller about a chart update,
-you can [set up webhook receivers](webhook-receivers.md).
-
-First generate a random string and create a secret with a `token` field:
-
-```sh
-TOKEN=$(head -c 12 /dev/urandom | shasum | cut -d ' ' -f1)
-echo $TOKEN
-
-kubectl -n flux-system create secret generic webhook-token \
---from-literal=token=$TOKEN
-```
-
-When using [Harbor](https://goharbor.io/) as your Helm repository, you can define a receiver with:
-
-```yaml
-apiVersion: notification.toolkit.fluxcd.io/v1beta1
-kind: Receiver
-metadata:
-  name: helm-podinfo
-  namespace: flux-system
-spec:
-  type: harbor
-  secretRef:
-    name: webhook-token
-  resources:
-    - kind: HelmRepository
-      name: podinfo
-```
-
-The notification-controller generates a unique URL using the provided token and the receiver name/namespace.
-
-Find the URL with:
-
-```console
-$ kubectl -n flux-system get receiver/helm-podinfo
-
-NAME           READY   STATUS
-helm-podinfo   True    Receiver initialised with URL: /hook/bed6d00b5555b1603e1f59b94d7fdbca58089cb5663633fb83f2815dc626d92b
-```
-
-Log in to the Harbor interface, go to Projects, select a project, and select Webhooks.
-Fill the form with:
-
-* Endpoint URL: compose the address using the receiver LB and the generated URL `http:///`
-* Auth Header: use the `token` string
-
-With the above settings, when you upload a chart, the following happens:
-
-* Harbor sends the chart push event to the receiver address
-* Notification controller validates the authenticity of the payload using the auth header
-* Source controller is notified about the changes
-* Source controller pulls the changes into the cluster and updates the `HelmChart` version
-* Helm controller is notified about the version change and upgrades the release
-
-!!! hint "Note"
-    Besides Harbor, you can define receivers for **GitHub**, **GitLab**, **Bitbucket**
-    and any other system that supports webhooks, e.g. Jenkins, CircleCI, etc.
-    See the [Receiver CRD docs](../components/notification/receiver.md) for more details.
diff --git a/docs/guides/image-update.md b/docs/guides/image-update.md
deleted file mode 100644
index 6cc399ef..00000000
--- a/docs/guides/image-update.md
+++ /dev/null
@@ -1,949 +0,0 @@
-# Automate image updates to Git
-
-This guide walks you through configuring container image scanning and deployment rollouts with Flux.
-
-For a container image you can configure Flux to:
-
-- scan the container registry and fetch the image tags
-- select the latest tag based on the defined policy (semver, calver, regex)
-- replace the tag in Kubernetes manifests (YAML format)
-- check out a branch, commit and push the changes to the remote Git repository
-- apply the changes in-cluster and roll out the container image
-
-!!! warning "Alpha version"
-    Note that the image update feature is currently alpha,
-    see the [roadmap](../roadmap/index.md) for more details.
-
-For production environments, this feature allows you to automatically deploy application patches
-(CVEs and bug fixes), and keep a record of all deployments in Git history.
-
-**Production CI/CD workflow**
-
-* DEV: push a bug fix to the app repository
-* DEV: bump the patch version and release e.g. `v1.0.1`
-* CI: build and push a container image tagged as `registry.domain/org/app:v1.0.1`
-* CD: pull the latest image metadata from the app registry (Flux image scanning)
-* CD: update the image tag in the app manifest to `v1.0.1` (Flux cluster to Git reconciliation)
-* CD: deploy `v1.0.1` to production clusters (Flux Git to cluster reconciliation)
-
-For staging environments, this feature allows you to deploy the latest build of a branch,
-without having to manually edit the app deployment manifest in Git.
-
-**Staging CI/CD workflow**
-
-* DEV: push code changes to the app repository `main` branch
-* CI: build and push a container image tagged as `${GIT_BRANCH}-${GIT_SHA:0:7}-$(date +%s)`
-* CD: pull the latest image metadata from the app registry (Flux image scanning)
-* CD: update the image tag in the app manifest to `main-2d3fcbd-1611906956` (Flux cluster to Git reconciliation)
-* CD: deploy `main-2d3fcbd-1611906956` to staging clusters (Flux Git to cluster reconciliation)
-
-## Prerequisites
-
-You will need a Kubernetes cluster version 1.16 or newer and kubectl version 1.18 or newer.
-For a quick local test, you can use [Kubernetes kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
-Any other Kubernetes setup will work as well.
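-
-For example, a throwaway kind cluster can be created with (the cluster name is arbitrary):
-
-```sh
-kind create cluster --name flux-image-updates
-```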
-
-In order to follow the guide you'll need a GitHub account and a
-[personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line)
-that can create repositories (check all permissions under `repo`).
-
-Export your GitHub personal access token and username:
-
-```sh
-export GITHUB_TOKEN=<your-token>
-export GITHUB_USER=<your-username>
-```
-
-## Install Flux
-
-!!! hint "Enable image automation components"
-    If you bootstrapped Flux before without the `--components-extra=` argument, you need to add
-    `--components-extra=image-reflector-controller,image-automation-controller` to your
-    bootstrapping routine, as the image automation components are not installed by default.
-
-Install Flux with the image automation components:
-
-```sh
-flux bootstrap github \
-  --components-extra=image-reflector-controller,image-automation-controller \
-  --owner=$GITHUB_USER \
-  --repository=flux-image-updates \
-  --branch=main \
-  --path=clusters/my-cluster \
-  --token-auth \
-  --personal
-```
-
-The bootstrap command creates a repository if one doesn't exist, and commits the manifests for the
-Flux components to the default branch at the specified path. It then configures the target cluster to
-synchronize with the specified path inside the repository.
-
-!!! hint "GitLab and other Git platforms"
-    You can install Flux and bootstrap repositories hosted on GitLab, BitBucket, Azure DevOps and
-    any other Git provider that supports SSH or token-based authentication.
-    When using SSH, make sure the deploy key is configured with write access.
-    Please see the [installation guide](installation.md) for more details.
-
-## Deploy a demo app
-
-We'll be using a tiny webapp called [podinfo](https://github.com/stefanprodan/podinfo) to
-showcase the image update feature.
-
-Clone your repository with:
-
-```sh
-git clone https://github.com/$GITHUB_USER/flux-image-updates
-cd flux-image-updates
-```
-
-Add the podinfo Kubernetes deployment file inside `clusters/my-cluster`:
-
-```sh
-curl -sL https://raw.githubusercontent.com/stefanprodan/podinfo/5.0.0/kustomize/deployment.yaml \
-> ./clusters/my-cluster/podinfo-deployment.yaml
-```
-
-Commit and push changes to main branch:
-
-```sh
-git add -A && \
-git commit -m "add podinfo deployment" && \
-git push origin main
-```
-
-Tell Flux to pull and apply the changes, or wait one minute for Flux to detect the changes on its own:
-
-```sh
-flux reconcile kustomization flux-system --with-source
-```
-
-Print the podinfo image deployed on your cluster:
-
-```console
-$ kubectl get deployment/podinfo -oyaml | grep 'image:'
-image: ghcr.io/stefanprodan/podinfo:5.0.0
-```
-
-## Configure image scanning
-
-Create an `ImageRepository` to tell Flux which container registry to scan for new tags:
-
-```sh
-flux create image repository podinfo \
---image=ghcr.io/stefanprodan/podinfo \
---interval=1m \
---export > ./clusters/my-cluster/podinfo-registry.yaml
-```
-
-The above command generates the following manifest:
-
-```yaml
-apiVersion: image.toolkit.fluxcd.io/v1alpha2
-kind: ImageRepository
-metadata:
-  name: podinfo
-  namespace: flux-system
-spec:
-  image: ghcr.io/stefanprodan/podinfo
-  interval: 1m0s
-```
-
-For private images, you can create a Kubernetes secret
-in the same namespace as the `ImageRepository` with
-`kubectl create secret docker-registry`.
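-
-A sketch of creating such a secret, assuming the illustrative secret name `regcred` that is referenced below (replace the server and credentials with your own):
-
-```sh
-kubectl -n flux-system create secret docker-registry regcred \
-  --docker-server=<your-registry-server> \
-  --docker-username=<your-username> \
-  --docker-password=<your-token>
-```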
Then you can configure -Flux to use the credentials by referencing the Kubernetes secret -in the `ImageRepository`: - -```yaml -kind: ImageRepository -spec: - secretRef: - name: regcred -``` - -!!! hint "Storing secrets in Git" - Note that if you want to store the image pull secret in Git, you can encrypt - the manifest with [Mozilla SOPS](mozilla-sops.md) or [Sealed Secrets](sealed-secrets.md). - -Create an `ImagePolicy` to tell Flux which semver range to use when filtering tags: - -```sh -flux create image policy podinfo \ ---image-ref=podinfo \ ---select-semver=5.0.x \ ---export > ./clusters/my-cluster/podinfo-policy.yaml -``` - -The above command generates the following manifest: - -```yaml -apiVersion: image.toolkit.fluxcd.io/v1alpha2 -kind: ImagePolicy -metadata: - name: podinfo - namespace: flux-system -spec: - imageRepositoryRef: - name: podinfo - policy: - semver: - range: 5.0.x -``` - -!!! hint "semver ranges" - A semver range that includes stable releases can be defined with - `1.0.x` (patch versions only) or `>=1.0.0 <2.0.0` (minor and patch versions). - If you want to include pre-release e.g. `1.0.0-rc.1`, - you can define a range like: `^1.x-0` or `>1.0.0-rc <2.0.0-rc`. - -!!! hint "Other policy examples" - For policies that make use of CalVer, build IDs or alphabetical sorting, - have a look at [the examples](../components/image/imagepolicies.md#examples). - -Commit and push changes to main branch: - -```sh -git add -A && \ -git commit -m "add podinfo image scan" && \ -git push origin main -``` - -Tell Flux to pull and apply changes: - -```sh -flux reconcile kustomization flux-system --with-source -``` - -Wait for Flux to fetch the image tag list from GitHub container registry: - -```console -$ flux get image repository podinfo -NAME READY MESSAGE LAST SCAN -podinfo True successful scan, found 13 tags 2020-12-13T17:51:48+02:00 -``` - -Find which image tag matches the policy semver range with: - -```console -$ flux get image policy podinfo -NAME READY MESSAGE -podinfo True Latest image tag for 'ghcr.io/stefanprodan/podinfo' resolved to: 5.0.3 -``` - -## Configure image updates - -Edit the `podinfo-deployment.yaml` and add a marker to tell Flux which policy to use when updating the container image: - -```yaml -spec: - containers: - - name: podinfod - image: ghcr.io/stefanprodan/podinfo:5.0.0 # {"$imagepolicy": "flux-system:podinfo"} -``` - -Create an `ImageUpdateAutomation` to tell Flux which Git repository to write image updates to: - -```sh -flux create image update flux-system \ ---git-repo-ref=flux-system \ ---git-repo-path="./clusters/my-cluster" \ ---checkout-branch=main \ ---push-branch=main \ ---author-name=fluxcdbot \ ---author-email=fluxcdbot@users.noreply.github.com \ ---commit-template="{{range .Updated.Images}}{{println .}}{{end}}" \ ---export > ./clusters/my-cluster/flux-system-automation.yaml -``` - -The above command generates the following manifest: - -```yaml -apiVersion: image.toolkit.fluxcd.io/v1alpha2 -kind: ImageUpdateAutomation -metadata: - name: flux-system - namespace: flux-system -spec: - interval: 1m0s - sourceRef: - kind: GitRepository - name: flux-system - git: - checkout: - ref: - branch: main - commit: - author: - email: fluxcdbot@users.noreply.github.com - name: fluxcdbot - messageTemplate: '{{range .Updated.Images}}{{println .}}{{end}}' - push: - branch: main - update: - path: ./clusters/my-cluster - strategy: Setters -``` - -Commit and push changes to main branch: - -```sh -git add -A && \ -git commit -m "add image updates automation" && \ 
-git push origin main
-```
-
-Note that the `ImageUpdateAutomation` runs all the policies found in its namespace at the specified interval.
-
-Tell Flux to pull and apply changes:
-
-```sh
-flux reconcile kustomization flux-system --with-source
-```
-
-In a couple of seconds, Flux will push a commit to your repository with
-the latest image tag that matches the podinfo policy:
-
-```console
-$ git pull && cat clusters/my-cluster/podinfo-deployment.yaml | grep "image:"
-image: ghcr.io/stefanprodan/podinfo:5.0.3 # {"$imagepolicy": "flux-system:podinfo"}
-```
-
-Wait for Flux to apply the latest commit on the cluster and verify that podinfo was updated to `5.0.3`:
-
-```console
-$ watch "kubectl get deployment/podinfo -oyaml | grep 'image:'"
-image: ghcr.io/stefanprodan/podinfo:5.0.3
-```
-
-You can check the status of the image automation objects with:
-
-```sh
-flux get images all --all-namespaces
-```
-
-## Configure image update for custom resources
-
-Besides Kubernetes native kinds (Deployment, StatefulSet, DaemonSet, CronJob),
-Flux can be used to patch image tags in any Kubernetes custom resource stored in Git.
-
-The image policy marker format is:
-
-* `{"$imagepolicy": "<policy-namespace>:<policy-name>"}`
-* `{"$imagepolicy": "<policy-namespace>:<policy-name>:tag"}`
-
-`HelmRelease` example:
-
-```yaml
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: podinfo
-  namespace: default
-spec:
-  values:
-    image:
-      repository: ghcr.io/stefanprodan/podinfo
-      tag: 5.0.0 # {"$imagepolicy": "flux-system:podinfo:tag"}
-```
-
-Tekton `Task` example:
-
-```yaml
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
-  name: golang
-  namespace: default
-spec:
-  steps:
-    - name: golang
-      image: docker.io/golang:1.15.6 # {"$imagepolicy": "flux-system:golang"}
-```
-
-Flux `Kustomization` example:
-
-```yaml
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
-kind: Kustomization
-metadata:
-  name: podinfo
-  namespace: default
-spec:
-  images:
-    - name: ghcr.io/stefanprodan/podinfo
-      newName: ghcr.io/stefanprodan/podinfo
-      newTag: 5.0.0 # {"$imagepolicy": "flux-system:podinfo:tag"}
-```
-
-Kustomize config (`kustomization.yaml`) example:
-
-```yaml
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-- deployment.yaml
-images:
-- name: ghcr.io/stefanprodan/podinfo
-  newName: ghcr.io/stefanprodan/podinfo
-  newTag: 5.0.0 # {"$imagepolicy": "flux-system:podinfo:tag"}
-```
-
-## Push updates to a different branch
-
-With `.spec.git.push.branch` you can configure Flux to push the image updates to a different branch
-than the one used for checkout. If the specified branch doesn't exist, Flux will create it for you.
-
-```yaml
-kind: ImageUpdateAutomation
-metadata:
-  name: flux-system
-spec:
-  git:
-    checkout:
-      ref:
-        branch: main
-    push:
-      branch: flux-image-updates
-```
-
-You can use CI automation, e.g. a GitHub Action such as
-[create-pull-request](https://github.com/peter-evans/create-pull-request),
-to open a pull request against the checkout branch.
-
-This way you can manually approve the image updates before they are applied on your clusters.
-
-## Configure the commit message
-
-The `.spec.git.commit.messageTemplate` field is a string which is used as a template for the commit message.
- -The message template is a [Go text template](https://golang.org/pkg/text/template/) that -lets you range over the objects and images e.g.: - -```yaml -kind: ImageUpdateAutomation -metadata: - name: flux-system -spec: - git: - commit: - messageTemplate: | - Automated image update - - Automation name: {{ .AutomationObject }} - - Files: - {{ range $filename, $_ := .Updated.Files -}} - - {{ $filename }} - {{ end -}} - - Objects: - {{ range $resource, $_ := .Updated.Objects -}} - - {{ $resource.Kind }} {{ $resource.Name }} - {{ end -}} - - Images: - {{ range .Updated.Images -}} - - {{.}} - {{ end -}} - author: - email: fluxcdbot@users.noreply.github.com - name: fluxcdbot -``` - -## Trigger image updates with webhooks - -You may want to trigger a deployment -as soon as a new image tag is pushed to your container registry. -In order to notify the image-reflector-controller about new images, -you can [setup webhook receivers](webhook-receivers.md). - -First generate a random string and create a secret with a `token` field: - -```sh -TOKEN=$(head -c 12 /dev/urandom | shasum | cut -d ' ' -f1) -echo $TOKEN - -kubectl -n flux-system create secret generic webhook-token \ ---from-literal=token=$TOKEN -``` - -Define a receiver for DockerHub: - -```yaml -apiVersion: notification.toolkit.fluxcd.io/v1beta1 -kind: Receiver -metadata: - name: podinfo - namespace: flux-system -spec: - type: dockerhub - secretRef: - name: webhook-token - resources: - - kind: ImageRepository - name: podinfo -``` - -The notification-controller generates a unique URL using the provided token and the receiver name/namespace. - -Find the URL with: - -```console -$ kubectl -n flux-system get receiver/podinfo - -NAME READY STATUS -podinfo True Receiver initialised with URL: /hook/bed6d00b5555b1603e1f59b94d7fdbca58089cb5663633fb83f2815dc626d92b -``` - -Log in to DockerHub web interface, go to your image registry Settings and select Webhooks. -Fill the form "Webhook URL" by composing the address using the receiver -LB and the generated URL `http:///`. - -!!! hint "Note" - Besides DockerHub, you can define receivers for **Harbor**, **Quay**, **Nexus**, **GCR**, - and any other system that supports webhooks e.g. GitHub Actions, Jenkins, CircleCI, etc. - See the [Receiver CRD docs](../components/notification/receiver.md) for more details. - -## Incident management - -### Suspend automation - -During an incident you may wish to stop Flux from pushing image updates to Git. - -You can suspend the image automation directly in-cluster: - -```sh -flux suspend image update flux-system -``` - -Or by editing the `ImageUpdateAutomation` manifest in Git: - -```yaml -kind: ImageUpdateAutomation -metadata: - name: flux-system - namespace: flux-system -spec: - suspend: true -``` - -Once the incident is resolved, you can resume automation with: - -```sh -flux resume image update flux-system -``` - -If you wish to pause the automation for a particular image only, -you can suspend/resume the image scanning: - -```sh -flux suspend image repository podinfo -``` - -### Revert image updates - -Assuming you've configured Flux to update an app to its latest stable version: - -```sh -flux create image policy podinfo \ ---image-ref=podinfo \ ---select-semver=">=5.0.0" -``` - -If the latest version e.g. `5.0.1` causes an incident in production, you can tell Flux to -revert the image tag to a previous version e.g. 
`5.0.0` with: - -```sh -flux create image policy podinfo \ ---image-ref=podinfo \ ---select-semver=5.0.0 -``` - -Or by changing the semver range in Git: - -```yaml -kind: ImagePolicy -metadata: - name: podinfo - namespace: flux-system -spec: - policy: - semver: - range: 5.0.0 -``` - -Based on the above configuration, Flux will patch the podinfo deployment manifest in Git -and roll out `5.0.0` in-cluster. - -When a new version is available e.g. `5.0.2`, you can update the policy once more -and tell Flux to consider only versions greater than `5.0.1`: - -```sh -flux create image policy podinfo \ ---image-ref=podinfo \ ---select-semver=">5.0.1" -``` - -## ImageRepository cloud providers authentication - -If relying on a cloud provider image repository, you might need to do some extra -work in order to configure the ImageRepository resource credentials. Here are -some common examples for the most popular cloud provider docker registries. - -!!! warning "Workarounds" - The examples below are intended as workaround solutions until native - authentication mechanisms are implemented in Flux itself to support this in - a more straightforward manner. - -### AWS Elastic Container Registry - -The registry authentication credentials for ECR expire every 12 hours. -Considering this limitation, one needs to ensure the credentials are being -refreshed before expiration so that the controller can rely on them for -authentication. - -The solution proposed is to create a cronjob that runs every 6 hours which would -re-create the `docker-registry` secret using a new token. - -Edit and save the following snippet to a file -`./clusters/my-cluster/ecr-sync.yaml`, commit and push it to git. - -```yaml -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: ecr-credentials-sync - namespace: flux-system -rules: -- apiGroups: [""] - resources: - - secrets - verbs: - - delete - - create ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: ecr-credentials-sync - namespace: flux-system -subjects: -- kind: ServiceAccount - name: ecr-credentials-sync -roleRef: - kind: Role - name: ecr-credentials-sync - apiGroup: "" ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ecr-credentials-sync - namespace: flux-system - # Uncomment and edit if using IRSA - # annotations: - # eks.amazonaws.com/role-arn: ---- -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: ecr-credentials-sync - namespace: flux-system -spec: - suspend: false - schedule: 0 */6 * * * - failedJobsHistoryLimit: 1 - successfulJobsHistoryLimit: 1 - jobTemplate: - spec: - template: - spec: - serviceAccountName: ecr-credentials-sync - restartPolicy: Never - volumes: - - name: token - emptyDir: - medium: Memory - initContainers: - - image: amazon/aws-cli - name: get-token - imagePullPolicy: IfNotPresent - # You will need to set the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables if not using - # IRSA. It is recommended to store the values in a Secret and load them in the container using envFrom. 
-            # envFrom:
-            #  - secretRef:
-            #      name: aws-credentials
-            env:
-            - name: REGION
-              value: us-east-1 # change this if ECR repo is in a different region
-            volumeMounts:
-            - mountPath: /token
-              name: token
-            command:
-            - /bin/sh
-            - -ce
-            - aws ecr get-login-password --region ${REGION} > /token/ecr-token
-          containers:
-          - image: bitnami/kubectl
-            name: create-secret
-            imagePullPolicy: IfNotPresent
-            env:
-            - name: SECRET_NAME
-              value: ecr-credentials
-            - name: ECR_REGISTRY
-              value: <account-id>.dkr.ecr.<region>.amazonaws.com # fill in the account id and region
-            volumeMounts:
-            - mountPath: /token
-              name: token
-            command:
-            - /bin/bash
-            - -ce
-            - |-
-              kubectl delete secret --ignore-not-found $SECRET_NAME
-              kubectl create secret docker-registry $SECRET_NAME \
-                --docker-server="$ECR_REGISTRY" \
-                --docker-username=AWS \
-                --docker-password="$(cat /token/ecr-token)"
-```
-
-### GCP Container Registry
-
-#### Using access token [short-lived]
-
-The GCR access token is also short-lived, so the credentials need to be refreshed on a schedule. The solution mirrors the ECR one above: a cronjob re-creates the `docker-registry` secret every 45 minutes using a freshly minted access token.
-
-```yaml
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: gcr-credentials-sync
-  namespace: flux-system
-rules:
-- apiGroups: [""]
-  resources:
-  - secrets
-  verbs:
-  - delete
-  - create
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: gcr-credentials-sync
-  namespace: flux-system
-subjects:
-- kind: ServiceAccount
-  name: gcr-credentials-sync
-roleRef:
-  kind: Role
-  name: gcr-credentials-sync
-  apiGroup: ""
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  annotations:
-    iam.gke.io/gcp-service-account: <name>@<project-id>.iam.gserviceaccount.com # fill in the GCP service account bound via Workload Identity
-  name: gcr-credentials-sync
-  namespace: flux-system
----
-apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
-  name: gcr-credentials-sync
-  namespace: flux-system
-spec:
-  suspend: false
-  schedule: "*/45 * * * *"
-  failedJobsHistoryLimit: 1
-  successfulJobsHistoryLimit: 1
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          serviceAccountName: gcr-credentials-sync
-          restartPolicy: Never
-          containers:
-          - image: google/cloud-sdk
-            name: create-secret
-            imagePullPolicy: IfNotPresent
-            env:
-            - name: SECRET_NAME
-              value: gcr-credentials
-            - name: GCR_REGISTRY
-              value: # fill in the registry name e.g gcr.io, eu.gcr.io
-            command:
-            - /bin/bash
-            - -ce
-            - |-
-              kubectl delete secret --ignore-not-found $SECRET_NAME
-              kubectl create secret docker-registry $SECRET_NAME \
-                --docker-server="$GCR_REGISTRY" \
-                --docker-username=oauth2accesstoken \
-                --docker-password="$(gcloud auth print-access-token)"
-```
-
-Since the cronjob will not create a job right away, after applying the manifest,
-you can manually create an init job using the following command:
-
-```console
-$ kubectl create job --from=cronjob/gcr-credentials-sync -n flux-system gcr-credentials-sync-init
-```
-
-After the job runs, a secret named `gcr-credentials` should be created. Use this
-name in your GCR ImageRepository resource manifest as the value for
-`.spec.secretRef.name`.
-
-```yaml
-spec:
-  secretRef:
-    name: gcr-credentials
-```
-
-#### Using a JSON key [long-lived]
-
-!!! warning "Less secure option"
-    From [Google documentation on authenticating container registry](https://cloud.google.com/container-registry/docs/advanced-authentication#json-key)
-    > A user-managed key-pair that you can use as a credential for a service account.
-    > Because the credential is long-lived, it is the least secure option of all the available authentication methods.
-    > When possible, use an access token or another available authentication method to reduce the risk of
-    > unauthorized access to your artifacts. If you must use a service account key,
-    > ensure that you follow best practices for managing credentials.
-
-A JSON key doesn't expire, so we don't need a cronjob;
-we just need to create the secret and reference it in the `ImageRepository`.
-
-First, create a JSON key file by following this
-[documentation](https://cloud.google.com/container-registry/docs/advanced-authentication).
-Grant the service account the role of `Container Registry Service Agent`
-so that it can access GCR, and download the JSON file.
-
-Then create a secret, encrypt it using [Mozilla SOPS](mozilla-sops.md)
-or [Sealed Secrets](sealed-secrets.md), commit and push the encrypted file to git.
-
-```sh
-# <your-registry-server> is e.g. gcr.io
-kubectl create secret docker-registry <secret-name> \
-  --docker-server=<your-registry-server> \
-  --docker-username=_json_key \
-  --docker-password="$(cat <your-json-key-file>)"
-```
-
-### Azure Container Registry
-
-AKS clusters are not able to pull and run images from ACR by default.
-Read [Integrating AKS with ACR](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration) as a potential pre-requisite
-before integrating Flux `ImageRepositories` with ACR.
-
-Note that the resulting ImagePullSecret for Flux could also be specified by Pods within the same Namespace to pull and run ACR images.
-
-#### Generating Tokens for Managed Identities [short-lived]
-
-As a pre-requisite, your AKS cluster will need [AAD Pod Identity](../use-cases/azure.md#aad-pod-identity) installed.
-
-Once we have AAD Pod Identity installed, we can create a Deployment that frequently refreshes an image pull secret into
-our desired Namespace.
-
-Create a directory in your control repository and save this `kustomization.yaml`:
-
-```yaml
-# kustomization.yaml
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-- https://github.com/fluxcd/flux2/manifests/integrations/registry-credentials-sync/azure?ref=main
-patchesStrategicMerge:
-- config-patches.yaml
-```
-
-Save and configure the following patch, noting the instructional comments for configuring matching Azure resources:
-
-```yaml
-# config-patches.yaml
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: credentials-sync
-data:
-  ACR_NAME: my-registry
-  KUBE_SECRET: my-registry # does not yet exist -- will be created in the same Namespace
-  SYNC_PERIOD: "3600" # ACR tokens expire every 3 hours; refresh faster than that
-
-# Create an identity in Azure and assign it a role to pull from ACR (note: the identity's resourceGroup should match the desired ACR):
-#   az identity create -n acr-sync
-#   az role assignment create --role AcrPull --assignee-object-id "$(az identity show -n acr-sync -o tsv --query principalId)"
-# Fetch the clientID and resourceID to configure the AzureIdentity spec below:
-#   az identity show -n acr-sync -otsv --query clientId
-#   az identity show -n acr-sync -otsv --query resourceId
----
-apiVersion: aadpodidentity.k8s.io/v1
-kind: AzureIdentity
-metadata:
-  name: credentials-sync # name must match the stub-resource in az-identity.yaml
-  namespace: flux-system
-spec:
-  clientID: 4ceaa448-d7b9-4a80-8f32-497eaf3d3287
-  resourceID: /subscriptions/8c69185e-55f9-4d00-8e71-a1b1bb1386a1/resourcegroups/stealthybox/providers/Microsoft.ManagedIdentity/userAssignedIdentities/acr-sync
-  type: 0 # user-managed identity
-```
-
-Verify that `kustomize build .` works, then commit the directory to your control repo.
-Flux will apply the Deployment, and the Pod will use its AAD managed identity to regularly fetch ACR tokens into your configured `KUBE_SECRET`.
-Reference the `KUBE_SECRET` value from any `ImageRepository` objects for that ACR registry.
-
-This example uses the `fluxcd/flux2` GitHub archive as a remote base, but you may copy the [./manifests/integrations/registry-credentials-sync/azure](https://github.com/fluxcd/flux2/tree/main/manifests/integrations/registry-credentials-sync/azure)
-folder into your own repository, or use a git submodule to vendor it if preferred.
-
-#### Using Static Credentials [long-lived]
-
-!!! info
-    Using a static credential requires a Secrets management solution compatible with your GitOps workflow.
-
-Follow the official Azure documentation for [Creating an Image Pull Secret for ACR](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-auth-kubernetes).
-
-Instead of creating the Secret directly in your Kubernetes cluster, encrypt it using [Mozilla SOPS](mozilla-sops.md)
-or [Sealed Secrets](sealed-secrets.md), then commit and push the encrypted file to git.
-
-This Secret should be in the same Namespace as your Flux `ImageRepository` object.
-Update the `ImageRepository.spec.secretRef` to point to it.
-
-It is also possible to create [Repository Scoped Tokens](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-repository-scoped-permissions).
-
-!!! warning
-    Repository Scoped Tokens are in preview and do have limitations.
diff --git a/docs/guides/installation.md b/docs/guides/installation.md
deleted file mode 100644
index 496c53a8..00000000
--- a/docs/guides/installation.md
+++ /dev/null
@@ -1,570 +0,0 @@
-# Installation
-
-This guide walks you through setting up Flux v2 (hereafter: "Flux") to
-manage one or more Kubernetes clusters.
-
-## Prerequisites
-
-You will need a Kubernetes cluster version **1.16** or newer
-and kubectl version **1.18** or newer.
-
-## Install the Flux CLI
-
-With Homebrew:
-
-```sh
-brew install fluxcd/tap/flux
-```
-
-With Bash:
-
-```sh
-curl -s https://fluxcd.io/install.sh | sudo bash
-
-# enable completions in ~/.bash_profile
-. <(flux completion bash)
-```
-
-Command-line completions for `zsh`, `fish`, and `powershell`
-are also supported with their own sub-commands.
-
-Binaries for macOS, Windows and Linux AMD64/ARM are available for download on the
-[release page](https://github.com/fluxcd/flux2/releases).
-
-A container image with `kubectl` and `flux` is available on DockerHub and GitHub:
-
-* `docker.io/fluxcd/flux-cli:<version>`
-* `ghcr.io/fluxcd/flux-cli:<version>`
-
-Verify that your cluster satisfies the prerequisites with:
-
-```sh
-flux check --pre
-```
-
-## Bootstrap
-
-Using the `flux bootstrap` command you can install Flux on a
-Kubernetes cluster and configure it to manage itself from a Git
-repository.
-If the Flux components are present on the cluster, the bootstrap
-command will perform an upgrade if needed. The bootstrap is
-idempotent; it's safe to run the command as many times as you want.
-
-The Flux component images are published to DockerHub and GitHub Container Registry
-as [multi-arch container images](https://docs.docker.com/docker-for-mac/multi-arch/)
-with support for Linux `amd64`, `arm64` and `armv7` (e.g. 32-bit Raspberry Pi)
-architectures.
-
-If your Git provider is **GitHub**, **GitLab** or **Azure DevOps**, please follow the specific bootstrap procedure:
-
-* [GitHub.com and GitHub Enterprise](#github-and-github-enterprise)
-* [GitLab.com and GitLab Enterprise](#gitlab-and-gitlab-enterprise)
-* [Azure DevOps](../use-cases/azure.md#flux-installation-for-azure-devops)
-
-### Generic Git Server
-
-The `bootstrap git` command takes an existing Git repository, clones it and
-commits the Flux components manifests to the specified branch. Then it
-configures the target cluster to synchronize with that repository.
-
-Run bootstrap for a Git repository and authenticate with your SSH agent:
-
-```sh
-flux bootstrap git \
-  --url=ssh://git@<host>/<org>/<repository> \
-  --branch=<my-branch> \
-  --path=clusters/my-cluster
-```
-
-The above command will generate an SSH key (defaults to RSA 2048, but this can be changed with `--ssh-key-algorithm`),
-and it will prompt you to add the SSH public key as a deploy key to your repository.
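-
-For example, to have bootstrap generate an ECDSA key instead of the default RSA (the accepted values of `--ssh-key-algorithm` can be listed with `flux bootstrap git --help`; `ecdsa` is assumed to be one of them here):
-
-```sh
-flux bootstrap git \
-  --url=ssh://git@<host>/<org>/<repository> \
-  --branch=<my-branch> \
-  --path=clusters/my-cluster \
-  --ssh-key-algorithm=ecdsa
-```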
- -If you want to use your own SSH key, you can provide a **passwordless** private key using -`--private-key-file=`. -This option can also be used if no SSH agent is available on your machine. - -!!! hint "Bootstrap options" - There are many options available when bootstrapping Flux, such as installing a subset of Flux components, - setting the Kubernetes context, changing the Git author name and email, enabling Git submodules, and more. - To list all the available options run `flux bootstrap git --help`. - -If your Git server doesn't support SSH, you can run bootstrap for Git over HTTPS: - -```sh -flux bootstrap git \ - --url=https://// \ - --username= \ - --password= \ - --token-auth=true \ - --path=clusters/my-cluster -``` - -If your Git server uses a self-signed TLS certificate, you can specify the CA file with -`--ca-file=`. - -With `--path` you can configure the directory which will be used to reconcile the target cluster. -To control multiple clusters from the same Git repository, you have to set a unique path per -cluster e.g. `clusters/staging` and `clusters/production`: - -```sh -./clusters/ -├── staging # <- path=clusters/staging -│   └── flux-system # <- namespace dir generated by bootstrap -│   ├── gotk-components.yaml -│   ├── gotk-sync.yaml -│   └── kustomization.yaml -└── production # <- path=clusters/production - └── flux-system -``` - -After running bootstrap you can place Kubernetes YAMLs inside a dir under path -e.g. `clusters/staging/my-app`, and Flux will reconcile them on your cluster. - -For examples on how you can structure your Git repository see: - -* [flux2-kustomize-helm-example](https://github.com/fluxcd/flux2-kustomize-helm-example) -* [flux2-multi-tenancy](https://github.com/fluxcd/flux2-multi-tenancy) - -### GitHub and GitHub Enterprise - -The `bootstrap github` command creates a GitHub repository if one doesn't exist and -commits the Flux components manifests to specified branch. Then it -configures the target cluster to synchronize with that repository by -setting up a SSH deploy key or by using token-based authentication. - -Generate a [personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line) -that can create repositories by checking all permissions under `repo`. - -Export your GitHub personal access token as an environment variable: - -```sh -export GITHUB_TOKEN= -``` - -Run the bootstrap for a repository on your personal GitHub account: - -```sh -flux bootstrap github \ - --owner=my-github-username \ - --repository=my-repository \ - --path=clusters/my-cluster \ - --personal -``` - -!!! hint "Deploy key" - The bootstrap command creates an SSH key which it stores as a secret in the - Kubernetes cluster. The key is also used to create a deploy key in the GitHub - repository. The new deploy key will be linked to the personal access token used - to authenticate. **Removing the personal access token will also remove the deploy key.** - -Run the bootstrap for a repository owned by a GitHub organization: - -```sh -flux bootstrap github \ - --owner=my-github-organization \ - --repository=my-repository \ - --team=team1-slug \ - --team=team2-slug \ - --path=clusters/my-cluster -``` - -When you specify a list of teams, those teams will be granted maintainer access to the repository. 
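Whichever bootstrap variant you run, you can confirm afterwards that the components came up healthy; a quick check, assuming the default `flux-system` namespace:

```sh
# verify the Flux components after bootstrap
flux check
kubectl -n flux-system get pods
```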
- -To run the bootstrap for a repository hosted on GitHub Enterprise, you have to specify your GitHub hostname: - -```sh -flux bootstrap github \ - --hostname=my-github-enterprise.com \ - --ssh-hostname=my-github-enterprise.com \ - --owner=my-github-organization \ - --repository=my-repository \ - --branch=main \ - --path=clusters/my-cluster -``` - -If your GitHub Enterprise has SSH access disabled, you can use HTTPS and token authentication with: - -```sh -flux bootstrap github \ - --token-auth \ - --hostname=my-github-enterprise.com \ - --owner=my-github-organization \ - --repository=my-repository \ - --branch=main \ - --path=clusters/my-cluster -``` - -### GitLab and GitLab Enterprise - -The `bootstrap gitlab` command creates a GitLab repository if one doesn't exist and -commits the Flux components manifests to specified branch. Then it -configures the target cluster to synchronize with that repository by -setting up a SSH deploy key or by using token-based authentication. - -Generate a [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) -that grants complete read/write access to the GitLab API. - -Export your GitLab personal access token as an environment variable: - -```sh -export GITLAB_TOKEN= -``` - -Run the bootstrap for a repository on your personal GitLab account: - -```sh -flux bootstrap gitlab \ - --owner=my-gitlab-username \ - --repository=my-repository \ - --branch=master \ - --path=clusters/my-cluster \ - --token-auth \ - --personal -``` - -To run the bootstrap for a repository using deploy keys for authentication, you have to specify the SSH hostname: - -```sh -flux bootstrap gitlab \ - --ssh-hostname=gitlab.com \ - --owner=my-gitlab-username \ - --repository=my-repository \ - --branch=master \ - --path=clusters/my-cluster -``` - -!!! hint "Authentication" - When providing the `--ssh-hostname`, a read-only (SSH) deploy key will be added - to your repository, otherwise your GitLab personal token will be used to - authenticate against the HTTPS endpoint instead. - -Run the bootstrap for a repository owned by a GitLab (sub)group: - -```sh -flux bootstrap gitlab \ - --owner=my-gitlab-group/my-gitlab-subgroup \ - --repository=my-repository \ - --branch=master \ - --path=clusters/my-cluster -``` - -To run the bootstrap for a repository hosted on GitLab on-prem or enterprise, you have to specify your GitLab hostname: - -```sh -flux bootstrap gitlab \ - --hostname=my-gitlab.com \ - --token-auth \ - --owner=my-gitlab-group \ - --repository=my-repository \ - --branch=master \ - --path=clusters/my-cluster -``` - -### Air-gapped Environments - -To bootstrap Flux on air-gapped environments without access to github.com and ghcr.io, first you'll need -to download the `flux` binary, and the container images from a computer with access to internet. 
- -List all container images: - -```console -$ flux install --export | grep ghcr.io - -image: ghcr.io/fluxcd/helm-controller:v0.8.0 -image: ghcr.io/fluxcd/kustomize-controller:v0.9.0 -image: ghcr.io/fluxcd/notification-controller:v0.9.0 -image: ghcr.io/fluxcd/source-controller:v0.9.0 -``` - -Pull the images locally and push them to your container registry: - -```sh -docker pull ghcr.io/fluxcd/source-controller:v0.9.0 -docker tag ghcr.io/fluxcd/source-controller:v0.9.0 registry.internal/fluxcd/source-controller:v0.9.0 -docker push registry.internal/fluxcd/source-controller:v0.9.0 -``` - -Copy `flux` binary to a computer with access to your air-gapped cluster, -and create the pull secret in the `flux-system` namespace: - -```sh -kubectl create ns flux-system - -kubectl -n flux-system create secret generic regcred \ - --from-file=.dockerconfigjson=/.docker/config.json \ - --type=kubernetes.io/dockerconfigjson -``` - -Finally, bootstrap Flux using the images from your private registry: - -```sh -flux bootstrap \ - --registry=registry.internal/fluxcd \ - --image-pull-secret=regcred \ - --hostname=my-git-server.internal -``` - -Note that when running `flux bootstrap` without specifying a `--version`, -the CLI will use the manifests embedded in its binary instead of downloading -them from GitHub. You can determine which version you'll be installing, -with `flux --version`. - -## Bootstrap with Terraform - -The bootstrap procedure can be implemented with Terraform using the Flux provider published on -[registry.terraform.io](https://registry.terraform.io/providers/fluxcd/flux). - -The provider consists of two data sources (`flux_install` and `flux_sync`) for generating the -Kubernetes manifests that can be used to install or upgrade Flux: - -```hcl -data "flux_install" "main" { - target_path = "clusters/my-cluster" - network_policy = false - version = "latest" -} - -data "flux_sync" "main" { - target_path = "clusters/my-cluster" - url = "https://github.com/${var.github_owner}/${var.repository_name}" - branch = "main" -} -``` - -For more details on how to use the Terraform provider -please see [fluxcd/terraform-provider-flux](https://github.com/fluxcd/terraform-provider-flux). - -## Customize Flux manifests - -You can customize the Flux components before or after running bootstrap. - -Assuming you want to customise the Flux controllers before they get deployed on the cluster, -first you'll need to create a Git repository and clone it locally. 
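For example, a minimal sketch assuming a repository named `fleet-infra` (the host, org, and repository names are placeholders):

```sh
# clone the repository that will hold the customised manifests
git clone ssh://git@<host>/<org>/fleet-infra
cd fleet-infra
```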
Create the file structure required by bootstrap with:

```sh
mkdir -p clusters/my-cluster/flux-system
touch clusters/my-cluster/flux-system/gotk-components.yaml \
    clusters/my-cluster/flux-system/gotk-patches.yaml \
    clusters/my-cluster/flux-system/gotk-sync.yaml \
    clusters/my-cluster/flux-system/kustomization.yaml
```

Assuming you want to add custom annotations and labels to the Flux controllers,
edit `clusters/my-cluster/flux-system/gotk-patches.yaml` and set the metadata for the source-controller and kustomize-controller pods:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: source-controller
  namespace: flux-system
spec:
  template:
    metadata:
      annotations:
        custom: annotation
      labels:
        custom: label
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kustomize-controller
  namespace: flux-system
spec:
  template:
    metadata:
      annotations:
        custom: annotation
      labels:
        custom: label
```

Edit `clusters/my-cluster/flux-system/kustomization.yaml` and set the resources and patches:

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - gotk-components.yaml
  - gotk-sync.yaml
patchesStrategicMerge:
  - gotk-patches.yaml
```

Push the changes to the main branch:

```sh
git add -A && git commit -m "add flux customisations" && git push
```

Now run the bootstrap for `clusters/my-cluster`:

```sh
flux bootstrap git \
  --url=ssh://git@<host>/<org>/<repository> \
  --branch=main \
  --path=clusters/my-cluster
```

When the controllers are deployed for the first time on your cluster, they will contain all
the customisations from `gotk-patches.yaml`.

You can make changes to the patches after bootstrap and Flux will apply them in-cluster on its own.

## Dev install

For testing purposes you can install Flux without storing its manifests in a Git repository:

```sh
flux install
```

Or using kubectl:

```sh
kubectl apply -f https://github.com/fluxcd/flux2/releases/latest/download/install.yaml
```

Then you can register Git repositories and reconcile them on your cluster:

```sh
flux create source git podinfo \
  --url=https://github.com/stefanprodan/podinfo \
  --tag-semver=">=4.0.0" \
  --interval=1m

flux create kustomization podinfo-default \
  --source=podinfo \
  --path="./kustomize" \
  --prune=true \
  --validation=client \
  --interval=10m \
  --health-check="Deployment/podinfo.default" \
  --health-check-timeout=2m
```

You can register Helm repositories and create Helm releases:

```sh
flux create source helm bitnami \
  --interval=1h \
  --url=https://charts.bitnami.com/bitnami

flux create helmrelease nginx \
  --interval=1h \
  --release-name=nginx-ingress-controller \
  --target-namespace=kube-system \
  --source=HelmRepository/bitnami \
  --chart=nginx-ingress-controller \
  --chart-version="5.x.x"
```

## Upgrade

!!! note "Patch versions"
    It is safe and advised to use the latest PATCH version when upgrading to a
    new MINOR version.

Update the Flux CLI to the latest release with `brew upgrade fluxcd/tap/flux` or by
downloading the binary from [GitHub](https://github.com/fluxcd/flux2/releases).
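If you installed the CLI with the Bash install script shown earlier, re-running that same script downloads the latest binary in place:

```sh
curl -s https://fluxcd.io/install.sh | sudo bash
```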
- -Verify that you are running the latest version with: - -```sh -flux --version -``` - -### Bootstrap upgrade - -If you've used the [bootstrap](#bootstrap) procedure to deploy Flux, -then rerun the bootstrap command for each cluster using the same arguments as before: - -```sh -flux bootstrap github \ - --owner=my-github-username \ - --repository=my-repository \ - --branch=main \ - --path=clusters/my-cluster \ - --personal -``` - -The above command will clone the repository, it will update the components manifest in -`/flux-system/gotk-components.yaml` and it will push the changes to the remote branch. - -Tell Flux to pull the manifests from Git and upgrade itself with: - -```sh -flux reconcile source git flux-system -``` - -Verify that the controllers have been upgrade with: - -```sh -flux check -``` - -!!! hint "Automated upgrades" - You can automate the components manifest update with GitHub Actions - and open a PR when there is a new Flux version available. - For more details please see [Flux GitHub Action docs](https://github.com/fluxcd/flux2/tree/main/action). - -### Terraform upgrade - -Update the Flux provider to the [latest release](https://github.com/fluxcd/terraform-provider-flux/releases) -and run `terraform apply`. - -Tell Flux to upgrade itself in-cluster or wait for it to pull the latest commit from Git: - -```sh -kubectl annotate --overwrite gitrepository/flux-system reconcile.fluxcd.io/requestedAt="$(date +%s)" -``` - -### In-cluster upgrade - -If you've installed Flux directly on the cluster, then rerun the install command: - -```sh -flux install -``` - -The above command will apply the new manifests on your cluster. -You can verify that the controllers have been upgraded to the latest version with `flux check`. - -If you've installed Flux directly on the cluster with kubectl, -then rerun the command using the latest manifests from the `main` branch: - -```sh -kustomize build https://github.com/fluxcd/flux2/manifests/install?ref=main | kubectl apply -f- -``` - -## Uninstall - -You can uninstall Flux with: - -```sh -flux uninstall --namespace=flux-system -``` - -The above command performs the following operations: - -- deletes Flux components (deployments and services) -- deletes Flux network policies -- deletes Flux RBAC (service accounts, cluster roles and cluster role bindings) -- removes the Kubernetes finalizers from Flux custom resources -- deletes Flux custom resource definitions and custom resources -- deletes the namespace where Flux was installed - -If you've installed Flux in a namespace that you wish to preserve, you -can skip the namespace deletion with: - -```sh -flux uninstall --namespace=infra --keep-namespace -``` - -!!! hint - Note that the `uninstall` command will not remove any Kubernetes objects - or Helm releases that were reconciled on the cluster by Flux. diff --git a/docs/guides/monitoring.md b/docs/guides/monitoring.md deleted file mode 100644 index c02f24fe..00000000 --- a/docs/guides/monitoring.md +++ /dev/null @@ -1,112 +0,0 @@ -# Monitoring - -This guide walks you through configuring monitoring for the Flux control plane. 
- -Flux comes with a monitoring stack composed of: - -* **Prometheus** server - collects metrics from the toolkit controllers and stores them for 2h -* **Grafana** dashboards - displays the control plane resource usage and reconciliation stats - -## Install the monitoring stack - -To install the monitoring stack with `flux`, first register the toolkit Git repository on your cluster: - -```sh -flux create source git monitoring \ - --interval=30m \ - --url=https://github.com/fluxcd/flux2 \ - --branch=main -``` - -Then apply the [manifests/monitoring](https://github.com/fluxcd/flux2/tree/main/manifests/monitoring) -kustomization: - -```sh -flux create kustomization monitoring \ - --interval=1h \ - --prune=true \ - --source=monitoring \ - --path="./manifests/monitoring" \ - --health-check="Deployment/prometheus.flux-system" \ - --health-check="Deployment/grafana.flux-system" -``` - -You can access Grafana using port forwarding: - -```sh -kubectl -n flux-system port-forward svc/grafana 3000:3000 -``` - -## Grafana dashboards - -Control plane dashboard [http://localhost:3000/d/gitops-toolkit-control-plane](http://localhost:3000/d/gitops-toolkit-control-plane/gitops-toolkit-control-plane): - -![](../_files/cp-dashboard-p1.png) - -![](../_files/cp-dashboard-p2.png) - -Cluster reconciliation dashboard [http://localhost:3000/d/gitops-toolkit-cluster](http://localhost:3000/d/gitops-toolkit-cluster/gitops-toolkit-cluster-stats): - -![](../_files/cluster-dashboard.png) - -If you wish to use your own Prometheus and Grafana instances, then you can import the dashboards from -[GitHub](https://github.com/fluxcd/flux2/tree/main/manifests/monitoring/grafana/dashboards). - -!!! hint - Note that the toolkit controllers expose the `/metrics` endpoint on port `8080`. - When using Prometheus Operator you should create a `PodMonitor` object for each controller to configure scraping. - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: source-controller - namespace: flux-system -spec: - namespaceSelector: - matchNames: - - flux-system - selector: - matchLabels: - app: source-controller - podMetricsEndpoints: - - port: http-prom -``` - -## Metrics - -For each `toolkit.fluxcd.io` kind, -the controllers expose a gauge metric to track the Ready condition status, -and a histogram with the reconciliation duration in seconds. - -Ready status metrics: - -```sh -gotk_reconcile_condition{kind, name, namespace, type="Ready", status="True"} -gotk_reconcile_condition{kind, name, namespace, type="Ready", status="False"} -gotk_reconcile_condition{kind, name, namespace, type="Ready", status="Unknown"} -gotk_reconcile_condition{kind, name, namespace, type="Ready", status="Deleted"} -``` - -Time spent reconciling: - -``` -gotk_reconcile_duration_seconds_bucket{kind, name, namespace, le} -gotk_reconcile_duration_seconds_sum{kind, name, namespace} -gotk_reconcile_duration_seconds_count{kind, name, namespace} -``` - -Alert manager example: - -```yaml -groups: -- name: GitOpsToolkit - rules: - - alert: ReconciliationFailure - expr: max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) + on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) by (namespace, name, kind)) * 2 == 1 - for: 10m - labels: - severity: page - annotations: - summary: '{{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation has been failing for more than ten minutes.' 
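    # How the expr above works: the Ready=False series is 1 while a resource
    # is failing, and the "Deleted" series is multiplied by 2, so a deleted
    # resource sums to 3 and is filtered out by the == 1 comparison. Only
    # resources that are failing and still present evaluate to exactly 1,
    # which fires the alert.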
```

diff --git a/docs/guides/mozilla-sops.md b/docs/guides/mozilla-sops.md
deleted file mode 100644
index 4f3449a6..00000000
--- a/docs/guides/mozilla-sops.md
+++ /dev/null
@@ -1,393 +0,0 @@
# Manage Kubernetes secrets with Mozilla SOPS

In order to store secrets safely in a public or private Git repository, you can use
Mozilla's [SOPS](https://github.com/mozilla/sops) CLI to encrypt
Kubernetes secrets with OpenPGP, AWS KMS, GCP KMS and Azure Key Vault.

## Prerequisites

To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).

Install [gnupg](https://www.gnupg.org/) and [SOPS](https://github.com/mozilla/sops):

```sh
brew install gnupg sops
```

## Generate a GPG key

Generate a GPG/OpenPGP key with no passphrase (`%no-protection`):

```sh
export KEY_NAME="cluster0.yourdomain.com"
export KEY_COMMENT="flux secrets"

gpg --batch --full-generate-key <<EOF
%no-protection
Key-Type: 1
Key-Length: 4096
Subkey-Type: 1
Subkey-Length: 4096
Expire-Date: 0
Name-Comment: ${KEY_COMMENT}
Name-Real: ${KEY_NAME}
EOF
```

Retrieve the key fingerprint and export it as an environment variable
(it is referenced by the SOPS configuration below):

```sh
gpg --list-secret-keys "${KEY_NAME}"

export KEY_FP=<fingerprint>
```

Create a Kubernetes secret named `sops-gpg` in the `flux-system` namespace
with the private key, so kustomize-controller can perform decryption:

```sh
gpg --export-secret-keys --armor "${KEY_FP}" |
kubectl create secret generic sops-gpg \
--namespace=flux-system \
--from-file=sops.asc=/dev/stdin
```

Export the public key into the Git directory so team members can encrypt new files:

```sh
gpg --export --armor "${KEY_FP}" > ./clusters/cluster0/.sops.pub.asc
```

Check the file contents to ensure it's the public key before adding it to the repo and committing:

```sh
git add ./clusters/cluster0/.sops.pub.asc
git commit -am 'Share GPG public key for secrets generation'
```

Team members can then import this key when they pull the Git repository:

```sh
gpg --import ./clusters/cluster0/.sops.pub.asc
```

!!! hint
    The public key is sufficient for creating brand new files.
    The secret key is required for decrypting and editing existing files, because SOPS computes a MAC on all values.
    When using solely the public key to add or remove a field, the whole file should be deleted and recreated.

## Configure the Git directory for encryption

Write a [SOPS config file](https://github.com/mozilla/sops#using-sops-yaml-conf-to-select-kms-pgp-for-new-files)
to the specific cluster or namespace directory used
to store encrypted objects with this particular GPG key's fingerprint:

```sh
cat <<EOF > ./clusters/cluster0/.sops.yaml
creation_rules:
  - path_regex: .*.yaml
    encrypted_regex: ^(data|stringData)$
    pgp: ${KEY_FP}
EOF
```

This config applies recursively to all sub-directories.
Multiple directories can use separate SOPS configs.
Contributors using the `sops` CLI to create and encrypt files
won't have to worry about specifying the proper key for the target cluster or namespace.

`encrypted_regex` helps encrypt the `data` and `stringData` fields for Secrets.
You may wish to add other fields if you are encrypting other types of Objects.

!!! hint
    Note that you should encrypt only the `data` or `stringData` section. Encrypting the Kubernetes
    secret metadata, kind or apiVersion is not supported by kustomize-controller.

## Encrypt secrets

Generate a Kubernetes secret manifest with kubectl:

```sh
kubectl -n default create secret generic basic-auth \
--from-literal=user=admin \
--from-literal=password=change-me \
--dry-run=client \
-o yaml > basic-auth.yaml
```

Encrypt the secret with SOPS using your GPG key:

```sh
sops --encrypt --in-place basic-auth.yaml
```

You can now commit the encrypted secret to your Git repository.

!!! hint
    Note that you shouldn't apply the encrypted secrets onto the cluster with kubectl.
    SOPS encrypted secrets are designed to be consumed by kustomize-controller.
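To view or edit an encrypted file later, anyone holding the private key can use the `sops` CLI directly; a brief sketch:

```sh
# open the file in $EDITOR with the values transparently decrypted
sops basic-auth.yaml

# or print the decrypted manifest to stdout
sops --decrypt basic-auth.yaml
```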
### Using various cloud providers

When using AWS/GCP KMS, you don't have to include the GPG `secretRef` under
`spec.decryption` (you can skip the `--decryption-secret` flag when running `flux create kustomization`).
Instead, you'll have to bind an IAM role with access to the KMS
keys to the `kustomize-controller` service account of the `flux-system` namespace, so that
kustomize-controller is able to fetch keys from KMS.

#### AWS

Enable the [IAM OIDC provider](https://eksctl.io/usage/iamserviceaccounts/) on your EKS cluster:

```sh
eksctl utils associate-iam-oidc-provider --cluster=<clusterName>
```

Create an IAM Role with access to AWS KMS, e.g.:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": [
        "kms:Encrypt",
        "kms:Decrypt",
        "kms:ReEncrypt*",
        "kms:GenerateDataKey*",
        "kms:DescribeKey"
      ],
      "Effect": "Allow",
      "Resource": "arn:aws:kms:eu-west-1:XXXXX209540:key/4f581f5b-7f78-45e9-a543-83a7022e8105"
    }
  ]
}
```

Bind the IAM role to the `kustomize-controller` service account:

```sh
eksctl create iamserviceaccount \
--override-existing-serviceaccounts \
--name=kustomize-controller \
--namespace=flux-system \
--attach-policy-arn=<policyARN> \
--cluster=<clusterName>
```

Restart kustomize-controller for the binding to take effect:

```sh
kubectl -n flux-system rollout restart deployment/kustomize-controller
```

#### Azure

When using Azure Key Vault you need to authenticate kustomize-controller either with [aad-pod-identity](../use-cases/azure.md#aad-pod-identity)
or by passing [Service Principal credentials as environment variables](https://github.com/mozilla/sops#encrypting-using-azure-key-vault).

Create the Azure Key Vault:

```sh
export VAULT_NAME="fluxcd-$(uuidgen | tr -d - | head -c 16)"
export KEY_NAME="sops-cluster0"

az keyvault create --name "${VAULT_NAME}"
az keyvault key create --name "${KEY_NAME}" \
  --vault-name "${VAULT_NAME}" \
  --protection software \
  --ops encrypt decrypt
az keyvault key show --name "${KEY_NAME}" \
  --vault-name "${VAULT_NAME}" \
  --query key.kid
```

If using AAD Pod-Identity, create an identity within Azure to bind against, then create an `AzureIdentity` object to match:

```yaml
# Create an identity in Azure and assign it a role to access Key Vault (note: the identity's resourceGroup should match the desired Key Vault):
#     az identity create -n sops-akv-decryptor
#     az role assignment create --role "Key Vault Crypto User" --assignee-object-id "$(az identity show -n sops-akv-decryptor -o tsv --query principalId)"
# Fetch the clientID and resourceID to configure the AzureIdentity spec below:
#     az identity show -n sops-akv-decryptor -otsv --query clientId
#     az identity show -n sops-akv-decryptor -otsv --query resourceId
---
apiVersion: aadpodidentity.k8s.io/v1
kind: AzureIdentity
metadata:
  name: sops-akv-decryptor  # kustomize-controller label will match this name
  namespace: flux-system
spec:
  clientID: 58027844-6b86-424b-9888-b5ae2dc28b4f
  resourceID: /subscriptions/8c69185e-55f9-4d00-8e71-a1b1bb1386a1/resourcegroups/stealthybox/providers/Microsoft.ManagedIdentity/userAssignedIdentities/sops-akv-decryptor
  type: 0  # user-managed identity
```

[Customize your Flux Manifests](../guides/installation.md#customize-flux-manifests) so that kustomize-controller has the proper credentials.
Patch the kustomize-controller Pod template so that the label matches the `AzureIdentity` name.
Additionally, set the SOPS-specific environment variable `AZURE_AUTH_METHOD=msi` to activate the proper auth method within kustomize-controller:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kustomize-controller
  namespace: flux-system
spec:
  template:
    metadata:
      labels:
        aadpodidbinding: sops-akv-decryptor  # match the AzureIdentity name
    spec:
      containers:
      - name: manager
        env:
        - name: AZURE_AUTH_METHOD
          value: msi
```

Alternatively, if using a Service Principal stored in a K8s Secret, patch the Pod's `envFrom`
to reference the `AZURE_TENANT_ID`/`AZURE_CLIENT_ID`/`AZURE_CLIENT_SECRET`
fields from your Secret:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kustomize-controller
  namespace: flux-system
spec:
  template:
    spec:
      containers:
      - name: manager
        envFrom:
        - secretRef:
            name: sops-akv-decryptor-service-principal
```

At this point, kustomize-controller is authorized to decrypt values in
SOPS encrypted files from your Sources via the related Key Vault.

See Mozilla's guide to
[Encrypting Using Azure Key Vault](https://github.com/mozilla/sops#encrypting-using-azure-key-vault)
to get started committing encrypted files to your Git Repository or other Sources.

#### Google Cloud

Please ensure that the GKE cluster has Workload Identity enabled.

1. Create a service account with the role `Cloud KMS CryptoKey Encrypter/Decrypter`.
2. Create an IAM policy binding between the GCP service account and the `kustomize-controller` service account of the `flux-system` namespace.
3. Annotate the `kustomize-controller` service account in the `flux-system` namespace with the GCP service account:

```sh
kubectl annotate serviceaccount kustomize-controller \
  --namespace flux-system \
  iam.gke.io/gcp-service-account=<name-of-serviceaccount>@project-id.iam.gserviceaccount.com
```

## GitOps workflow

A cluster admin should create the Kubernetes secret with the PGP keys on each cluster and
add the GitRepository/Kustomization manifests to the fleet repository.

Git repository manifest:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: my-secrets
  namespace: flux-system
spec:
  interval: 1m
  url: https://github.com/my-org/my-secrets
```

Kustomization manifest:

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: my-secrets
  namespace: flux-system
spec:
  interval: 10m0s
  sourceRef:
    kind: GitRepository
    name: my-secrets
  path: ./
  prune: true
  decryption:
    provider: sops
    secretRef:
      name: sops-gpg
```

!!! hint
    You can generate the above manifests using `flux create --export > manifest.yaml`.

Assuming a team member wants to deploy an application that needs to connect
to a database using a username and password, they'll be doing the following:

* create a Kubernetes Secret manifest locally with the db credentials e.g. `db-auth.yaml`
* encrypt the secret `data` field with sops
* create a Kubernetes Deployment manifest for the app e.g.
`app-deployment.yaml`
* add the Secret to the Deployment manifest as a [volume mount or env var](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets)
* commit the manifests `db-auth.yaml` and `app-deployment.yaml` to a Git repository that's being synced by the GitOps toolkit controllers

Once the manifests have been pushed to the Git repository, the following happens:

* source-controller pulls the changes from Git
* kustomize-controller loads the GPG keys from the `sops-gpg` secret
* kustomize-controller decrypts the Kubernetes secrets with SOPS and applies them on the cluster
* kubelet creates the pods and mounts the secret as a volume or env variable inside the app container

diff --git a/docs/guides/notifications.md b/docs/guides/notifications.md
deleted file mode 100644
index 37186d5b..00000000
--- a/docs/guides/notifications.md
+++ /dev/null
@@ -1,295 +0,0 @@
# Setup Notifications

When operating a cluster, different teams may wish to receive notifications about
the status of their GitOps pipelines.
For example, the on-call team would receive alerts about reconciliation
failures in the cluster, while the dev team may wish to be alerted when a new version
of an app was deployed and if the deployment is healthy.

## Prerequisites

To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).

The GitOps toolkit controllers emit Kubernetes events whenever a resource status changes.
You can use the [notification-controller](../components/notification/controller.md)
to forward these events to Slack, Microsoft Teams, Discord or Rocket.Chat.
The notification controller is part of the default toolkit installation.

## Define a provider

First create a secret with your Slack incoming webhook:

```sh
kubectl -n flux-system create secret generic slack-url \
--from-literal=address=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
```

Note that the secret must contain an `address` field; it can be a Slack, Microsoft Teams, Discord or Rocket.Chat webhook URL.

Create a notification provider for Slack by referencing the above secret:

```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
  name: slack
  namespace: flux-system
spec:
  type: slack
  channel: general
  secretRef:
    name: slack-url
```

The provider type can be `slack`, `msteams`, `discord`, `rocket`, `googlechat`, `webex`, `sentry` or `generic`.

When type `generic` is specified, the notification controller will post the incoming
[event](../components/notification/event.md) in JSON format to the webhook address.
This way you can create custom handlers that can store the events in
Elasticsearch, CloudWatch, Stackdriver, etc.

## Define an alert

Create an alert definition for all repositories and kustomizations:

```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
  name: on-call-webapp
  namespace: flux-system
spec:
  providerRef:
    name: slack
  eventSeverity: info
  eventSources:
    - kind: GitRepository
      name: '*'
    - kind: Kustomization
      name: '*'
```

Apply the above files or commit them to the `fleet-infra` repository.
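The same building blocks can route failures to a separate channel. As a sketch (the `on-call` channel and the `slack-on-call`/`on-call-errors` names are hypothetical), a second provider/alert pair with `eventSeverity: error` would notify only on reconciliation failures:

```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
  name: slack-on-call
  namespace: flux-system
spec:
  type: slack
  channel: on-call  # hypothetical on-call channel
  secretRef:
    name: slack-url
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
  name: on-call-errors
  namespace: flux-system
spec:
  providerRef:
    name: slack-on-call
  eventSeverity: error  # alert only on reconciliation errors
  eventSources:
    - kind: Kustomization
      name: '*'
```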
To verify that the alert has been acknowledged by the notification controller, do:

```console
$ kubectl -n flux-system get alerts

NAME             READY   STATUS        AGE
on-call-webapp   True    Initialized   1m
```

Multiple alerts can be used to send notifications to different channels or Slack organizations.

The event severity can be set to `info` or `error`.
When the severity is set to `error`, the kustomize controller will alert on any error
encountered during the reconciliation process.
This includes kustomize build and validation errors,
apply errors and health check failures.

![error alert](../_files/slack-error-alert.png)

When the severity is set to `info`, the controller will alert if:

* a Kubernetes object was created, updated or deleted
* health checks are passing
* a dependency is delaying the execution
* an error occurs

![info alert](../_files/slack-info-alert.png)

## Git commit status

The GitHub, GitLab, Bitbucket, and Azure DevOps providers are slightly different from the other providers. Instead of
a stateless stream of events, the git notification providers link each event with the accompanying git commit that
triggered it. The linking is done by updating the commit status of a specific commit.

  - [GitHub](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-status-checks)
  - [GitLab](https://docs.gitlab.com/ee/api/commits.html)
  - [Bitbucket](https://developer.atlassian.com/server/bitbucket/how-tos/updating-build-status-for-commits/)
  - [Azure DevOps](https://docs.microsoft.com/en-us/rest/api/azure/devops/git/statuses?view=azure-devops-rest-6.0)

In GitHub the commit status set by notification-controller will result in a green checkmark or red cross next to the commit hash.
Clicking the icon will show more detailed information about the status.

![commit status GitHub overview](../_files/commit-status-github-overview.png)

Receiving an event in the form of a commit status rather than a message in a chat conversation has the benefit
that it closes the deployment loop, giving quick and visible feedback on whether a commit has reconciled and whether it succeeded.
This means that a deployment will work in a manner similar to what people are used to from "traditional" push-based CD pipelines.
Additionally, the status can be fetched from the git provider's API for a specific commit, allowing custom automation tools
to, for example, automatically promote a change or commit to a new directory after receiving a successful commit status. This can all be
done without requiring any access to the Kubernetes cluster.

As stated before, the provider works by referencing the same git repository as the kustomize-controller does.
When a new commit is pushed to the repository, source-controller will sync the commit, triggering kustomize-controller
to reconcile it. After this is done, kustomize-controller sends an event to notification-controller
with the result and the commit hash it reconciled, so notification-controller can update the correct commit and repository
when receiving the event.

![commit status flow](../_files/commit-status-flow.png)

!!! hint "Limitations"
    The git notification providers require that a commit hash is present in the metadata
    of the event. Therefore the providers will only work with `Kustomization` as an
    event source, as it is the only resource which includes this data.
First follow the [get started guide](../../get-started) if you do not have a Kubernetes cluster with Flux installed in it.
You will need an authentication token to communicate with the API. The authentication method depends on
the git provider used; refer to the [Provider CRD](../../components/notification/provider/#git-commit-status)
for details about how to get the correct token. This guide will use GitHub, but the other providers work in a very similar manner.
The token will need to have write access to the repository it is going to update the commit status in.
Store the generated token in a Secret with the following data format in the cluster:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: github
  namespace: flux-system
data:
  token: <base64-encoded token>
```

When sending notification events, kustomize-controller will include the commit hash related to the event.
Note that the commit hash in the event does not come from the git repository the `Kustomization` resource
comes from, but rather from the Kustomization's source ref. This means that commit status notifications will not work
if the manifests come from a repository which the API token is not allowed to write to.

Copy the manifest content in the "[kustomize](https://github.com/stefanprodan/podinfo/tree/master/kustomize)" directory
into the directory "./clusters/my-cluster/podinfo" in your fleet-infra repository. Make sure that you also add the
podinfo namespace:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: podinfo
```

Then create a Kustomization to deploy podinfo:

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 5m
  targetNamespace: podinfo
  path: ./clusters/my-cluster/podinfo
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment
      name: podinfo
      namespace: podinfo
  timeout: 1m
```

Creating a git provider is very similar to creating other types of providers.
The only caveat is that the provider address needs to point to the same
git repository as the event source originates from:

```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
  name: flux-system
  namespace: flux-system
spec:
  type: github
  address: https://github.com/<username>/fleet-infra
  secretRef:
    name: github
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
  name: podinfo
  namespace: flux-system
spec:
  providerRef:
    name: flux-system
  eventSeverity: info
  eventSources:
    - kind: Kustomization
      name: podinfo
      namespace: flux-system
```

By now the fleet-infra repository should have a similar directory structure:

```
fleet-infra
└── clusters/
    └── my-cluster/
        ├── flux-system/
        │   ├── gotk-components.yaml
        │   ├── gotk-sync.yaml
        │   └── kustomization.yaml
        ├── podinfo/
        │   ├── namespace.yaml
        │   ├── deployment.yaml
        │   ├── hpa.yaml
        │   ├── service.yaml
        │   └── kustomization.yaml
        ├── podinfo-kustomization.yaml
        └── podinfo-notification.yaml
```

If podinfo is deployed and the health checks pass, you should get a successful status on
the corresponding commit in the fleet-infra repository.

If everything is set up correctly there should now be a green check-mark next to the latest commit.
Clicking the check-mark should show a detailed view.
| GitHub | GitLab |
| ------------- | ------------- |
| ![commit status GitHub successful](../_files/commit-status-github-success.png) | ![commit status GitLab successful](../_files/commit-status-gitlab-success.png) |

### Generate error

A deployment failure can be forced by setting an invalid image tag in the podinfo deployment:

```yaml
apiVersion: apps/v1
kind: Deployment
spec:
  template:
    spec:
      containers:
      - name: podinfod
        image: ghcr.io/stefanprodan/podinfo:fake
```

After the commit has been reconciled, it should return a failed commit status.
This is where the health check in the Kustomization comes into play, together
with the timeout. The health check is used to assess the health of the Kustomization.
A failed commit status will not be sent until the health check times out. Setting
a lower timeout will give feedback faster, but may sometimes not allow enough time
for a new application to deploy.

| GitHub | GitLab |
| ------------- | ------------- |
| ![commit status GitHub failure](../_files/commit-status-github-failure.png) | ![commit status GitLab failure](../_files/commit-status-gitlab-failure.png) |

### Status changes

The provider will continuously receive events as they happen, and multiple events may
be received for the same commit hash. The git providers are configured to only update
the status if the status has changed. This is to avoid spamming the commit status
history with the same status over and over again.

There is an aspect of statefulness that needs to be considered, compared to the other
notification providers, as the events are stored by the git provider. This means that
the status of a commit can change over time. Initially a deployment may be healthy, resulting
in a successful status. Down the line the application, and thus the health check, may start failing
due to the amount of traffic it receives or external dependencies no longer being available.
The change in the health check would cause the status to go from successful to failed.
It is important to keep this in mind when building any automation tools that deal with the
status, and to consider the fact that receiving a successful status once does not mean it will
always be successful.

diff --git a/docs/guides/sealed-secrets.md b/docs/guides/sealed-secrets.md
deleted file mode 100644
index 56469067..00000000
--- a/docs/guides/sealed-secrets.md
+++ /dev/null
@@ -1,179 +0,0 @@
# Sealed Secrets

In order to store secrets safely in a public or private Git repository, you can use
Bitnami's [sealed-secrets controller](https://github.com/bitnami-labs/sealed-secrets)
and encrypt your Kubernetes Secrets into SealedSecrets.
The sealed secrets can be decrypted only by the controller running in your cluster;
nobody else can obtain the original secret, even if they have access to the Git repository.

## Prerequisites

To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).

The sealed-secrets controller comes with a companion CLI tool called kubeseal.
With kubeseal you can create SealedSecret custom resources in YAML format
and store those in your Git repository.

Install the kubeseal CLI:

```sh
brew install kubeseal
```

For Linux or Windows you can download the kubeseal binary from
[GitHub](https://github.com/bitnami-labs/sealed-secrets/releases).
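A quick way to confirm the CLI is installed and on your `PATH`:

```sh
kubeseal --version
```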
- -## Deploy sealed-secrets with a HelmRelease - -You'll be using [helm-controller](../components/helm/controller.md) APIs to install -the sealed-secrets controller from its [Helm chart](https://hub.kubeapps.com/charts/stable/sealed-secrets). - -First you have to register the Helm repository where the sealed-secrets chart is published: - -```sh -flux create source helm sealed-secrets \ ---interval=1h \ ---url=https://bitnami-labs.github.io/sealed-secrets -``` - -With `interval` we configure [source-controller](../components/source/controller.md) to download -the Helm repository index every hour. If a newer version of sealed-secrets is published, -source-controller will signal helm-controller that a new chart is available. - -Create a Helm release that installs the latest version of sealed-secrets controller: - -```sh -flux create helmrelease sealed-secrets \ ---interval=1h \ ---release-name=sealed-secrets \ ---target-namespace=flux-system \ ---source=HelmRepository/sealed-secrets \ ---chart=sealed-secrets \ ---chart-version=">=1.15.0-0" \ ---crds=CreateReplace -``` - -With chart version `>=1.15.0-0` we configure helm-controller to automatically upgrade the release -when a new chart version is fetched by source-controller. - -At startup, the sealed-secrets controller generates a 4096-bit RSA key pair and -persists the private and public keys as Kubernetes secrets in the `flux-system` namespace. - -You can retrieve the public key with: - -```sh -kubeseal --fetch-cert \ ---controller-name=sealed-secrets \ ---controller-namespace=flux-system \ -> pub-sealed-secrets.pem -``` - -The public key can be safely stored in Git, and can be used to encrypt secrets -without direct access to the Kubernetes cluster. - -## Encrypt secrets - -Generate a Kubernetes secret manifest with kubectl: - -```sh -kubectl -n default create secret generic basic-auth \ ---from-literal=user=admin \ ---from-literal=password=change-me \ ---dry-run=client \ --o yaml > basic-auth.yaml -``` - -Encrypt the secret with kubeseal: - -```sh -kubeseal --format=yaml --cert=pub-sealed-secrets.pem \ -< basic-auth.yaml > basic-auth-sealed.yaml -``` - -Delete the plain secret and apply the sealed one: - -```sh -rm basic-auth.yaml -kubectl apply -f basic-auth-sealed.yaml -``` - -Verify that the sealed-secrets controller has created the `basic-auth` Kubernetes Secret: - -```console -$ kubectl -n default get secrets basic-auth - -NAME TYPE DATA AGE -basic-auth Opaque 2 1m43s -``` - -## GitOps workflow - -A cluster admin should add the stable `HelmRepository` manifest and the sealed-secrets `HelmRelease` -to the fleet repository. - -Helm repository manifest: - -```yaml -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: HelmRepository -metadata: - name: sealed-secrets - namespace: flux-system -spec: - interval: 1h0m0s - url: https://bitnami-labs.github.io/sealed-secrets -``` - -Helm release manifest: - -```yaml -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: sealed-secrets - namespace: flux-system -spec: - chart: - spec: - chart: sealed-secrets - sourceRef: - kind: HelmRepository - name: sealed-secrets - version: ">=1.15.0-0" - interval: 1h0m0s - releaseName: sealed-secrets - targetNamespace: flux-system - install: - crds: Create - upgrade: - crds: CreateReplace -``` - -!!! hint - You can generate the above manifests using `flux create --export > manifest.yaml`. 
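For example, the `HelmRepository` manifest above can be reproduced from the earlier CLI command by appending `--export`; a sketch (the output file name is arbitrary):

```sh
flux create source helm sealed-secrets \
--interval=1h \
--url=https://bitnami-labs.github.io/sealed-secrets \
--export > sealed-secrets-source.yaml
```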
Once the sealed-secrets controller is installed, the admin fetches the
public key and shares it with the teams that operate on the fleet clusters via Git.

When a team member wants to create a Kubernetes Secret on a cluster,
they use kubeseal and the public key corresponding to that cluster to generate a SealedSecret.

Assuming a team member wants to deploy an application that needs to connect
to a database using a username and password, they'll be doing the following:

* create a Kubernetes Secret manifest locally with the db credentials e.g. `db-auth.yaml`
* encrypt the secret with kubeseal as `db-auth-sealed.yaml`
* delete the original secret file `db-auth.yaml`
* create a Kubernetes Deployment manifest for the app e.g. `app-deployment.yaml`
* add the Secret to the Deployment manifest as a [volume mount or env var](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets) using the original name `db-auth`
* commit the manifests `db-auth-sealed.yaml` and `app-deployment.yaml` to a Git repository that's being synced by the GitOps toolkit controllers

Once the manifests have been pushed to the Git repository, the following happens:

* source-controller pulls the changes from Git
* kustomize-controller applies the SealedSecret and the Deployment manifests
* sealed-secrets controller decrypts the SealedSecret and creates a Kubernetes Secret
* kubelet creates the pods and mounts the secret as a volume or env variable inside the app container

diff --git a/docs/guides/sortable-image-tags.md b/docs/guides/sortable-image-tags.md
deleted file mode 100644
index cbf7b8ad..00000000
--- a/docs/guides/sortable-image-tags.md
+++ /dev/null
@@ -1,192 +0,0 @@
# How to make sortable image tags to use with automation

Flux v2 does not support selecting the latest image by build time. Obtaining the build time requires
fetching the container config for each image, and doing so is subject to strict rate limiting by image
registries (e.g., by [DockerHub][dockerhub-rates]).

This guide explains how to construct image tags so that the most recent image has the tag that comes
last in alphabetical or numerical order. The technique suggested is to put a timestamp or serial
number in each image tag.

## Formats and alternatives

The important properties for sorting are that the parts of the timestamp go from most significant to
least (e.g., the year down to the second). For numbers it is best to use numerical order, since this
will work with values of different width (e.g., '12' sorts after '2').

Image tags are often shown in user interfaces, so readability matters. Here is an example of a
readable timestamp that will sort well:

```bash
$ # date and time (remember ':' is not allowed in a tag)
$ date +%F.%H%M%S
2021-01-28.133158
```

You can use a timestamp that sorts as a number, like [Unix
time](https://en.wikipedia.org/wiki/Unix_time):

```bash
$ # seconds since Jan 1 1970
$ date +%s
1611840548
```

Alternatively, you can use a serial number as part of the tag. Some CI platforms will provide a
build number in an environment variable, but that may not be reliable to use as a serial number --
check the platform documentation.
For example, GitHub makes available the variable `github.run_number`, which can be used as a reliable, ever-increasing serial number.
A commit count can be a reasonable stand-in for a serial number, if you build an image per commit
and you don't rewrite the branch in question:

```bash
$ # commits in branch
$ git rev-list --count HEAD
1504
```

Beware: this will not give a useful number if you have a shallow clone.

### Other things to include in the image tag

It is also handy to be able to quickly trace an image to the branch and commit of its source code. Including
the branch also means you can filter for images from a particular branch.

A useful tag format is

    <branch>-<sha1>-<timestamp>

The branch and commit SHA1 will usually be made available in a CI platform as environment variables. See

 - [CircleCI's built-in variables `CIRCLE_BRANCH` and `CIRCLE_SHA1`][circle-ci-env]
 - [GitHub Actions' `GITHUB_REF` and `GITHUB_SHA`][github-actions-env]
 - [Travis CI's `TRAVIS_BRANCH` and `TRAVIS_COMMIT`][travis-env].

## Example of a build process with timestamp tagging

Here is an example of a [GitHub Actions job][gha-syntax] that creates a "build ID" with the git
branch, SHA1, and a timestamp, and uses it as a tag when building an image:

```yaml
jobs:
  build-push:
    env:
      IMAGE: org/my-app
    runs-on: ubuntu-latest
    steps:

    - name: Generate build ID
      id: prep
      run: |
        branch=${GITHUB_REF##*/}
        sha=${GITHUB_SHA::8}
        ts=$(date +%s)
        echo "::set-output name=BUILD_ID::${branch}-${sha}-${ts}"

    # These are prerequisites for the docker build step
    - name: Set up QEMU
      uses: docker/setup-qemu-action@v1
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v1
    - name: Login to DockerHub
      uses: docker/login-action@v1
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}

    - name: Build and publish container image with tag
      uses: docker/build-push-action@v2
      with:
        push: true
        context: .
        file: ./Dockerfile
        tags: |
          ${{ env.IMAGE }}:${{ steps.prep.outputs.BUILD_ID }}
```

### Alternative example utilizing github.run_number

Here is another example of a [GitHub Actions job][gha-syntax] which tags images using GitHub Actions' built-in `run_number`
and the git SHA1:

```yaml
jobs:
  build-push:
    env:
      IMAGE: org/my-app
    runs-on: ubuntu-latest
    steps:
    # These are prerequisites for the docker build step
    - name: Set up QEMU
      uses: docker/setup-qemu-action@v1
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v1
    - name: Login to DockerHub
      uses: docker/login-action@v1
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}

    - name: Build and publish container image with tag
      uses: docker/build-push-action@v2
      with:
        push: true
        context: .
        file: ./Dockerfile
        tags: |
          ${{ env.IMAGE }}:${{ github.sha }}-${{ github.run_number }}
```

## Using in an `ImagePolicy` object

When creating an `ImagePolicy` object, you will need to extract just the timestamp part of the tag,
using the `filterTags` field. You can filter for a particular branch to restrict images to only those
built from that branch.
Here is an example that filters for only images built from the `main` branch, and selects the most
recent according to a timestamp (created with `date +%s`) or according to the run number (`github.run_number` for example):

```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
  name: image-repo-policy
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: image-repo
  filterTags:
    ## use "pattern: '.*-(?P<ts>[0-9]+)'" if you copied the workflow example using github.run_number
    pattern: '^main-[a-f0-9]+-(?P<ts>[0-9]+)'
    extract: '$ts'
  policy:
    numerical:
      order: asc
```

If you don't care about the branch, that part can be a wildcard in the pattern:

```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
  name: image-repo-policy
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: image-repo
  filterTags:
    pattern: '^.+-[a-f0-9]+-(?P<ts>[0-9]+)'
    extract: '$ts'
  policy:
    numerical:
      order: asc
```

[circle-ci-env]: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
[github-actions-env]: https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables
[travis-env]: https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
[dockerhub-rates]: https://docs.docker.com/docker-hub/billing/faq/#pull-rate-limiting-faqs
[gha-syntax]: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

diff --git a/docs/guides/webhook-receivers.md b/docs/guides/webhook-receivers.md
deleted file mode 100644
index d31fdb95..00000000
--- a/docs/guides/webhook-receivers.md
+++ /dev/null
@@ -1,138 +0,0 @@
# Setup Webhook Receivers

The GitOps toolkit controllers are by design **pull-based**.
In order to notify the controllers about changes in Git or Helm repositories,
you can set up webhooks and trigger a cluster reconciliation
every time a source changes. Using webhook receivers, you can build **push-based**
GitOps pipelines that react to external events.

## Prerequisites

To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).

The [notification controller](../components/notification/controller.md)
can handle events coming from external systems
(GitHub, GitLab, Bitbucket, Harbor, Jenkins, etc.)
and notify the GitOps toolkit controllers about source changes.
The notification controller is part of the default toolkit installation.

## Expose the webhook receiver

In order to receive Git push or Helm chart upload events, you'll have to
expose the webhook receiver endpoint outside of your Kubernetes cluster on
a public address.

The notification controller handles webhook requests on port `9292`.
This port can be used to create a Kubernetes LoadBalancer Service or Ingress.
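Before exposing anything publicly, you can verify locally that the controller is listening on that port; a sketch using a plain port-forward to the controller Deployment:

```sh
kubectl -n flux-system port-forward deploy/notification-controller 9292:9292
```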
Create a `LoadBalancer` service:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: receiver
  namespace: flux-system
spec:
  type: LoadBalancer
  selector:
    app: notification-controller
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 9292
```

Wait for Kubernetes to assign a public address with:

```sh
watch kubectl -n flux-system get svc/receiver
```

## Define a Git repository

Create a Git source pointing to a GitHub repository that you have control over:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: webapp
  namespace: flux-system
spec:
  interval: 60m
  url: https://github.com/<org>/<repository>
  ref:
    branch: master
```

!!! hint "Authentication"
    SSH or token based authentication can be configured for private repositories.
    See the [GitRepository CRD docs](../components/source/gitrepositories.md) for more details.

## Define a Git repository receiver

First generate a random string and create a secret with a `token` field:

```sh
TOKEN=$(head -c 12 /dev/urandom | shasum | cut -d ' ' -f1)
echo $TOKEN

kubectl -n flux-system create secret generic webhook-token \
--from-literal=token=$TOKEN
```

Create a receiver for GitHub and specify the `GitRepository` object:

```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Receiver
metadata:
  name: webapp
  namespace: flux-system
spec:
  type: github
  events:
    - "ping"
    - "push"
  secretRef:
    name: webhook-token
  resources:
    - kind: GitRepository
      name: webapp
```

!!! hint "Note"
    Besides GitHub, you can define receivers for **GitLab**, **Bitbucket**, **Harbor**
    and any other system that supports webhooks e.g. Jenkins, CircleCI, etc.
    See the [Receiver CRD docs](../components/notification/receiver.md) for more details.

The notification controller generates a unique URL using the provided token and the receiver name/namespace.

Find the URL with:

```console
$ kubectl -n flux-system get receiver/webapp

NAME     READY   STATUS
webapp   True    Receiver initialised with URL: /hook/bed6d00b5555b1603e1f59b94d7fdbca58089cb5663633fb83f2815dc626d92b
```

On GitHub, navigate to your repository and click on the "Add webhook" button under "Settings/Webhooks".
Fill the form with:

* **Payload URL**: compose the address using the receiver LB address and the generated URL `http://<LoadBalancerAddress>/<ReceiverURL>`
* **Secret**: use the `token` string

With the above settings, when you push a commit to the repository, the following happens:

* GitHub sends the Git push event to the receiver address
* Notification controller validates the authenticity of the payload using HMAC
* Source controller is notified about the changes
* Source controller pulls the changes into the cluster and updates the `GitRepository` revision
* Kustomize controller is notified about the revision change
* Kustomize controller reconciles all the `Kustomizations` that reference the `GitRepository` object

diff --git a/docs/migration/timetable.md b/docs/migration/timetable.md
deleted file mode 100644
index 537028a7..00000000
--- a/docs/migration/timetable.md
+++ /dev/null
@@ -1,38 +0,0 @@
---
hide:
  # The table data on this page is easier to read when wider
  # The TOC right column is already blank anyway
  - toc
---
# Migration and Support Timetable

!!! heart "Flux Migration Commitment"
    This public timetable clarifies our commitment to end users.
    Its purpose is to help you decide how and when to plan infrastructure changes related to Flux versions.
    Please refer to the [Roadmap](../roadmap/index.md) for additional details.
| Date | Flux 1 | Flux 2 CLI | GOTK[^1] |
| -- | -- | -- | -- |
| Oct 6, 2020 | ^^[Maintenance Mode](https://github.com/fluxcd/website/pull/25)^^<br/>• Flux 1 releases only include critical bug fixes (which don’t require changing Flux 1 architecture), and security patches (for OS packages, Go runtime, and kubectl). No new features<br/>• Existing projects encouraged to test migration to Flux 2 pre-releases in non-production | ^^Development Mode^^<br/>• Working to finish parity with Flux 1<br/>• New projects encouraged to test Flux 2 pre-releases in non-production | ^^All Alpha^^[^2] |
| Feb 18, 2021 | ^^Partial Migration Mode^^<br/>• Existing projects encouraged to migrate to `v1beta1`/`v2beta1` if you only use those features (Flux 1 read-only mode, and Helm Operator)<br/>• Existing projects encouraged to test image automation Alpha in non-production | ^^Feature Parity^^ | ^^Image Automation Alpha. All others reached Feature Parity, Beta^^ |
| TBD | ^^Superseded^^<br/>• All existing projects encouraged to [migrate to Flux 2](https://toolkit.fluxcd.io/guides/flux-v1-migration/), and [report any bugs](https://github.com/fluxcd/flux2/issues/new/choose)<br/>• Flux 1 Helm Operator archived – no further updates due to unsupported dependencies | ^^Needs further testing, may get breaking changes^^<br/>• CLI needs further user testing during this migration period | ^^All Beta, Production Ready^^[^3]<br/>• All Flux 1 features stable and supported in Flux 2<br/>• Promoting Alpha versions to Beta makes this Production Ready |
| TBD | ^^Migration and security support only^^<br/>• Flux 1 releases only include security patches (no bug fixes)<br/>• Maintainers support users with migration to Flux 2 only, no longer with Flux 1 issues<br/>• Flux 1 archive date announced | ^^Public release (GA), Production Ready^^<br/>• CLI commits to backwards compatibility moving forward<br/>• CLI follows kubectl-style backwards compatibility support: +1/-1 MINOR version for server components (e.g., APIs, Controllers, validation webhooks) | ^^All Beta, Production Ready^^ |
| TBD | ^^Archived^^<br/>• Flux 1 obsolete, no further releases or maintainer support<br/>• Flux 1 repo archived | ^^Continued active development^^ | ^^Continued active development^^ |
[^1]: GOTK is shorthand for the [GitOps Toolkit](https://toolkit.fluxcd.io/components/) APIs and Controllers

[^2]: Versioning: Flux 2 is a multi-service architecture, so requires a more complex explanation than Flux 1:
    - The Flux 2 CLI follows the [Semantic Versioning](https://semver.org/) scheme
    - The GitOps Toolkit APIs follow the [Kubernetes API versioning](https://kubernetes.io/docs/reference/using-api/#api-versioning) pattern. See the [Roadmap](https://toolkit.fluxcd.io/roadmap/) for component versions.
    - These are coordinated for cross-compatibility: For each Flux 2 CLI tag, CLI and GOTK versions are end-to-end tested together, so you may safely upgrade from one MINOR/PATCH version to another.

[^3]: The GOTK Custom Resource Definitions which are at `v1beta1` and `v2beta1` and their controllers are considered stable and production ready. Going forward, breaking changes to the beta CRDs will be accompanied by a conversion mechanism.

diff --git a/docs/proposals/go-git-providers.md b/docs/proposals/go-git-providers.md deleted file mode 100644 index 661b5094..00000000 --- a/docs/proposals/go-git-providers.md +++ /dev/null @@ -1,510 +0,0 @@

# go-git-providers

## Abstract

This proposal aims to create a library with the import path `github.com/fluxcd/go-git-providers`
(import name: `gitprovider`), which provides an abstraction layer for talking to Git providers
like GitHub, GitLab and Bitbucket.

This would become a new repository, specifically targeted at being a general-purpose Git provider
client for multiple providers and domains.

## Goals

- Support multiple Git provider backends (e.g. GitHub, GitLab, Bitbucket, etc.) using the same interface
- Support talking to multiple domains at once, including custom domains (e.g. talking to "gitlab.com" and "version.aalto.fi" from the same client)
- Support no authentication (for public repos), basic auth, and OAuth2 for authentication
- Manipulating the following resources:
    - **Organizations**: `GET`, `LIST` (both all accessible top-level orgs and sub-orgs)
    - For a given **Organization**:
        - **Teams**: `GET` and `LIST`
        - **Repositories**: `GET`, `LIST` and `POST`
        - **Team Access**: `LIST`, `POST` and `DELETE`
        - **Credentials**: `LIST`, `POST` and `DELETE`
- Support sub-organizations (or "sub-groups" in GitLab) if possible
- Support reconciling an object for idempotent operations
- Pagination is automatically handled for `LIST` requests
- Transparently manage teams (collections of users; sub-groups in GitLab) with varying access to repos
- Follow library best practices in order to be easy to vendor (e.g. use major `vX` versioning & go.mod)

## Non-goals

- Support for features not mentioned above

## Design decisions

- A `context.Context` should be passed to every request as the first argument
- There should be two interfaces per resource, if applicable:
    - one collection-specific interface, with a plural name (e.g. `OrganizationsClient`), that has methods like `Get()` and `List()`
    - one instance-specific interface, with a singular name (e.g. `OrganizationClient`), that operates on that instance, allowing access to child resources, e.g. `Teams()`
- Every `Create()` signature shall have a `{Resource}CreateOptions` struct as the last argument
    - `Delete()` and similar methods may use the same pattern if needed
- All `*Options` structs shall be passed by value (i.e. non-nillable) and contain only nillable, optional fields
- All optional fields in the type structs shall be nillable
- It should be possible to create a fake API client for testing, implementing the same interfaces
- All type structs shall have a `Validate()` method, and optionally a `Default()` one
- All type structs shall expose their internal representation (from the underlying library) through the `InternalGetter` interface with a method `GetInternal() interface{}`
- Typed errors shall be returned, wrapped using Go 1.14's new features
- Go-style enums are used when there are only a few supported values for a field
- Every field is documented using a Godoc comment, including `+required` or `+optional` to clearly signify its importance
- Support serializing the types to JSON (if needed for e.g. debugging) by adding tags

## Implementation

### Provider package

The provider package, e.g. at `github.com/fluxcd/go-git-providers/github`, will have constructor methods so a client can be created, e.g. as follows:

```go
// Create a client for github.com without any authentication
c := github.NewClient()

// Create a client for an enterprise GitHub account, without any authentication
c = github.NewClient(github.WithBaseURL("enterprise.github.com"))

// Create a client for github.com using a personal oauth2 token
c = github.NewClient(github.WithOAuth2("<oauth2-token>"))
```

### Client

The definition of a `Client` is as follows:

```go
// Client is an interface that allows talking to a Git provider
type Client interface {
  // The Client allows accessing all known resources
  ResourceClient

  // SupportedDomain returns the supported domain
  // This field is set at client creation time, and can't be changed
  SupportedDomain() string

  // ProviderID returns the provider ID (e.g. "github", "gitlab") for this client
  // This field is set at client creation time, and can't be changed
  ProviderID() ProviderID

  // Raw returns the Go client used under the hood for accessing the Git provider
  Raw() interface{}
}
```

As one can see, the `Client` is scoped to a single backing domain. `ProviderID` is a typed string, and every
implementation package defines its own constant, e.g. `const ProviderName = gitprovider.ProviderID("github")`.

The `ResourceClient` actually allows talking to resources of the API, both for single objects, and collections:

```go
// ResourceClient allows access to resource-specific clients
type ResourceClient interface {
  // Organization gets the OrganizationClient for the specific top-level organization
  // ErrNotTopLevelOrganization will be returned if the organization is not top-level
  Organization(o OrganizationRef) OrganizationClient

  // Organizations returns the OrganizationsClient handling sets of organizations
  Organizations() OrganizationsClient

  // Repository gets the RepositoryClient for the specified RepositoryRef
  Repository(r RepositoryRef) RepositoryClient

  // Repositories returns the RepositoriesClient handling sets of repositories
  Repositories() RepositoriesClient
}
```

In order to reference organizations and repositories, there are the `OrganizationRef` and `RepositoryRef`
interfaces:

```go
// OrganizationRef references an organization in a Git provider
type OrganizationRef interface {
  // String returns the HTTPS URL
  fmt.Stringer

  // GetDomain returns the URL-domain for the Git provider backend, e.g. gitlab.com or version.aalto.fi
  GetDomain() string
  // GetOrganization returns the top-level organization, i.e. "weaveworks" or "kubernetes-sigs"
  GetOrganization() string
  // GetSubOrganizations returns the names of sub-organizations (or sub-groups),
  // e.g. ["engineering", "frontend"] would be returned for gitlab.com/weaveworks/engineering/frontend
  GetSubOrganizations() []string
}

// RepositoryRef references a repository hosted by a Git provider
type RepositoryRef interface {
  // RepositoryRef requires an OrganizationRef to fully-qualify a repo reference
  OrganizationRef

  // GetRepository returns the name of the repository
  GetRepository() string
}
```

Along with these, there are `OrganizationInfo` and `RepositoryInfo`, which implement the above-mentioned interfaces in a straightforward way.

If you want to create an `OrganizationRef` or `RepositoryRef`, you can either use `NewOrganizationInfo()` or `NewRepositoryInfo()`, filling in all parts of the reference, or use the `ParseRepositoryURL(r string) (RepositoryRef, error)` or `ParseOrganizationURL(o string) (OrganizationRef, error)` functions.

As mentioned above, only one target domain is supported by the `Client`. This means e.g. that if the `Client` is configured for GitHub, and you feed it a GitLab URL to parse, `ErrDomainUnsupported` will be returned.

This brings us to a higher-level client abstraction, `MultiClient`.

### MultiClient

In order to automatically support multiple domains and providers using the same interface, `MultiClient` is introduced.

The user would use the `MultiClient` as follows:

```go
// Create a client to github.com without authentication
gh := github.NewClient()

// Create a client to gitlab.com, authenticating with basic auth
gl := gitlab.NewClient(gitlab.WithBasicAuth("<username>", "<password>"))

// Create a client to the custom domain version.aalto.fi (a self-hosted GitLab)
aalto := gitlab.NewClient(gitlab.WithBaseURL("version.aalto.fi"), gitlab.WithBasicAuth("<username>", "<password>"))

// Create a MultiClient which supports talking to any of these backends
client := gitprovider.NewMultiClient(gh, gl, aalto)
```

The interface definition of `MultiClient` is similar to that of `Client`, both embedding `ResourceClient`, but it also allows access to domain-specific underlying `Client`s:

```go
// MultiClient allows talking to multiple Git providers at once
type MultiClient interface {
  // The MultiClient allows accessing all known resources, automatically choosing the right underlying
  // Client based on the resource's domain
  ResourceClient

  // SupportedDomains returns a list of known domains
  SupportedDomains() []string

  // ClientForDomain returns the Client used for a specific domain
  ClientForDomain(domain string) (Client, bool)
}
```

### OrganizationsClient

The `OrganizationsClient` provides access to a set of organizations, as follows:

```go
// OrganizationsClient operates on organizations the user has access to
type OrganizationsClient interface {
  // Get a specific organization the user has access to
  // This might also refer to a sub-organization
  // ErrNotFound is returned if the resource does not exist
  Get(ctx context.Context, o OrganizationRef) (*Organization, error)

  // List all top-level organizations the specific user has access to
  // List should return all available organizations, using multiple paginated requests if needed
  List(ctx context.Context) ([]Organization, error)

  // Children returns the immediate child-organizations for the specific OrganizationRef o.
- // The OrganizationRef may point to any sub-organization that exists - // This is not supported in GitHub - // Children should return all available organizations, using multiple paginated requests if needed - Children(ctx context.Context, o OrganizationRef) ([]Organization, error) - - // Possibly add Create/Update/Delete methods later -} -``` - -The `Organization` struct is fairly straightforward for now: - -```go -// Organization represents an (top-level- or sub-) organization -type Organization struct { - // OrganizationInfo provides the required fields - // (Domain, Organization and SubOrganizations) required for being an OrganizationRef - OrganizationInfo `json:",inline"` - // InternalHolder implements the InternalGetter interface - // +optional - InternalHolder `json:",inline"` - - // Name is the human-friendly name of this organization, e.g. "Weaveworks" or "Kubernetes SIGs" - // +required - Name string `json:"name"` - - // Description returns a description for the organization - // No default value at POST-time - // +optional - Description *string `json:"description"` -} -``` - -The `OrganizationInfo` struct is a straightforward struct just implementing the `OrganizationRef` interface -with basic fields & getters. `InternalHolder` is implementing the `InternalGetter` interface as follows, and is -embedded into all main structs: - -```go -// InternalGetter allows access to the underlying object -type InternalGetter interface { - // GetInternal returns the underlying struct that's used - GetInternal() interface{} -} - -// InternalHolder can be embedded into other structs to implement the InternalGetter interface -type InternalHolder struct { - // Internal contains the underlying object. - // +optional - Internal interface{} `json:"-"` -} -``` - -### OrganizationClient - -`OrganizationClient` allows access to a specific organization's underlying resources as follows: - -```go -// OrganizationClient operates on a given/specific organization -type OrganizationClient interface { - // Teams gives access to the TeamsClient for this specific organization - Teams() OrganizationTeamsClient -} -``` - -#### Organization Teams - -Teams belonging to a certain organization can at this moment be fetched on an individual basis, or listed. - -```go -// OrganizationTeamsClient handles teams organization-wide -type OrganizationTeamsClient interface { - // Get a team within the specific organization - // teamName may include slashes, to point to e.g. "sub-teams" i.e. subgroups in Gitlab - // teamName must not be an empty string - // ErrNotFound is returned if the resource does not exist - Get(ctx context.Context, teamName string) (*Team, error) - - // List all teams (recursively, in terms of subgroups) within the specific organization - // List should return all available organizations, using multiple paginated requests if needed - List(ctx context.Context) ([]Team, error) - - // Possibly add Create/Update/Delete methods later -} -``` - -The `Team` struct is defined as follows: - -```go -// Team is a representation for a team of users inside of an organization -type Team struct { - // Team embeds OrganizationInfo which makes it automatically comply with OrganizationRef - OrganizationInfo `json:",inline"` - // Team embeds InternalHolder for accessing the underlying object - // +optional - InternalHolder `json:",inline"` - - // Name describes the name of the team. 
The team name may contain slashes - // +required - Name string `json:"name"` - - // Members points to a set of user names (logins) of the members of this team - // +required - Members []string `json:"members"` -} -``` - -In GitLab, teams could be modelled as users in a sub-group. Those users can later be added as a single unit -to access a given repository. - -### RepositoriesClient - -`RepositoriesClient` provides access to a set of repositories for the user. - -```go -// RepositoriesClient operates on repositories the user has access to -type RepositoriesClient interface { - // Get returns the repository at the given path - // ErrNotFound is returned if the resource does not exist - Get(ctx context.Context, r RepositoryRef) (*Repository, error) - - // List all repositories in the given organization - // List should return all available organizations, using multiple paginated requests if needed - List(ctx context.Context, o OrganizationRef) ([]Repository, error) - - // Create creates a repository at the given organization path, with the given URL-encoded name and options - // ErrAlreadyExists will be returned if the resource already exists - Create(ctx context.Context, r *Repository, opts RepositoryCreateOptions) (*Repository, error) - - // Reconcile makes sure r is the actual state in the backing Git provider. If r doesn't exist - // under the hood, it is created. If r is already the actual state, this is a no-op. If r isn't - // the actual state, the resource will either be updated or deleted/recreated. - Reconcile(ctx context.Context, r *Repository) error -} -``` - -`RepositoryCreateOptions` has options like `AutoInit *bool`, `LicenseTemplate *string` and so forth to allow an -one-time initialization step. - -The `Repository` struct is defined as follows: - -```go -// Repository represents a Git repository provided by a Git provider -type Repository struct { - // RepositoryInfo provides the required fields - // (Domain, Organization, SubOrganizations and RepositoryName) - // required for being an RepositoryRef - RepositoryInfo `json:",inline"` - // InternalHolder implements the InternalGetter interface - // +optional - InternalHolder `json:",inline"` - - // Description returns a description for the repository - // No default value at POST-time - // +optional - Description *string `json:"description"` - - // Visibility returns the desired visibility for the repository - // Default value at POST-time: RepoVisibilityPrivate - // +optional - Visibility *RepoVisibility -} - -// GetCloneURL gets the clone URL for the specified transport type -func (r *Repository) GetCloneURL(transport TransportType) string { - return GetCloneURL(r, transport) -} -``` - -As can be seen, there is also a `GetCloneURL` function for the repository which allows -resolving the URL from which to clone the repo, for a given transport method (`ssh` and `https` -are supported `TransportType`s) - -### RepositoryClient - -`RepositoryClient` allows access to a given repository's underlying resources, like follows: - -```go -// RepositoryClient operates on a given/specific repository -type RepositoryClient interface { - // TeamAccess gives access to what teams have access to this specific repository - TeamAccess() RepositoryTeamAccessClient - - // Credentials gives access to manipulating credentials for accessing this specific repository - Credentials() RepositoryCredentialsClient -} -``` - -#### Repository Teams - -`RepositoryTeamAccessClient` allows adding & removing teams from the list of authorized persons to access a 
repository. - -```go -// RepositoryTeamAccessClient operates on the teams list for a specific repository -type RepositoryTeamAccessClient interface { - // Create adds a given team to the repo's team access control list - // ErrAlreadyExists will be returned if the resource already exists - // The embedded RepositoryInfo of ta does not need to be populated, but if it is, - // it must equal to the RepositoryRef given to the RepositoryClient. - Create(ctx context.Context, ta *TeamAccess, opts RepositoryAddTeamOptions) error - - // Lists the team access control list for this repo - List(ctx context.Context) ([]TeamAccess, error) - - // Reconcile makes sure ta is the actual state in the backing Git provider. If ta doesn't exist - // under the hood, it is created. If ta is already the actual state, this is a no-op. If ta isn't - // the actual state, the resource will either be updated or deleted/recreated. - // The embedded RepositoryInfo of ta does not need to be populated, but if it is, - // it must equal to the RepositoryRef given to the RepositoryClient. - Reconcile(ctx context.Context, ta *TeamAccess) error - - // Delete removes the given team from the repo's team access control list - // ErrNotFound is returned if the resource does not exist - Delete(ctx context.Context, teamName string) error -} -``` - -The `TeamAccess` struct looks as follows: - -```go -// TeamAccess describes a binding between a repository and a team -type TeamAccess struct { - // TeamAccess embeds RepositoryInfo which makes it automatically comply with RepositoryRef - // +optional - RepositoryInfo `json:",inline"` - // TeamAccess embeds InternalHolder for accessing the underlying object - // +optional - InternalHolder `json:",inline"` - - // Name describes the name of the team. The team name may contain slashes - // +required - Name string `json:"name"` - - // Permission describes the permission level for which the team is allowed to operate - // Default: read - // Available options: See the TeamRepositoryPermission enum - // +optional - Permission *TeamRepositoryPermission -} -``` - -#### Repository Credentials - -`RepositoryCredentialsClient` allows adding & removing credentials (e.g. deploy keys) from accessing a specific repository. - -```go -// RepositoryCredentialsClient operates on the access credential list for a specific repository -type RepositoryCredentialsClient interface { - // Create a credential with the given human-readable name, the given bytes and optional options - // ErrAlreadyExists will be returned if the resource already exists - Create(ctx context.Context, c RepositoryCredential, opts CredentialCreateOptions) error - - // Lists all credentials for the given credential type - List(ctx context.Context, t RepositoryCredentialType) ([]RepositoryCredential, error) - - // Reconcile makes sure c is the actual state in the backing Git provider. If c doesn't exist - // under the hood, it is created. If c is already the actual state, this is a no-op. If c isn't - // the actual state, the resource will either be updated or deleted/recreated. - Reconcile(ctx context.Context, c RepositoryCredential) error - - // Deletes a credential from the repo. 
  // name corresponds to GetName() of the credential
  // ErrNotFound is returned if the resource does not exist
  Delete(ctx context.Context, t RepositoryCredentialType, name string) error
}
```

In order to support multiple different types of credentials, `RepositoryCredential` is an interface:

```go
// RepositoryCredential is a credential that allows access (either read-only or read-write) to the repo
type RepositoryCredential interface {
  // GetType returns the type of the credential
  GetType() RepositoryCredentialType

  // GetName returns a name (or title/description) of the credential
  GetName() string

  // GetData returns the key that will be authorized to access the repo, this can e.g. be a SSH public key
  GetData() []byte

  // IsReadOnly returns whether this credential is authorized to write to the repository or not
  IsReadOnly() bool
}
```

The default implementation of `RepositoryCredential` is `DeployKey`:

```go
// DeployKey represents a short-lived credential (e.g. an SSH public key) used for accessing a repository
type DeployKey struct {
  // DeployKey embeds InternalHolder for accessing the underlying object
  // +optional
  InternalHolder `json:",inline"`

  // Title is the human-friendly interpretation of what the key is for (and does)
  // +required
  Title string `json:"title"`

  // Key specifies the public part of the deploy (e.g. SSH) key
  // +required
  Key []byte `json:"key"`

  // ReadOnly specifies whether this DeployKey can write to the repository or not
  // Default value at POST-time: true
  // +optional
  ReadOnly *bool `json:"readOnly"`
}
```

diff --git a/docs/roadmap/index.md b/docs/roadmap/index.md deleted file mode 100644 index b4088451..00000000 --- a/docs/roadmap/index.md +++ /dev/null @@ -1,148 +0,0 @@

# Roadmap

!!! hint "Production readiness"
    The Flux custom resource definitions which are at `v1beta1` and `v2beta1`
    and their controllers are considered stable and production ready.
    Going forward, breaking changes to the beta CRDs will be accompanied by a conversion mechanism.
    Please see the [Migration and Support Timetable](../migration/timetable.md) for our commitment to end users.

The following components (included by default in [flux bootstrap](../guides/installation.md#bootstrap))
are considered production ready:

- [source-controller](../components/source)
- [kustomize-controller](../components/kustomize)
- [notification-controller](../components/notification)
- [helm-controller](../components/helm)

The following GitOps Toolkit APIs are considered production ready:

- `source.toolkit.fluxcd.io/v1beta1`
- `kustomize.toolkit.fluxcd.io/v1beta1`
- `notification.toolkit.fluxcd.io/v1beta1`
- `helm.toolkit.fluxcd.io/v2beta1`

## The road to Flux v2 GA

In our planning discussions we have identified these possible areas of work;
this list is subject to change while we gather feedback:

- Stabilize the image automation APIs
  * Review the spec of `ImageRepository`, `ImagePolicy` and `ImageUpdateAutomation`
  * Promote the image automation APIs to `v1beta1`
  * Include the image automation controllers in the default components list

- Improve the documentation
  * Gather feedback on the [migration guides](https://github.com/fluxcd/flux2/discussions/413) and address more use-cases
  * Incident management and troubleshooting guides
  * Cloud specific guides (AWS, Azure, Google Cloud, more?)
- * Consolidate the docs under [fluxcd.io](https://fluxcd.io) website - -## The road to Flux v1 feature parity - -In our planning discussions we identified three areas of work: - -- Feature parity with Flux v1 in read-only mode -- Feature parity with the image-update functionality in Flux v1 -- Feature parity with Helm Operator v1 - -### Flux read-only feature parity - -[= 100% "100%"] - -Flux v2 read-only is ready to try. See the [Getting -Started](https://toolkit.fluxcd.io/get-started/) how-to, and the -[Migration -guide](https://toolkit.fluxcd.io/guides/flux-v1-migration/). - -This would be the first stepping stone: we want Flux v2 to be on-par with today's Flux in -[read-only mode](https://github.com/fluxcd/flux/blob/master/docs/faq.md#can-i-run-flux-with-readonly-git-access) -and [FluxCloud](https://github.com/justinbarrick/fluxcloud) notifications. - -Goals - -- :material-check-bold: [Offer a migration guide for those that are using Flux in read-only mode to synchronize plain manifests](https://toolkit.fluxcd.io/guides/flux-v1-migration/) -- :material-check-bold: [Offer a migration guide for those that are using Flux in read-only mode to synchronize Kustomize overlays](https://toolkit.fluxcd.io/guides/flux-v1-migration/) -- :material-check-bold: [Offer a dedicated component for forwarding events to external messaging platforms](https://toolkit.fluxcd.io/guides/notifications/) - -Non-Goals - -- Migrate users that are using Flux to run custom scripts with `flux.yaml` -- Automate the migration of `flux.yaml` kustomize users - -Tasks - -- [x] Design the events API -- [x] Implement events in source and kustomize controllers -- [x] Make the kustomize-controller apply/gc events on-par with Flux v1 apply events -- [x] Design the notifications and events filtering API -- [x] Implement a notification controller for Slack, MS Teams, Discord, Rocket -- [x] Implement Prometheus metrics in source and kustomize controllers -- [x] Review the git source and kustomize APIs -- [x] Support [bash-style variable substitution](https://toolkit.fluxcd.io/components/kustomize/kustomization/#variable-substitution) as an alternative to `flux.yaml` envsubst/sed usage -- [x] Create a migration guide for `flux.yaml` kustomize users -- [x] Include support for SOPS - -### Flux image update feature parity - -[= 100% "100%"] - -Image automation is available as a prerelease. See [this -guide](https://toolkit.fluxcd.io/guides/image-update/) for how to -install and use it. 
Goals

- Offer components that can replace the Flux v1 image update feature

Non-Goals

- Maintain backwards compatibility with Flux v1 annotations
- [Order by timestamps found inside image layers](https://github.com/fluxcd/flux2/discussions/802)

Tasks

- [x] [Design the image scanning and automation API](https://github.com/fluxcd/flux2/discussions/107)
- [x] Implement an image scanning controller
- [x] Public image repo support
- [x] Credentials from Secret [fluxcd/image-reflector-controller#35](https://github.com/fluxcd/image-reflector-controller/pull/35)
- [x] Design the automation component
- [x] Implement the image scan/patch/push workflow
- [x] Integrate the new components in the Flux CLI [fluxcd/flux2#538](https://github.com/fluxcd/flux2/pull/538)
- [x] Write a guide for how to use image automation ([guide here](https://toolkit.fluxcd.io/guides/image-update/))
- [x] ACR/ECR/GCR integration ([guide here](https://toolkit.fluxcd.io/guides/image-update/#imagerepository-cloud-providers-authentication))
- [x] Write a migration guide from Flux v1 annotations ([guide here](https://toolkit.fluxcd.io/guides/flux-v1-automation-migration/))

### Helm v3 feature parity

[= 100% "100%"]

Helm support in Flux v2 is ready to try. See the [Helm controller
guide](https://toolkit.fluxcd.io/guides/helmreleases/), and the [Helm
controller migration
guide](https://toolkit.fluxcd.io/guides/helm-operator-migration/).

Goals

- Offer a migration guide for those that are using Helm Operator with Helm v3 and charts from
  Helm and Git repositories

Non-Goals

- Migrate users that are using Helm v2

Tasks

- [x] Implement a Helm controller for Helm v3 covering all the current release options
- [x] Discuss and design Helm releases based on source API:
    * [x] Providing values from sources
    * [x] Conditional remediation on failed Helm actions
    * [x] Support for Helm charts from Git
- [x] Review the Helm release, chart and repository APIs
- [x] Implement events in Helm controller
- [x] Implement Prometheus metrics in Helm controller
- [x] Implement support for values from `Secret` and `ConfigMap` resources
- [x] Implement conditional remediation on (failed) Helm actions
- [x] Implement support for Helm charts from Git
- [x] Implement support for referring to an alternative chart values file
- [x] Stabilize API
- [x] [Create a migration guide for Helm Operator users](../guides/helm-operator-migration.md)

diff --git a/docs/use-cases/azure.md b/docs/use-cases/azure.md deleted file mode 100644 index 64a68876..00000000 --- a/docs/use-cases/azure.md +++ /dev/null @@ -1,233 +0,0 @@

# Using Flux on Azure

## AKS Cluster Options

It's important to follow some guidelines when installing Flux on AKS.

### CNI and Network Policy

Previously, there has been an issue with Flux and Network Policy on AKS.
([Upstream Azure Issue](https://github.com/Azure/AKS/issues/2031)) ([Flux Issue](https://github.com/fluxcd/flux2/issues/703))
If you ensure your AKS cluster is upgraded, and your Nodes have been restarted with the most recent Node images,
this could resolve Flux reconciliation failures where source-controller is unreachable.
Using `--network-plugin=azure --network-policy=calico` has been tested to work properly.
This issue only affects you if you are using `--network-policy` on AKS, which is not a default option.

!!! warning
    AKS `--network-policy` is currently in Preview

### AAD Pod-Identity

Depending on the features you are interested in using with Flux, you may want to install AAD Pod Identity.
With [AAD Pod-Identity](https://azure.github.io/aad-pod-identity/docs/), we can create Pods that have their own
cloud credentials for accessing Azure services like Azure Container Registry (ACR) and Azure Key Vault (AKV).

If you do not use AAD Pod-Identity, you'll need to manage and store Service Principal credentials
in K8s Secrets, to integrate Flux with other Azure Services.

As a prerequisite, your cluster must have `--enable-managed-identity` configured.

This software can be [installed via Helm](https://azure.github.io/aad-pod-identity/docs/getting-started/installation/)
(unmanaged by Azure).
Use Flux's `HelmRepository` and `HelmRelease` objects to manage the aad-pod-identity installation
from a bootstrap repository and keep it up to date.

!!! note
    As an alternative to Helm, the `--enable-aad-pod-identity` flag for the `az aks create` command is currently in Preview.
    Follow the Azure guide for [Creating an AKS cluster with AAD Pod Identity](https://docs.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity)
    if you would like to enable this feature with the Azure CLI.

### Cluster Creation

The following creates an AKS cluster with some minimal configuration that will work well with Flux:

```sh
az aks create \
  --network-plugin="azure" \
  --network-policy="calico" \
  --enable-managed-identity \
  --enable-pod-identity \
  --name="my-cluster"
```

!!! info
    When working with the Azure CLI, it can help to set a default `location`, `group`, and `acr`.
    See `az configure --help`, `az configure --list-defaults`, and `az configure --defaults key=value`.

## Flux Installation for Azure DevOps

Ensure you can log in to [dev.azure.com](https://dev.azure.com) for your proper organization,
and create a new repository to hold your Flux install and other Kubernetes resources.

Clone the Git repository locally:

```sh
git clone ssh://git@ssh.dev.azure.com/v3/<org>/<project>/my-repository
cd my-repository
```

Create a directory inside the repository:

```sh
mkdir -p ./clusters/my-cluster/flux-system
```

Download the [Flux CLI](../guides/installation.md#install-the-flux-cli) and generate the manifests with:

```sh
flux install \
  --export > ./clusters/my-cluster/flux-system/gotk-components.yaml
```

Commit and push the manifest to the master branch:

```sh
git add -A && git commit -m "add components" && git push
```

Apply the manifests on your cluster:

```sh
kubectl apply -f ./clusters/my-cluster/flux-system/gotk-components.yaml
```

Verify that the controllers have started:

```sh
flux check
```

Create a `GitRepository` object on your cluster by specifying the SSH address of your repo:

```sh
flux create source git flux-system \
  --git-implementation=libgit2 \
  --url=ssh://git@ssh.dev.azure.com/v3/<org>/<project>/<repository> \
  --branch=<branch> \
  --ssh-key-algorithm=rsa \
  --ssh-rsa-bits=4096 \
  --interval=1m
```

The above command will prompt you to add a deploy key to your repository, but Azure DevOps
[does not support repository or org-specific deploy keys](https://developercommunity.visualstudio.com/t/allow-the-creation-of-ssh-deploy-keys-for-vsts-hos/365747).
You may add the deploy key to a user's personal SSH keys, but take note that
revoking the user's access to the repository will also revoke Flux's access.
The better alternative is to create a machine-user whose sole purpose is
to store credentials for automation.
A machine-user's access can also be made read-only or restricted to
specific repositories if this is needed.

!!! note
    Unlike `git`, Flux does not support the
    ["shorter" scp-like syntax for the SSH protocol](https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#_the_ssh_protocol)
    (e.g. `ssh.dev.azure.com:v3`).
    Use the [RFC 3986 compatible syntax](https://tools.ietf.org/html/rfc3986#section-3) instead: `ssh.dev.azure.com/v3`.

If you wish to use Git over HTTPS, then generate a personal access token and supply it as the password:

```sh
flux create source git flux-system \
  --git-implementation=libgit2 \
  --url=https://dev.azure.com/<org>/<project>/_git/<repository> \
  --branch=main \
  --username=git \
  --password=${AZ_PAT_TOKEN} \
  --interval=1m
```

Please consult the [Azure DevOps documentation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=preview-page)
on how to generate personal access tokens for Git repositories.
Azure DevOps PATs always have an expiration date, so be sure to have some process for renewing or updating these tokens.
Similar to the lack of repo-specific deploy keys, a user needs to generate a user-specific PAT.
If you are using a machine-user, you can generate a PAT or simply use the machine-user's password, which does not expire.

Create a `Kustomization` object on your cluster:

```sh
flux create kustomization flux-system \
  --source=flux-system \
  --path="./clusters/my-cluster" \
  --prune=true \
  --interval=10m
```

Export both objects, generate a `kustomization.yaml`, commit and push the manifests to Git:

```sh
flux export source git flux-system \
  > ./clusters/my-cluster/flux-system/gotk-sync.yaml

flux export kustomization flux-system \
  >> ./clusters/my-cluster/flux-system/gotk-sync.yaml

cd ./clusters/my-cluster/flux-system && kustomize create --autodetect

git add -A && git commit -m "add sync manifests" && git push
```

Wait for Flux to reconcile your previous commit with:

```sh
watch flux get kustomization flux-system
```

### Flux Upgrade

To upgrade the Flux components to a newer version, download the latest `flux` binary,
run the install command in your repository root, commit and push the changes:

```sh
flux install \
  --export > ./clusters/my-cluster/flux-system/gotk-components.yaml

git add -A && git commit -m "Upgrade to $(flux -v)" && git push
```

The [source-controller](../components/source/controller.md) will pull the changes on the cluster,
then the [kustomize-controller](../components/kustomize/controller.md)
will perform a rolling update of all Flux components, including itself.

## Helm Repositories on Azure Container Registry

The Flux `HelmRepository` object currently supports
[Chart Repositories](https://helm.sh/docs/topics/chart_repository/)
as well as fetching `HelmCharts` from paths in `GitRepository` sources.

Azure Container Registry has a sub-command ([`az acr helm`](https://docs.microsoft.com/en-us/cli/azure/acr/helm))
for working with ACR-hosted Chart Repositories, but it is deprecated.
If you are using these deprecated Azure Chart Repositories,
you can use Flux `HelmRepository` objects with them.
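For instance, a `HelmRepository` pointing at one of these deprecated ACR-hosted chart repositories could look like the following minimal sketch. It assumes a registry named `myregistry` (classic ACR chart repositories serve their index under `/helm/v1/repo`) and a Secret `acr-helm-auth` with `username` and `password` keys, for example a Service Principal ID and secret; both names are illustrative:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: acr-charts
  namespace: flux-system
spec:
  interval: 30m
  # The classic (deprecated) ACR chart repository endpoint
  url: https://myregistry.azurecr.io/helm/v1/repo
  # Secret with `username` and `password` keys for basic auth
  secretRef:
    name: acr-helm-auth
```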
[Newer ACR Helm documentation](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-helm-repos)
suggests using ACR as an experimental [Helm OCI Registry](https://helm.sh/docs/topics/registries/).
This will not work with Flux, because using Charts from OCI Registries is not yet supported.

## Secrets Management with SOPS and Azure Key Vault

You will need to create an Azure Key Vault and bind a credential such as a Service Principal or Managed Identity to it.
If you want to use Managed Identities, install or enable [AAD Pod Identity](#aad-pod-identity).

Patch kustomize-controller with the proper Azure credentials, so that it may access your Azure Key Vault, and then begin
committing SOPS-encrypted files to the Git repository with the proper Azure Key Vault configuration.

See the [Mozilla SOPS Azure Guide](../guides/mozilla-sops.md#azure) for further detail.

## Image Updates with Azure Container Registry

You will need to create an ACR registry and bind a credential such as a Service Principal or Managed Identity to it.
If you want to use Managed Identities, install or enable [AAD Pod Identity](#aad-pod-identity).

You may need to update your Flux install to include additional components:

```sh
flux install \
  --components-extra="image-reflector-controller,image-automation-controller" \
  --export > ./clusters/my-cluster/flux-system/gotk-components.yaml
```

Follow the [Image Update Automation Guide](../guides/image-update.md) and see the
[ACR specific section](../guides/image-update.md#azure-container-registry) for more details.

Your AKS cluster's configuration can also be updated to
[allow the kubelets to pull images from ACR](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration)
without ImagePullSecrets as an optional, complementary step.

diff --git a/docs/use-cases/gh-actions-manifest-generation.md b/docs/use-cases/gh-actions-manifest-generation.md deleted file mode 100644 index f240ccd3..00000000 --- a/docs/use-cases/gh-actions-manifest-generation.md +++ /dev/null @@ -1,1279 +0,0 @@

# GitHub Actions Manifest Generation

This example implements "build-time" manifest generation on GitHub Actions.

Third-party tools are used to generate YAML manifests in a CI job. The updated YAML manifests are committed and pushed to Git, where `kustomize-controller` finally applies them.

### Background

There are many use cases for manifest generation tools, but Flux v2 no longer permits embedding arbitrary binaries with the Flux machinery to run at apply time.

Flux (kustomize-controller) will apply whatever revision of the manifests is at the latest commit, on any branch it is pointed at. By design, Flux doesn't care about any details of how a commit is generated.

Since ["select latest by build time" image automation][flux2/discussions/802] is deprecated, and since [`.flux.yaml` is also deprecated][flux2/issues/543], some staple workflows are no longer possible without new accommodations from infrastructure.

#### What Should We Do?

We first recommend users [adjust their tagging strategies][Sortable image tags], which is made clear elsewhere in the docs. This is usually a straightforward adjustment, and enables the use of [Image Update Policies][image update guide]; however, this may not be feasible or desired in some cases.

## Use Manifest Generation

Introducing, Manifest Generation with Jsonnet, for [any old app] on GitHub!
- -If you have followed the [Flux bootstrap guide] and only have one `fleet-infra` repository, it is recommended to create a separate repository that represents your application for this use case guide, or clone the repository linked above in order to review these code examples which have already been implemented there. - -### Primary Uses of Flux - -Flux's primary use case for `kustomize-controller` is to apply YAML manifests from the latest `Revision` of an `Artifact`. - -### Security Consideration - -Flux v2 can not be configured to call out to arbitrary binaries that a user might supply with an `InitContainer`, as it was possible to do in Flux v1. - -#### Motivation for this Guide - -In Flux v2 it is assumed if users want to run more than `Kustomize` with `envsubst`, that it will be done outside of Flux; the goal of this guide is to show several common use cases of this pattern in secure ways. - -#### Demonstrated Concepts - -It is intended, finally, to show through this use case, three fundamental ideas for use in CI to accompany Flux automation: - -1. Writing workflow that can commit changes back to the same branch of a working repository. -1. A workflow to commit generated content from one directory into a different branch in the repository. -1. Workflow to commit from any source directory into a target branch on a different repository. - -Readers can interpret this document with adaptations for use with other CI providers, or Git source hosts, or manifest generators. - -Jsonnet is demonstrated with examples presented in sufficient depth that, hopefully, Flux users who are not already familiar with manifest generation or Jsonnet can pick up `kubecfg` and start using it to solve novel and interesting configuration problems. - -### The Choice of GitHub Actions - -There are authentication concerns to address with every CI provider and they also differ by Git provider. - -Given that GitHub Actions are hosted on GitHub, this guide can be streamlined in some ways. We can almost completely skip configuring authentication. The cross-cutting concern is handled by the CI platform, except in our fourth and final example, the *Commit Across Repositories Workflow*. - -From a GitHub Action, as we must have been authenticated to write to a branch, Workflows also can transitively gain write access to the repo safely. - -Mixing and matching from other providers like Bitbucket Cloud, Jenkins, or GitLab will need more attention to these details for auth configurations. GitHub Actions is a platform that is designed to be secure by default. - -## Manifest Generation Examples - -There are several use cases presented. - -* [String Substitution with sed -i] -* [Docker Build and Tag with Version] -* [Jsonnet for YAML Document Rehydration] -* [Commit Across Repositories Workflow] - -In case these examples are too heavy, this short link guide can help you navigate the four main examples. Finally, the code examples we've all been waiting for, the answer to complex `.flux.yaml` configs in Flux v2! 🎉🎁 - -### String Substitution with `sed -i` - -The entry point for these examples begins at `.github/workflows/` in any GitHub source repository where your YAML manifests are stored. - -!!! warning "`GitRepository` source only targets one branch" - While this first example operates on any branch (`branches: ['*']`), each `Kustomization` in Flux only deploys manifests from **one branch or tag** at a time. 
Understanding this is key for managing large Flux deployments and clusters with multiple `Kustomizations` and/or crossing several environments. - -First add this directory if needed in your repositories. Find the example below in context, and read on to understand how it works: [01-manifest-generate.yaml]. - -```yaml -# ./.github/workflows/01-manifest-generate.yaml -name: Manifest Generation -on: - push: - branches: - - '*' - -jobs: - run: - name: Push Git Update - runs-on: ubuntu-latest - steps: - - name: Prepare - id: prep - run: | - VERSION=${GITHUB_SHA::8} - echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ') - echo ::set-output name=VERSION::${VERSION} - - - name: Checkout repo - uses: actions/checkout@v2 - - - name: Update manifests - run: ./update-k8s.sh $GITHUB_SHA - - - name: Commit changes - uses: EndBug/add-and-commit@v7 - with: - add: '.' - message: "[ci skip] deploy from ${{ steps.prep.outputs.VERSION }}" - signoff: true -``` - -In the `Prepare` step, even before the clone, GitHub Actions provides metadata about the commit. Then, `Checkout repo` performs a shallow clone for the build. - -```bash -# excerpt from above - set two outputs named "VERSION" and "BUILD_DATE" -VERSION=${GITHUB_SHA::8} -echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ') -echo ::set-output name=VERSION::${VERSION} -``` - -!!! note "When migrating to Flux v2" - Users will find that [some guidance has changed since Flux v1]. Tagging images with a `GIT_SHA` was a common practice that is no longer supported by Flux's Image Automation. A newer alternative is adding timestamp or build number in [Sortable image tags], preferred by the `image-automation-controller`. - -Next we call out to a shell script `update-k8s.sh` taking one argument, the Git SHA value from GitHub: - -```yaml -# excerpted from above - run a shell script -- name: Update manifests - run: ./update-k8s.sh $GITHUB_SHA -``` - -That script is below. It performs two in-place string substitutions using `sed`. - -```bash -#!/bin/bash - -# update-k8s.sh -set -feu # Usage: $0 # Fails when GIT_SHA is not provided - -GIT_SHA=${1:0:8} -sed -i "s|image: kingdonb/any-old-app:.*|image: kingdonb/any-old-app:$GIT_SHA|" k8s.yml -sed -i "s|GIT_SHA: .*|GIT_SHA: $GIT_SHA|" flux-config/configmap.yaml -``` - -`update-k8s.sh` receives `GITHUB_SHA` that the script trims down to 8 characters. - -Then, `sed -i` runs twice, updating `k8s.yml` and `flux-config/configmap.yaml` which are also provided as examples here. The new SHA value is added twice, once in each file. - -```yaml -# k8s.yml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: any-old-app -spec: - replicas: 1 - selector: - matchLabels: - app: any-old-app - template: - metadata: - labels: - app: any-old-app - spec: - containers: - - image: kingdonb/any-old-app:4f314627 - name: any-old-app ---- -apiVersion: v1 -kind: Service -metadata: - name: any-old-app -spec: - type: ClusterIP - ports: - - name: "any-old-app" - port: 3000 - selector: - app: any-old-app -``` - -The convention of including a `k8s.yml` file in one's application repository is borrowed from [Okteto's Getting Started Guides], as a simplified example. - -The `k8s.yml` file in the application root is not meant to be applied by Flux, but might be a handy template to keep fresh as a developer reference nonetheless. - -The file below, `configmap.yaml`, is placed in a directory `flux-config/` which will be synchronized to the cluster by a `Kustomization` that we will add in the following step. 
- -```yaml -# flux-config/configmap.yaml -apiVersion: v1 -data: - GIT_SHA: 4f314627 -kind: ConfigMap -metadata: - creationTimestamp: null - name: any-old-app-version - namespace: devl -``` - -These are the two files that are re-written in the `sed -i` example above. - -A configmap is an ideal place to write a variable that is needed by any downstream `Kustomization`, for example to use with `envsubst`. - -```yaml ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 -kind: Kustomization -metadata: - name: any-old-app-devl -spec: - interval: 15m0s - path: ./ - prune: true - sourceRef: - kind: GitRepository - name: any-old-app-prod - targetNamespace: prod - validation: client - postBuild: - substituteFrom: - - kind: ConfigMap - name: any-old-app-version ---- -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: GitRepository -metadata: - name: any-old-app-prod - namespace: prod -spec: - interval: 20m0s - ref: - branch: deploy - secretRef: - name: flux-secret - url: ssh://git@github.com/kingdonb/csh-flux -``` - -Now, any downstream `Deployment` in the `Kustomization` can write a `PodSpec` like this one, to reference the image from the latest commit referenced by the `ConfigMap`: - -```yaml -# flux/ some-example-deployment.yaml -spec: - replicas: 1 - selector: - matchLabels: - app: any-old-app - template: - metadata: - labels: - app: any-old-app - spec: - containers: - - image: kingdonb/any-old-app:${GIT_SHA} - name: any-old-app -``` - -Deployment specifications will vary, so adapting this example is left as exercise for the reader. Write it together with a kustomization.yaml, or just add this to a subdirectory anywhere within your Flux Kustomization path. - -### Docker Build and Tag with Version - -Now for another staple workflow: building and pushing an OCI image tag from a Dockerfile in any branch or tag. - -From the Actions marketplace, [Build and push Docker images] provides the heavy lifting in this example. Flux has nothing to do with building images, but we include this still — as some images will need to be built for our use in these examples. - -!!! hint "`ImageRepository` can reflect both branches and tags" - This example builds an image for any branch or tag ref and pushes it to Docker Hub. (Note the omission of `branches: ['*']` that was in the prior example.) GitHub Secrets `DOCKERHUB_USERNAME` and `DOCKERHUB_TOKEN` are used here to authenticate with Docker Hub from within GitHub Actions. - -We again borrow a [Prepare step] from Kustomize Controller's own release workflow. Find the example below in context, [02-docker-build.yaml], or copy it from below. 
- -```yaml -# ./.github/workflows/02-docker-build.yaml -name: Docker Build, Push - -on: - push: - branches: - - '*' - tags-ignore: - - 'release/*' - -jobs: - docker: - runs-on: ubuntu-latest - steps: - - name: Prepare - id: prep - run: | - VERSION=${GITHUB_SHA::8} - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF/refs\/tags\//} - fi - echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ') - echo ::set-output name=VERSION::${VERSION} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push - id: docker_build - uses: docker/build-push-action@v2 - with: - push: true - tags: kingdonb/any-old-app:${{ steps.prep.outputs.VERSION }} - - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} -``` - -The [Docker Login Action] is used here to enable an authenticated image push. - -Any secrets from GitHub Secrets can be used as shown, and support for image registries is explained in the linked README. Add a setting for `registry` if your app uses any private registry, rather than the implicit Docker Hub registry above. - -``` -# for example -with: - registry: registry.cloud.okteto.net -``` - -The image tag `VERSION` comes from the branch or Git tag that triggered the build. Whether that version is a `GIT_SHA` or a Semantic Version, (or anything in between!) the same workflow can be used to build an OCI image as shown here. - -### Jsonnet for YAML Document Rehydration - -As mentioned before, Flux only monitors one branch or tag per Kustomization. - -In the earlier examples, no fixed branch target was specified. Whatever branch triggered the workflow, received the generated YAMLs in the next commit. - -If you created your deployment manifests in any branch, the `deploy` branch or otherwise, it is necessary to add another `Kustomization` and `GitRepository` source to apply manifests from that branch and path in the cluster. - -In application repositories, it is common to maintain an environment branch, a release branch, or both. Some additional Flux objects may be needed for each new environment target with its own branch. Jsonnet can be used for more easily managing heavyweight repetitive boilerplate configuration such as this. - -It is recommended to follow these examples as they are written for better understanding, then later change and adapt them for your own release practices and environments. - -!!! note "`GitRepository` source only targets one branch" - Since Flux uses one branch per Kustomization, to trigger an update we must write to a `deploy` branch or tag. Even when new app images can come from any branch (eg. for Dev environments where any latest commit is to be deployed) the YAML manifests to deploy will be sourced from just one branch. - -It is advisable to protect repository main and release branches with eg. branch policies and review requirements, as through automation, these branches can directly represent the production environment. - -The CI user for this example should be allowed to push directly to the `deploy` branch that Kustomize deploys from; this branch also represents the environment so must be protected in a similar fashion to `release`. - -Only authorized people (and build robots) should be allowed to make writes to a `deploy` branch. 
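To make that concrete, here is a minimal sketch of the extra `GitRepository` and `Kustomization` pair tracking the `deploy` branch. The object names are illustrative; the URL reuses the example repository from this guide, and the `path` assumes the rendered YAML lands in the branch root:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: any-old-app-deploy
  namespace: flux-system
spec:
  interval: 1m0s
  ref:
    branch: deploy  # Flux applies manifests from this branch only
  url: https://github.com/kingdonb/any_old_app
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: any-old-app-deploy
  namespace: flux-system
spec:
  interval: 10m0s
  path: ./
  prune: true
  sourceRef:
    kind: GitRepository
    name: any-old-app-deploy
```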
- -#### Jsonnet Render Action - -In this example, the outputted YAML manifests, (on successful completion of the Jsonnet render step,) are staged on the `deploy` branch, then committed and pushed. - -The latest commit on the `deploy` branch is reconciled into the cluster by another `Kustomization` that is omitted here, as it is assumed that users who have read this far already added this in the previous examples. - -You may find the example below in context, [03-release-manifests.yaml], or simply copy it from below. - -```yaml -# ./.github/workflows/03-release-manifests.yaml -name: Build jsonnet -on: - push: - tags: ['release/*'] - branches: ['release'] - -jobs: - run: - name: jsonnet push - runs-on: ubuntu-latest - steps: - - name: Prepare - id: prep - run: | - VERSION=${GITHUB_SHA::8} - if [[ $GITHUB_REF == refs/tags/release/* ]]; then - VERSION=${GITHUB_REF/refs\/tags\/release\//} - fi - echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ') - echo ::set-output name=VERSION::${VERSION} - - - name: Checkout repo - uses: actions/checkout@v2 - - - name: Setup kubecfg CLI - uses: kingdonb/kubecfg/action@main - - - name: kubecfg show - run: kubecfg show manifests/example.jsonnet > output/production.yaml - - - name: Prepare target branch - run: ./ci/rake.sh deploy - - - name: Commit changes - uses: EndBug/add-and-commit@v7 - with: - add: 'production.yaml' - branch: deploy - message: "[ci skip] from ${{ steps.prep.outputs.VERSION }}" - signoff: true -``` - -We add three new steps in this example: - -```yaml -# excerpted from above - workflow steps 3, 4, and 5 -- name: Setup kubecfg CLI - uses: kingdonb/kubecfg/action@main - -- name: kubecfg show - run: kubecfg show manifests/example.jsonnet > output/production.yaml - -- name: Prepare target branch - run: ./ci/rake.sh deploy -``` - -While the remaining examples will be written to depend on `kubecfg`, some use cases may prefer to use pure Jsonnet only as it is sandboxed and therefore safer. We plan to use the `kubecfg` capability to take input from other sources, like variables and references, but also network-driven imports and functions. - -```yaml -# from above - substitute these steps in 03-release-manifests.yaml, -# between "Checkout repo" and "Commit changes" to use plain Jsonnet instead of kubecfg -- id: jsonnet-render - uses: alexdglover/jsonnet-render@v1 - with: - file: manifests/example.jsonnet - output_file: output/production.yaml - params: dryrun=true;env=prod - -- name: Prepare target branch - run: ./ci/rake.sh deploy -``` - -The `jsonnet-render` step is borrowed from another source, again find it on [GitHub Actions Marketplace][actions/jsonnet-render] for more information. For Tanka users, there is also [letsbuilders/tanka-action] which describes itself as heavily inspired by `jsonnet-render`. - -!!! note "The `EndBug/add-and-commit` action is used again" - This time, with the help of `rake.sh`, our change is staged into a different target branch. This is the same `deploy` branch, regardless of which branch or tag the build comes from; any configured push event can trigger this workflow to trigger an update to the deploy branch. - -```bash -#!/bin/bash - -# ./ci/rake.sh -set -feux # Usage: $0 # Fails when BRANCH is not provided -BRANCH=$1 - -# The output/ directory is listed in .gitignore, where jsonnet rendered output. 

pushd output

# Fetch git branch 'deploy' and run `git checkout deploy`
/usr/bin/git -c protocol.version=2 fetch \
  --no-tags --prune --progress --no-recurse-submodules \
  --depth=1 origin $BRANCH
git checkout $BRANCH --

# Prepare the output to commit by itself in the deploy branch's root directory.
mv -f ./production.yaml ../   # Overwrite any existing files (no garbage collection here)
git diff

# All done (the commit will take place in the next action!)
popd
```

Give this file `chmod +x` before adding and committing; tailor this workflow to your needs. We render from a file, `manifests/example.jsonnet`; it can be anything. The output is a single K8s YAML file, `production.yaml`.

```yaml
- name: Commit changes
  uses: EndBug/add-and-commit@v7
  with:
    add: 'production.yaml'
    branch: deploy
    message: "[ci skip] from ${{ steps.prep.outputs.VERSION }}"
    signoff: true
```

This is [Add & Commit] with a `branch` option to set the target branch. We've added a `signoff` option here as well, to demonstrate another feature of this GitHub Action. There are many ways to use this workflow step. The link provides more information.

The examples that follow can be copied and pasted into `manifests/example.jsonnet`, then committed to the `release` branch and pushed to GitHub in order to execute them.

Pushing this to your repository will fail at first, unless and until a `deploy` branch is created.

Run `git checkout --orphan deploy` to create a new empty HEAD in your repo.

Run `git reset` and `git stash` to dismiss any files that were staged, then run `git commit --allow-empty` to create an initial empty commit on the branch.

Now you can copy and run these commands to seed the empty branch:

```
#- Add a basic kustomization.yaml file
cat <<EOF > kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- production.yaml
EOF

#- Add a gitignore so the output directory is kept, but its contents ignored
cat <<EOF > .gitignore
/output/**
!/output/.keep
EOF

#- Add the .keep file
touch output/.keep

#- Push these files into the empty branch, and go back to the main branch
git add output/.keep .gitignore kustomization.yaml
git commit -m'seed deploy kustomization.yaml for prod'
git push -u origin deploy
git checkout main
```

On the main branch, create a `.gitignore` and `/output/.keep` for that branch too. We need to make sure that the `output/` directory is present for writing whenever the Jsonnet workflow begins. The `rake.sh` script sweeps files from the output into the root directory of the `deploy` branch.

Now run `git checkout -b release; git push -u origin release` to trigger this action and see it working! 🤞🤞

You may need to make sure that GitHub Actions are enabled on your repository before this will work.

Read onward to see some basic as well as more advanced uses of `kubecfg`.

##### Jsonnet `ConfigMap` with `envsubst`

The next example "enforces" (copies) a value from a `ConfigMap` in one namespace into many namespaces. This is done so that Kustomizations for each namespace can maintain similar config data in their reconciliations while staying DRY, with some configurations that reach across namespace boundaries.

With Jsonnet, Kustomize, and the Kustomization Controller's `postBuild` which uses `envsubst`, there are usually a handful of different ways to accomplish the same task.

This example demonstrates a feature called `ext_vars` or [External Variables] in the Jsonnet `stdlib`.

These examples assume that nothing prevents `production.yaml` from writing resources across multiple namespaces (no such protection has been presented here). Depending on your environment, this may be difficult or impossible to achieve.

In order to permit a Kustomization to write into different namespaces, some RBAC configuration may be required.

When you write a `Kustomization` to apply this, be sure you are aware of whether or not you have set `targetNamespace` on the Flux `Kustomization`, as it may override any namespace settings in the Jsonnet output. You may note similar configuration in the `kustomization.yaml` we wrote into the deploy branch as described above, in the step for **Jsonnet Render Action**.

##### External Variable Substitution

```javascript
# Any Old App Jsonnet example 0.10.1 - manifests/example.jsonnet

local kube = import 'https://github.com/bitnami-labs/kube-libsonnet/raw/73bf12745b86718083df402e89c6c903edd327d2/kube.libsonnet';

local example = import 'example.libsonnet';

{
  version_configmap: kube.ConfigMap('any-old-app-version') {
    metadata+: {
      namespace: 'prod',
    },
    data+: {
      VERSION: std.extVar('VERSION'),
    },
  },
  flux_kustomization: example.kustomization('any-old-app-prod') {
    metadata+: {
      namespace: 'flux-system',
    },
    spec+: {
      path: './flux-config/',
      postBuild+: {
        substituteFrom+: [
          {
            kind: 'ConfigMap',
            name: 'any-old-app-version',
          },
        ],
      },
    },
  },
  flux_gitrepository: example.gitrepository('any-old-app-prod') {
    metadata+: {
      namespace: 'flux-system',
    },
    spec+: {
      url: 'https://github.com/kingdonb/any_old_app',
    },
  },
}
```

The above Jsonnet declaration, `example.jsonnet`, will not render without its neighbor `example.libsonnet` (which can be found [linked here][example 10.1 library]). That part of the example contains some boilerplate detail not meant to be copied, like the name `any-old-app-prod` and the string `'sops-gpg'` in `decryption.secretRef` (which should be changed to match your environment).

If you visited the linked `example.libsonnet`, you may have noticed definitions for `kustomization` and `gitrepository` that are frankly pretty specific for a library function. They include details you wouldn't expect to find in a vendor library, like a default git repository URL, and a default hardcoded ref to the name of our Source gitrepository.

This is **our library file**, so it can have our own implementation-specific details in it if we want to include them. Now, the power of Jsonnet is visible; we get to decide which configuration needs to be exposed in our main `example.jsonnet` file, and which parameters are defaults provided by the library that can be treated like boilerplate and re-defined however we want.

```javascript
data+: {
  VERSION: std.extVar('VERSION'),
},
```

This is `std.extVar` from `ext_vars`, mentioned earlier. Arrange for the version to be passed in through the GitHub Actions workflow:

```yaml
# adapted from above - 03-release-manifests.yaml
- name: kubecfg show
  run: kubecfg show -V VERSION=${{ steps.prep.outputs.VERSION }} manifests/example.jsonnet > output/production.yaml
```

The neighbor `example.libsonnet` file contains some boring (but necessary) boilerplate, so that `kubecfg` can evaluate this Jsonnet to generate and commit the full Kustomize-ready YAML into a `deploy` branch as specified in the workflow. (The `Kustomization` for this example is provided [from my fleet-infra repo here][any-old-app-deploy-kustomization.yaml].
Personalize and adapt this for use with your own application or manifest generation.)

The values provided are for example only and should be personalized, or restructured/rewritten completely to suit your preferred template values and instances. For more idiomatic examples written recently by actual Jsonnet pros, the [Tanka - Using Jsonnet tutorial] is great, and so is [Tanka - Parameterizing], which I'll call out specifically for the `_config::` object example that is decidedly more elegant than my version of parameter passing.

If you've not used Jsonnet before, you might be wondering about that code example you just read, and that's OK! If you **have** used Jsonnet before and already know what idiomatic Jsonnet looks like, you might be wondering too... you can probably tell I (author, Kingdon) practically haven't ever written a lick of Jsonnet before today.

These examples are going to get progressively more advanced as I learn Jsonnet as I go. At this point I already think it's pretty cool and I barely know how to use it, but I am starting to understand what type of problems people are using it to solve.

ConfigMap values are not treated as secret data, so there is no encryption to contend with; this makes for what seems like a good first example. Jsonnet enthusiasts, please forgive my newness. I am sure that my interpretation of how to write Jsonnet is most likely not optimal or idiomatic.

Above, we showed how to pass in a string from our build pipeline and use it to write back generated Jsonnet manifests into a commit.

##### Make Two Environments

Here's a second example, defining two environments in separate namespaces, instead of just one:

```javascript
# Any Old App Jsonnet example 0.10.2-alpha1 - manifests/example.jsonnet
# Replicate a section of config and change nothing else about it

// ...

{
  version_configmap: kube.ConfigMap('any-old-app-version') {
    metadata+: {
      namespace: 'flux-system',
    },
    data+: {
      VERSION: std.extVar('VERSION'),
    },
  },
  test_flux_kustomization: example.kustomization('any-old-app-test') {
    metadata+: {
      namespace: 'flux-system',
    },
    spec+: {
      path: './flux-config/',
      postBuild+: {
        substituteFrom+: [
          {
            kind: 'ConfigMap',
            name: 'any-old-app-version',
          },
        ],
      },
      targetNamespace: 'test-tenant',
    },
  },
  prod_flux_kustomization: example.kustomization('any-old-app-prod') {
    metadata+: {
      namespace: 'flux-system',
    },
    spec+: {
      path: './flux-config/',
      postBuild+: {
        substituteFrom+: [
          {
            kind: 'ConfigMap',
            name: 'any-old-app-version',
          },
        ],
      },
      targetNamespace: 'prod-tenant',
    },
  },
  flux_gitrepository: example.gitrepository('any-old-app-prod') {
    metadata+: {
      namespace: 'flux-system',
    },
    spec+: {
      url: 'https://github.com/kingdonb/any_old_app',
    },
  },
}
```

In this example, some front-matter was omitted for brevity. Wait, what? (There's nothing brief about this example; it's extra-verbose!)

"I thought Jsonnet was supposed to be DRY." Be gentle; refactoring is a methodical and deliberate process. We simply copied the original single environment into two environments, test and prod, which differ only in name.

In the next example, we will subtly change one of them to be configured differently from the other.

##### Change Something and Refactor

Note the string 'flux-system' only occurs once now, having been factored into a variable `config_ns`.
These are some basic abstractions in Jsonnet that we can use to start to DRY up our source manifests.

Again, practically nothing changes functionally; this still does exactly the same thing. With another refactoring, we can express this manifest more concisely, thanks to a new library function we can invent, named `example.any_old_app`.

```javascript
# Any Old App Jsonnet example 0.10.2-alpha4 - manifests/example.jsonnet
# Make something different between test and prod

// ...

{
  version_configmap: kube.ConfigMap('any-old-app-version') {
    metadata+: {
      namespace: config_ns,
    },
    data+: {
      VERSION: std.extVar('VERSION'),
    },
  },
  test_flux_kustomization: example.any_old_app('test') {
    spec+: {
      prune: true,
    },
  },
  prod_flux_kustomization: example.any_old_app('prod') {
    spec+: {
      prune: false,
    },
  },
  flux_gitrepository: example.gitrepository('any-old-app-prod') {
    metadata+: {
      namespace: config_ns,
    },
    spec+: {
      url: 'https://github.com/kingdonb/any_old_app',
    },
  },
}
```

Two things have changed to make this refactoring of the config differ from the first version. Hopefully you'll notice it's a lot shorter.

Redundant strings have been collapsed into a variable, and more boilerplate has been moved into the library.

This refactored state is perhaps the most obvious to review, and the most clear about its final intent to the reader. Hopefully you noticed the original environments were identical (or, if your eyes glossed over because of the wall of values, you've at least taken my word for it).

But now, these two differ. We're creating two configurations for `any_old_app`, named `test` and `prod`. The test environment has `prune` enabled, and `prod` is set more conservatively to prevent accidental deletions, with a setting of `prune: false`.

Since the two environments should each differ only by the boolean setting of `spec.prune`, we can now pack up and hide away the remainder of the config with the rest of the boilerplate.

Hiding the undifferentiated boilerplate in a library makes it easier to detect and observe this difference in a quick visual review.

Here's the new library function's definition:

```javascript
any_old_app(environment):: self.kustomization('any-old-app-' + environment) {
  metadata+: {
    namespace: 'flux-system',
  },
  spec+: {
    path: './flux-config/',
    postBuild+: {
      substituteFrom+: [
        {
          kind: 'ConfigMap',
          name: 'any-old-app-version',
        },
      ],
    },
    targetNamespace: environment + '-tenant',
  },
},
```

This excerpt is taken from [the 10.2 release version][example 10.2 library excerpt] of `example.libsonnet`, where you can also read the specific definition of `kustomization` that is invoked with the expression `self.kustomization('any-old-app-' + environment)`.

The evolution of this Jsonnet snippet has gone from *unnecessarily verbose* to **perfect redundant clarity**. I say redundant, but I'm actually fine with this exactly the way it is. I think, if nothing further changes, we have already arrived at the best way to express this particular manifest in Jsonnet.

But given that config will undoubtedly have to change as the differing requirements of our development teams and their environments grow, this perfect clarity unfortunately can't last forever in its current form. It will have to scale.

Notice that strings and repeated invocations of `any_old_app` are written with parallel structure and form, but there's nothing explicit linking them.

The object-oriented programmer in me can't help but ask now, "what happens when we need another copy of the environment, this time slightly different from those two? And how about two more after that? (And actually, can we really get by with only ten environments?)" I am inclined to read this repeated structure as a sign that an object cries out, waiting to be recognized, named, and defined (and refactored and defined again).

##### List Comprehension

So get ready for _obfuscated nightmare mode_ (the name we thoughtfully reserved for the [best and final version][example 10.2 jsonnet] of the example), shown below.

```javascript
# Any Old App Jsonnet example 0.10.2 - manifests/example.jsonnet

// This is a simple manifest generation example to demo some simple tasks that
// can be automated through Flux, with Flux configs rehydrated through Jsonnet.

// This example uses kube.libsonnet from Bitnami. There are other
// Kubernetes libraries available, or write your own!
local kube = import 'https://github.com/bitnami-labs/kube-libsonnet/raw/73bf12745b86718083df402e89c6c903edd327d2/kube.libsonnet';

// The declaration below adds configuration to a more verbose base, defined in
// more detail at the neighbor libsonnet file here:
local example = import 'example.libsonnet';
local kubecfg = import 'kubecfg.libsonnet';
local kustomize = import 'kustomize.libsonnet';

local config_ns = 'flux-system';

local flux_config = [
  kube.ConfigMap('any-old-app-version') {
    data+: {
      VERSION: std.extVar('VERSION'),
    },
  },
  example.gitrepository('any-old-app-prod') {
    spec+: {
      url: 'https://github.com/kingdonb/any_old_app',
    },
  },
] + kubecfg.parseYaml(importstr 'examples/configMap.yaml');

local kustomization = kustomize.applyList([
  kustomize.namespace(config_ns),
]);

local kustomization_output = std.map(kustomization, flux_config);

{ flux_config: kustomization_output } + {

  local items = ['test', 'prod'],

  joined: {
    [ns + '_flux_kustomization']: {
      data: example.any_old_app(ns) {
        spec+: {
          prune: if ns == 'prod' then false else true,
        },
      },
    }
    for ns in items

    // Credit:
    // https://groups.google.com/g/jsonnet/c/ky6sjYj4UZ0/m/d4lZxWbhAAAJ
    // thanks Dave for showing how to do something like this in Jsonnet
  },
}
```

This is the sixth revision of this example (some have been omitted from the story, but they are [in Git history][examples 0.10.2-all]). I think it's really perfect now. If you're a programmer, I think, this version is perhaps much clearer. That's why I called it _obfuscated nightmare mode_, right? (I'm a programmer, I swear.)

The `examples/configMap.yaml` file can be found [in the 0.10.2 tag][example 10.2 configmap] of `kingdonb/any_old_app`; it is vestigial and does not serve any functional purpose in this example, except for showing how to compose Jsonnet objects with parsed YAML from a file.

You should note that kubecfg's `kubecfg.parseYaml` method always returns an array, even when the `importstr` input file only contains a single YAML document. Jsonnet arrays, like strings, can be easily added together with a familiar `+` operator.

Jsonnet objects can also be added to other objects, composing their fields from smaller objects into larger ones. In the example above, we have added the `flux_config` object to a collection of `AnyOldApp` objects, a list comprehension from our environments.
This is necessary and important because a Jsonnet program or library must always return a single object.

I'm trying to learn Jsonnet as fast as I can; I hope you're still with me, and if not, don't worry. Where did all of this programming come from? (And what's a list comprehension?) It really doesn't matter.

The heavy lifting libraries for this example are from [anguslees/kustomize-libsonnet], which implements some basic primitives of Kustomize in Jsonnet. The YAML parser is provided by [bitnami/kubecfg][kubecfg yaml parser], and the Jsonnet implementations of Kubernetes primitives by [bitnami-labs/kube-libsonnet].

It is a matter of taste whether you consider the first, second, or third example above to be better stylistically; a matter of taste and circumstances, to put a finer point on it. They each have strengths and weaknesses, depending mostly on whatever changes we will have to make to them next.

We can compare these three versions to elucidate the intent of the programmatically most expressive version, which followed the other two. If you're new at this, you may try to explain how these three examples are similar, and also how they differ. Follow the explanation below for added clarity.

If you haven't studied Jsonnet, this last version may daunt you with its complexity. The fact is that YAML is a document format and Jsonnet is a programming language. This complexity is exactly what we came here for; we want our configuration language to be more powerful! Bring on more complex Jsonnet examples!

#### Breaking It Down

We define a configmap and a gitrepository (in Jsonnet), then put it together with another configmap (from plain YAML). That's called `flux_config`.

```javascript
local kustomization = kustomize.applyList([
  kustomize.namespace(config_ns),
]);

local kustomization_output = std.map(kustomization, flux_config);
```

This little ditty (above) has the same effect as a Kustomization based on the following instruction to `kustomize build` (except it's all Jsonnet):

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ${config_ns}
```

... setting the namespace for all objects in `flux_config` to the value of `config_ns`.

Next, we join it together with a list comprehension (at least I think that's what this is called):

```javascript
local items = ['test', 'prod'],

joined: {
  [ns + '_flux_kustomization']: {
    data: example.any_old_app(ns) {
      spec+: {
        prune: if ns == 'prod' then false else true,
      },
    },
  }
  for ns in items
},
```

Two `any_old_app` templates are invoked programmatically, with different properties and names, and in target namespaces that are based on the environment names. They go on the end of the document list, and Jsonnet renders them alongside the others, in various namespaces.

This is the same technique as in `01-manifest-generate.yaml`, only this time with Jsonnet and `kubecfg` instead of `sed` (and what a difference it makes!).

This is the foundation for some real release machinery for your applications; this is not just a bunch of shell scripts. Whenever any commit hits the `release` branch, or when any tag in the form `release/*` is pushed, the repo is configured to push generated manifest changes to a `deploy` branch.

This behavior is self-contained within the example `any_old_app` repository in these examples.

We can use GitHub Actions and Jsonnet to populate parameters through ConfigMap values or with `extVars`, and at the same time apply `Kustomization` and `GitRepository` as new sync infrastructure for the `deploy` branch, with dependencies on those ConfigMaps. The `Kustomization` refers to the configmap and makes the `VERSION` or `GIT_SHA` variable available as a `postBuild` substitution, with values pulled from that same configmap we just applied.

Later, we can repeat this process with a SOPS encrypted secret.

The process is not very different; though some of the boilerplate is longer, we've already learned how to pack boilerplate away. Copying and renaming encrypted secrets within the same cluster is possible wherever cluster operators are permitted to both decrypt and encrypt them with the decryption provider.

A credential at `spec.decryption.secretRef` holds the key for decryption. Without additional configuration, secrets can usually be copied freely around the cluster, as it is possible to decrypt them freely anywhere the decryption keys are made available.

##### Copy `ConfigMap`s

Assume that each namespace will later be separately configured as its own tenant, and that each tenant performs its own git reconciliation within the tenant namespace. That config is out of scope for this example. We are only interested in briefly demonstrating some Jsonnet use cases here.

The app version to install is maintained in a `ConfigMap` in each namespace based on our own decision logic. This can be implemented as a human operator who goes in and updates this variable's value before release time.

This Jsonnet, [another example.jsonnet][example 10.3 jsonnet], creates namespaces from a list and injects a `ConfigMap` into each one.

```javascript
# Any Old App Jsonnet example 0.10.3 - manifests/example.jsonnet

// ...

local release_config = kube.ConfigMap('any-old-app-version');
local namespace_list = ['prod', 'stg', 'qa', 'uat', 'dev'];

local release_version = '0.10.3';
local latest_candidate = '0.10.3-alpha1';

{
  [ns + '_tenant']: {
    [ns + '_namespace']: {
      namespace: kube.Namespace(ns),
    },
    [ns + '_configmap']: {
      version_data: release_config {
        metadata+: {
          namespace: ns,
        },
        data+: {
          VERSION: if ns == 'prod' || ns == 'stg' then release_version else latest_candidate,
        },
      },
    },
  }
  for ns in namespace_list
}
```

In this example, we have set up three additional namespaces and assumed that a Flux Kustomization is provided some other way. The deploy configuration of all five environments is maintained here, in a single deploy config.

Imagine that two policies should exist for promoting releases into environments. The environments for `dev`elopment, `U`ser `A`cceptance `T`esting (`uat`), and `Q`uality `A`ssurance (`qa`) can all be primed with the latest release candidate build at any given time.

This is perhaps an excessive amount of formality for an open source or cloud-native project, though readers working in regulated environments may recognize this familiar pattern.

This example may fail to apply with the recommended validations enabled, with errors that you can review by running `flux get kustomization` in your flux namespace, like these:

```
validation failed: namespace/dev created (server dry run)
namespace/prod created (server dry run)
...
Error from server (NotFound): error when creating "14f54b89-2456-4c15-862e-34670dfcda79.yaml": namespaces "dev" not found
Error from server (NotFound): error when creating "14f54b89-2456-4c15-862e-34670dfcda79.yaml": namespaces "prod" not found
```

As you can perhaps see, the problem is that objects within a namespace cannot be validated before the namespace itself is created. You can temporarily disable the validation by adding the setting `validation: none` to your Flux Kustomization to get past this error.

In the deployment configuration above, both `prod` and staging (`stg`) are kept in sync with the latest release (not pre-release).

Left as an exercise for the reader: is it possible to supplement this configuration with a Flagger canary, so that updates to the production config can be manually verified in the staging environment before they are promoted into Production?

(Hint: Look at the [Manual Gating] feature of Flagger.)

##### Copy `Secret`s

This example writes the same image pull secret reference into many `ServiceAccounts`, to provide for the cluster to use the same `imagePullSecret` across several `Deployments` in a namespace. It is a common problem that `jsonnet` can solve quite handily, without repeating the `Secret` name over and over as a string.

Because we have decided to create tenants for each namespace, now is a good time to mention [flux create tenant].

We can take the output of `flux create tenant prod --with-namespace prod --export` and use it to create `manifests/examples/tenant.yaml`. Perhaps in a full implementation, we would create a tenant library function and call it many times to create our tenants.

For this example, you may discard the `Namespace` and/or `ClusterRoleBinding`, as they are not needed. Here, we actually just need a ServiceAccount to patch.

```yaml
---
# manifests/examples/tenant.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    toolkit.fluxcd.io/tenant: prod
  name: prod
  namespace: prod
```

A namespace will be created by our Jsonnet example code instead (or you may comment that line out in the Jsonnet below, if you are already working within a tenant). The ClusterRoleBinding, or a more restrictive RoleBinding, is important for a functioning Flux tenant installation, but it is not needed for this example.

(For more information on multi-tenancy, read the [Flux 2 Multi-Tenancy Guide].)

We make an image pull secret with some docker registry credentials, for the purpose of completing the example. This is just an example; it can be any secret that you want to replicate through several namespaces in the cluster with Jsonnet.

```
kubectl create secret docker-registry prod-docker-pull-secret \
  --namespace=prod \
  --docker-username=youruser --docker-password=secretpassword \
  --docker-email=required@example.org --dry-run=client -oyaml \
  > manifests/examples/sops-image-pull-secret.yaml
sops -e -i manifests/examples/sops-image-pull-secret.yaml
```

If you are not familiar with SOPS encryption, you should complete the [Mozilla SOPS] Flux guide before replicating this example, and internalize all of the concepts explained there.

You will need to have configured your cluster's Kustomization with a decryption provider and decryption keys to enable the inclusion of encrypted secrets in your config repos. It is not safe to write unencrypted secrets into your git repository, and this should be avoided at all costs even if your repository is kept private.
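As a reminder, the decryption settings live on the Flux `Kustomization`; a minimal sketch, assuming the `sops-gpg` key secret from the SOPS guide:

```yaml
# a sketch of the decryption settings, assuming the sops-gpg key from the guide
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: flux-system
  namespace: flux-system
spec:
  # ...omitted for brevity
  decryption:
    provider: sops
    secretRef:
      name: sops-gpg
```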

This final Jsonnet example is presented in context as a working reference in the `any_old_app` repository, once again as [example.jsonnet][example 10.4 jsonnet].

```javascript
# Any Old App Jsonnet example 0.10.4 - manifests/example.jsonnet

local kube = import 'https://github.com/bitnami-labs/kube-libsonnet/raw/73bf12745b86718083df402e89c6c903edd327d2/kube.libsonnet';
local kubecfg = import 'kubecfg.libsonnet';
local kustomize = import 'kustomize.libsonnet';

local tenant =
  kubecfg.parseYaml(importstr 'examples/tenant.yaml');
local pull_secrets =
  kubecfg.parseYaml(importstr 'examples/sops-image-pull-secret.yaml');

local prod_ns = 'prod';
local staging_ns = 'stg';
local image_pull_secret = 'prod-docker-pull-secret';

// Set the Image Pull Secret on each ServiceAccount
local updateConfig(o) = (
  if o.kind == 'ServiceAccount' then o {
    imagePullSecrets: [{ name: image_pull_secret }],
  } else o
);

// Build the prod tenant list: a new Namespace, plus the parsed Secret and tenant objects
local prod_tenant = [
  kube.Namespace(prod_ns),
] + pull_secrets + tenant;

// Prod kustomization - apply the updateConfig
local prod_kustomization = kustomize.applyList([
  updateConfig,
]);

// Stg kustomization - apply the updateConfig and "stg" ns
local staging_kustomization = kustomize.applyList([
  updateConfig,
  kustomize.namespace(staging_ns),
]);

// Include both kustomizations in the Jsonnet object
{
  prod: std.map(prod_kustomization, prod_tenant),
  stg: std.map(staging_kustomization, prod_tenant),
}
```

The `kubecfg.parseYaml` instruction returns a list of Jsonnet objects. Our own Jsonnet closely mirrors the [example provided][anguslees example jsonnet] by anguslees, with a few differences.

The power of kubecfg is well illustrated through this example inspired by [anguslees/kustomize-libsonnet]. We parse several YAML files and make several minor updates to them. It really doesn't matter that one of the documents involved is a secret, or that its data is encrypted by SOPS.

If you are coming to Mozilla SOPS support in Flux v2, having used the SealedSecrets controller before when it was recommended in Flux v1, then you are probably surprised that this works. SOPS does not encrypt secret metadata when used with Flux's Kustomize Controller integration, which makes examples like this one possible.

The ServiceAccount is a part of Flux's `tenant` configuration, and a fundamental concept of Kubernetes RBAC. If this concept is still new to you, read more in the [Kubernetes docs on Using Service Accounts].

The other fundamental concept to understand is a Namespace.

Secrets are namespaced objects, and ordinary users with tenant privileges cannot reach outside of their namespace. If tenants should manage a Flux Kustomization within their own namespace boundaries, then a `sops-gpg` secret must be present in the Namespace with the Kustomization. Cross-namespace secret refs are not supported.

However, any principal with access to read a `sops-gpg` secret can decrypt any data that are encrypted for it.

Each ServiceAccount can list one or more `imagePullSecrets`, and any pod that uses the ServiceAccount will automatically include any pull secrets provided there. By adding the `imagePullSecret` to a ServiceAccount, we streamline including it everywhere that it is needed.

We can apply a list of transformations with `kustomize.applyList`, which provides a list of pass-through mutating functions for Jsonnet to apply to each Jsonnet object; in our case, the `updateConfig` function patches each ServiceAccount with the `imagePullSecret` that we want it to use.

Finally, for staging, we additionally apply `kustomize.namespace` to update all resources to use the `stg` namespace instead of the `prod` namespace. The secret can be copied anywhere we want within the reach of our Flux Kustomization, and since our Flux Kustomization still has `cluster-admin` and local access to the decryption key, there is no obstacle to copying secrets.

#### Handling `Secret`s

Because a `secret` is not safe to store in Git unencrypted, Flux recommends using SOPS to encrypt it.

SOPS will produce a [different data key][sops/issues/315] for each fresh invocation of `sops -e`, producing different cipher data even when the secret content has not changed. This means, unfortunately, that it is not practical for a Manifest Generation routine to implement secret transparency without also granting the CI infrastructure the capability to read secrets.

SOPS stores the metadata required to decrypt each secret in the metadata of the secret, which must be stored unencrypted to allow encrypted secrets to be read by the private key owners.

Secret transparency means that it should be possible for an observer to know when a stored secret has been updated or rotated. Transparency can be achieved in SOPS by running `sops` as an editor, using `sops [encrypted.yaml]`, which decrypts for editing and re-encrypts the secret upon closing the editor, thereby only changing the cipher text when the secret data also changes.

Depending on your access model, this suggestion could be either a complete non-starter or a helpful add-on.

As an example, Secrets could be read from GitHub Secrets during a CI job, then written encrypted into a secret that is pushed to the deploy branch. This implementation provides a basic solution for simple centralized secrets rotation. But as this goes well beyond simple manifest generation, we consider it out of scope for this tutorial; it is mentioned only as an example of a more complex usage scenario for users to consider.

#### Replicate `Secrets` Across Namespaces

When the data of a `secret` is stored in the Git repository, it can be encrypted for safe storage and transmission. SOPS in Kustomize supports encryption of only the `(stringData|data)` fields, not secret `metadata` including `namespace`. This means that secrets within the same repo can be copied freely and decrypted somewhere else, just as long as the `Kustomization` still has access to the SOPS private key.

Because of these properties, copying a SOPS-encrypted secret from one namespace to another within one single Flux tenant is as easy as cloning the YAML manifest and updating the `namespace` field. Compare the SealedSecrets controller, which does not permit this type of copying; SOPS, on the other hand, does not currently prevent it without some attention being paid to RBAC.

Remember to protect your secrets with RBAC! This is not optional when handling secrets as in this example.

#### Protecting `Secrets` from Unauthorized Access

The logical boundary of a secret is any cluster or tenant where the private key is available for decrypting.

This means that any SOPS secret, once encrypted, can be copied anywhere or used as a base for other Kustomizations in the cluster, so long as the Kustomization itself has access to the decryption keys.

It is important to understand that the `sops-gpg` key that is generated in the Flux SOPS guide can be used by any `Kustomization` in the `flux-system` namespace.

It cannot be over-emphasized: if users want secrets to remain secret, the `flux-system` namespace (and indeed the entire cluster itself) must be hardened and protected, managed by qualified cluster admins. It is recommended that changes which could access encrypted secrets be controlled as tightly as is deemed appropriate.

#### More Advanced Secrets Usage

The use of KMS, as opposed to in-cluster GPG keys with SOPS, is left as an exercise for the reader. The basics of KMS with various cloud providers are covered in more depth by the [Mozilla SOPS][using various cloud providers] guide.

Another scenario we considered, but rejected for these examples, requires decrypting and then re-encrypting SOPS secrets for use with the `secretGenerator` feature of Kustomize. This workflow is not supported here, for reasons already explained.

Flux suggests maintaining the only active copy of the decryption key for a cluster inside of that cluster (though there may be a provision for backups, or some alternate keys permitted to decrypt). This arrangement makes such use cases significantly more complicated to explain, and they are beyond the scope of this guide.

For those uses though, additional Workflow Actions are provided:

The [Decrypt SOPS Secrets] action may be useful; it is mentioned here, but no example uses are provided.

The [Sops Binary Installer] action enables more advanced use cases, like encrypting or re-encrypting secrets.

#### Jsonnet Recap

While much of this type of manipulation could be handled in a `Kustomization`'s `postBuild`, via `envsubst`, some configurations are too complicated to handle that way. They can be better handled in CI, where access to additional tools can be provided.

By writing the YAML manifests into a Git commit (the same manifests that `Kustomize` directly applies), they can be saved for posterity, projected out into a new pull request where they can be reviewed before application, or, with the proper safeguards in place, applied immediately through a more directly-driven automation.

With generated YAML that Flux applies in the cluster directly from Git commits, **fui-yoh** - that's GitOps!

### Commit Across Repositories Workflow

Flux will not deploy from pushes on just any branch; GitRepository sources target just one specific branch. Merging to a `staging` branch, for example, can be used to trigger a deployment to a Staging environment.

Manifest generation can be used to solve a broad range of problems, such that even with many examples, this guide could never be totally exhaustive.

This is the final example in this guide.

Here we show 🥁 ... how to replicate the original behavior of Flux v1's image automation! 🤯 🎉

You can put this workflow in your application repo, and target it toward your `fleet-infra` repo.

To replicate the nearest approximation of Flux's "deploy latest image" feature of yesteryear, we use push events to do the job, as we hinted was possible in an earlier example. This can be done without Flux v1's redundant and expensive image pull behavior, which retrieved the build metadata required to order image tags for deployment.

Flux recommends using real version numbers in your image tags, with a canonical ordering.

The alternative is racy and doesn't always guarantee the latest commit will be the one that is deployed, since this behavior depends on the time that each commit is pushed, and even precisely how long the build takes to complete; the difference is fine for Dev environments, but this is not a strategy for Production use cases.

Your app's CI can commit and push YAML manifests (or one manifest for each app) into a separate deploy branch for a `Kustomization` to apply. The deploy branch in a separate repository should be a branch to which the CI user is granted write access.

While there are some issues, this is actually perfect for non-prod deployments, e.g. in a test environment!

In context, find [04-update-fleet-infra.yaml], or simply copy it from below.

```yaml
# ./.github/workflows/04-update-fleet-infra.yaml
name: Update Fleet-Infra
on:
  push:
    branches:
      - 'main'

jobs:
  run:
    name: Push Update
    runs-on: ubuntu-latest
    steps:
      - name: Prepare
        id: prep
        run: |
          VERSION=${GITHUB_SHA::8}
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF/refs\/tags\//}
          fi
          echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
          echo ::set-output name=VERSION::${VERSION}

      - name: Checkout repo
        uses: actions/checkout@v2

      - name: Update manifests
        run: ./update-k8s.sh $GITHUB_SHA

      - name: Push directory to another repository
        uses: cpina/github-action-push-to-another-repository@v1.2
        env:
          API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
        with:
          source-directory: 'flux-config'
          destination-github-username: 'kingdonb'
          destination-repository-name: 'fleet-infra'
          target-branch: 'deploy'
          user-email: kingdon+bot@weave.works
          commit-message: "[ci skip] deploy from ${{ steps.prep.outputs.VERSION }}"
```

This is [Push directory to another repository], which is especially useful because Flux v2 is made to work with more than one GitRepository.

If you must use a mono-repo, consider adding a deploy branch to it! There is no need for branches in the same repo to always share a parent and intersect again at a merge point.

A mono-repo can be counter-productive for performance and will create bottlenecks for Flux, as large commits will take longer to clone, and therefore to reconcile. Ignoring with `.sourceignore` or `spec.ignore` will unfortunately not help much with this. Some limitations can only be overcome by changing the data structure.

The `flux-system` configuration is in the `main` branch of `kingdonb/fleet-infra`, as is the default. We prepared, in advance, an empty commit with no parent in the same repository, on the `deploy` branch, so that this checkout would begin with an empty workspace that `ci/rake.sh` could copy the `output/` of Jsonnet into.

```bash
git checkout --orphan deploy
git reset --hard
git commit --allow-empty -m'initial empty commit'
git push origin deploy
```

This is not technically regressive when compared to the behavior of Flux v1's `fluxcd.io/automated`; it actually avoids image pulls, depending on push events instead to write the latest Git tag, and is externally and functionally identical to how Flux v1 did automation. Little else good can be said about it.

It is a compatibility shim, to bridge the gap for Flux v1 users. If possible, users are encouraged to migrate to using timestamps, build numbers, or semver tags, which are all supported by some [Flux v2 image automation] features that are still in alpha at the time of this writing.
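To give a flavor of those features, here is a sketch of an `ImagePolicy` that orders tags numerically by a timestamp suffix; the API version, repository name, and tag format here are assumptions, drawn from the sortable image tags guidance linked below:

```yaml
# a sketch only - assumes tags in the form main-<sha>-<timestamp>
apiVersion: image.toolkit.fluxcd.io/v1alpha1
kind: ImagePolicy
metadata:
  name: any-old-app
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: any-old-app
  filterTags:
    # capture the trailing timestamp from tags like main-abc12345-1609459200
    pattern: '^main-[a-fA-F0-9]+-(?P<ts>[0-9]+)'
    extract: '$ts'
  policy:
    numerical:
      order: asc
```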

Flux's new [Image Automation Controllers] are the new solution for Production use!

### Adapting for Flux v2

In Flux v2, with `ImagePolicy`, these examples may be adjusted to order tags by their `BUILD_DATE`, by adding more string information to the tags. Besides a build timestamp, we can also add the branch name.

Why not have it all? `${branch}-${sha}-${ts}` is the suggestion given in:

* [Example of a build process with timestamp tagging].

Example formats and alternative strings to use for tagging are at:

* [Sortable image tags to use with automation].

We don't expect you to follow these examples to the letter. They present an evolution, and are meant to show some of the breadth of options that are available, rather than to serve as prescriptive guidance.

If you are on GitHub, and are struggling to get started using GitHub Actions, or maybe are still waiting to make a move on your planned migration from Flux v1, we hope that these GitHub Actions examples can help Flux users to better bridge the gap between both versions.

[flux2/discussions/802]: https://github.com/fluxcd/flux2/discussions/802
[flux2/issues/543]: https://github.com/fluxcd/flux2/issues/543
[image update guide]: /guides/image-update/
[any old app]: https://github.com/kingdonb/any_old_app
[Flux bootstrap guide]: /get-started/
[String Substitution with sed -i]: #string-substitution-with-sed-i
[Docker Build and Tag with Version]: #docker-build-and-tag-with-version
[Jsonnet for YAML Document Rehydration]: #jsonnet-for-yaml-document-rehydration
[Commit Across Repositories Workflow]: #commit-across-repositories-workflow
[01-manifest-generate.yaml]: https://github.com/kingdonb/any_old_app/blob/main/.github/workflows/01-manifest-generate.yaml
[some guidance has changed since Flux v1]: https://github.com/fluxcd/flux2/discussions/802#discussioncomment-320189
[Sortable image tags]: /guides/sortable-image-tags/
[Okteto's Getting Started Guides]: https://github.com/okteto/go-getting-started/blob/master/k8s.yml
[Build and push Docker images]: https://github.com/marketplace/actions/build-and-push-docker-images
[Prepare step]: https://github.com/fluxcd/kustomize-controller/blob/5da1fc043db4a1dc9fd3cf824adc8841b56c2fcd/.github/workflows/release.yml#L17-L25
[02-docker-build.yaml]: https://github.com/kingdonb/any_old_app/blob/main/.github/workflows/02-docker-build.yaml
[Docker Login Action]: https://github.com/marketplace/actions/docker-login
[03-release-manifests.yaml]: https://github.com/kingdonb/any_old_app/blob/main/.github/workflows/03-release-manifests.yaml
[actions/jsonnet-render]: https://github.com/marketplace/actions/jsonnet-render
[letsbuilders/tanka-action]: https://github.com/letsbuilders/tanka-action
[Add & Commit]: https://github.com/marketplace/actions/add-commit
[External Variables]: https://jsonnet.org/ref/stdlib.html#ext_vars
[example 10.1 library]: https://github.com/kingdonb/any_old_app/blob/release/0.10.1/manifests/example.libsonnet
[any-old-app-deploy-kustomization.yaml]: https://github.com/kingdonb/csh-flux/commit/7c3f1e62e2a87a2157bc9a22db4f913cc30dc12e#diff-f6ebc9688433418f0724f3545c96c301f029fd5a15847b824eab04545e057e84
[Tanka - Using Jsonnet tutorial]: https://tanka.dev/tutorial/jsonnet
[Tanka - Parameterizing]: https://tanka.dev/tutorial/parameters
[example 10.2 library excerpt]: https://github.com/kingdonb/any_old_app/blob/release/0.10.2/manifests/example.libsonnet#L47-L63
[example 10.2 jsonnet]: https://github.com/kingdonb/any_old_app/blob/release/0.10.2/manifests/example.jsonnet
[examples 0.10.2-all]: https://github.com/kingdonb/any_old_app/releases?after=0.10.2-alpha5
[example 10.2 configmap]: https://github.com/kingdonb/any_old_app/blob/release/0.10.2/manifests/examples/configMap.yaml
[anguslees/kustomize-libsonnet]: https://github.com/anguslees/kustomize-libsonnet
[kubecfg yaml parser]: https://github.com/bitnami/kubecfg/blob/master/lib/kubecfg.libsonnet#L25
[bitnami-labs/kube-libsonnet]: https://github.com/bitnami-labs/kube-libsonnet
[example 10.3 jsonnet]: https://github.com/kingdonb/any_old_app/blob/release/0.10.3/manifests/example.jsonnet
[Manual Gating]: https://docs.flagger.app/usage/webhooks#manual-gating
[flux create tenant]: /cmd/flux_create_tenant
[Flux 2 Multi-Tenancy Guide]: https://github.com/fluxcd/flux2-multi-tenancy
[Mozilla SOPS]: /guides/mozilla-sops/
[example 10.4 jsonnet]: https://github.com/kingdonb/any_old_app/blob/release/0.10.4/manifests/example.jsonnet
[anguslees example jsonnet]: https://github.com/anguslees/kustomize-libsonnet/blob/master/example.jsonnet
[Kubernetes docs on Using Service Accounts]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-multiple-service-accounts
[sops/issues/315]: https://github.com/mozilla/sops/issues/315
[using various cloud providers]: /guides/mozilla-sops/#using-various-cloud-providers
[Decrypt SOPS Secrets]: https://github.com/marketplace/actions/decrypt-sops-secrets
[Sops Binary Installer]: https://github.com/marketplace/actions/sops-binary-installer
[04-update-fleet-infra.yaml]: https://github.com/kingdonb/any_old_app/blob/main/.github/workflows/04-update-fleet-infra.yaml
[Push directory to another repository]: https://github.com/marketplace/actions/push-directory-to-another-repository
[Flux v2 image automation]: /guides/image-update/
[Image Automation Controllers]: /components/image/controller/
[Example of a build process with timestamp tagging]: /guides/sortable-image-tags/#example-of-a-build-process-with-timestamp-tagging
[Sortable image tags to use with automation]: /guides/sortable-image-tags/#formats-and-alternatives
diff --git a/docs/use-cases/helm.md b/docs/use-cases/helm.md
deleted file mode 100644
index efc0e938..00000000
--- a/docs/use-cases/helm.md
+++ /dev/null
@@ -1,208 +0,0 @@
# Flux for Helm Users

Welcome Helm users!
We think Flux's Helm Controller is the best way to do Helm according to GitOps principles, and we're dedicated to doing what we can to help you feel the same way.

## What Does Flux add to Helm?

Helm 3 was designed with both a client and an SDK, but no running software agents.
This architecture intended anything outside of the client scope to be addressed by other tools in the ecosystem, which could then make use of Helm's SDK.

Built on Kubernetes controller-runtime, Flux's Helm Controller is an example of a mature software agent that uses Helm's SDK to full effect.

Flux's biggest addition to Helm is a structured declaration layer for your releases that automatically gets reconciled to your cluster based on your configured rules:

- While the Helm client commands let you imperatively do things
- Flux Helm Custom Resources let you declare what you want the Helm SDK to do automatically

Additional benefits Flux adds to Helm include:

- Managing / structuring multiple environments
- A control loop, with configurable retry logic
- Automated drift detection between the desired and actual state of your operations
- Automated responses to that drift, including reconciliation, notifications, and unified logging

## Getting Started

The simplest way to explain is by example.
Let's translate imperative Helm commands to Flux Helm Controller Custom Resources:

Helm client:

```console
helm repo add traefik https://helm.traefik.io/traefik
helm install my-traefik traefik/traefik \
  --version 9.18.2 \
  --namespace traefik
```

Flux client:

```console
flux create source helm traefik --url https://helm.traefik.io/traefik
flux create helmrelease my-traefik --chart traefik \
  --source HelmRepository/traefik \
  --chart-version 9.18.2 \
  --namespace traefik
```

The main difference is that the Flux client will not imperatively create resources in the cluster.
Instead these commands create Custom Resource *files*, which are committed to version control as instructions only (note: you may use the `--export` flag to manage any file edits with finer grained control before pushing to version control).
Separately, the Flux Helm Controller software agent automatically reconciles these instructions with the running state of your cluster based on your configured rules.

Let's check out what the Custom Resource instruction files look like:

```yaml
# /flux/boot/traefik/helmrepo.yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: traefik
  namespace: traefik
spec:
  interval: 1m0s
  url: https://helm.traefik.io/traefik
```

```yaml
# /flux/boot/traefik/helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: traefik
  namespace: traefik
spec:
  chart:
    spec:
      chart: traefik
      sourceRef:
        kind: HelmRepository
        name: traefik
      version: 9.18.2
  interval: 1m0s
```

Once these are applied to your cluster, the Flux Helm Controller automatically uses the Helm SDK to do your bidding according to the rules you've set.

Why is this important?
If you or your team has ever collaborated with multiple engineers on one or more apps, and/or in more than one namespace or cluster, you probably have a good idea of how declarative, automatic reconciliation can help solve common problems.
Either way, you may want to check out this [short introduction to GitOps](https://youtu.be/r-upyR-cfDY).

## Customizing Your Release

While Helm charts are usually installable using default configurations, users will often customize charts with their preferred configuration by [overriding the default values](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
The Helm client allows this by imperatively specifying override values with `--set` on the command line, and in additional `--values` files.
For example:

```console
helm install my-traefik traefik/traefik --set service.type=ClusterIP
```

and

```console
helm install my-traefik traefik/traefik --values ci/kind-values.yaml
```

where `ci/kind-values.yaml` contains:

```yaml
service:
  type: ClusterIP
```

The Flux Helm Controller allows these same YAML values overrides on the `HelmRelease` CRD.
These can be declared directly in `spec.values`:

```yaml
spec:
  values:
    service:
      type: ClusterIP
```

or defined in `spec.valuesFrom` as a list of `ConfigMap` and `Secret` resources from which to draw values, allowing reusability and/or greater security.
See the `HelmRelease` CRD [values overrides](https://toolkit.fluxcd.io/components/helm/helmreleases/#values-overrides) documentation for the latest spec.

## Managing Secrets and ConfigMaps

You may manage these `ConfigMap` and `Secret` resources any way you wish, but there are several benefits to managing them with the Flux Kustomize Controller.

It is fairly straightforward to use Kustomize `configMapGenerator` to [trigger a Helm release upgrade every time the encoded values change](https://toolkit.fluxcd.io/guides/helmreleases/#refer-to-values-in-configmaps-generated-with-kustomize).
This common use case is currently solvable in Helm by [adding specially crafted annotations](https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments) to a chart.
The Flux Kustomize Controller method allows you to accomplish this on any chart without additional templated annotations.

You may also use the Kustomize Controller's built-in [Mozilla SOPS integration](https://toolkit.fluxcd.io/components/kustomize/kustomization/#secrets-decryption) to securely manage your encrypted secrets stored in git.
See the [Flux SOPS guide](https://toolkit.fluxcd.io/guides/mozilla-sops/) for step-by-step instructions through various use cases.

## Automatic Release Upgrades

If you want the Helm Controller to automatically upgrade your releases when a new chart version is available in the release's referenced `HelmRepository`, you may specify a SemVer range (e.g. `>=4.0.0 <5.0.0`) instead of a fixed version (a sketch of this appears just before the remediation examples below).

This is useful if your release should use a fixed MAJOR chart version, but you want the latest MINOR or PATCH versions as they become available.

For full SemVer range syntax, see the `Masterminds/semver` [Checking Version Constraints](https://github.com/Masterminds/semver/blob/master/README.md#checking-version-constraints) documentation.

## Automatic Uninstalls and Rollback

The Helm Controller offers an extensive set of configuration options to remediate when a Helm release fails, using [spec.install.remediate](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.InstallRemediation), [spec.upgrade.remediate](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation), [spec.rollback](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.Rollback) and [spec.uninstall](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.Uninstall).
Features include the option to remediate with an uninstall after an upgrade failure, and the option to keep a failed release for debugging purposes when it has run out of retries.
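First, as promised above, a minimal sketch of a `HelmRelease` using a SemVer range rather than a fixed version; the range shown is an assumption, reusing the Traefik chart from the earlier example:

```yaml
# a sketch: the Traefik HelmRelease from above, with a SemVer range
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: traefik
  namespace: traefik
spec:
  chart:
    spec:
      chart: traefik
      sourceRef:
        kind: HelmRepository
        name: traefik
      # any 9.x chart version from 9.18.2 onward, as it becomes available
      version: ">=9.18.2 <10.0.0"
  interval: 1m0s
```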

Here is an example for configuring automated uninstalls (for all available fields, consult the `InstallRemediation` and `Uninstall` API references linked above):

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  install:
    # Remediation configuration for when the Helm install
    # (or subsequent Helm test) action fails
    remediation:
      # Number of retries that should be attempted on failures before
      # bailing; a negative integer equals unlimited retries
      retries: -1
  # Configuration options for the Helm uninstall action
  uninstall:
    timeout: 5m
    disableHooks: false
    keepHistory: false
```

Here is an example of automated rollback configuration (for all available fields, consult the `UpgradeRemediation` and `Rollback` API references linked above):

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  upgrade:
    # Remediation configuration for when a Helm upgrade action fails
    remediation:
      # Number of retries to attempt after a failure;
      # setting this to 0 means no remediation will be
      # attempted
      retries: 5
  # Configuration options for the Helm rollback action
  rollback:
    timeout: 5m
    disableWait: false
    disableHooks: false
    recreate: false
    force: false
    cleanupOnFail: false
```

## Next Steps

- [Guides > Manage Helm Releases](/guides/helmreleases/)
- [Toolkit Components > Helm Controller](/components/helm/controller/)
- [Migration > Migrate to the Helm Controller](/guides/helm-operator-migration/)
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index 0e4e5248..00000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,216 +0,0 @@
site_name: Flux | GitOps Toolkit
site_description: Open and extensible continuous delivery solution for Kubernetes
site_author: The Flux project
site_url: https://toolkit.fluxcd.io

repo_name: fluxcd/flux2
repo_url: https://github.com/fluxcd/flux2
edit_uri: ""

theme:
  name: material
  logo: _files/flux-icon@2x.png
  language: en
  palette:
    primary: blue
    accent: indigo
  custom_dir: mkdocs/

docs_dir: docs

extra_css:
  - _static/custom.css

plugins:
  - search

markdown_extensions:
  - admonition
  - codehilite:
      guess_lang: false
  - footnotes
  - meta
  - pymdownx.caret
  - pymdownx.emoji:
      emoji_generator: !!python/name:materialx.emoji.to_svg
      emoji_index: !!python/name:materialx.emoji.twemoji
  - pymdownx.extra
  - pymdownx.progressbar
  - pymdownx.superfences:
      highlight_code: true
  - pymdownx.tabbed
  - pymdownx.tasklist
  - pymdownx.tilde
  - toc:
      permalink: true

nav:
  - Introduction: index.md
  - Core Concepts: core-concepts/index.md
  - Get Started: get-started/index.md
  - Migration:
    - Migration and Support Timetable: migration/timetable.md
    - Migrate from Flux v1: guides/flux-v1-migration.md
    - Migrate from Flux v1 image update automation: guides/flux-v1-automation-migration.md
    - Migrate from the Helm Operator: guides/helm-operator-migration.md
    - FAQ: guides/faq-migration.md
  - Guides:
    - Installation: guides/installation.md
    - Manage Helm Releases: guides/helmreleases.md
    - Setup Notifications: guides/notifications.md
    - Setup Webhook Receivers: guides/webhook-receivers.md
    - Monitoring with Prometheus: guides/monitoring.md
    - Sealed Secrets: guides/sealed-secrets.md
    - Mozilla SOPS: guides/mozilla-sops.md
    - Automate image updates to Git:
    - Sortable image tags to use with automation: guides/sortable-image-tags.md
  - Use Cases:
    - Azure: use-cases/azure.md
    - GitHub Actions Manifest Generation: use-cases/gh-actions-manifest-generation.md
    - Helm: use-cases/helm.md
  - Toolkit Components:
    - Overview: components/index.md
    - Source Controller:
      - Overview: components/source/controller.md
      - GitRepository CRD: components/source/gitrepositories.md
      - HelmRepository CRD: components/source/helmrepositories.md
      - HelmChart CRD: components/source/helmcharts.md
      - Bucket CRD: components/source/buckets.md
      - Source API Reference: components/source/api.md
    - Kustomize Controller:
      - Overview: components/kustomize/controller.md
      - Kustomization CRD: components/kustomize/kustomization.md
      - Kustomize API Reference: components/kustomize/api.md
    - Helm Controller:
      - Overview: components/helm/controller.md
      - HelmRelease CRD: components/helm/helmreleases.md
      - Helm API Reference: components/helm/api.md
    - Notification Controller:
      - Overview: components/notification/controller.md
      - Event: components/notification/event.md
      - Provider CRD: components/notification/provider.md
      - Alert CRD: components/notification/alert.md
      - Receiver CRD: components/notification/receiver.md
      - Notification API Reference: components/notification/api.md
    - Image Automation Controllers:
      - Overview: components/image/controller.md
      - ImageRepository CRD: components/image/imagerepositories.md
      - ImagePolicy CRD: components/image/imagepolicies.md
      - ImageUpdateAutomation CRD: components/image/imageupdateautomations.md
      - Automation API Reference: components/image/automation-api.md
  - Flux CLI:
    - Overview: cmd/flux.md
    - Bootstrap: cmd/flux_bootstrap.md
    - Bootstrap github: cmd/flux_bootstrap_github.md
    - Bootstrap gitlab: cmd/flux_bootstrap_gitlab.md
    - Check: cmd/flux_check.md
    - Create: cmd/flux_create.md
    - Create kustomization: cmd/flux_create_kustomization.md
    - Create helmrelease: cmd/flux_create_helmrelease.md
    - Create source: cmd/flux_create_source.md
    - Create source git: cmd/flux_create_source_git.md
    - Create source helm: cmd/flux_create_source_helm.md
    - Create source bucket: cmd/flux_create_source_bucket.md
    - Create alert provider: cmd/flux_create_alert-provider.md
    - Create alert: cmd/flux_create_alert.md
    - Create receiver: cmd/flux_create_receiver.md
    - Create image: cmd/flux_create_image.md
    - Create image policy: cmd/flux_create_image_policy.md
    - Create image repository: cmd/flux_create_image_repository.md
    - Create image update: cmd/flux_create_image_update.md
    - Create tenant: cmd/flux_create_tenant.md
    - Create secret: cmd/flux_create_secret.md
    - Create secret git: cmd/flux_create_secret_git.md
    - Create secret helm: cmd/flux_create_secret_helm.md
    - Create secret tls: cmd/flux_create_secret_tls.md
    - Delete: cmd/flux_delete.md
    - Delete kustomization: cmd/flux_delete_kustomization.md
    - Delete helmrelease: cmd/flux_delete_helmrelease.md
    - Delete source: cmd/flux_delete_source.md
    - Delete source git: cmd/flux_delete_source_git.md
    - Delete source helm: cmd/flux_delete_source_helm.md
    - Delete source bucket: cmd/flux_delete_source_bucket.md
    - Delete image: cmd/flux_delete_image.md
    - Delete image policy: cmd/flux_delete_image_policy.md
    - Delete image repository: cmd/flux_delete_image_repository.md
    - Delete image update: cmd/flux_delete_image_update.md
    - Export: cmd/flux_export.md
    - Export kustomization: cmd/flux_export_kustomization.md
    - Export helmrelease: cmd/flux_export_helmrelease.md
    - Export source: cmd/flux_export_source.md
    - Export source git: cmd/flux_export_source_git.md
    - Export source helm: cmd/flux_export_source_helm.md
    - Export source bucket: cmd/flux_export_source_bucket.md
    - Export alert provider: cmd/flux_export_alert-provider.md
    - Export alert: cmd/flux_export_alert.md
    - Export receiver: cmd/flux_export_receiver.md
    - Export image: cmd/flux_export_image.md
    - Export image policy: cmd/flux_export_image_policy.md
    - Export image repository: cmd/flux_export_image_repository.md
    - Export image update: cmd/flux_export_image_update.md
    - Get: cmd/flux_get.md
    - Get all: cmd/flux_get_all.md
    - Get kustomizations: cmd/flux_get_kustomizations.md
    - Get helmreleases: cmd/flux_get_helmreleases.md
    - Get sources: cmd/flux_get_sources.md
    - Get sources all: cmd/flux_get_sources_all.md
    - Get sources git: cmd/flux_get_sources_git.md
    - Get sources helm: cmd/flux_get_sources_helm.md
    - Get sources chart: cmd/flux_get_sources_chart.md
    - Get sources bucket: cmd/flux_get_sources_bucket.md
    - Get alert provider: cmd/flux_get_alert-provider.md
    - Get alerts: cmd/flux_get_alerts.md
    - Get alert providers: cmd/flux_get_alert-providers.md
    - Get receivers: cmd/flux_get_receivers.md
    - Get images: cmd/flux_get_images.md
    - Get images all: cmd/flux_get_images_all.md
    - Get images policy: cmd/flux_get_images_policy.md
    - Get images repository: cmd/flux_get_images_repository.md
    - Get images update: cmd/flux_get_images_update.md
    - Install: cmd/flux_install.md
    - Logs: cmd/flux_logs.md
    - Resume: cmd/flux_resume.md
    - Resume kustomization: cmd/flux_resume_kustomization.md
    - Resume helmrelease: cmd/flux_resume_helmrelease.md
    - Resume source: cmd/flux_resume_source.md
    - Resume source git: cmd/flux_resume_source_git.md
    - Resume source helm: cmd/flux_resume_source_helm.md
    - Resume source chart: cmd/flux_resume_source_chart.md
    - Resume source bucket: cmd/flux_resume_source_bucket.md
    - Resume alert provider: cmd/flux_resume_alert-provider.md
    - Resume alert: cmd/flux_resume_alert.md
    - Resume receiver: cmd/flux_resume_receiver.md
    - Resume image: cmd/flux_resume_image.md
    - Resume image repository: cmd/flux_resume_image_repository.md
    - Resume image update: cmd/flux_resume_image_update.md
    - Suspend: cmd/flux_suspend.md
    - Suspend kustomization: cmd/flux_suspend_kustomization.md
    - Suspend helmrelease: cmd/flux_suspend_helmrelease.md
    - Suspend source: cmd/flux_suspend_source.md
    - Suspend source git: cmd/flux_suspend_source_git.md
    - Suspend source helm: cmd/flux_suspend_source_helm.md
    - Suspend source chart: cmd/flux_suspend_source_chart.md
    - Suspend source bucket: cmd/flux_suspend_source_bucket.md
    - Suspend alert provider: cmd/flux_suspend_alert-provider.md
    - Suspend alert: cmd/flux_suspend_alert.md
    - Suspend receiver: cmd/flux_suspend_receiver.md
    - Suspend image: cmd/flux_suspend_image.md
    - Suspend image repository: cmd/flux_suspend_image_repository.md
    - Suspend image update: cmd/flux_suspend_image_update.md
    - Reconcile: cmd/flux_reconcile.md
    - Reconcile kustomization: cmd/flux_reconcile_kustomization.md
    - Reconcile helmrelease: cmd/flux_reconcile_helmrelease.md
    - Reconcile source: cmd/flux_reconcile_source.md
    - Reconcile source git: cmd/flux_reconcile_source_git.md
    - Reconcile source helm: cmd/flux_reconcile_source_helm.md
    - Reconcile source bucket: cmd/flux_reconcile_source_bucket.md
    - Reconcile image: cmd/flux_reconcile_image.md
    - Reconcile image repository: cmd/flux_reconcile_image_repository.md
    - Reconcile image update: cmd/flux_reconcile_image_update.md
    - Uninstall: cmd/flux_uninstall.md
  - Dev Guides:
    - Watching for source changes: dev-guides/source-watcher.md
    - Advanced debugging: dev-guides/debugging.md
  - Roadmap: roadmap/index.md
  - Contributing: contributing/index.md
  - FAQ: faq/index.md

diff --git a/mkdocs/main.html b/mkdocs/main.html
deleted file mode 100644
index eb0ab453..00000000
--- a/mkdocs/main.html
+++ /dev/null
@@ -1,32 +0,0 @@
{% extends "base.html" %}

{% block extrahead %}

{% if page and page.meta and page.meta.title %}
{% elif page and page.title and not page.is_homepage %}
{% else %}
{% endif %}

{% if page and page.meta and page.meta.title %}
{% elif page and page.title and not page.is_homepage %}
{% else %}
{% endif %}

{% endblock %}