remove content which has moved to f/website

Signed-off-by: Daniel Holbach <daniel@weave.works>
pull/1365/head
Daniel Holbach 4 years ago
parent 5ba3774fd5
commit ec21eedd56

Nine binary image files deleted (previews not shown).

@ -1,22 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="64px" height="64px" viewBox="0 0 64 64" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 56.3 (81716) - https://sketch.com -->
<title>flux-icon</title>
<desc>Created with Sketch.</desc>
<g id="flux-icon" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Group" transform="translate(11.000000, 2.000000)">
<path d="M0.803134615,15.7791346 C-0.246288462,15.0966346 -0.246288462,13.5602885 0.803134615,12.8783654 L20.1819808,0.279519231 C20.7554423,-0.0931730769 21.4944808,-0.0931730769 22.0679423,0.279519231 L41.4473654,12.8783654 C42.4967885,13.5602885 42.4967885,15.0966346 41.4473654,15.7791346 L22.0679423,28.3779808 C21.4944808,28.7506731 20.7554423,28.7506731 20.1819808,28.3779808 L0.803134615,15.7791346 Z" id="Fill-1" fill="#326CE5"></path>
<path d="M24.1851346,18.0023077 L25.5293654,18.0023077 C26.3145577,18.0023077 26.8055192,17.1525 26.4126346,16.4728846 L22.0084038,8.84423077 C21.6160962,8.16461538 20.63475,8.16461538 20.2418654,8.84423077 L15.8376346,16.4728846 C15.4453269,17.1525 15.9357115,18.0023077 16.7209038,18.0023077 L18.0657115,18.0023077 C18.6287885,18.0023077 19.0851346,18.4592308 19.0851346,19.0223077 L19.0851346,27.7298077 L19.9874423,28.3165385 C20.6791731,28.7665385 21.5710962,28.7665385 22.2628269,28.3165385 L23.1651346,27.7298077 L23.1651346,19.0223077 C23.1651346,18.4592308 23.6214808,18.0023077 24.1851346,18.0023077" id="Fill-3" fill="#C1D2F7"></path>
<path d="M27.8390769,34.8375577 L23.1648462,31.7989038 L23.1648462,33.2389038 C24.6902308,33.8919808 26.2588846,34.4008269 27.8390769,34.8375577" id="Fill-5" fill="#326CE5"></path>
<path d="M23.1650769,35.8280192 L23.1650769,37.8495577 C24.7095,38.3209038 26.2723846,38.7080192 27.8191154,39.0893654 C32.8706538,40.3349423 37.6418077,41.5107115 41.4783462,45.3478269 C41.6733462,45.54225 41.8562308,45.7407115 42.0373846,45.93975 C42.4308462,45.1880192 42.2335385,44.1957115 41.4466154,43.6845577 L33.8560385,38.7489808 C32.0133462,38.1409038 30.1360385,37.6759038 28.2806538,37.2189808 C26.5308462,36.7874423 24.8196923,36.3570577 23.1650769,35.8280192" id="Fill-7" fill="#326CE5"></path>
<path d="M19.08525,34.1699423 C18.4304423,33.8318654 17.7854423,33.4689808 17.1629423,33.0489808 L15.4269808,34.1774423 C16.5975577,35.0382115 17.8235192,35.7362885 19.08525,36.3212885 L19.08525,34.1699423 Z" id="Fill-9" fill="#326CE5"></path>
<path d="M24.8941731,40.6051154 C24.3137885,40.4620385 23.7374423,40.3195385 23.1651346,40.1735769 L23.1651346,42.1605 C23.5885962,42.2666538 24.0114808,42.3722308 24.4326346,42.4760769 C29.4841731,43.7210769 34.2553269,44.8968462 38.0924423,48.7339615 C38.0987885,48.7408846 38.1045577,48.7472308 38.1114808,48.7541538 L39.75225,47.6868462 C39.6524423,47.5824231 39.5584038,47.4751154 39.4545577,47.3718462 C35.2384038,43.1551154 29.9791731,41.8587692 24.8941731,40.6051154" id="Fill-11" fill="#326CE5"></path>
<path d="M19.08525,38.9907115 C16.8900577,38.2389808 14.8096731,37.2714808 12.9115962,35.8124423 L11.2119808,36.9178269 C13.6287115,38.9110962 16.3194808,40.1203269 19.08525,41.0168654 L19.08525,38.9907115 Z" id="Fill-13" fill="#326CE5"></path>
<path d="M19.08525,43.3809808 C15.3069808,42.3909808 11.7537115,41.18175 8.71794231,38.5388654 L7.04717308,39.6252115 C10.6125577,42.9102115 14.8540962,44.2832885 19.08525,45.3707885 L19.08525,43.3809808 Z" id="Fill-15" fill="#326CE5"></path>
<path d="M23.1650769,46.3935 C27.1175769,47.4140769 30.8341154,48.6342692 33.9823846,51.4381154 L35.6439231,50.3581154 C31.9654615,46.9000385 27.5514231,45.5194615 23.1650769,44.4048462 L23.1650769,46.3935 Z" id="Fill-17" fill="#326CE5"></path>
<path d="M4.57875,41.2299231 L2.92990385,42.3018462 C2.98759615,42.3612692 3.04009615,42.423 3.09951923,42.4818462 C7.31625,46.6985769 12.5743269,47.9949231 17.6599038,49.2485769 C22.0641346,50.3337692 26.2543269,51.3687692 29.7989423,54.1581923 L31.4893269,53.0591538 C27.4958654,49.6968462 22.7385577,48.5158846 18.1214423,47.3781923 C13.1206731,46.1453077 8.39567308,44.9758846 4.57875,41.2299231" id="Fill-19" fill="#326CE5"></path>
<path d="M1.07555769,44.5060962 C0.883442308,44.3139808 0.702865385,44.1184038 0.524019231,43.9216731 C-0.227711538,44.6745577 -0.139442308,45.9726346 0.80325,46.5853269 L6.50959615,50.2955192 C9.03536538,51.3409038 11.6765192,51.9945577 14.2738269,52.6349423 C18.3284423,53.6341731 22.2019038,54.5924423 25.5578654,56.9157115 L27.2834423,55.7930192 C23.4676731,52.9245577 19.0403654,51.8255192 14.7347885,50.7639808 C9.68382692,49.5189808 4.91267308,48.3432115 1.07555769,44.5060962" id="Fill-21" fill="#326CE5"></path>
<path d="M19.6441154,58.8342692 C20.0243077,59.0188846 20.3998846,59.2133077 20.7691154,59.4221538 C21.2093077,59.5150385 21.6771923,59.4383077 22.0683462,59.1838846 L23.0260385,58.5613846 C19.9493077,56.5035 16.5287308,55.461 13.1196923,54.5927308 L19.6441154,58.8342692 Z" id="Fill-23" fill="#326CE5"></path>
</g>
</g>
</svg>

Ten further binary image files deleted (previews not shown).

@ -1,122 +0,0 @@
@import url("https://fonts.googleapis.com/css?family=Montserrat&display=swap");
body {
font-family: "Montserrat", sans-serif;
}
.md-logo {
width: 40px;
height: 40px;
padding-bottom: 2px;
padding-top: 2px;
}
.md-logo img {
width: 40px;
height: 40px;
}
.md-header, .md-footer-nav {
background-image: linear-gradient(45deg, rgb(0, 150, 225) 0%, rgb(27, 141, 226) 24%, rgb(42, 125, 227) 53%, rgb(53, 112, 227) 78%, rgb(53, 112, 227) 100%);
}
.md-header-nav__title {
font-size: .85rem;
}
.check-bullet {
color:#07bfa5;
background-color: white;
margin-left:-22px;
}
/* Progress bar styling */
.progress-label {
position: absolute;
text-align: center;
font-weight: 700;
width: 100%;
/* remove original styling for thin styling
margin: 0 ! important; */
margin-top: -0.4rem ! important;
line-height: 1.2rem;
white-space: nowrap;
overflow: hidden;
}
.progress-bar {
/*remove original styling for thin styling
height: 1.2rem; */
height: 0.4rem;
float: left;
background: repeating-linear-gradient(
45deg,
rgba(255, 255, 255, 0.2),
rgba(255, 255, 255, 0.2) 10px,
rgba(255, 255, 255, 0.3) 10px,
rgba(255, 255, 255, 0.3) 20px
) #2979ff;
border-radius: 2px;
}
.progress {
display: block;
width: 100%;
/* remove original styling for thin styling
margin: 0.5rem 0;
height: 1.2rem; */
margin-top: 0.9rem;
height: 0.4rem;
background-color: #eeeeee;
position: relative;
border-radius: 2px;
}
.progress-100plus .progress-bar {
background-color: #00c853;
}
.progress-80plus .progress-bar {
background-color: #64dd17;
}
.progress-60plus .progress-bar {
background-color: #fbc02d;
}
.progress-40plus .progress-bar {
background-color: #ff9100;
}
.progress-20plus .progress-bar {
background-color: #ff5252;
}
.progress-0plus .progress-bar {
background-color: #ff1744;
}
/* Custom admonitions */
/* See https://squidfunk.github.io/mkdocs-material/reference/admonitions */
:root {
--md-admonition-icon--heart: url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M14 20.408c-.492.308-.903.546-1.192.709-.153.086-.308.17-.463.252h-.002a.75.75 0 0 1-.686 0 16.709 16.709 0 0 1-.465-.252 31.147 31.147 0 0 1-4.803-3.34C3.8 15.572 1 12.331 1 8.513 1 5.052 3.829 2.5 6.736 2.5 9.03 2.5 10.881 3.726 12 5.605 13.12 3.726 14.97 2.5 17.264 2.5 20.17 2.5 23 5.052 23 8.514c0 3.818-2.801 7.06-5.389 9.262A31.146 31.146 0 0 1 14 20.408z"/></svg>')
}
.md-typeset .admonition.heart,
.md-typeset details.heart {
border-color: rgb(233, 30, 99);
}
.md-typeset .heart > .admonition-title,
.md-typeset .heart > summary {
background-color: rgba(233, 30, 99, 0.1);
}
.md-typeset .heart > .admonition-title::before,
.md-typeset .heart > summary::before {
background-color: rgb(233, 30, 99);
-webkit-mask-image: var(--md-admonition-icon--heart);
mask-image: var(--md-admonition-icon--heart);
}
.timetable-explicit-col-widths th:nth-child(1) { width: 4%; }
.timetable-explicit-col-widths th:nth-child(2) { width: 32%; }
.timetable-explicit-col-widths th:nth-child(3) { width: 32%; }
.timetable-explicit-col-widths th:nth-child(4) { width: 32%; }

@ -1,28 +0,0 @@
# Helm Controller
The Helm Controller is a Kubernetes operator, allowing one to declaratively manage Helm chart
releases with Kubernetes manifests.
![](../../_files/helm-controller.png)
The desired state of a Helm release is described through a Kubernetes Custom Resource named `HelmRelease`.
Based on the creation, mutation or removal of a `HelmRelease` resource in the cluster,
Helm actions are performed by the controller.
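For illustration, a minimal `HelmRelease` might look like the sketch below; the chart name, version range and source reference are placeholder values, and the full API is described in the specification docs linked at the end of this page:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: podinfo
  namespace: default
spec:
  interval: 5m
  chart:
    spec:
      chart: podinfo               # chart name in the referenced repository
      version: ">=4.0.0 <5.0.0"    # semver range; new matching versions trigger upgrades
      sourceRef:
        kind: HelmRepository
        name: podinfo
        namespace: flux-system
  values:
    replicaCount: 2
```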
Features:
- Watches for `HelmRelease` objects and generates `HelmChart` objects
- Supports `HelmChart` artifacts produced from `HelmRepository` and `GitRepository` sources
- Fetches artifacts produced by [source-controller](../source/controller.md) from `HelmChart` objects
- Watches `HelmChart` objects for revision changes (including semver ranges for charts from `HelmRepository` sources)
- Performs automated Helm actions, including Helm tests, rollbacks and uninstalls
- Offers extensive configuration options for automated remediation (rollback, uninstall, retry) on failed Helm install, upgrade or test actions
- Runs Helm install/upgrade in a specific order, taking into account the depends-on relationship defined in a set of `HelmRelease` objects
- Prunes Helm releases removed from cluster (garbage collection)
- Reports Helm release statuses (alerting provided by [notification-controller](../notification/controller.md))
- Built-in Kustomize compatible Helm post renderer, providing support for strategic merge, JSON 6902 and image patches
Links:
- Source code [fluxcd/helm-controller](https://github.com/fluxcd/helm-controller)
- Specification [docs](https://github.com/fluxcd/helm-controller/tree/main/docs/spec)

@ -1,18 +0,0 @@
# Image reflector and automation controllers
The image-reflector-controller and image-automation-controller work together to update a Git
repository when new container images are available.
- The image-reflector-controller scans image repositories and reflects the image metadata in
Kubernetes resources.
- The image-automation-controller updates YAML files based on the latest images scanned, and commits
the changes to a given Git repository.
![](../../_files/image-update-automation.png)
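As a rough sketch, an `ImageRepository` and an `ImagePolicy` could be declared as below; the `image.toolkit.fluxcd.io` API version and the image name are assumptions and may differ between Flux releases, so consult the specification docs linked below:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImageRepository
metadata:
  name: podinfo
  namespace: flux-system
spec:
  image: ghcr.io/stefanprodan/podinfo  # container image to scan for new tags
  interval: 1m
---
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImagePolicy
metadata:
  name: podinfo
  namespace: flux-system
spec:
  imageRepositoryRef:
    name: podinfo
  policy:
    semver:
      range: 5.0.x  # select the latest tag within this semver range
```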
Links:
- Source code [fluxcd/image-reflector-controller](https://github.com/fluxcd/image-reflector-controller)
- Reflector [specification docs](https://github.com/fluxcd/image-reflector-controller/tree/main/docs/spec)
- Source code [fluxcd/image-automation-controller](https://github.com/fluxcd/image-automation-controller)
- Automation [specification docs](https://github.com/fluxcd/image-automation-controller/tree/main/docs/spec)

@ -1,30 +0,0 @@
# GitOps Toolkit components
The GitOps Toolkit is the set of APIs and controllers that make up the
runtime for Flux v2. The APIs comprise Kubernetes custom resources,
which can be created and updated by a cluster user, or by other
automation tooling.
You can use the toolkit to extend Flux, and to build your own systems
for continuous delivery. The [source-watcher
guide](../dev-guides/source-watcher/) is a good place to start.
A reference for each component and API type is linked below.
- [Source Controller](source/controller.md)
- [GitRepository CRD](source/gitrepositories.md)
- [HelmRepository CRD](source/helmrepositories.md)
- [HelmChart CRD](source/helmcharts.md)
- [Bucket CRD](source/buckets.md)
- [Kustomize Controller](kustomize/controller.md)
- [Kustomization CRD](kustomize/kustomization.md)
- [Helm Controller](helm/controller.md)
- [HelmRelease CRD](helm/helmreleases.md)
- [Notification Controller](notification/controller.md)
- [Provider CRD](notification/provider.md)
- [Alert CRD](notification/alert.md)
- [Receiver CRD](notification/receiver.md)
- [Image automation controllers](image/controller.md)
- [ImageRepository CRD](image/imagerepositories.md)
- [ImagePolicy CRD](image/imagepolicies.md)
- [ImageUpdateAutomation CRD](image/imageupdateautomations.md)

@ -1,23 +0,0 @@
# Kustomize Controller
The kustomize-controller is a Kubernetes operator,
specialized in running continuous delivery pipelines for infrastructure and
workloads defined with Kubernetes manifests and assembled with Kustomize.
![](../../_files/kustomize-controller.png)
Features:
- Reconciles the cluster state from multiple sources (provided by source-controller)
- Generates manifests with Kustomize (from plain Kubernetes YAMLs or Kustomize overlays)
- Validates manifests against Kubernetes API
- Impersonates service accounts (multi-tenancy RBAC)
- Health assessment of the deployed workloads
- Runs pipelines in a specific order (depends-on relationship)
- Prunes objects removed from source (garbage collection)
- Reports cluster state changes (alerting provided by notification-controller)
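For example, the depends-on ordering and health assessment features above can be expressed roughly like this (names and paths are placeholders; see the specification docs linked below for the exact fields):
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: apps
  namespace: flux-system
spec:
  interval: 10m
  path: ./apps
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  dependsOn:
    - name: infrastructure   # reconciled only after this Kustomization is ready
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment
      name: podinfo
      namespace: default
```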
Links:
- Source code [fluxcd/kustomize-controller](https://github.com/fluxcd/kustomize-controller)
- Specification [docs](https://github.com/fluxcd/kustomize-controller/tree/main/docs/spec)

@ -1,17 +0,0 @@
# Notification Controller
The Notification Controller is a Kubernetes operator, specialized in handling inbound and outbound events.
![](../../_files/notification-controller.png)
The controller handles events coming from external systems (GitHub, GitLab, Bitbucket, Harbor, Jenkins, etc)
and notifies the GitOps toolkit controllers about source changes.
The controller handles events emitted by the GitOps toolkit controllers (source, kustomize, helm)
and dispatches them to external systems (Slack, Microsoft Teams, Discord, Rocket.Chat)
based on event severity and involved objects.
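As an illustrative sketch, dispatching error events to Slack could be configured with a `Provider` and an `Alert` along these lines; the channel, secret name and event sources are placeholders, so refer to the specification docs linked below:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
  name: slack
  namespace: flux-system
spec:
  type: slack
  channel: general
  secretRef:
    name: slack-webhook-url  # secret containing the webhook address
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
  name: on-call
  namespace: flux-system
spec:
  providerRef:
    name: slack
  eventSeverity: error
  eventSources:
    - kind: Kustomization
      name: '*'
    - kind: HelmRelease
      name: '*'
```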
Links:
- Source code [fluxcd/notification-controller](https://github.com/fluxcd/notification-controller)
- Specification [docs](https://github.com/fluxcd/notification-controller/tree/main/docs/spec)

@ -1,24 +0,0 @@
# Source Controller
The main role of the source management component is to provide a common interface for artifact acquisition.
The source API defines a set of Kubernetes objects that cluster admins and various automated operators can
interact with to offload Git and Helm repository operations to a dedicated controller.
![](../../_files/source-controller.png)
Features:
- Validate source definitions
- Authenticate to sources (SSH, user/password, API token)
- Validate source authenticity (PGP)
- Detect source changes based on update policies (semver)
- Fetch resources on demand and on a schedule
- Package the fetched resources into a well-known format (tar.gz, yaml)
- Make the artifacts addressable by their source identifier (sha, version, ts)
- Make the artifacts available in-cluster to interested 3rd parties
- Notify interested 3rd parties of source changes and availability (status conditions, events, hooks)
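For instance, the semver update policy mentioned above can be expressed on a `GitRepository` roughly as follows (URL and range are placeholders; see the specification docs linked below):
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 5m
  url: https://github.com/stefanprodan/podinfo
  ref:
    semver: ">=4.0.0"  # produce a new artifact when a newer matching tag appears
```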
Links:
- Source code [fluxcd/source-controller](https://github.com/fluxcd/source-controller)
- Specification [docs](https://github.com/fluxcd/source-controller/tree/main/docs/spec)

@ -1 +0,0 @@
../../CONTRIBUTING.md

@ -1,52 +0,0 @@
# Core Concepts
!!! note "Work in progress"
    This document is a work in progress.
These are some core concepts in Flux.
## GitOps
GitOps is a way of managing your infrastructure and applications so that the whole system is described declaratively and version controlled (most likely in a Git repository), with an automated process that ensures that the deployed environment matches the state specified in the repository.
For more information, take a look at ["What is GitOps?"](https://www.gitops.tech/#what-is-gitops).
## Sources
A *Source* defines the origin of the desired state and the requirements to obtain
it (e.g. credentials, version selectors). For example, the latest `1.x` tag
available from a Git repository over SSH.
Sources produce an artifact that is consumed by other Flux elements to perform
actions, like applying the contents of the artifact on the cluster. A source
may be shared by multiple consumers to deduplicate configuration and/or storage.
The origin of the source is checked for changes on a defined interval; if
there is a newer version available that matches the criteria, a new artifact
is produced.
All sources are specified as Custom Resources in a Kubernetes cluster; examples
of sources are `GitRepository`, `HelmRepository` and `Bucket` resources.
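A minimal sketch of such a source, assuming a repository reachable over SSH and a deploy key stored in a Kubernetes secret, could look like this:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: my-app
  namespace: flux-system
spec:
  interval: 1m
  url: ssh://git@github.com/my-org/my-app
  secretRef:
    name: my-app-ssh-key   # secret holding the SSH private key
  ref:
    semver: "1.x"          # follow the latest 1.x tag
```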
For more information, take a look at [the source controller documentation](../components/source/controller.md).
## Reconciliation
Reconciliation refers to ensuring that a given state (e.g. an application running in the cluster, infrastructure) matches a desired state declaratively defined somewhere (e.g. a Git repository). Some examples in Flux:
- HelmRelease reconciliation: ensures the state of the Helm release matches what is defined in the resource, performs a release if this is not the case (including revision changes of a HelmChart resource).
- Bucket reconciliation: downloads and archives the contents of the declared bucket on a given interval and stores this as an artifact, recording the observed revision of the artifact and the artifact itself in the status of the resource (a minimal `Bucket` example follows this list).
- [Kustomization](#kustomization) reconciliation: ensures the state of the application deployed on a cluster matches resources contained in a git repository.
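A minimal `Bucket` source, with placeholder bucket and endpoint values, might be declared like this:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket
metadata:
  name: my-config
  namespace: flux-system
spec:
  interval: 5m            # how often the bucket contents are checked
  provider: aws
  bucketName: my-config
  endpoint: s3.amazonaws.com
  region: us-east-1
  secretRef:
    name: my-config-s3-credentials
```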
## Kustomization
The Kustomization represents a local set of Kubernetes resources that Flux is supposed to reconcile in the cluster. The reconciliation runs every minute by default, but the interval can be changed on the Kustomization. If you make any changes to the cluster using `kubectl edit` or `kubectl patch`, they will be promptly reverted; to keep a change, either suspend the reconciliation or push the change to the Git repository.
For more information, take a look at [this documentation](../components/kustomize/kustomization.md).
## Bootstrap
The process of installing the Flux components in a complete GitOps way is called a bootstrap. The manifests are applied to the cluster, a `GitRepository` and `Kustomization` are created for the Flux components, and the manifests are pushed to an existing Git repository (or a new one is created). Flux can manage itself just as it manages other resources.
Bootstrap is done with the `flux` CLI: `flux bootstrap`.
For more information, take a look at [the documentation for the bootstrap command](../cmd/flux_bootstrap.md).

@ -1,42 +0,0 @@
# Advanced debugging
This guide covers more advanced debugging topics such as collecting
runtime profiling data from GitOps Toolkit components.
As a user, this page should normally be a last resort, but a maintainer may
ask you to share a [collected profile](#collecting-a-profile)
to debug e.g. performance issues.
## Pprof
The [GitOps Toolkit components](../components/index.md) serve [`pprof`](https://golang.org/pkg/net/http/pprof/)
runtime profiling data on their metrics HTTP server (default `:8080`).
### Endpoints
| Endpoint | Path |
|-------------|------------------------|
| Index | `/debug/pprof/` |
| CPU profile | `/debug/pprof/profile` |
| Symbol | `/debug/pprof/symbol` |
| Trace | `/debug/pprof/trace` |
### Collecting a profile
To collect a profile, port-forward to the component's metrics endpoint
and collect the data from the [endpoint](#endpoints) of choice:
```console
$ kubectl port-forward -n <namespace> deploy/<component> 8080
$ curl -Sk -v http://localhost:8080/debug/pprof/heap > heap.out
```
The collected profile [can be analyzed using `go`](https://blog.golang.org/pprof),
or shared with one of the maintainers.
## Resource usage
As `kubectl top` gives a limited (and at times inaccurate) overview of
resource usage, it is often better to make use of the Grafana metrics
to gather insights. See [monitoring](../guides/monitoring.md) for a
guide on how to visualize this data with a Grafana dashboard.

@ -1,230 +0,0 @@
# Watching for source changes
In this guide you'll be developing a Kubernetes controller with
[Kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
that subscribes to [GitRepository](../components/source/gitrepositories.md)
events and reacts to revision changes by downloading the artifact produced by
[source-controller](../components/source/controller.md).
## Prerequisites
On your dev machine install the following tools:
* go >= 1.15
* kubebuilder >= 2.3
* kind >= 0.8
* kubectl >= 1.18
* kustomize >= 3.5
* docker >= 19.03
## Install Flux
Create a cluster for testing:
```sh
kind create cluster --name dev
```
Install the Flux CLI:
```sh
curl -s https://fluxcd.io/install.sh | sudo bash
```
Verify that your dev machine satisfies the prerequisites with:
```sh
flux check --pre
```
Install source-controller on the dev cluster:
```sh
flux install \
--namespace=flux-system \
--network-policy=false \
--components=source-controller
```
## Clone the sample controller
You'll be using [fluxcd/source-watcher](https://github.com/fluxcd/source-watcher) as
a template for developing your own controller. The source-watcher was scaffolded with `kubebuilder init`.
Clone the source-watcher repository:
```sh
git clone https://github.com/fluxcd/source-watcher
cd source-watcher
```
Build the controller:
```sh
make
```
## Run the controller
Port forward to source-controller artifacts server:
```sh
kubectl -n flux-system port-forward svc/source-controller 8181:80
```
Export the local address as `SOURCE_HOST`:
```sh
export SOURCE_HOST=localhost:8181
```
Run source-watcher locally:
```sh
make run
```
Create a Git source:
```sh
flux create source git test \
--url=https://github.com/stefanprodan/podinfo \
--tag=4.0.0
```
The source-watcher should log the revision:
```console
New revision detected {"gitrepository": "flux-system/test", "revision": "4.0.0/ab953493ee14c3c9800bda0251e0c507f9741408"}
Extracted tarball into /var/folders/77/3y6x_p2j2g9fspdkzjbm5_s40000gn/T/test292235827: 123 files, 29 dirs (32.603415ms)
Processing files...
```
Change the Git tag:
```sh
flux create source git test \
--url=https://github.com/stefanprodan/podinfo \
--tag=4.0.1
```
The source-watcher should log the new revision:
```console
New revision detected {"gitrepository": "flux-system/test", "revision": "4.0.1/113360052b3153e439a0cf8de76b8e3d2a7bdf27"}
```
The source-controller reports the revision under `GitRepository.Status.Artifact.Revision` in the format: `<branch|tag>/<commit>`.
## How it works
The [GitRepositoryWatcher](https://github.com/fluxcd/source-watcher/blob/main/controllers/gitrepository_watcher.go)
controller does the following:
* subscribes to `GitRepository` events
* detects when the Git revision changes
* downloads and extracts the source artifact
* writes the extracted file names to stdout
```go
// GitRepositoryWatcher watches GitRepository objects for revision changes
type GitRepositoryWatcher struct {
	client.Client
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get

func (r *GitRepositoryWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := logr.FromContext(ctx)

	// get source object
	var repository sourcev1.GitRepository
	if err := r.Get(ctx, req.NamespacedName, &repository); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	log.Info("New revision detected", "revision", repository.Status.Artifact.Revision)

	// create tmp dir
	tmpDir, err := ioutil.TempDir("", repository.Name)
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to create temp dir, error: %w", err)
	}
	defer os.RemoveAll(tmpDir)

	// download and extract artifact
	summary, err := r.fetchArtifact(ctx, repository, tmpDir)
	if err != nil {
		log.Error(err, "unable to fetch artifact")
		return ctrl.Result{}, err
	}
	log.Info(summary)

	// list artifact content
	files, err := ioutil.ReadDir(tmpDir)
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to list files, error: %w", err)
	}

	// do something with the artifact content
	for _, f := range files {
		log.Info("Processing " + f.Name())
	}

	return ctrl.Result{}, nil
}

func (r *GitRepositoryWatcher) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&sourcev1.GitRepository{}, builder.WithPredicates(GitRepositoryRevisionChangePredicate{})).
		Complete(r)
}
```
To add the watcher to an existing project, copy the controller and the revision change predicate to your `controllers` dir:
* [gitrepository_watcher.go](https://github.com/fluxcd/source-watcher/blob/main/controllers/gitrepository_watcher.go)
* [gitrepository_predicate.go](https://github.com/fluxcd/source-watcher/blob/main/controllers/gitrepository_predicate.go)
In your `main.go` init function, register the Source API schema:
```go
import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"

func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	_ = sourcev1.AddToScheme(scheme)
	// +kubebuilder:scaffold:scheme
}
```
Start the controller in the main function:
```go
func main() {
	if err = (&controllers.GitRepositoryWatcher{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "GitRepositoryWatcher")
		os.Exit(1)
	}
}
```
Note that the watcher controller depends on Kubernetes client-go >= 1.20.
Your `go.mod` should require controller-runtime v0.8 or newer:
```go
require (
	k8s.io/apimachinery v0.20.2
	k8s.io/client-go v0.20.2
	sigs.k8s.io/controller-runtime v0.8.3
)
```
That's it! Happy hacking!

@ -1,273 +0,0 @@
# Frequently asked questions
## Kustomize questions
### Are there two Kustomization types?
Yes, the `kustomization.kustomize.toolkit.fluxcd.io` is a Kubernetes
[custom resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
while `kustomization.kustomize.config.k8s.io` is the type used to configure a
[Kustomize overlay](https://kubectl.docs.kubernetes.io/references/kustomize/).
The `kustomization.kustomize.toolkit.fluxcd.io` object refers to a `kustomization.yaml`
file path inside a Git repository or Bucket source.
### How do I use them together?
Assuming an app repository with `./deploy/prod/kustomization.yaml`:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- ingress.yaml
```
Define a source of type `gitrepository.source.toolkit.fluxcd.io`
that pulls changes from the app repository every 5 minutes inside the cluster:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: my-app
  namespace: default
spec:
  interval: 5m
  url: https://github.com/my-org/my-app
  ref:
    branch: main
```
Then define a `kustomization.kustomize.toolkit.fluxcd.io` that uses the `kustomization.yaml`
from `./deploy/prod` to determine which resources to create, update or delete:
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: my-app
  namespace: default
spec:
  interval: 15m
  path: "./deploy/prod"
  prune: true
  sourceRef:
    kind: GitRepository
    name: my-app
```
### What is a Kustomization reconciliation?
In the above example, we pull changes from Git every 5 minutes,
and a new commit will trigger a reconciliation of
all the `Kustomization` objects using that source.
Depending on your configuration, a reconciliation can mean:
* generating a kustomization.yaml file in the specified path
* building the kustomize overlay
* decrypting secrets
* validating the manifests with client or server-side dry-run
* applying changes on the cluster
* health checking of deployed workloads
* garbage collection of resources removed from Git
* issuing events about the reconciliation result
* recording metrics about the reconciliation process
The 15-minute reconciliation interval is the interval at which you want to undo manual changes,
e.g. `kubectl set image deployment/my-app`, by reapplying the latest commit on the cluster.
Note that a reconciliation will override all fields of a Kubernetes object that diverge from Git.
For example, you'll have to omit the `spec.replicas` field from your `Deployments` YAMLs if you
are using a `HorizontalPodAutoscaler` that changes the replicas in-cluster.
### Can I use repositories with plain YAMLs?
Yes, you can specify the path where the Kubernetes manifests are,
and kustomize-controller will generate a `kustomization.yaml` if one doesn't exist.
Assuming an app repository with the following structure:
```
├── deploy
│   └── prod
│       ├── .yamllint.yaml
│       ├── deployment.yaml
│       ├── service.yaml
│       └── ingress.yaml
└── src
```
Create a `GitRepository` definition and exclude all the files that are not Kubernetes manifests:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: my-app
  namespace: default
spec:
  interval: 5m
  url: https://github.com/my-org/my-app
  ref:
    branch: main
  ignore: |
    # exclude all
    /*
    # include deploy dir
    !/deploy
    # exclude non-Kubernetes YAMLs
    /deploy/**/.yamllint.yaml
```
Then create a `Kustomization` definition to reconcile the `./deploy/prod` dir:
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: my-app
  namespace: default
spec:
  interval: 15m
  path: "./deploy/prod"
  prune: true
  sourceRef:
    kind: GitRepository
    name: my-app
```
With the above configuration, source-controller will pull the Kubernetes manifests
from the app repository and kustomize-controller will generate a
`kustomization.yaml` including all the resources found with `./deploy/prod/**/*.yaml`.
The kustomize-controller creates `kustomization.yaml` files similar to:
```sh
cd ./deploy/prod && kustomize create --autodetect --recursive
```
### What is the behavior of Kustomize used by Flux?
We refer to the Kustomize CLI flags here, so that you can replicate the same behavior using the CLI.
The behavior of Kustomize used by the controller is currently configured as follows:
- `--allow_id_changes` is set to false, so it does not change any resource IDs.
- `--enable_kyaml` is disabled by default, so it currently uses `k8sdeps` to process YAMLs.
- `--enable_alpha_plugins` is disabled by default, so it uses only the built-in plugins.
- `--load_restrictor` is set to `LoadRestrictionsNone`, so it allows loading files outside the dir containing `kustomization.yaml`.
- Resource reordering (`--reorder`) is done in `legacy` mode, so the output will have namespaces and cluster roles/role bindings first, CRDs before CRs, and webhooks last.
!!! hint "`kustomization.yaml` validation"
    To validate changes before committing and/or merging, [a validation
    utility script is available](https://github.com/fluxcd/flux2-kustomize-helm-example/blob/main/scripts/validate.sh);
    it runs `kustomize` locally or in CI with the same set of flags as
    the controller and validates the output using `kubeval`.
## Helm questions
### How to debug "not ready" errors?
Misconfiguring the `HelmRelease.spec.chart`, like a typo in the chart name, version or chart source URL
would result in a "HelmChart is not ready" error displayed by:
```console
$ flux get helmreleases --all-namespaces
NAMESPACE NAME READY MESSAGE
default podinfo False HelmChart 'default/default-podinfo' is not ready
```
In order to get to the root cause, first make sure the source, e.g. the `HelmRepository`,
is configured properly and has access to the remote `index.yaml`:
```console
$ flux get sources helm --all-namespaces
NAMESPACE NAME READY MESSAGE
default podinfo False failed to fetch https://stefanprodan.github.io/podinfo2/index.yaml : 404 Not Found
```
If the source is `Ready`, then the error must be caused by the chart,
for example due to an invalid chart name or non-existing version:
```console
$ flux get sources chart --all-namespaces
NAMESPACE NAME READY MESSAGE
default default-podinfo False no chart version found for podinfo-9.0.0
```
### Can I use Flux HelmReleases without GitOps?
Yes, you can install the Flux components directly on a cluster
and manage Helm releases with `kubectl`.
Install the controllers needed for Helm operations with `flux`:
```sh
flux install \
--namespace=flux-system \
--network-policy=false \
--components=source-controller,helm-controller
```
Create a Helm release with `kubectl`:
```sh
cat << EOF | kubectl apply -f -
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: bitnami
  namespace: flux-system
spec:
  interval: 30m
  url: https://charts.bitnami.com/bitnami
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: metrics-server
  namespace: kube-system
spec:
  interval: 60m
  releaseName: metrics-server
  chart:
    spec:
      chart: metrics-server
      version: "^5.x"
      sourceRef:
        kind: HelmRepository
        name: bitnami
        namespace: flux-system
  values:
    apiService:
      create: true
EOF
```
Based on the above definition, Flux will upgrade the release automatically
when Bitnami publishes a new version of the metrics-server chart.
## Flux v1 vs v2 questions
### What are the differences between v1 and v2?
Flux v1 is a monolithic do-it-all operator;
Flux v2 separates the functionalities into specialized controllers, collectively called the GitOps Toolkit.
You can find a detailed comparison of Flux v1 and v2 features in the [migration FAQ](../guides/faq-migration.md).
### How can I migrate from v1 to v2?
The Flux community has created guides and example repositories
to help you migrate to Flux v2:
- [Migrate from Flux v1](https://toolkit.fluxcd.io/guides/flux-v1-migration/)
- [Migrate from `.flux.yaml` and kustomize](https://toolkit.fluxcd.io/guides/flux-v1-migration/#flux-with-kustomize)
- [Migrate from Flux v1 automated container image updates](https://toolkit.fluxcd.io/guides/flux-v1-automation-migration/)
- [How to manage multi-tenant clusters with Flux v2](https://github.com/fluxcd/flux2-multi-tenancy)
- [Migrate from Helm Operator to Flux v2](https://toolkit.fluxcd.io/guides/helm-operator-migration/)
- [How to structure your HelmReleases](https://github.com/fluxcd/flux2-kustomize-helm-example)

@ -1,299 +0,0 @@
# Get started with Flux v2
!!! note "Basic knowledge"
    This guide assumes you have some understanding of the core concepts and have read the introduction to Flux.
    The core concepts used in this guide are [GitOps](../core-concepts/index.md#gitops),
    [Sources](../core-concepts/index.md#sources), [Kustomization](../core-concepts/index.md#kustomization).
In this tutorial, you will deploy an application to a Kubernetes cluster with Flux
and manage the cluster in a complete GitOps manner.
You'll be using a dedicated Git repository, e.g. `fleet-infra`, to manage your Kubernetes clusters.
## Prerequisites
In order to follow the guide, you will need a Kubernetes cluster version 1.16 or newer and kubectl version 1.18 or newer.
For a quick local test, you can use [Kubernetes kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
Any other Kubernetes setup will work as well though.
Flux is installed in a GitOps way and its manifests will be pushed to the repository,
so you will also need a GitHub account and a
[personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line)
that can create repositories (check all permissions under `repo`) to enable Flux to do this.
Export your GitHub personal access token and username:
```sh
export GITHUB_TOKEN=<your-token>
export GITHUB_USER=<your-username>
```
## Install the Flux CLI
To install the latest `flux` release on macOS and Linux using
[Homebrew](https://brew.sh/) run:
```sh
brew install fluxcd/tap/flux
```
Or install `flux` by downloading precompiled binaries using a Bash script:
```sh
curl -s https://fluxcd.io/install.sh | sudo bash
```
The install script downloads the flux binary to `/usr/local/bin`.
If using Arch Linux, install the latest stable version from **AUR** using
either [flux-bin](https://aur.archlinux.org/packages/flux-bin) (pre-built
binary) or [flux-go](https://aur.archlinux.org/packages/flux-go) (locally built
binary).
Binaries for **macOS**, **Windows** and **Linux** AMD64/ARM are available for download on the
[release page](https://github.com/fluxcd/flux2/releases).
To configure your shell to load `flux` [bash completions](../cmd/flux_completion_bash.md) add to your profile:
```sh
# ~/.bashrc or ~/.bash_profile
. <(flux completion bash)
```
[`zsh`](../cmd/flux_completion_zsh.md), [`fish`](../cmd/flux_completion_fish.md), and [`powershell`](../cmd/flux_completion_powershell.md) are also supported with their own sub-commands.
## Install Flux components
Create the cluster using Kubernetes kind or set the kubectl context to an existing cluster:
```sh
kind create cluster
kubectl cluster-info
```
Verify that your staging cluster satisfies the prerequisites with:
```console
$ flux check --pre
► checking prerequisites
✔ kubectl 1.18.3 >=1.18.0
✔ kubernetes 1.18.2 >=1.16.0
✔ prerequisites checks passed
```
Run the bootstrap command:
```sh
flux bootstrap github \
--owner=$GITHUB_USER \
--repository=fleet-infra \
--branch=main \
--path=./clusters/my-cluster \
--personal
```
!!! hint "Multi-arch images"
    The component images are published as [multi-arch container images](https://docs.docker.com/docker-for-mac/multi-arch/)
    with support for Linux `amd64`, `arm64` and `armv7` (e.g. 32bit Raspberry Pi)
    architectures.
The bootstrap command creates a repository if one doesn't exist,
commits the manifests for the Flux components to the default branch at the specified path,
and installs the Flux components.
Then it configures the target cluster to synchronize with the specified path inside the repository.
If you wish to create the repository under a GitHub organization:
```sh
flux bootstrap github \
--owner=<organization> \
--repository=<repo-name> \
--branch=<organization default branch> \
--team=<team1-slug> \
--team=<team2-slug> \
--path=./clusters/my-cluster
```
Example output:
```console
$ flux bootstrap github --owner=gitopsrun --team=devs --repository=fleet-infra --path=./clusters/my-cluster
► connecting to github.com
✔ repository created
✔ devs team access granted
✔ repository cloned
✚ generating manifests
✔ components manifests pushed
► installing components in flux-system namespace
deployment "source-controller" successfully rolled out
deployment "kustomize-controller" successfully rolled out
deployment "helm-controller" successfully rolled out
deployment "notification-controller" successfully rolled out
✔ install completed
► configuring deploy key
✔ deploy key configured
► generating sync manifests
✔ sync manifests pushed
► applying sync manifests
◎ waiting for cluster sync
✔ bootstrap finished
```
If you prefer GitLab, export `GITLAB_TOKEN` env var and
use the command [flux bootstrap gitlab](../guides/installation.md#gitlab-and-gitlab-enterprise).
!!! hint "Idempotency"
    It is safe to run the bootstrap command as many times as you want.
    If the Flux components are present on the cluster,
    the bootstrap command will perform an upgrade if needed.
You can target a specific Flux [version](https://github.com/fluxcd/flux2/releases)
with `flux bootstrap --version=<semver>`.
## Clone the git repository
We are going to drive app deployments in a GitOps manner,
using the Git repository as the desired state for our cluster.
Instead of applying the manifests directly to the cluster,
Flux will apply them for us.
Therefore, we need to clone the repository to our local machine:
```sh
git clone https://github.com/$GITHUB_USER/fleet-infra
cd fleet-infra
```
## Add podinfo repository to Flux
We will be using a public repository [github.com/stefanprodan/podinfo](https://github.com/stefanprodan/podinfo);
podinfo is a tiny web application made with Go.
Create a [GitRepository](../components/source/gitrepositories/)
manifest pointing to the podinfo repository's master branch:
```sh
flux create source git podinfo \
--url=https://github.com/stefanprodan/podinfo \
--branch=master \
--interval=30s \
--export > ./clusters/my-cluster/podinfo-source.yaml
```
The above command generates the following manifest:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 30s
  ref:
    branch: master
  url: https://github.com/stefanprodan/podinfo
```
Commit and push it to the `fleet-infra` repository:
```sh
git add -A && git commit -m "Add podinfo GitRepository"
git push
```
## Deploy podinfo application
We will create a Flux [Kustomization](../components/kustomize/kustomization/) manifest for podinfo.
This configures Flux to build and apply the [kustomize](https://github.com/stefanprodan/podinfo/tree/master/kustomize)
directory located in the podinfo repository.
```sh
flux create kustomization podinfo \
--source=podinfo \
--path="./kustomize" \
--prune=true \
--validation=client \
--interval=5m \
--export > ./clusters/my-cluster/podinfo-kustomization.yaml
```
The above command generates the following manifest:
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 5m0s
  path: ./kustomize
  prune: true
  sourceRef:
    kind: GitRepository
    name: podinfo
  validation: client
```
Commit and push the `Kustomization` manifest to the repository:
```sh
git add -A && git commit -m "Add podinfo Kustomization"
git push
```
The structure of your repository should look like this:
```
fleet-infra
└── clusters/
    └── my-cluster/
        ├── flux-system/
        │   ├── gotk-components.yaml
        │   ├── gotk-sync.yaml
        │   └── kustomization.yaml
        ├── podinfo-kustomization.yaml
        └── podinfo-source.yaml
```
## Watch Flux sync the application
In about 30s the synchronization should start:
```console
$ watch flux get kustomizations
NAME READY MESSAGE
flux-system True Applied revision: main/fc07af652d3168be329539b30a4c3943a7d12dd8
podinfo True Applied revision: master/855f7724be13f6146f61a893851522837ad5b634
```
When the synchronization finishes you can check that podinfo has been deployed on your cluster:
```console
$ kubectl -n default get deployments,services
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/podinfo 2/2 2 2 108s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/podinfo ClusterIP 10.100.149.126 <none> 9898/TCP,9999/TCP 108s
```
!!! tip
From this moment forward, any changes made to the podinfo
Kubernetes manifests in the master branch will be synchronised with your cluster.
If a Kubernetes manifest is removed from the podinfo repository, Flux will remove it from your cluster.
If you delete a `Kustomization` from the fleet-infra repository, Flux will remove all Kubernetes objects that
were previously applied from that `Kustomization`.
If you alter the podinfo deployment using `kubectl edit`, the changes will be reverted to match
the state described in Git. When dealing with an incident, you can pause the reconciliation of a
kustomization with `flux suspend kustomization <name>`. Once the debugging session
is over, you can re-enable the reconciliation with `flux resume kustomization <name>`.
## Multi-cluster Setup
To use Flux to manage more than one cluster or promote deployments from staging to production, take a look at the
two approaches in the repositories listed below.
1. [https://github.com/fluxcd/flux2-kustomize-helm-example](https://github.com/fluxcd/flux2-kustomize-helm-example)
2. [https://github.com/fluxcd/flux2-multi-tenancy](https://github.com/fluxcd/flux2-multi-tenancy)

@ -1,92 +0,0 @@
## Flux v1 vs v2 questions
### What does Flux v2 mean for Flux?
Flux v1 is a monolithic do-it-all operator; Flux v2 separates the functionalities into specialized controllers, collectively called the GitOps Toolkit.
You can install and operate Flux v2 simply using the `flux` command. You can easily pick and choose the functionality you need and extend it to serve your own purposes.
The timeline we are looking at right now is:
1. Put Flux v1 into maintenance mode (no new features being added; bugfixes and CVEs patched only).
1. Continue work on the [Flux v2 roadmap](https://toolkit.fluxcd.io/roadmap/).
1. We will provide transition guides for specific user groups, e.g. users of Flux v1 in read-only mode, or of Helm Operator v1, etc. once the functionality is integrated into Flux v2 and it's deemed "ready".
1. Once the use-cases of Flux v1 are covered, we will continue supporting Flux v1 for 6 months. This will be the transition period before it's considered unsupported.
### Why did you rewrite Flux?
Flux v2 implements its functionality in individual controllers, which allowed us to address long-standing feature requests much more easily.
By basing these controllers on modern Kubernetes tooling (`controller-runtime` libraries), they can be dynamically configured with Kubernetes custom resources either by cluster admins or by other automated tools -- and you get greatly increased observability.
This gave us the opportunity to build Flux v2 with the top Flux v1 feature requests in mind:
- Supporting multiple source Git repositories
- Operational insight through health checks, events and alerts
- Multi-tenancy capabilities, like applying each source repository with its own set of permissions
On top of that, testing the individual components and understanding the codebase becomes a lot easier.
### What are significant new differences between Flux v1 and Flux v2?
#### Reconciliation
Flux v1 | Flux v2
---------------------------------- | ----------------------------------
Limited to a single Git repository | Multiple Git repositories
Declarative config via arguments in the Flux deployment | `GitRepository` custom resource, which produces an artifact which can be reconciled by other controllers
Follow `HEAD` of Git branches | Supports Git branches, pinning on commits and tags, follow SemVer tag ranges
Suspending of reconciliation by downscaling Flux deployment | Reconciliation can be paused per resource by suspending the `GitRepository`
Credentials config via Arguments and/or Secret volume mounts in the Flux pod | Credentials config per `GitRepository` resource: SSH private key, HTTP/S username/password/token, OpenPGP public keys
#### `kustomize` support
Flux v1 | Flux v2
---------------------------------- | ----------------------------------
Declarative config through `.flux.yaml` files in the Git repository | Declarative config through a `Kustomization` custom resource, consuming the artifact from the GitRepository
Manifests are generated via shell exec and then reconciled by `fluxd` | Generation, server-side validation, and reconciliation is handled by a specialised `kustomize-controller`
Reconciliation using the service account of the Flux deployment | Support for service account impersonation
Garbage collection needs cluster role binding for Flux to query the Kubernetes discovery API | Garbage collection needs no cluster role binding or access to Kubernetes discovery API
Support for custom commands and generators executed by fluxd in a POSIX shell | No support for custom commands
#### Helm integration
Flux v1 | Flux v2
---------------------------------- | ----------------------------------
Declarative config in a single Helm custom resource | Declarative config through `HelmRepository`, `GitRepository`, `Bucket`, `HelmChart` and `HelmRelease` custom resources
Chart synchronisation embedded in the operator | Extensive release configuration options, and a reconciliation interval per source
Support for fixed SemVer versions from Helm repositories | Support for SemVer ranges for `HelmChart` resources
Git repository synchronisation on a global interval | Planned support for charts from GitRepository sources
Limited observability via the status object of the HelmRelease resource | Better observability via the HelmRelease status object, Kubernetes events, and notifications
Resource heavy, relatively slow | Better performance
Chart changes from Git sources are determined from Git metadata | Chart changes must be accompanied by a version bump in `Chart.yaml` to produce a new artifact
#### Notifications, webhooks, observability
Flux v1 | Flux v2
---------------------------------- | ----------------------------------
Emits "custom Flux events" to a webhook endpoint | Emits Kubernetes events for included custom resources
RPC endpoint can be configured to a 3rd party solution like FluxCloud to be forwarded as notifications to e.g. Slack | Flux v2 components can be configured to POST the events to a `notification-controller` endpoint. Selective forwarding of POSTed events as notifications using `Provider` and `Alert` custom resources.
Webhook receiver is a side-project | Webhook receiver, handling a wide range of platforms, is included
Unstructured logging | Structured logging for all components
Custom Prometheus metrics | Generic / common `controller-runtime` Prometheus metrics
### Are there any breaking changes?
- In Flux v1 Kustomize support was implemented through `.flux.yaml` files in the Git repository. As indicated in the comparison table above, while this approach worked, we found it to be error-prone and hard to debug. The new [Kustomization CR](https://github.com/fluxcd/kustomize-controller/blob/master/docs/spec/v1alpha1/kustomization.md) should make troubleshooting much easier. Unfortunately we needed to drop the support for custom commands as running arbitrary shell scripts in-cluster poses serious security concerns.
- Helm users: we redesigned the `HelmRelease` API and the automation will work quite differently, so upgrading to `HelmRelease` v2 will require a little work from you, but you will gain more flexibility, better observability and performance.
### Is the GitOps Toolkit related to the GitOps Engine?
In an announcement in August 2019, the expectation was set that the Flux project would integrate the GitOps Engine, then being factored out of ArgoCD. Since the result would be backward-incompatible, it would require a major version bump: Flux v2.
After experimentation and considerable thought, we (the maintainers) have found a path to Flux v2 that we think better serves our vision of GitOps: the GitOps Toolkit. In consequence, we do not now plan to integrate GitOps Engine into Flux.
### How can I get involved?
There are a variety of ways and we look forward to having you on board building the future of GitOps together:
- [Discuss the direction](https://github.com/fluxcd/flux2/discussions) of Flux v2 with us
- Join us in #flux-dev on the [CNCF Slack](https://slack.cncf.io)
- Check out our [contributor docs](https://toolkit.fluxcd.io/contributing/)
- Take a look at the [roadmap for Flux v2](https://toolkit.fluxcd.io/roadmap/)

@ -1,751 +0,0 @@
<!-- -*- fill-column: 100 -*- -->
# Migrating image update automation to Flux v2
"Image Update Automation" is a process in which Flux makes commits to your Git repository when it
detects that there is a new image to be used in a workload (e.g., a Deployment). In Flux v2 this
works quite differently to how it worked in Flux v1. This guide explains the differences and how to
port your cluster configuration from v1 to v2. There is also a [tutorial for using image update
automation with a new cluster][image-update-tute].
## Overview of changes between v1 and v2
In Flux v1, image update automation (from here, just "automation") was built into the Flux daemon,
which scanned everything it found in the cluster and updated the Git repository it was syncing.
In Flux v2,
- automation is controlled with custom resources, not annotations
- ordering images by build time is not supported (there is [a section
below](#how-to-migrate-annotations-to-image-policies) explaining what to do instead)
- the fields to update in files are marked explicitly, rather than inferred from annotations.
#### Automation is now controlled by custom resources
Flux v2 breaks down the functions in Flux v1's daemon into controllers, with each having a specific
area of concern. Automation is now done by two controllers: one which scans image repositories to
find the latest images, and one which uses that information to commit changes to Git
repositories. These are in turn separate from the syncing controllers.
This means that automation in Flux v2 is governed by custom resources. In Flux v1 the daemon scanned
everything, and looked at annotations on the resources to determine what to update. Automation in v2
is more explicit than in v1 -- you have to mention exactly which images you want to be scanned, and
which fields you want to be updated.
A consequence of using custom resources is that with Flux v2 you can have an arbitrary number of
automations, targeting different Git repositories if you wish, and updating different sets of
images. If you run a multitenant cluster, the tenants can define automation in their own namespaces,
for their own Git repositories.
#### Selecting an image is more flexible
The ways in which you choose to select an image have changed. In Flux v1, you generally supply a
filter pattern, and the latest image is the image with the most recent build time out of those
filtered. In Flux v2, you choose an ordering, and separately specify a filter for the tags to
consider. These are dealt with in detail below.
Selecting an image by build time is no longer supported. This is the implicit default in Flux v1. In
Flux v2, you will need to tag images so that they sort in the order you would like -- [see
below](#how-to-use-sortable-image-tags) for how to do this conveniently.
#### Fields to update are explicitly marked
Lastly, in Flux v2 the fields to update in files are marked explicitly. In Flux v1 they are inferred
from the type of the resource, along with the annotations given. The approach in Flux v1 was limited
to the types that had been programmed in, whereas Flux v2 can update any Kubernetes object (and some
files that aren't Kubernetes objects, like `kustomization.yaml`).
## Preparing for migration
It is best to complete migration of your system to _Flux v2 syncing_ first, using the [Flux v1
migration guide][flux-v1-migration]. This will remove Flux v1 from the system, along with its image
automation. You can then reintroduce automation with Flux v2 by following the instructions in this
guide.
It is safe to leave the annotations for Flux v1 in files while you reintroduce automation, because
Flux v2 will ignore them.
To migrate to Flux v2 automation, you will need to do three things:
- make sure you are running the automation controllers; then,
- declare the automation with an `ImageUpdateAutomation` object (see the sketch after this list); and,
- migrate each manifest by translating Flux v1 annotations to Flux v2 `ImageRepository` and
  `ImagePolicy` objects, and putting update markers in the manifest files.
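The sketch below shows roughly what such an `ImageUpdateAutomation` object looks like; it follows the `v1beta1` field layout, all names, paths and the commit author are placeholders, and earlier `v1alpha*` versions of the API use slightly different fields:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImageUpdateAutomation
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 5m
  sourceRef:
    kind: GitRepository
    name: flux-system            # the repository to commit updates to
  git:
    checkout:
      ref:
        branch: main
    commit:
      author:
        name: fluxcdbot
        email: fluxcdbot@users.noreply.github.com
      messageTemplate: "Automated image update"
    push:
      branch: main
  update:
    path: ./clusters/my-cluster  # directory scanned for update markers
    strategy: Setters
```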
### Where to keep `ImageRepository`, `ImagePolicy` and `ImageUpdateAutomation` manifests
This guide assumes you want to manage automation itself via Flux. In the following sections,
manifests for the objects controlling automation are saved in files, committed to Git, and applied
in the cluster with Flux.
A Flux v2 installation will typically have a Git repository structured like this:
```
<...>/flux-system/
    gotk-components.yaml
    gotk-sync.yaml
<...>/app/
    # deployments etc.
```
The `<...>` is the path to a particular cluster's definitions -- this may be simply `.`, or
something like `clusters/my-cluster`. To get the files in the right place, set a variable for this
path:
```bash
$ CLUSTER_PATH=<...> # e.g., "." or "clusters/my-cluster", or ...
$ AUTO_PATH=$CLUSTER_PATH/automation
$ mkdir ./$AUTO_PATH
```
The file `$CLUSTER_PATH/flux-system/gotk-components.yaml` has definitions of all the Flux v2
controllers and custom resource definitions. The file `gotk-sync.yaml` defines a `GitRepository` and
a `Kustomization` which will sync manifests under `$CLUSTER_PATH/`.
To these will be added definitions for automation objects. This guide puts manifest files for
automation in `$CLUSTER_PATH/automation/`, but there is no particular structure required
by Flux. The automation objects do not have to be in the same namespace as the objects to be
updated.
#### Migration on a branch
This guide assumes you will commit changes to the branch that is synced by Flux, as this is the
simplest way to understand.
It may be less disruptive to put migration changes on a branch, then merging when you have completed
the migration. You would need to either change the `GitRepository` to point at the migration branch,
or have separate `GitRepository` and `Kustomization` objects for the migrated parts of your Git
repository. The main thing to avoid is syncing the same objects in two different places; e.g., avoid
having Kustomizations that sync both the unmigrated and migrated application configuration.
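For example, a sketch of a `GitRepository` pointed at a hypothetical migration branch (the branch name, URL and secret name below are placeholders, not values from this guide) might look like:

```yaml
# Sketch only: sync a migration branch instead of the default branch.
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 1m
  url: ssh://git@example.com/org/repo   # placeholder URL
  secretRef:
    name: flux-system
  ref:
    branch: flux-v2-migration           # hypothetical migration branch
```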
### Installing the command-line tool `flux`
The command-line tool `flux` will be used below; see [these instructions][install-cli] for how to
install it.
## Running the automation controllers
The first thing to do is to deploy the automation controllers to your cluster. The best way to
proceed will depend on the approach you took when following the [Flux read-only migration
guide][flux-v1-migration].
- If you used `flux bootstrap` to create a new Git repository, then ported your cluster
configuration to that repository, use [After `flux bootstrap`](#after-flux-bootstrap);
- If you used `flux install` to install the controllers directly, use [After migrating Flux v1 in
place](#after-migrating-flux-v1-in-place);
- If you used `flux install` and exported the configuration to a file, use [After committing a Flux v2 configuration to Git](#after-committing-a-flux-v2-configuration-to-git).
### After `flux bootstrap`
When starting from scratch, you are likely to have used `flux bootstrap`. Rerun the command, and
include the image automation controllers in your starting configuration with the flag
`--components-extra`, [as shown in the installation guide][flux-bootstrap].
This will commit changes to your Git repository and sync them in the cluster. You can then verify that the extra controllers are present and healthy with `flux check`:
```bash
flux check --components-extra=image-reflector-controller,image-automation-controller
```
Now jump to the section [Migrating each manifest to Flux v2](#migrating-each-manifest-to-flux-v2).
### After migrating Flux v1 in place
If you followed the [Flux v1 migration guide][flux-v1-migration], you will already be running some
Flux v2 controllers. The automation controllers are currently considered an optional extra to those,
but are installed and run in much the same way. You may or may not have committed the Flux v2
configuration to your Git repository. If you did, go to the section [After committing a Flux v2
configuration to Git](#after-committing-a-flux-v2-configuration-to-git).
If _not_, you will be installing directly to the cluster:
```bash
$ flux install --components-extra=image-reflector-controller,image-automation-controller
```
It is safe to repeat the installation command, or to run it after using `flux bootstrap`, so long as
you repeat any arguments you supplied the first time.
Now jump ahead to [Migrating each manifest to Flux v2](#migrating-each-manifest-to-flux-v2).
### After committing a Flux v2 configuration to Git
If you added the Flux v2 configuration to your Git repository, assuming it's in the file
`$CLUSTER_PATH/flux-system/gotk-components.yaml` as used in the guide, use `flux install` and write
it back to that file:
```bash
$ flux install \
--components-extra=image-reflector-controller,image-automation-controller \
--export > "$CLUSTER_PATH/flux-system/gotk-components.yaml"
```
Commit changes to the `$CLUSTER_PATH/flux-system/gotk-components.yaml` file and sync the cluster:
```bash
$ git add $CLUSTER_PATH/flux-system/gotk-components.yaml
$ git commit -s -m "Add image automation controllers to Flux config"
$ git push
$ flux reconcile kustomization --with-source flux-system
```
## Controlling automation with an `ImageUpdateAutomation` object
In Flux v1, automation was run by default. With Flux v2, you have to explicitly tell the controller
which Git repository to update and how to do so. These are defined in an `ImageUpdateAutomation`
object; but first, you need a `GitRepository` with write access, for the automation to use.
If you followed the [Flux v1 read-only migration guide][flux-v1-migration], you will have a
`GitRepository` defined in the namespace `flux-system`, for syncing to use. This `GitRepository`
will have _read_ access to the Git repository by default, and automation needs _write_ access to
push commits.
To give it write access, you can replace the secret it refers to. How to do this will depend on what
kind of authentication you used to install Flux v2.
### Replacing the Git credentials secret
The secret with Git credentials will be named in the `.spec.secretRef.name` field of the
`GitRepository` object. Say your `GitRepository` is in the _namespace_ `flux-system` and _named_
`flux-system` (these are the defaults if you used `flux bootstrap`); you can retrieve the secret
name and Git URL with:
```bash
$ FLUX_NS=flux-system
$ GIT_NAME=flux-system
$ SECRET_NAME=$(kubectl -n $FLUX_NS get gitrepository $GIT_NAME -o jsonpath='{.spec.secretRef.name}')
$ GIT_URL=$(kubectl -n $FLUX_NS get gitrepository $GIT_NAME -o jsonpath='{.spec.url}')
$ echo $SECRET_NAME $GIT_URL # make sure they have values
```
If you're not sure which kind of credentials you're using, look at the secret:
```bash
$ kubectl -n $FLUX_NS describe secret $SECRET_NAME
```
An entry at `.data.identity` indicates that you are using an SSH key (the [first
section](#replacing-an-ssh-key-secret) below); an entry at `.data.username` indicates you are using
a username and password or token (the [second section](#replacing-a-usernamepassword-secret)
below).
#### Replacing an SSH key secret
When using an SSH (deploy) key, create a new key:
```bash
$ flux create secret git -n $FLUX_NS $SECRET_NAME --url=$GIT_URL
```
You will need to copy the public key that's printed out, and install that as a deploy key for your
Git repo **making sure to check the 'All write access' box** (or otherwise give the key write
permissions). Remove the old deploy key.
#### Replacing a username/password secret
When you're using a username and password to authenticate, you may be able to change the permissions
associated with that account.
If not, you will need to create a new access token (e.g., ["Personal Access Token"][github-pat] in
GitHub). In this case, once you have the new token you can replace the secret with the following:
```bash
$ flux create secret git -n $FLUX_NS $SECRET_NAME \
--username <username> --password <token> --url $GIT_URL
```
#### Checking the new credentials
To check if your replaced credentials still work, try syncing the `GitRepository` object:
```bash
$ flux reconcile source git -n $FLUX_NS $GIT_NAME
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/d537304e8f5f41f1584ca1e807df5b5752b2577e
```
When this is successful, it tells you the new credentials have at least read access.
### Making an automation object
To set automation running, you create an [`ImageUpdateAutomation`][auto-ref] object. Each object
will update a Git repository, according to the image policies in the namespace.
Here is an `ImageUpdateAutomation` manifest for the example (note: you will have to supply your own
value for at least the host part of the email address):
```bash
$ # the environment variables $AUTO_PATH and $GIT_NAME are set above
$ FLUXBOT_EMAIL=fluxbot@example.com # supply your own host or address here
$ flux create image update my-app-auto \
--author-name FluxBot --author-email "$FLUXBOT_EMAIL" \
--git-repo-ref $GIT_NAME --branch main \
--interval 5m \
--export > ./$AUTO_PATH/my-app-auto.yaml
$ cat ./$AUTO_PATH/my-app-auto.yaml
---
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageUpdateAutomation
metadata:
name: my-app-auto
namespace: flux-system
spec:
interval: 5m0s
sourceRef:
kind: GitRepository
name: flux-system
git:
checkout:
ref:
branch: main
commit:
author:
email: fluxbot@example.com
name: FluxBot
```
#### Commit and check that the automation object works
Commit the manifest file and push:
```bash
$ git add ./$AUTO_PATH/my-app-auto.yaml
$ git commit -s -m "Add image update automation"
$ git push
# ...
```
Then sync and check the object status:
```bash
$ flux reconcile kustomization --with-source flux-system
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/401dd3b550f82581c7d12bb79ade389089c6422f
► annotating Kustomization flux-system in flux-system namespace
✔ Kustomization annotated
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ reconciled revision main/401dd3b550f82581c7d12bb79ade389089c6422f
$ flux get image update
NAME READY MESSAGE LAST RUN SUSPENDED
my-app-auto True no updates made 2021-02-08T14:53:43Z False
```
Read on to the next section to see how to change each manifest file to work with Flux v2.
## Migrating each manifest to Flux v2
In Flux v1, the annotation
fluxcd.io/automated: "true"
switches automation on for a manifest (a description of a Kubernetes object). For each manifest that
has that annotation, you will need to create custom resources to scan for the latest image, and to
replace the annotations with field markers.
The following sections explain these steps, using this example Deployment manifest which is
initially annotated to work with Flux v1:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-app
namespace: default
annotations:
fluxcd.io/automated: "true"
fluxcd.io/tag.app: semver:^5.0
spec:
template:
spec:
containers:
- name: app
image: ghcr.io/stefanprodan/podinfo:5.0.0
```
!!! warning
A YAML file may have more than one manifest in it, separated with
`---`. Be careful to account for each manifest in a file.
You may wish to try migrating the automation of just one file or manifest and follow it through to
the end of the guide, before returning here to do the remainder.
### How to migrate annotations to image policies
For each image repository that is the subject of automation you will need to create an
`ImageRepository` object, so that the image repository is scanned for tags. The image repository in
the example deployment is `ghcr.io/stefanprodan/podinfo`, which is the image reference minus its
tag:
```yaml
$ cat $CLUSTER_PATH/app/my-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-app
namespace: default
annotations:
fluxcd.io/automated: "true"
fluxcd.io/tag.app: semver:^5.0
spec:
template:
spec:
containers:
- name: app
image: ghcr.io/stefanprodan/podinfo:5.0.0 # <-- image reference
```
The command-line tool `flux` will help create a manifest for you. Note that the output is redirected
to a file under `$AUTO_PATH`, so it can be added to the Git repository and synced to the cluster.
```bash
$ # the environment variable $AUTO_PATH was set earlier
$ flux create image repository podinfo-image \
--image ghcr.io/stefanprodan/podinfo \
--interval 5m \
--export > ./$AUTO_PATH/podinfo-image.yaml
$ cat ./$AUTO_PATH/podinfo-image.yaml
---
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageRepository
metadata:
name: podinfo-image
namespace: flux-system
spec:
image: ghcr.io/stefanprodan/podinfo
interval: 5m0s
```
!!! hint
If you are using the same image repository in several manifests, you only need one
`ImageRepository` object for it.
#### Using image registry credentials for scanning
When your image repositories are private, you supply Kubernetes with "image pull secrets" with
credentials for accessing the image registry (e.g., DockerHub). The image reflector controller needs
the same kind of credentials to scan image repositories.
There are several ways that image pull secrets can be made available for the image reflector
controller. The [image update tutorial][image-update-tute-creds] describes how to create or arrange
secrets for scanning to use. Also see later in the tutorial for [instructions specific to some cloud
platforms][image-update-tute-clouds].
#### Committing and checking the `ImageRepository`
Add the `ImageRepository` manifest to the Git index and commit it:
```bash
$ git add ./$AUTO_PATH/podinfo-image.yaml
$ git commit -s -m "Add image repository object for podinfo"
$ git push
# ...
```
Now you can sync the new commit, and check that the object is working:
```bash
$ flux reconcile kustomization --with-source flux-system
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/fd2fe8a61d4537bcfa349e4d1dbc480ea699ba8a
► annotating Kustomization flux-system in flux-system namespace
✔ Kustomization annotated
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ reconciled revision main/fd2fe8a61d4537bcfa349e4d1dbc480ea699ba8a
$ flux get image repository podinfo-image
NAME READY MESSAGE LAST SCAN SUSPENDED
podinfo-image True successful scan, found 16 tags 2021-02-08T14:31:38Z False
```
#### Replacing automation annotations
For each _field_ that's being updated by automation, you'll need an `ImagePolicy` object to describe
how to select an image for the field value. In the example, the field `.image` in the container
named `"app"` is the field being updated.
In Flux v1, annotations describe how to select the image to update to, using a prefix. In the
example, the prefix is `semver:`:
```yaml
annotations:
fluxcd.io/automated: "true"
fluxcd.io/tag.app: semver:^5.0
```
These are the prefixes supported in Flux v1, and what to use in Flux v2:
| Flux v1 prefix | Meaning | Flux v2 equivalent |
|----------------|---------|--------------------|
| `glob:` | Filter for tags matching the glob pattern, then select the newest by build time | [Use sortable tags](#how-to-use-sortable-image-tags) |
| `regex:` | Filter for tags matching the regular expression, then select the newest by build time |[Use sortable tags](#how-to-use-sortable-image-tags) |
| `semver:` | Filter for tags that represent versions, and select the highest version in the given range | [Use semver ordering](#how-to-use-semver-image-tags) |
#### How to use sortable image tags
To give image tags a useful ordering, you can use a timestamp or serial number as part of each
image's tag, then sort either alphabetically or numerically.
This is a change from Flux v1, in which the build time was fetched from each image's config, and
didn't need to be included in the image tag. Therefore, this is likely to require a change to your
build process.
The guide [How to make sortable image tags][image-tags-guide] explains how to change your build
process to tag images with a timestamp. This will mean Flux v2 can sort the tags to find the most
recently built image.
##### Filtering the tags in an `ImagePolicy`
The recommended format for image tags using a timestamp is:
<branch>-<sha1>-<timestamp>
The timestamp (or serial number) is the part of the tag that you want to order on. The SHA1 is there
so you can trace an image back to the commit from which it was built. You don't need the branch for
sorting, but you may want to include only builds from a specific branch.
Say you want to filter for only the images built from the `main` branch, and pick the most recent. Your
`ImagePolicy` would look like this:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
name: my-app-policy
namespace: flux-system
spec:
imageRepositoryRef:
name: podinfo-image
filterTags:
pattern: '^main-[a-f0-9]+-(?P<ts>[0-9]+)'
extract: '$ts'
policy:
numerical:
order: asc
```
The `.spec.filterTags.pattern` field gives a regular expression that a tag must match to be included. The
`.spec.filterTags.extract` field gives a replacement pattern that can refer back to capture groups in the
filter pattern. The extracted values are sorted to find the selected image tag. In this case, the
timestamp part of the tag will be extracted and sorted numerically in ascending order. See [the
reference docs][imagepolicy-ref] for more examples.
Once you have made sure you have image tags and an `ImagePolicy`, jump ahead to [Checking
that the `ImagePolicy` works](#checking-that-the-imagepolicy-works).
#### How to use SemVer image tags
The other kind of sorting is by [SemVer][semver], picking the highest version from among those
included by the filter. A semver range will also filter for tags that fit in the range. For example,
```yaml
semver:
range: ^5.0
```
includes only tags that have a major version of `5`, and selects whichever is the highest.
This can be combined with a regular expression pattern, to filter on other parts of the tags. For
example, you might put a target environment as well as the version in your image tags, like
`dev-v1.0.3`.
Then you would use an `ImagePolicy` similar to this one:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
name: my-app-policy
namespace: flux-system
spec:
imageRepositoryRef:
name: podinfo-image
filterTags:
pattern: '^dev-v(?P<version>.*)'
extract: '$version'
policy:
semver:
range: '^1.0'
```
Continue on to the next sections to see an example, and how to check that your `ImagePolicy` works.
#### An `ImagePolicy` for the example
The example Deployment has annotations using `semver:` as a prefix, so the policy object also uses
semver:
```bash
$ # the environment variable $AUTO_PATH was set earlier
$ flux create image policy my-app-policy \
--image-ref podinfo-image \
--semver '^5.0' \
--export > ./$AUTO_PATH/my-app-policy.yaml
$ cat ./$AUTO_PATH/my-app-policy.yaml
---
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
name: my-app-policy
namespace: flux-system
spec:
imageRepositoryRef:
name: podinfo-image
policy:
semver:
range: ^5.0
```
#### Checking that the `ImagePolicy` works
Commit the manifest file, and push:
```bash
$ git add ./$AUTO_PATH/my-app-policy.yaml
$ git commit -s -m "Add image policy for my-app"
$ git push
# ...
```
Then you can reconcile and check that the image policy works:
```bash
$ flux reconcile kustomization --with-source flux-system
► annotating GitRepository flux-system in flux-system namespace
✔ GitRepository annotated
◎ waiting for GitRepository reconciliation
✔ GitRepository reconciliation completed
✔ fetched revision main/7dcf50222499be8c97e22cd37e26bbcda8f70b95
► annotating Kustomization flux-system in flux-system namespace
✔ Kustomization annotated
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ reconciled revision main/7dcf50222499be8c97e22cd37e26bbcda8f70b95
$ flux get image policy my-app-policy
NAME READY MESSAGE LATEST IMAGE
my-app-policy True Latest image tag for 'ghcr.io/stefanprodan/podinfo' resolved to: 5.1.4 ghcr.io/stefanprodan/podinfo:5.1.4
```
### How to mark up files for update
The last thing to do in each manifest is to mark the fields that you want to be updated.
In Flux v1, the annotations in a manifest determine the fields to be updated. In the example, the
annotations target the image used by the container `app`:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-app
namespace: default
annotations:
fluxcd.io/automated: "true"
fluxcd.io/tag.app: semver:^5.0 # <-- `.app` here
spec:
template:
spec:
containers:
- name: app # <-- targets `app` here
image: ghcr.io/stefanprodan/podinfo:5.0.0
```
This works straightforwardly for Deployment manifests, but when it comes to `HelmRelease`
manifests, it [gets complicated][helm-auto], and it doesn't work at all for many kinds of resources.
For Flux v2, you mark the field you want to be updated directly, with the namespaced name of the
image policy to apply. This is the example Deployment, marked up for Flux v2:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: default
name: my-app
spec:
template:
spec:
containers:
- name: app
image: ghcr.io/stefanprodan/podinfo:5.0.0 # {"$imagepolicy": "flux-system:my-app-policy"}
```
The value `flux-system:my-app-policy` names the policy that selects the desired image.
This works in the same way for `DaemonSet` and `CronJob` manifests. For `HelmRelease` manifests, put
the marker alongside the part of the `values` that has the image tag. If the image tag is a separate
field, you can put `:tag` on the end of the name, to replace the value with just the selected
image's tag. The [image automation guide][image-update-tute-custom] has examples for `HelmRelease`
and other custom resources.
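As a rough illustration only (the values layout below is hypothetical and depends entirely on the chart), a `HelmRelease` whose chart takes the image repository and tag as separate values could be marked up like this:

```yaml
# Sketch only: an update marker in HelmRelease values; the image.repository /
# image.tag layout is an assumption about the chart's values schema.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-app
  namespace: default
spec:
  # ...omitted for brevity
  values:
    image:
      repository: ghcr.io/stefanprodan/podinfo
      tag: 5.0.0 # {"$imagepolicy": "flux-system:my-app-policy:tag"}
```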
### Committing the marker change and checking that automation works
Referring to the image policy created earlier, you can see the example Deployment does not use the
most recent image. When you commit the manifest file with the update marker added, you would expect
automation to update the file.
Commit the change that adds an update marker:
```bash
$ git add app/my-app.yaml # the filename of the example
$ git commit -s -m "Add update marker to my-app manifest"
$ git push
# ...
```
Now to check that the automation makes a change:
```bash
$ flux reconcile image update my-app-auto
► annotating ImageUpdateAutomation my-app-auto in flux-system namespace
✔ ImageUpdateAutomation annotated
◎ waiting for ImageUpdateAutomation reconciliation
✔ ImageUpdateAutomation reconciliation completed
✔ committed and pushed a92a4b654f520c00cb6c46b2d5e4fb4861aa58fc
```
## Troubleshooting
If a change was not pushed by the image automation, there are several things you can check:
- it's possible it made a change that is not reported in the latest status -- pull from the origin
and check the commit log
- check that the name used in the marker corresponds to the namespace and name of an `ImagePolicy`
- check that the `ImageUpdateAutomation` is in the same namespace as the `ImagePolicy` objects
named in markers
- check that the image policy and the image repository are both reported as `Ready`
- check that the credentials referenced by the `GitRepository` object have write permission, and
create new credentials if necessary.
As a fallback, you can scan the logs of the automation controller to see if it logged errors:
```bash
$ kubectl logs -n flux-system deploy/image-automation-controller
```
Once you are satisfied that it is working, you can migrate the rest of the manifests using the steps
from ["Migrating each manifest to Flux v2"](#migrating-each-manifest-to-flux-v2) above.
[image-update-tute]: https://toolkit.fluxcd.io/guides/image-update/
[imagepolicy-ref]: https://toolkit.fluxcd.io/components/image/imagepolicies/
[helm-auto]: https://docs.fluxcd.io/en/1.21.1/references/helm-operator-integration/#automated-image-detection
[image-update-tute-custom]: https://toolkit.fluxcd.io/guides/image-update/#configure-image-update-for-custom-resources
[flux-v1-migration]: https://toolkit.fluxcd.io/guides/flux-v1-migration/
[install-cli]: https://toolkit.fluxcd.io/get-started/#install-the-flux-cli
[flux-bootstrap]: https://toolkit.fluxcd.io/guides/installation/#bootstrap
[github-pat]: https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token
[auto-object-ref]: https://toolkit.fluxcd.io/components/image/imageupdateautomations/
[image-update-tute-creds]: https://toolkit.fluxcd.io/guides/image-update/#configure-image-scanning
[image-update-tute-clouds]: https://toolkit.fluxcd.io/guides/image-update/#imagerepository-cloud-providers-authentication
[image-tags-guide]: https://toolkit.fluxcd.io/guides/sortable-image-tags/
[auto-ref]: https://toolkit.fluxcd.io/components/image/imageupdateautomations/
[semver]: https://semver.org
@ -1,330 +0,0 @@
# Migrate from Flux v1 to v2
This guide walks you through migrating from Flux v1 to v2.
Read the [FAQ](faq-migration.md) to find out about the differences between v1 and v2.
!!! info "Automated image updates"
The image automation feature is under development in Flux v2.
Please consult the [roadmap](../roadmap/index.md) for more details.
!!! info "Feature parity"
"Feature parity" does not mean Flux v2 works exactly the same as v1 (or is
backward-compatible); it means you can accomplish the same results, while
accounting for the fact that it's a system with a substantially different
design.
This may at times mean that you have to make adjustments to the way your
current cluster configuration is structured. If you are in this situation
and need help, please refer to the [support page](https://fluxcd.io/support/).
## Prerequisites
You will need a Kubernetes cluster version **1.16** or newer
and kubectl version **1.18** or newer.
### Install Flux v2 CLI
With Homebrew:
```sh
brew install fluxcd/tap/flux
```
With Bash:
```sh
curl -s https://fluxcd.io/install.sh | sudo bash
# enable completions in ~/.bash_profile
. <(flux completion bash)
```
Command-line completion for `zsh`, `fish`, and `powershell`
are also supported with their own sub-commands.
Binaries for macOS, Windows and Linux AMD64/ARM are available for download on the
[release page](https://github.com/fluxcd/flux2/releases).
Verify that your cluster satisfies the prerequisites with:
```sh
flux check --pre
```
## GitOps migration
Flux v2 offers an installation procedure that is declarative first
and disaster resilient.
Using the `flux bootstrap` command you can install Flux on a
Kubernetes cluster and configure it to manage itself from a Git
repository. The Git repository created during bootstrap can be used
to define the state of your fleet of Kubernetes clusters.
For a detailed walk-through of the bootstrap procedure please see the [installation guide](installation.md).
!!! warning "`flux bootstrap` target"
`flux bootstrap` should not be run against a Git branch or path
that is already being synchronized by Flux v1, as this will make
them fight over the resources. Instead, bootstrap to a **new Git
repository, branch or path**, and continue with moving the
manifests.
After you've installed Flux v2 on your cluster using bootstrap,
you can delete Flux v1 from your clusters and move the manifests from the
Flux v1 repository to the bootstrap one.
## In-place migration
!!! warning
    For production use we recommend using the **bootstrap** procedure (see the [GitOps migration](#gitops-migration) section above),
but if you wish to install Flux v2 in the
same way as Flux v1 then follow along.
### Flux read-only mode
Assuming you've installed Flux v1 to sync a directory with plain YAMLs from a private Git repo:
```sh
# create namespace
kubectl create ns flux
# deploy Flux v1
fluxctl install \
--git-url=git@github.com:org/app \
--git-branch=main \
--git-path=./deploy \
--git-readonly \
--namespace=flux | kubectl apply -f -
# print deploy key
fluxctl identity --k8s-fwd-ns flux
# trigger sync
fluxctl sync --k8s-fwd-ns flux
```
!!! hint "Uninstall Flux v1"
Before you proceed, scale the Flux v1 deployment to zero
or delete its namespace and RBAC.
If there are YAML files in your `deploy` dir that are not meant to be
applied on the cluster, you can exclude them by placing a `.sourceignore` in your repo root:
```console
$ cat .sourceignore
# exclude all
/*
# include deploy dir
!/deploy
# exclude files from deploy dir
/deploy/**/eksctl.yaml
/deploy/**/charts
```
Install Flux v2 in the `flux-system` namespace:
```console
$ flux install \
--network-policy=true \
--watch-all-namespaces=true \
--namespace=flux-system
✚ generating manifests
✔ manifests build completed
► installing components in flux-system namespace
✔ install completed
◎ verifying installation
✔ source-controller ready
✔ kustomize-controller ready
✔ helm-controller ready
✔ notification-controller ready
✔ install finished
```
Register your Git repository and add the deploy key with read-only access:
```console
$ flux create source git app \
--url=ssh://git@github.com/org/app \
--branch=main \
--interval=1m
► generating deploy key pair
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp2x9ghVmv1zD...
Have you added the deploy key to your repository: y
► collecting preferred public key from SSH server
✔ collected public key from SSH server:
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A...
► applying secret with keys
✔ authentication configured
✚ generating GitRepository source
► applying GitRepository source
✔ GitRepository source created
◎ waiting for GitRepository source reconciliation
✔ GitRepository source reconciliation completed
✔ fetched revision: main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
```
Configure the reconciliation of the `deploy` dir on your cluster:
```console
$ flux create kustomization app \
--source=app \
--path="./deploy" \
--prune=true \
--interval=10m
✚ generating Kustomization
► applying Kustomization
✔ Kustomization created
◎ waiting for Kustomization reconciliation
✔ Kustomization app is ready
✔ applied revision main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
```
If your repository contains secrets encrypted with Mozilla SOPS, please read this [guide](mozilla-sops.md).
Pull changes from Git and apply them immediately:
```sh
flux reconcile kustomization app --with-source
```
List all Kubernetes objects reconciled by `app`:
```sh
kubectl get all --all-namespaces \
-l=kustomize.toolkit.fluxcd.io/name=app \
-l=kustomize.toolkit.fluxcd.io/namespace=flux-system
```
### Flux with Kustomize
Assuming you've installed Flux v1 to sync a Kustomize overlay from an HTTPS Git repository:
```sh
fluxctl install \
--git-url=https://github.com/org/app \
--git-branch=main \
--manifest-generation \
--namespace=flux | kubectl apply -f -
```
With the following `.flux.yaml` in the root dir:
```yaml
version: 1
patchUpdated:
generators:
- command: kustomize build ./overlays/prod
patchFile: flux-patch.yaml
```
!!! hint "Uninstall Flux v1"
Before you proceed, delete the Flux v1 namespace
and remove the `.flux.yaml` from your repo.
Install Flux v2 in the `flux-system` namespace:
```sh
flux install
```
Register the Git repository using a personal access token:
```sh
flux create source git app \
--url=https://github.com/org/app \
--branch=main \
--username=git \
--password=token \
--interval=1m
```
Configure the reconciliation of the `prod` overlay on your cluster:
```sh
flux create kustomization app \
--source=GitRepository/app \
--path="./overlays/prod" \
--prune=true \
--interval=10m
```
Check the status of the Kustomization reconciliation:
```console
$ flux get kustomizations app
NAME REVISION SUSPENDED READY
app main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b False True
```
### Flux with Slack notifications
Assuming you've configured Flux v1 to send notifications to Slack with FluxCloud.
With Flux v2, create an alert provider for a Slack channel:
```sh
flux create alert-provider slack \
--type=slack \
--channel=general \
--address=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
```
And configure notifications for the `app` reconciliation events:
```sh
flux create alert app \
--provider-ref=slack \
--event-severity=info \
--event-source=GitRepository/app \
--event-source=Kustomization/app
```
For more details, read the guides on how to configure
[notifications](notifications.md) and [webhooks](webhook-receivers.md).
### Flux debugging
Check the status of Git operations:
```console
$ kubectl -n flux-system get gitrepositories
NAME READY MESSAGE
app True Fetched revision: main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
test False SSH handshake failed: unable to authenticate, attempted methods [none publickey]
```
Check the status of the cluster reconciliation with kubectl:
```console
$ kubectl -n flux-system get kustomizations
NAME READY STATUS
app True Applied revision: main/5302d04c2ab8f0579500747efa0fe7abc72c8f9
test False The Service 'backend' is invalid: spec.type: Unsupported value: 'Ingress'
```
Suspend a reconciliation:
```console
$ flux suspend kustomization app
► suspending kustomization app in flux-system namespace
✔ kustomization suspended
```
Check the status with kubectl:
```console
$ kubectl -n flux-system get kustomization app
NAME READY STATUS
app False Kustomization is suspended, skipping reconciliation
```
Resume a reconciliation:
```console
$ flux resume kustomization app
► resuming Kustomization app in flux-system namespace
✔ Kustomization resumed
◎ waiting for Kustomization reconciliation
✔ Kustomization reconciliation completed
✔ applied revision main/5302d04c2ab8f0579500747efa0fe7abc72c8f9b
```
@ -1,861 +0,0 @@
# Migrate to the Helm Controller
This guide will teach you everything you need to know to migrate from the [Helm Operator](https://github.com/fluxcd/helm-operator) to the [Helm Controller](https://github.com/fluxcd/helm-controller).
## Overview of changes
### Support for Helm v2 dropped
The Helm Operator offered support for both Helm v2 and v3, due to Kubernetes client incompatibility issues between the versions. This has blocked the Helm Operator from being able to upgrade to a newer v3 version since the release of `3.2.0`.
In combination with the fact that [Helm v2 reaches end of life after November 13, 2020](https://helm.sh/blog/helm-v2-deprecation-timeline/), support for Helm v2 has been dropped.
### Helm and Git repositories, and even Helm charts are now Custom Resources
When working with the Helm Operator, you had to mount various files to either make it recognize new (private) Helm repositories or make it gain access to Helm and/or Git repositories. While this approach was declarative, it did not provide a great user experience and was at times hard to set up.
By moving this configuration to [`HelmRepository`](../components/source/helmrepositories.md), [`GitRepository`](../components/source/gitrepositories.md), [`Bucket`](../components/source/buckets.md) and [`HelmChart`](../components/source/helmcharts.md) Custom Resources, they can now be declaratively described (including their credentials using references to `Secret` resources), and applied to the cluster.
The reconciliation of these resources has been offloaded to a dedicated [Source Controller](../components/source/controller.md), specialized in the acquisition of artifacts from external sources.
The result of all this is an easier and more flexible configuration with much better observability. Failures are traceable to the level of the resource that led to the failure, and are easier to resolve. As polling intervals can now be configured per resource, you can customize your repository and/or chart configuration to a much finer grain.
From a technical perspective, this also means less overhead, as the resources managed by the Source Controller can be shared between multiple `HelmRelease` resources, or even reused by other controllers like the [Kustomize Controller](../components/kustomize/controller.md).
### The `HelmRelease` Custom Resource group domain changed
Due to the Helm Controller becoming part of the extensive set of controller components Flux now has, the Custom Resource group domain has changed from `helm.fluxcd.io` to `helm.toolkit.fluxcd.io`.
Together with the new API version (`v2beta1` at time of writing), the full `apiVersion` you use in your YAML document becomes `helm.toolkit.fluxcd.io/v2beta1`.
### The API specification changed (quite a lot), for the better
While developing the Helm Controller, we were given the chance to rethink what a declarative API for driving automated Helm releases would look like. This has, in short, resulted in the following changes:
- Extensive configuration options per Helm action (install, upgrade, test, rollback); this includes things like timeouts, disabling hooks, and ignoring failures for tests.
- Strategy-based remediation on failures. This makes it possible, for example, to uninstall a release instead of rolling it back after a failed upgrade. The number of retries or keeping the last failed state when the retries are exhausted is now a configurable option.
- Better observability. The `Status` field in the `HelmRelease` provides a much better view of the current state of the release, including dedicated `Ready`, `Released`, `TestSuccess`, and `Remediated` conditions.
For a comprehensive overview, see the [API spec changes](#api-spec-changes).
### Helm storage drift detection no longer relies on dry-runs
The Helm Controller no longer uses dry-runs as a way to detect mutations to the Helm storage. Instead, it uses a simpler model of bookkeeping based on the observed state and revisions. This has resulted in much better performance, a lower memory and CPU footprint, and more reliable drift detection.
### No longer supports [Helm downloader plugins](https://helm.sh/docs/topics/plugins/#downloader-plugins)
We have reduced our usage of Helm packages to a bare minimum (that being: as much as we need to be able to work with chart repositories and charts), and are avoiding shell outs as much as we can.
Given the latter, and the fact that Helm (downloader) plugins work based on shelling out to another command and/or binary, support for this had to be dropped.
We are aware some of our users are using this functionality to be able to retrieve charts from S3 or GCS. The Source Controller already has support for S3 storage compatible buckets ([this includes GCS](https://cloud.google.com/storage/docs/interoperability)), and we hope to extend this support in the foreseeable future to be on par with the plugins that offered support for these Helm repository types.
### Values from `ConfigMap` and `Secret` resources in other namespaces are no longer supported
Support for values references to `ConfigMap` and `Secret` resources in other namespaces than the namespace of the `HelmRelease` has been dropped, as this allowed information from other namespaces to leak into the composed values for the Helm release.
### Values from external source references (URLs) are no longer supported
We initially introduced this feature to support alternative (production focused) `values.yaml` files that sometimes come with charts. It was also used by users to use generic and/or dynamic `values.yaml` files in their `HelmRelease` resources.
The former can now be achieved by defining a [`ValuesFiles` overwrite in the `HelmChartTemplateSpec`](#chart-file-references), which will make the Source Controller look for the referenced file in the chart, and overwrite the default values with the contents from that file.
Support for the latter use has been dropped, as it goes against the principles of GitOps and declarative configuration. You can not reliably restore the cluster state from a Git repository if the configuration of a service relies on some URL being available.
Getting similar behaviour is still possible [using a workaround that makes use of a `CronJob` to download the contents of the external URL on an interval](#external-source-references).
### You can now merge single values at a given path
There was a long outstanding request for the Helm Operator to support merging single values at a given path.
With the Helm Controller this is now possible by defining a [`targetPath` in the `ValuesReference`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.ValuesReference), which supports the same formatting as you would supply as an argument to the `helm` binary using `--set [path]=[value]`. In addition to this, the referenced value can use the same value formats (e.g. `{a,b,c}` for a list). You can read more about the available formats and limitations in the [Helm documentation](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set).
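As an illustrative sketch only (the `ConfigMap` name, key and target path below are made up for this example), merging a single value at a given path could look like this:

```yaml
# Sketch only: merge one value at a given path, similar in spirit to
# `helm ... --set podAnnotations.environment=production`.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  valuesFrom:
    - kind: ConfigMap
      name: my-single-values     # hypothetical ConfigMap
      valuesKey: environment     # key in the ConfigMap holding the value
      targetPath: podAnnotations.environment
```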
### Support added for depends-on relationships
We have added support for depends-on relationships to install `HelmRelease` resources in a given order; for example, because a chart relies on the presence of a Custom Resource Definition installed by another `HelmRelease` resource.
Entries defined in the `spec.dependsOn` list of the `HelmRelease` must be in a `Ready` state before the Helm Controller proceeds with installation and/or upgrade actions.
Note that this does not account for upgrade ordering. Kubernetes only allows applying one resource (`HelmRelease` in this case) at a time, so there is no way for the controller to know when a dependency `HelmRelease` may be updated.
Also, circular dependencies between `HelmRelease` resources must be avoided, otherwise the interdependent `HelmRelease` resources will never be reconciled.
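For example, a minimal sketch (the release names are illustrative) of a `HelmRelease` that waits for another release to be `Ready` first:

```yaml
# Sketch only: "my-app" is not installed or upgraded until the
# "cert-manager" HelmRelease in the same namespace reports Ready.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-app
  namespace: default
spec:
  # ...omitted for brevity
  dependsOn:
    - name: cert-manager
```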
### You can now suspend a HelmRelease
There is a new `spec.suspend` field, that if set to `true` causes the Helm Controller to skip reconciliation for the resource. This can be utilized to e.g. temporarily ignore chart changes, and prevent a Helm release from getting upgraded.
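A minimal sketch:

```yaml
# Sketch only: the Helm Controller skips reconciliation while suspend is true.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  suspend: true
```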
### Helm releases can target another cluster
We have added support for making Helm releases to other clusters. If the `spec.kubeConfig` field in the `HelmRelease` is set, Helm actions will run against the default cluster specified in that KubeConfig instead of the local cluster that is responsible for the reconciliation of the `HelmRelease`.
The Helm storage is stored on the remote cluster in a namespace that equals the namespace of the `HelmRelease`, or the configured `spec.storageNamespace`. The release itself is made in a namespace that equals the namespace of the `HelmRelease`, or the configured `spec.targetNamespace`. The namespaces are expected to exist, and can for example be created using the [Kustomize Controller](https://toolkit.fluxcd.io/components/kustomize/controller/) which has the same cross-cluster support.
Other references to Kubernetes resources in the `HelmRelease`, like `ValuesReference` resources, are expected to exist on the reconciling cluster.
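A rough sketch, assuming a `Secret` named `prod-kubeconfig` on the reconciling cluster that holds a kubeconfig for the target cluster (the secret and namespace names are made up for this example):

```yaml
# Sketch only: run Helm actions against the cluster described by the
# kubeconfig in the referenced Secret, instead of the local cluster.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  kubeConfig:
    secretRef:
      name: prod-kubeconfig
  storageNamespace: default
  targetNamespace: default
```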
### Added support for notifications and webhooks
Sending notifications and/or alerts to Slack, Microsoft Teams, Discord, or Rocket.Chat is now possible using the [Notification Controller](../components/notification/controller.md), [`Provider` Custom Resources](../components/notification/provider.md) and [`Alert` Custom Resources](../components/notification/alert.md).
It does not stop there, using [`Receiver` Custom Resources](../components/notification/receiver.md) you can trigger **push based** reconciliations from Harbor, GitHub, GitLab, BitBucket or your CI system by making use of the webhook endpoint the resource creates.
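As a sketch of what the declarative form could look like (the channel, webhook address placeholder and event source below are illustrative):

```yaml
# Sketch only: a Slack Provider and an Alert for events from a HelmRelease.
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
  name: slack
  namespace: flux-system
spec:
  type: slack
  channel: general
  address: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
  name: my-release
  namespace: flux-system
spec:
  providerRef:
    name: slack
  eventSeverity: info
  eventSources:
    - kind: HelmRelease
      name: my-release
      namespace: default
```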
### Introduction of the `flux` CLI to create and/or generate Custom Resources
With the new [`flux` CLI](../cmd/flux.md) it is now possible to create and/or generate the Custom Resources mentioned earlier. To generate the YAML for a `HelmRepository` and `HelmRelease` resource, you can for example run:
```console
$ flux create source helm podinfo \
--url=https://stefanprodan.github.io/podinfo \
--interval=10m \
--export
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: podinfo
namespace: flux-system
spec:
interval: 10m0s
url: https://stefanprodan.github.io/podinfo
$ flux create helmrelease podinfo \
--interval=10m \
--source=HelmRepository/podinfo \
--chart=podinfo \
--chart-version=">4.0.0" \
--export
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: podinfo
namespace: flux-system
spec:
chart:
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo
version: '>4.0.0'
interval: 10m0s
```
## API spec changes
The following is an overview of changes to the API spec, including behavioral changes compared to how the Helm Operator performs actions. For a full overview of the new API spec, consult the [API spec documentation](../components/helm/helmreleases.md#specification).
### Defining the Helm chart
#### Helm repository
For the Helm Operator, you used to configure a chart from a Helm repository as follows:
```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
chart:
# The repository URL
repository: https://charts.example.com
# The name of the chart (without an alias)
name: my-chart
# The SemVer version of the chart
version: 1.2.3
```
With the Helm Controller, you now create a `HelmRepository` resource in addition to the `HelmRelease` you would normally create (for all available fields, consult the [Source API reference](../components/source/api.md#source.toolkit.fluxcd.io/v1beta1.HelmRepository)):
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: my-repository
namespace: default
spec:
  # The interval at which to check the upstream for updates
interval: 10m
# The repository URL, a valid URL contains at least a protocol and host
url: https://chart.example.com
```
If you make use of a private Helm repository, instead of configuring the credentials by mounting a `repositories.yaml` file, you can now configure the HTTP/S basic auth and/or TLS credentials by referring to a `Secret` in the same namespace as the `HelmRepository`:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: my-repository-creds
namespace: default
data:
# HTTP/S basic auth credentials
username: <base64 encoded username>
password: <base64 encoded password>
# TLS credentials (certFile and keyFile, and/or caCert)
certFile: <base64 encoded certificate>
keyFile: <base64 encoded key>
caCert: <base64 encoded CA certificate>
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: my-repository
namespace: default
spec:
# ...omitted for brevity
secretRef:
name: my-repository-creds
```
In the `HelmRelease`, you then use a reference to the `HelmRepository` resource in the `spec.chart.spec` (for all available fields, consult the [Helm API reference](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.HelmChartTemplate)):
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# The interval at which to reconcile the Helm release
interval: 10m
chart:
spec:
# The name of the chart as made available by the HelmRepository
# (without any aliases)
chart: my-chart
# A fixed SemVer, or any SemVer range
# (i.e. >=4.0.0 <5.0.0)
version: 1.2.3
# The reference to the HelmRepository
sourceRef:
kind: HelmRepository
name: my-repository
# Optional, defaults to the namespace of the HelmRelease
namespace: default
```
The `spec.chart.spec` values are used by the Helm Controller as a template to create a new `HelmChart` resource in the same namespace as the `sourceRef`, to be reconciled by the Source Controller. The Helm Controller watches `HelmChart` resources for (revision) changes, and performs an installation or upgrade when it notices a change.
#### Git repository
For the Helm Operator, you used to configure a chart from a Git repository as follows:
```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
chart:
# The URL of the Git repository
git: https://example.com/org/repo
# The Git branch (or other Git reference)
ref: master
# The path of the chart relative to the repository root
path: ./charts/my-chart
```
With the Helm Controller, you create a `GitRepository` resource in addition to the `HelmRelease` you would normally create (for all available fields, consult the [Source API reference](../components/source/api.md#source.toolkit.fluxcd.io/v1beta1.GitRepository)):
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: my-repository
namespace: default
spec:
# The interval at which to check the upstream for updates
interval: 10m
# The repository URL, can be a HTTP/S or SSH address
url: https://example.com/org/repo
# The Git reference to checkout and monitor for changes
# (defaults to master)
# For all available options, see:
# https://toolkit.fluxcd.io/components/source/api/#source.toolkit.fluxcd.io/v1beta1.GitRepositoryRef
ref:
branch: master
```
If you make use of a private Git repository, instead of configuring the credentials by mounting a private key and making changes to the `known_hosts` file, you can now configure the credentials for both HTTP/S and SSH by referring to a `Secret` in the same namespace as the `GitRepository`:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: my-repository-creds
namespace: default
data:
# HTTP/S basic auth credentials
username: <base64 encoded username>
password: <base64 encoded password>
# SSH credentials
identity: <base64 encoded private key>
identity.pub: <base64 public key>
known_hosts: <base64 encoded known_hosts>
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: my-repository
namespace: default
spec:
# ...omitted for brevity
secretRef:
name: my-repository-creds
```
In the `HelmRelease`, you then use a reference to the `GitRepository` resource in the `spec.chart.spec` (for all available fields, consult the [Helm API reference](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.HelmChartTemplate)):
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# The interval at which to reconcile the Helm release
interval: 10m
chart:
spec:
# The path of the chart relative to the repository root
chart: ./charts/my-chart
# The reference to the GitRepository
sourceRef:
kind: GitRepository
name: my-repository
# Optional, defaults to the namespace of the HelmRelease
namespace: default
```
The `spec.chart.spec` values are used by the Helm Controller as a template to create a new `HelmChart` resource in the same namespace as the `sourceRef`, to be reconciled by the Source Controller. The Helm Controller watches `HelmChart` resources for (revision) changes, and performs an installation or upgrade when it notices a change.
### Defining values
#### Inlined values
Inlined values (defined in the `spec.values` of the `HelmRelease`) still work as with the Helm operator. It represents a YAML map as you would put in a file and supply to `helm` with `-f values.yaml`, but inlined into the `HelmRelease` manifest:
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
values:
foo: value1
bar:
baz: value2
oof:
- item1
- item2
```
#### Values from sources
As described in the [overview of changes](#overview-of-changes), there have been multiple changes to the way you can refer to values from sources (like `ConfigMap` and `Secret` references), including the [drop of support for external source (URL) references](#values-from-external-source-references-urls-are-no-longer-supported) and [added support for merging single values at a specific path](#you-can-now-merge-single-values-at-a-given-path).
Values are still merged in the order given, with later values overwriting earlier. The values from sources always have a lower priority than the values inlined in the `HelmRelease` via the `spec.values` key.
##### `ConfigMap` and `Secret` references
`ConfigMap` and `Secret` references used to be defined as follows:
```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
valuesFrom:
- configMapKeyRef:
name: my-config-values
namespace: my-ns
key: values.yaml
optional: false
- secretKeyRef:
name: my-secret-values
namespace: my-ns
key: values.yaml
optional: true
```
In the new API spec, the individual `configMapKeyRef` and `secretKeyRef` objects are bundled into a single [`ValuesReference`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.ValuesReference), which [no longer allows referring to resources in other namespaces](#values-from-configmap-and-secret-resources-in-other-namespaces-are-no-longer-supported):
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
valuesFrom:
- kind: ConfigMap
name: my-config-values
valuesKey: values.yaml
optional: false
- kind: Secret
name: my-secret-values
valuesKey: values.yaml
optional: true
```
Another thing to take note of is that the behavior for values references marked as `optional` has changed. When set, a "not found" error for the values reference is ignored, but any `valuesKey`, `targetPath` or transient error will still result in a reconciliation failure.
##### Chart file references
With the Helm Operator it was possible to refer to an alternative values file (for e.g. production usage) in the directory of a chart from a Git repository:
```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
valuesFrom:
# Values file to merge in,
# expected to be a relative path in the chart directory
- chartFileRef:
path: values-prod.yaml
```
With the Helm Controller, this declaration has moved to the `spec.chart.spec`, and the feature is no longer limited to charts from a Git repository:
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
chart:
spec:
chart: my-chart
version: 1.2.3
# Alternative values file to use as the default values,
# expected to be a relative path in the sourceRef
valuesFiles:
- values.yaml
- values-prod.yaml
sourceRef:
kind: HelmRepository
name: my-repository
```
When `valuesFiles` is defined, the chart will be (re)packaged with the values from the referenced files as the default values, merged in the order they appear. Note that this behavior is different from the Helm Operator as the default values (values.yaml) are not merged by default and must be explicitly added to the list.
##### External source references
While [the support for external source references has been dropped](#values-from-external-source-references-urls-are-no-longer-supported), it is possible to work around this limitation by creating a `CronJob` that periodically fetches the values from an external URL and saves them to a `ConfigMap` or `Secret` resource.
First, create a `ServiceAccount`, `Role` and `RoleBinding` capable of updating a limited set of `ConfigMap` resources:
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: values-fetcher
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: configmap-updater
namespace: default
rules:
- apiGroups: [""]
resources: ["configmaps"]
# ResourceNames limits the access of the role to
# a defined set of ConfigMap resources
resourceNames: ["my-external-values"]
verbs: ["patch", "get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: update-values-configmaps
namespace: default
subjects:
- kind: ServiceAccount
name: values-fetcher
namespace: default
roleRef:
kind: Role
name: configmap-updater
apiGroup: rbac.authorization.k8s.io
```
As `resourceNames` scoping in the `Role` [does not allow restricting `create` requests](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources), we need to create empty placeholder(s) for the `ConfigMap` resource(s) that will hold the fetched values:
```yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: my-external-values
namespace: default
data: {}
```
Lastly, create a `CronJob` that uses the `ServiceAccount` defined above, fetches the external values on an interval, and applies them to the `ConfigMap`:
```yaml
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: fetch-external-values
spec:
concurrencyPolicy: Forbid
schedule: "*/5 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
template:
spec:
serviceAccountName: values-fetcher
containers:
- name: kubectl
image: bitnami/kubectl:1.19
volumeMounts:
- mountPath: /tmp
name: tmp-volume
command:
- sh
- -c
args:
- >-
curl -f -# https://example.com/path/to/values.yaml -o /tmp/values.yaml &&
kubectl create configmap my-external-values --from-file=/tmp/values.yaml -oyaml --dry-run=client |
kubectl apply -f -
volumes:
- name: tmp-volume
emptyDir:
medium: Memory
restartPolicy: OnFailure
```
You can now refer to the `my-external-values` `ConfigMap` resource in your `HelmRelease`:
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
valuesFrom:
- kind: ConfigMap
name: my-external-values
```
### Defining release options
With the Helm Operator the release options used to be configured in the `spec` of the `HelmRelease` and applied to both Helm install and upgrade actions.
This has changed for the Helm Controller, where some defaults can be defined in the [`spec`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.HelmReleaseSpec), but specific action configurations and overwrites for the defaults can be defined in the [`spec.install`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Install), [`spec.upgrade`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Upgrade) and [`spec.test`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Test) sections of the `HelmRelease`.
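For illustration only (the field values are made up; consult the linked API references for what is available), defaults in the `spec` can be combined with per-action settings like this:

```yaml
# Sketch only: a default timeout with per-action configuration on top.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-release
  namespace: default
spec:
  # ...omitted for brevity
  # Default timeout for Helm actions
  timeout: 5m
  install:
    # Used for the install action instead of the default above
    timeout: 10m
  upgrade:
    disableHooks: true
  test:
    enable: true
```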
### Defining a rollback / uninstall configuration
With the Helm Operator, uninstalling a release after an installation failure was done automatically, while rolling back from a faulty upgrade and configuring options like retries was done as follows:
```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
rollback:
enable: true
retries: true
maxRetries: 5
disableHooks: false
force: false
recreate: false
timeout: 300
```
The Helm Controller offers an extensive set of configuration options to remediate when a Helm release fails, using [`spec.install.remediate`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.InstallRemediation), [`spec.upgrade.remediate`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation), [`spec.rollback`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Rollback) and [`spec.uninstall`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Uninstall). Some of the new features include the option to remediate with an uninstall after an upgrade failure, and the option to keep a failed release for debugging purposes when it has run out of retries.
#### Automated uninstalls
The configuration below mimics the uninstall behavior of the Helm Operator (for all available fields, consult the [`InstallRemediation`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.InstallRemediation) and [`Uninstall`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Uninstall) API references):
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
install:
# Remediation configuration for when the Helm install
    # (or subsequent Helm test) action fails
remediation:
# Number of retries that should be attempted on failures before
      # bailing out; a negative integer equals unlimited retries
retries: -1
# Configuration options for the Helm uninstall action
uninstall:
timeout: 5m
disableHooks: false
keepHistory: false
```
#### Automated rollbacks
The configuration below shows an automated rollback configuration that equals [the configuration for the Helm Operator showed above](#defining-a-rollback-uninstall-configuration) (for all available fields, consult the [`UpgradeRemediation`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation) and [`Rollback`](../components/helm/api.md#helm.toolkit.fluxcd.io/v2beta1.Rollback) API references):
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
upgrade:
    # Remediation configuration for when a Helm upgrade action fails
remediation:
      # Number of retries to attempt after a failure,
      # setting this to 0 means no remediation will be
      # attempted
retries: 5
# Configuration options for the Helm rollback action
rollback:
timeout: 5m
disableWait: false
disableHooks: false
recreate: false
force: false
cleanupOnFail: false
```
## Migration strategy
Due to the high number of changes to the API spec, there are no detailed instructions available to provide a simple migration path. There is, however, a [simple procedure to follow](#steps), which, combined with the detailed list of [API spec changes](#api-spec-changes), should make the migration relatively easy.
Here are some things to know:
* The Helm Controller will ignore the old custom resources (and the Helm Operator will ignore the new resources).
* Deleting a resource while the corresponding controller is running will result in the Helm release also being deleted.
* Deleting a `CustomResourceDefinition` will also delete all custom resources of that kind.
* If both the Helm Controller and the Helm Operator are running, and both a new and an old custom resource define the same release, they will fight over it.
* The Helm Controller will always perform an upgrade the first time it encounters a new `HelmRelease` for an existing release; this is [due to the changes to release mechanics and bookkeeping](#helm-storage-drift-detection-no-longer-relies-on-dry-runs).
The safest way to upgrade is to avoid deletions and fights by stopping the Helm Operator. Once the operator is not running, it is safe to deploy the Helm Controller (e.g., by following the [Get Started guide](../get-started/index.md), [utilizing `flux install`](../cmd/flux_install.md), or using the manifests from the [release page](https://github.com/fluxcd/helm-controller/releases)), and start replacing the old resources with new resources. You can keep the old resources around during this process, since the Helm Controller will ignore them.
### Steps
The recommended migration steps for a single `HelmRelease` are as follows:
1. Ensure the Helm Operator is not running, as otherwise the Helm Controller and Helm Operator will fight over the release.
1. Create a [`GitRepository` or `HelmRepository` resource for the `HelmRelease`](#defining-the-helm-chart), including any `Secret` that may be required to access the source. Note that it is possible for multiple `HelmRelease` resources to share a `GitRepository` or `HelmRepository` resource.
1. Create a new `HelmRelease` resource ([with the `helm.toolkit.fluxcd.io` group domain](#the-helmrelease-custom-resource-group-domain-changed)), define the `spec.releaseName` (plus the `spec.targetNamespace` and `spec.storageNamespace` if applicable) to match that of the existing release, and rewrite the configuration to adhere to the [API spec changes](#api-spec-changes).
1. Confirm the Helm Controller successfully upgrades the release.
### Example
As a full example, this is an old resource:
```yaml
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: podinfo
namespace: default
spec:
chart:
repository: https://stefanprodan.github.io/podinfo
name: podinfo
version: 5.0.3
values:
replicaCount: 1
```
The custom resources for the Helm Controller would be:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: podinfo
namespace: default
spec:
interval: 10m
url: https://stefanprodan.github.io/podinfo
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: podinfo
namespace: default
spec:
interval: 5m
releaseName: default-podinfo
chart:
spec:
chart: podinfo
version: 5.0.3
sourceRef:
kind: HelmRepository
name: podinfo
interval: 10m
values:
replicaCount: 1
```
### Migrating gradually
Gradually migrating to the Helm Controller is possible by scaling down the Helm Operator while you move over resources, and scaling it up again once you have migrated some of the releases to the Helm Controller.
While doing this, make sure that once you scale up the Helm Operator again, there are no old and new `HelmRelease` resources pointing towards the same release, as they will fight over the release.
Alternatively, you can gradually migrate per namespace without ever needing to shut the Helm Operator down, so that continuous delivery is not interrupted in most namespaces. To do so, you can customize the Helm Operator roles associated with its `ServiceAccount` to prevent it from interfering with the Helm Controller in the namespaces you are migrating. First, create a new `ClusterRole` for the Helm Operator to operate in "read-only" mode cluster-wide:
```yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: helm-operator-ro
rules:
- apiGroups: ['*']
resources: ['*']
verbs:
- get
- watch
- list
- nonResourceURLs: ['*']
verbs: ['*']
```
By default, [the `helm-operator` `ServiceAccount` is bound to a `ClusterRole` that allows it to create, patch and delete resources in all namespaces](https://github.com/fluxcd/helm-operator/blob/1baacd6dee865b57da80e0e767286ed68d578246/deploy/rbac.yaml#L9-L36). Bind the `ServiceAccount` to the new `helm-operator-ro` `ClusterRole`:
```diff
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: helm-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
- name: helm-operator
+ name: helm-operator-ro
subjects:
- kind: ServiceAccount
name: helm-operator
namespace: flux
```
Finally, create a `RoleBinding` for every namespace except the one you are currently migrating:
```yaml
# Create a `RoleBinding` for each namespace the Helm Operator is allowed to process `HelmReleases` in
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: helm-operator
namespace: helm-operator-watched-namespace
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: helm-operator
subjects:
- name: helm-operator
namespace: flux
kind: ServiceAccount
# Do not create the following to prevent the Helm Operator from watching `HelmReleases` in `helm-controller-watched-namespace`
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
# name: helm-operator
# namespace: helm-controller-watched-namespace
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: ClusterRole
# name: helm-operator
# subjects:
# - name: helm-operator
# namespace: flux
# kind: ServiceAccount
```
If you are using [the Helm Operator chart](https://github.com/fluxcd/helm-operator/tree/master/chart/helm-operator), make sure to set `rbac.create` to `false` so that you can manage the `ClusterRoleBindings` and `RoleBindings` yourself.
### Deleting old resources
Once you have migrated all your `HelmRelease` resources to the Helm Controller, you can remove all of the old resources by deleting the old Custom Resource Definition:
```sh
kubectl delete crd helmreleases.helm.fluxcd.io
```
## Frequently Asked Questions
### Are automated image updates supported?
Not yet, but the feature is under active development. See the [image update feature parity section on the roadmap](https://toolkit.fluxcd.io/roadmap/#flux-image-update-feature-parity) for updates on this topic.
### How do I automatically apply my `HelmRelease` resources to the cluster?
If you are currently a Flux v1 user, you can commit the `HelmRelease` resources to Git, and Flux will automatically apply them to the cluster like any other resource. Note, however, that Flux v1 does not support automated image updates for Helm Controller resources.
If you are not a Flux v1 user or want to fully migrate to Flux v2, the [Kustomize Controller](https://toolkit.fluxcd.io/components/kustomize/controller/) will serve your needs.
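A minimal sketch of a Flux `Kustomization` that applies `HelmRelease` manifests from a Git source (the path and source name are placeholders):
```yaml
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: helm-releases
  namespace: flux-system
spec:
  interval: 10m
  # Directory in the Git repository containing the HelmRelease manifests
  path: ./releases
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
```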
### I am still running Helm v2, what is the right upgrade path for me?
Migrate your Helm v2 releases to v3 using [the Helm Operator's migration feature](https://docs.fluxcd.io/projects/helm-operator/en/stable/helmrelease-guide/release-configuration/#migrating-from-helm-v2-to-v3), or make use of the [`helm-2to3`](https://github.com/helm/helm-2to3) plugin directly, before continuing following the [migration steps](#steps).
### Is the Helm Controller ready for production?
Probably, but with some side notes:
1. It is still under active development, and while our focus has been to stabilize the API as much as we can during the first development phase, we do not guarantee there will not be any breaking changes before we reach General Availability. We are, however, committed to providing [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) for upcoming API versions.
1. There may be (internal) behavioral changes in upcoming releases, but they should be aimed at further stabilizing the Helm Controller itself, solving edge case issues, providing better logging, observability, and/or other improvements.
### Can I use Helm Controller standalone?
The Helm Controller depends on the [Source Controller](../components/source/controller.md); you can install both controllers
and manage Helm releases in a declarative way without GitOps.
For more details, please see this [answer](../faq/index.md#can-i-use-flux-helmreleases-without-gitops).
### I have another question
Given the amount of changes, it is quite possible that this document did not provide you with a clear answer for your specific setup. If this applies to you, do not hesitate to ask for help in the [GitHub Discussions](https://github.com/fluxcd/flux2/discussions/new?category_id=31999889) or on the [`#flux` CNCF Slack channel](https://slack.cncf.io)!

@ -1,493 +0,0 @@
# Manage Helm Releases
The [helm-controller](../components/helm/controller.md) allows you to
declaratively manage Helm chart releases with Kubernetes manifests.
It makes use of the artifacts produced by the
[source-controller](../components/source/controller.md) from
`HelmRepository`, `GitRepository`, `Bucket` and `HelmChart` resources.
The helm-controller is part of the default toolkit installation.
## Prerequisites
To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).
## Define a chart source
To be able to release a Helm chart, the source that contains the chart
(either a `HelmRepository`, `GitRepository`, or `Bucket`) must first be
known to the source-controller, so that the `HelmRelease` can reference
it.
### Helm repository
Helm repositories are the recommended source to retrieve Helm charts
from, as they are lightweight in processing and make it possible to
configure a semantic version selector for the chart version that should
be released.
They can be declared by creating a `HelmRepository` resource; the
source-controller will fetch the Helm repository index for this
resource on an interval and expose it as an artifact:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: podinfo
namespace: flux-system
spec:
interval: 1m
url: https://stefanprodan.github.io/podinfo
```
The `interval` defines how often the Helm repository index
is fetched, and should be at least `1m`. Setting this to a higher
value means newer chart versions will be detected at a slower pace;
a push-based fetch can be introduced using [webhook receivers](webhook-receivers.md).
The `url` can be any HTTP/S Helm repository URL.
!!! hint "Authentication"
HTTP/S basic and TLS authentication can be configured for private
Helm repositories. See the [`HelmRepository` CRD docs](../components/source/helmrepositories.md)
for more details.
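As a sketch, a private Helm repository using HTTP basic authentication could be configured as follows (the secret name and credentials are placeholders):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: private-repo-auth
  namespace: flux-system
stringData:
  username: <username>
  password: <password>
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: private-repo
  namespace: flux-system
spec:
  interval: 1m
  url: https://charts.example.com
  # Reference the Secret holding the basic auth credentials
  secretRef:
    name: private-repo-auth
```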
### Git repository
Charts from Git repositories can be released by declaring a
`GitRepository`; the source-controller will fetch the contents of the
repository on an interval and expose it as an artifact.
The source-controller can build and expose Helm charts as artifacts
from the contents of the `GitRepository` artifact (more about this
later on in the guide).
**There is one caveat you should be aware of:** to make the
source-controller produce a new chart artifact, the `version` in the
`Chart.yaml` of the chart must be bumped.
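A minimal sketch of such a version bump in the chart's `Chart.yaml` (the chart name and versions are placeholders):
```yaml
# charts/my-app/Chart.yaml
apiVersion: v2
name: my-app
# Bump this version to make the source-controller produce a new chart artifact
version: 1.2.4
```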
An example `GitRepository`:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: podinfo
namespace: flux-system
spec:
interval: 1m
url: https://github.com/stefanprodan/podinfo
ref:
branch: master
ignore: |
# exclude all
/*
# include charts directory
!/charts/
```
The `interval` defines how often the Git repository contents
are fetched, and should be at least `1m`. Setting this to a higher
value means newer chart versions will be detected at a slower pace;
a push-based fetch can be introduced using [webhook receivers](webhook-receivers.md).
The `url` can be any HTTP/S or SSH address (the latter requiring
authentication).
The `ref` defines the checkout strategy, and is set to follow the
`master` branch in the above example. For other strategies like
tags or commits, see the [`GitRepository` CRD docs](../components/source/gitrepositories.md).
The `ignore` defines file and folder exclusion for the
artifact produced, and follows the [`.gitignore` pattern
format](https://git-scm.com/docs/gitignore#_pattern_format).
The above example only includes the `charts` directory of the
repository and omits all other files.
!!! hint "Authentication"
HTTP/S basic and SSH authentication can be configured for private
Git repositories. See the [`GitRepository` CRD docs](../components/source/gitrepositories.md)
for more details.
### Cloud Storage
While still possible, it is inadvisable to use a `Bucket` as a source for a `HelmRelease`,
as the whole storage bucket will be downloaded by the source-controller at each sync. The
bucket can easily become very large if there are frequent releases of multiple charts
stored in the same bucket.
A better option is to use [Chartmuseum](https://github.com/helm/chartmuseum) and run a cluster-local
Helm repository that can be used by the source-controller. Chartmuseum has support
for multiple cloud storage solutions such as S3, GCS, and Azure Blob Storage,
meaning that you are not limited to storage providers that support the S3 protocol.
You can deploy a Chartmuseum instance with a `HelmRelease` that exposes a Helm repository stored
in an S3 bucket. Please refer to [Chartmuseum's "how to run" documentation](https://chartmuseum.com/docs/#how-to-run)
for details about how to use other storage backends.
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: chartmuseum
namespace: flux-system
spec:
url: https://chartmuseum.github.io/charts
interval: 10m
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: chartmuseum
namespace: flux-system
spec:
interval: 5m
chart:
spec:
chart: chartmuseum
version: "2.14.2"
sourceRef:
kind: HelmRepository
name: chartmuseum
namespace: flux-system
interval: 1m
values:
env:
open:
AWS_SDK_LOAD_CONFIG: true
STORAGE: amazon
STORAGE_AMAZON_BUCKET: "bucket-name"
STORAGE_AMAZON_PREFIX: ""
STORAGE_AMAZON_REGION: "region-name"
serviceAccount:
create: true
annotations:
eks.amazonaws.com/role-arn: "role-arn"
securityContext:
enabled: true
fsGroup: 65534
```
After Chartmuseum is up and running, it should be possible to use the accompanying
Kubernetes Service as the URL for the `HelmRepository`.
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: helm-charts
namespace: flux-system
spec:
interval: 1m
url: http://chartmuseum-chartmuseum:8080
```
## Define a Helm release
With the chart source created, define a new `HelmRelease` to release
the Helm chart:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: podinfo
namespace: default
spec:
interval: 5m
chart:
spec:
chart: <name|path>
version: '4.0.x'
sourceRef:
kind: <HelmRepository|GitRepository|Bucket>
name: podinfo
namespace: flux-system
interval: 1m
values:
replicaCount: 2
```
The `chart.spec` values are used by the helm-controller as a template
to create a new `HelmChart` resource in the same namespace as the
`sourceRef`. The source-controller will then look up the chart in the
artifact of the referenced source, and either fetch the chart for a
`HelmRepository`, or build it from a `GitRepository` or `Bucket`.
It will then make it available as a `HelmChart` artifact to be used by
the helm-controller.
The `chart.spec.chart` can either contain:
* The name of the chart as made available by the `HelmRepository`
(without any aliases), for example: `podinfo`
* The relative path the chart can be found at in the `GitRepository`
or `Bucket`, for example: `./charts/podinfo`
* The relative path the chart package can be found at in the
`GitRepository` or `Bucket`, for example: `./charts/podinfo-1.2.3.tgz`
The `chart.spec.version` can be a fixed semver, or any semver range
(i.e. `>=4.0.0 <5.0.0`). It is only taken into account for `HelmRelease`
resources that reference a `HelmRepository` source.
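As a sketch, a `HelmRelease` for a chart stored in a Git repository references the chart by its relative path instead (the path and source name are assumptions):
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: podinfo
  namespace: default
spec:
  interval: 5m
  chart:
    spec:
      # Relative path of the chart in the GitRepository artifact
      chart: ./charts/podinfo
      sourceRef:
        kind: GitRepository
        name: podinfo
        namespace: flux-system
      interval: 1m
  values:
    replicaCount: 2
```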
!!! hint "Advanced configuration"
The `HelmRelease` offers an extensive set of configurable flags
for finer grain control over how Helm actions are performed.
See the [`HelmRelease` CRD docs](../components/helm/helmreleases.md)
for more details.
## Refer to values in `ConfigMap` and `Secret` resources
It is possible to define a list of `ConfigMap` and `Secret` resources
from which to take values. The values are merged in the order given,
with later values overwriting earlier ones. These values always have a
lower priority than the values inlined in the `HelmRelease` via the
`spec.values` parameter.
```yaml
spec:
valuesFrom:
- kind: ConfigMap
name: prod-env-values
valuesKey: values-prod.yaml
- kind: Secret
name: prod-tls-values
valuesKey: crt
targetPath: tls.crt
```
The definition of the listed keys is as follows:
- `kind`: Kind of the values referent (`ConfigMap` or `Secret`).
- `name`: Name of the values referent, in the same namespace as the
`HelmRelease`.
- `valuesKey` _(Optional)_: The data key where the values.yaml or a
specific value can be found. Defaults to `values.yaml` when omitted.
- `targetPath` _(Optional)_: The YAML dot notation path at which the
value should be merged. When set, the `valuesKey` is expected to be
a single flat value. Defaults to `None` when omitted, which results
in the values getting merged at the root.
!!! hint "Note"
The `targetPath` supports the same formatting as you would supply
as an argument to the `helm` binary using `--set [path]=[value]`.
In addition to this, the referred value can contain the same
value formats (e.g. `{a,b,c}` for a list).
You can read more about the available formats and limitations in
the [Helm documentation](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set).
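For instance, a comma-braced value stored in a `ConfigMap` can be merged as a list at a given path (a sketch, assuming the chart accepts a list of host names at `ingress.hosts`):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-values
data:
  hosts: "{podinfo.example.com,podinfo-canary.example.com}"
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: podinfo
spec:
  # ...omitted for brevity
  valuesFrom:
    - kind: ConfigMap
      name: ingress-values
      valuesKey: hosts
      targetPath: ingress.hosts
```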
!!! warning "`TargetPath` and JSON values"
When using `TargetPath` in combination with a JSON string, the
[limitations are the same as while using `helm`](https://github.com/helm/helm/issues/5618),
and require you to escape the full JSON string (including `=`, `[`, `,`, `.`).
## Refer to values in `ConfigMaps` generated with Kustomize
It is possible to use Kustomize [ConfigMap generator](https://kubectl.docs.kubernetes.io/references/kustomize/configmapgenerator/)
to trigger a Helm release upgrade every time the encoded values change.
First create a `kustomizeconfig.yaml` for Kustomize to be able to patch
`ConfigMaps` referenced in `HelmRelease` manifests:
```yaml
nameReference:
- kind: ConfigMap
version: v1
fieldSpecs:
- path: spec/valuesFrom/name
kind: HelmRelease
```
Create a `HelmRelease` definition that references a `ConfigMap`:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: podinfo
namespace: podinfo
spec:
interval: 5m
releaseName: podinfo
chart:
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo
valuesFrom:
- kind: ConfigMap
name: podinfo-values
```
Create a `kustomization.yaml` that generates the `ConfigMap` using our kustomize config:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: podinfo
resources:
- namespace.yaml
- repository.yaml
- release.yaml
configMapGenerator:
- name: podinfo-values
files:
- values.yaml=my-values.yaml
configurations:
- kustomizeconfig.yaml
```
When [kustomize-controller](../components/kustomize/controller.md) reconciles the above manifests, it will generate
a unique name of the `ConfigMap` every time `my-values.yaml` content is updated in Git:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: podinfo
namespace: podinfo
spec:
valuesFrom:
- kind: ConfigMap
name: podinfo-values-2mh2t8m94h
```
!!! hint "Note"
Stale `ConfigMaps`, previously generated by Kustomize, will be
removed from the cluster by kustomize-controller if
[pruning](../components/kustomize/kustomization/#garbage-collection) is enabled.
## Refer to values inside the chart
It is possible to replace the `values.yaml` with a different file present inside the Helm chart.
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: mongodb
namespace: mongodb
spec:
interval: 5m
chart:
spec:
chart: mongodb
sourceRef:
kind: HelmRepository
name: bitnami
valuesFiles:
- values.yaml
- values-production.yaml
values:
replicaCount: 5
```
If a file referenced in `spec.chart.spec.valuesFiles` doesn't exist inside the chart, the helm-controller will not be able to
fetch the chart. To determine why the `HelmChart` fails to produce an artifact, you can inspect its status with:
```console
$ kubectl get helmcharts --all-namespaces
NAME READY STATUS
mongodb False failed to locate override values file: values-prod.yaml
```
## Configure notifications
The default toolkit installation configures the helm-controller to
broadcast events to the [notification-controller](../components/notification/controller.md).
To receive the events as notifications, a `Provider` needs to be setup
first as described in the [notifications guide](notifications.md#define-a-provider).
Once you have set up the `Provider`, create a new `Alert` resource in
the `flux-system` to start receiving notifications about the Helm
release:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
name: helm-podinfo
namespace: flux-system
spec:
providerRef:
name: slack
eventSeverity: info
eventSources:
- kind: HelmRepository
name: podinfo
- kind: HelmChart
name: default-podinfo
- kind: HelmRelease
name: podinfo
namespace: default
```
![helm-controller alerts](../_files/helm-controller-alerts.png)
## Configure webhook receivers
When using semver ranges for Helm releases, you may want to trigger an update
as soon as a new chart version is published to your Helm repository.
In order to notify source-controller about a chart update,
you can [setup webhook receivers](webhook-receivers.md).
First generate a random string and create a secret with a `token` field:
```sh
TOKEN=$(head -c 12 /dev/urandom | shasum | cut -d ' ' -f1)
echo $TOKEN
kubectl -n flux-system create secret generic webhook-token \
--from-literal=token=$TOKEN
```
When using [Harbor](https://goharbor.io/) as your Helm repository, you can define a receiver with:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Receiver
metadata:
name: helm-podinfo
namespace: flux-system
spec:
type: harbor
secretRef:
name: webhook-token
resources:
- kind: HelmRepository
name: podinfo
```
The notification-controller generates a unique URL using the provided token and the receiver name/namespace.
Find the URL with:
```console
$ kubectl -n flux-system get receiver/helm-podinfo
NAME READY STATUS
helm-podinfo True Receiver initialised with URL: /hook/bed6d00b5555b1603e1f59b94d7fdbca58089cb5663633fb83f2815dc626d92b
```
Log in to the Harbor interface, go to Projects, select a project, and select Webhooks.
Fill the form with:
* Endpoint URL: compose the address using the receiver LB and the generated URL `http://<LoadBalancerAddress>/<ReceiverURL>`
* Auth Header: use the `token` string
With the above settings, when you upload a chart, the following happens:
* Harbor sends the chart push event to the receiver address
* Notification controller validates the authenticity of the payload using the auth header
* Source controller is notified about the changes
* Source controller pulls the changes into the cluster and updates the `HelmChart` version
* Helm controller is notified about the version change and upgrades the release
!!! hint "Note"
Besides Harbor, you can define receivers for **GitHub**, **GitLab**, **Bitbucket**
and any other system that supports webhooks e.g. Jenkins, CircleCI, etc.
See the [Receiver CRD docs](../components/notification/receiver.md) for more details.

@ -1,949 +0,0 @@
# Automate image updates to Git
This guide walks you through configuring container image scanning and deployment rollouts with Flux.
For a container image you can configure Flux to:
- scan the container registry and fetch the image tags
- select the latest tag based on the defined policy (semver, calver, regex)
- replace the tag in Kubernetes manifests (YAML format)
- checkout a branch, commit and push the changes to the remote Git repository
- apply the changes in-cluster and rollout the container image
!!! warning "Alpha version"
Note that the image update feature is currently alpha,
see the [roadmap](../roadmap/index.md) for more details.
For production environments, this feature allows you to automatically deploy application patches
(CVEs and bug fixes), and keep a record of all deployments in Git history.
**Production CI/CD workflow**
* DEV: push a bug fix to the app repository
* DEV: bump the patch version and release e.g. `v1.0.1`
* CI: build and push a container image tagged as `registry.domain/org/app:v1.0.1`
* CD: pull the latest image metadata from the app registry (Flux image scanning)
* CD: update the image tag in the app manifest to `v1.0.1` (Flux cluster to Git reconciliation)
* CD: deploy `v1.0.1` to production clusters (Flux Git to cluster reconciliation)
For staging environments, this feature allows you to deploy the latest build of a branch,
without having to manually edit the app deployment manifest in Git.
**Staging CI/CD workflow**
* DEV: push code changes to the app repository `main` branch
* CI: build and push a container image tagged as `${GIT_BRANCH}-${GIT_SHA:0:7}-$(date +%s)`
* CD: pull the latest image metadata from the app registry (Flux image scanning)
* CD: update the image tag in the app manifest to `main-2d3fcbd-1611906956` (Flux cluster to Git reconciliation)
* CD: deploy `main-2d3fcbd-1611906956` to staging clusters (Flux Git to cluster reconciliation)
## Prerequisites
You will need a Kubernetes cluster version 1.16 or newer and kubectl version 1.18 or newer.
For a quick local test, you can use [Kubernetes kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
Any other Kubernetes setup will work as well.
In order to follow the guide you'll need a GitHub account and a
[personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line)
that can create repositories (check all permissions under `repo`).
Export your GitHub personal access token and username:
```sh
export GITHUB_TOKEN=<your-token>
export GITHUB_USER=<your-username>
```
## Install Flux
!!! hint "Enable image automation components"
If you bootstrapped Flux before without the `--components-extra=` argument, you need to add
`--components-extra=image-reflector-controller,image-automation-controller` to your
bootstrapping routine as image automation components are not installed by default.
Install Flux with the image automation components:
```sh
flux bootstrap github \
--components-extra=image-reflector-controller,image-automation-controller \
--owner=$GITHUB_USER \
--repository=flux-image-updates \
--branch=main \
--path=clusters/my-cluster \
--token-auth \
--personal
```
The bootstrap command creates a repository if one doesn't exist, and commits the manifests for the
Flux components to the default branch at the specified path. It then configures the target cluster to
synchronize with the specified path inside the repository.
!!! hint "GitLab and other Git platforms"
You can install Flux and bootstrap repositories hosted on GitLab, BitBucket, Azure DevOps and
any other Git provider that supports SSH or token-based authentication.
When using SSH, make sure the deploy key is configured with write access.
Please see the [installation guide](installation.md) for more details.
## Deploy a demo app
We'll be using a tiny webapp called [podinfo](https://github.com/stefanprodan/podinfo) to
showcase the image update feature.
Clone your repository with:
```sh
git clone https://github.com/$GITHUB_USER/flux-image-updates
cd flux-image-updates
```
Add the podinfo Kubernetes deployment file inside `clusters/my-cluster`:
```sh
curl -sL https://raw.githubusercontent.com/stefanprodan/podinfo/5.0.0/kustomize/deployment.yaml \
> ./clusters/my-cluster/podinfo-deployment.yaml
```
Commit and push changes to main branch:
```sh
git add -A && \
git commit -m "add podinfo deployment" && \
git push origin main
```
Tell Flux to pull and apply the changes or wait one minute for Flux to detect the changes on its own:
```sh
flux reconcile kustomization flux-system --with-source
```
Print the podinfo image deployed on your cluster:
```console
$ kubectl get deployment/podinfo -oyaml | grep 'image:'
image: ghcr.io/stefanprodan/podinfo:5.0.0
```
## Configure image scanning
Create an `ImageRepository` to tell Flux which container registry to scan for new tags:
```sh
flux create image repository podinfo \
--image=ghcr.io/stefanprodan/podinfo \
--interval=1m \
--export > ./clusters/my-cluster/podinfo-registry.yaml
```
The above command generates the following manifest:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageRepository
metadata:
name: podinfo
namespace: flux-system
spec:
image: ghcr.io/stefanprodan/podinfo
interval: 1m0s
```
For private images, you can create a Kubernetes secret
in the same namespace as the `ImageRepository` with
`kubectl create secret docker-registry`. Then you can configure
Flux to use the credentials by referencing the Kubernetes secret
in the `ImageRepository`:
```yaml
kind: ImageRepository
spec:
secretRef:
name: regcred
```
!!! hint "Storing secrets in Git"
Note that if you want to store the image pull secret in Git, you can encrypt
the manifest with [Mozilla SOPS](mozilla-sops.md) or [Sealed Secrets](sealed-secrets.md).
Create an `ImagePolicy` to tell Flux which semver range to use when filtering tags:
```sh
flux create image policy podinfo \
--image-ref=podinfo \
--select-semver=5.0.x \
--export > ./clusters/my-cluster/podinfo-policy.yaml
```
The above command generates the following manifest:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
name: podinfo
namespace: flux-system
spec:
imageRepositoryRef:
name: podinfo
policy:
semver:
range: 5.0.x
```
!!! hint "semver ranges"
A semver range that includes stable releases can be defined with
`1.0.x` (patch versions only) or `>=1.0.0 <2.0.0` (minor and patch versions).
If you want to include pre-release e.g. `1.0.0-rc.1`,
you can define a range like: `^1.x-0` or `>1.0.0-rc <2.0.0-rc`.
!!! hint "Other policy examples"
For policies that make use of CalVer, build IDs or alphabetical sorting,
have a look at [the examples](../components/image/imagepolicies.md#examples).
Commit and push changes to main branch:
```sh
git add -A && \
git commit -m "add podinfo image scan" && \
git push origin main
```
Tell Flux to pull and apply changes:
```sh
flux reconcile kustomization flux-system --with-source
```
Wait for Flux to fetch the image tag list from GitHub container registry:
```console
$ flux get image repository podinfo
NAME READY MESSAGE LAST SCAN
podinfo True successful scan, found 13 tags 2020-12-13T17:51:48+02:00
```
Find which image tag matches the policy semver range with:
```console
$ flux get image policy podinfo
NAME READY MESSAGE
podinfo True Latest image tag for 'ghcr.io/stefanprodan/podinfo' resolved to: 5.0.3
```
## Configure image updates
Edit the `podinfo-deployment.yaml` and add a marker to tell Flux which policy to use when updating the container image:
```yaml
spec:
containers:
- name: podinfod
image: ghcr.io/stefanprodan/podinfo:5.0.0 # {"$imagepolicy": "flux-system:podinfo"}
```
Create an `ImageUpdateAutomation` to tell Flux which Git repository to write image updates to:
```sh
flux create image update flux-system \
--git-repo-ref=flux-system \
--git-repo-path="./clusters/my-cluster" \
--checkout-branch=main \
--push-branch=main \
--author-name=fluxcdbot \
--author-email=fluxcdbot@users.noreply.github.com \
--commit-template="{{range .Updated.Images}}{{println .}}{{end}}" \
--export > ./clusters/my-cluster/flux-system-automation.yaml
```
The above command generates the following manifest:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageUpdateAutomation
metadata:
name: flux-system
namespace: flux-system
spec:
interval: 1m0s
sourceRef:
kind: GitRepository
name: flux-system
git:
checkout:
ref:
branch: main
commit:
author:
email: fluxcdbot@users.noreply.github.com
name: fluxcdbot
messageTemplate: '{{range .Updated.Images}}{{println .}}{{end}}'
push:
branch: main
update:
path: ./clusters/my-cluster
strategy: Setters
```
Commit and push changes to main branch:
```sh
git add -A && \
git commit -m "add image updates automation" && \
git push origin main
```
Note that the `ImageUpdateAutomation` runs all the policies found in its namespace at the specified interval.
Tell Flux to pull and apply changes:
```sh
flux reconcile kustomization flux-system --with-source
```
In a couple of seconds, Flux will push a commit to your repository with
the latest image tag that matches the podinfo policy:
```console
$ git pull && cat clusters/my-cluster/podinfo-deployment.yaml | grep "image:"
image: ghcr.io/stefanprodan/podinfo:5.0.3 # {"$imagepolicy": "flux-system:podinfo"}
```
Wait for Flux to apply the latest commit on the cluster and verify that podinfo was updated to `5.0.3`:
```console
$ watch "kubectl get deployment/podinfo -oyaml | grep 'image:'"
image: ghcr.io/stefanprodan/podinfo:5.0.3
```
You can check the status of the image automation objects with:
```sh
flux get images all --all-namespaces
```
## Configure image update for custom resources
Besides Kubernetes native kinds (Deployment, StatefulSet, DaemonSet, CronJob),
Flux can be used to patch image tags in any Kubernetes custom resource stored in Git.
The image policy marker format is:
* `{"$imagepolicy": "<policy-namespace>:<policy-name>"}`
* `{"$imagepolicy": "<policy-namespace>:<policy-name>:tag"}`
`HelmRelease` example:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: podinfo
namespace: default
spec:
values:
image:
repository: ghcr.io/stefanprodan/podinfo
tag: 5.0.0 # {"$imagepolicy": "flux-system:podinfo:tag"}
```
Tekton `Task` example:
```yaml
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: golang
namespace: default
spec:
steps:
- name: golang
image: docker.io/golang:1.15.6 # {"$imagepolicy": "flux-system:golang"}
```
Flux `Kustomization` example:
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: podinfo
namespace: default
spec:
images:
- name: ghcr.io/stefanprodan/podinfo
newName: ghcr.io/stefanprodan/podinfo
newTag: 5.0.0 # {"$imagepolicy": "flux-system:podinfo:tag"}
```
Kustomize config (`kustomization.yaml`) example:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
images:
- name: ghcr.io/stefanprodan/podinfo
newName: ghcr.io/stefanprodan/podinfo
newTag: 5.0.0 # {"$imagepolicy": "flux-system:podinfo:tag"}
```
## Push updates to a different branch
With `.spec.git.push.branch` you can configure Flux to push the image updates to a different branch
than the one used for checkout. If the specified branch doesn't exist, Flux will create it for you.
```yaml
kind: ImageUpdateAutomation
metadata:
name: flux-system
spec:
git:
checkout:
ref:
branch: main
push:
branch: flux-image-updates
```
You can use CI automation, e.g. a GitHub Action such as
[create-pull-request](https://github.com/peter-evans/create-pull-request),
to open a pull request against the checkout branch.
This way you can manually approve the image updates before they are applied to your clusters, as sketched below.
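A sketch of such a workflow, using the GitHub CLI rather than the action mentioned above (the branch names, file name and titles are assumptions):
```yaml
# .github/workflows/image-updates-pr.yaml
name: image-updates-pr
on:
  push:
    branches:
      - flux-image-updates
jobs:
  open-pr:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Open a pull request against main
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Ignore the error if a pull request for this branch already exists
          gh pr create \
            --base main \
            --head flux-image-updates \
            --title "Automated image updates" \
            --body "Image updates pushed by Flux" || true
```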
## Configure the commit message
The `.spec.git.commit.messageTemplate` field is a string which is used as a template for the commit message.
The message template is a [Go text template](https://golang.org/pkg/text/template/) that
lets you range over the objects and images e.g.:
```yaml
kind: ImageUpdateAutomation
metadata:
name: flux-system
spec:
git:
commit:
messageTemplate: |
Automated image update
Automation name: {{ .AutomationObject }}
Files:
{{ range $filename, $_ := .Updated.Files -}}
- {{ $filename }}
{{ end -}}
Objects:
{{ range $resource, $_ := .Updated.Objects -}}
- {{ $resource.Kind }} {{ $resource.Name }}
{{ end -}}
Images:
{{ range .Updated.Images -}}
- {{.}}
{{ end -}}
author:
email: fluxcdbot@users.noreply.github.com
name: fluxcdbot
```
## Trigger image updates with webhooks
You may want to trigger a deployment
as soon as a new image tag is pushed to your container registry.
In order to notify the image-reflector-controller about new images,
you can [setup webhook receivers](webhook-receivers.md).
First generate a random string and create a secret with a `token` field:
```sh
TOKEN=$(head -c 12 /dev/urandom | shasum | cut -d ' ' -f1)
echo $TOKEN
kubectl -n flux-system create secret generic webhook-token \
--from-literal=token=$TOKEN
```
Define a receiver for DockerHub:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Receiver
metadata:
name: podinfo
namespace: flux-system
spec:
type: dockerhub
secretRef:
name: webhook-token
resources:
- kind: ImageRepository
name: podinfo
```
The notification-controller generates a unique URL using the provided token and the receiver name/namespace.
Find the URL with:
```console
$ kubectl -n flux-system get receiver/podinfo
NAME READY STATUS
podinfo True Receiver initialised with URL: /hook/bed6d00b5555b1603e1f59b94d7fdbca58089cb5663633fb83f2815dc626d92b
```
Log in to the DockerHub web interface, go to your image repository's Settings and select Webhooks.
Fill in the "Webhook URL" field by composing the address using the receiver
LB and the generated URL `http://<LoadBalancerAddress>/<ReceiverURL>`.
!!! hint "Note"
Besides DockerHub, you can define receivers for **Harbor**, **Quay**, **Nexus**, **GCR**,
and any other system that supports webhooks e.g. GitHub Actions, Jenkins, CircleCI, etc.
See the [Receiver CRD docs](../components/notification/receiver.md) for more details.
## Incident management
### Suspend automation
During an incident you may wish to stop Flux from pushing image updates to Git.
You can suspend the image automation directly in-cluster:
```sh
flux suspend image update flux-system
```
Or by editing the `ImageUpdateAutomation` manifest in Git:
```yaml
kind: ImageUpdateAutomation
metadata:
name: flux-system
namespace: flux-system
spec:
suspend: true
```
Once the incident is resolved, you can resume automation with:
```sh
flux resume image update flux-system
```
If you wish to pause the automation for a particular image only,
you can suspend/resume the image scanning:
```sh
flux suspend image repository podinfo
```
### Revert image updates
Assuming you've configured Flux to update an app to its latest stable version:
```sh
flux create image policy podinfo \
--image-ref=podinfo \
--select-semver=">=5.0.0"
```
If the latest version e.g. `5.0.1` causes an incident in production, you can tell Flux to
revert the image tag to a previous version e.g. `5.0.0` with:
```sh
flux create image policy podinfo \
--image-ref=podinfo \
--select-semver=5.0.0
```
Or by changing the semver range in Git:
```yaml
kind: ImagePolicy
metadata:
name: podinfo
namespace: flux-system
spec:
policy:
semver:
range: 5.0.0
```
Based on the above configuration, Flux will patch the podinfo deployment manifest in Git
and roll out `5.0.0` in-cluster.
When a new version is available e.g. `5.0.2`, you can update the policy once more
and tell Flux to consider only versions greater than `5.0.1`:
```sh
flux create image policy podinfo \
--image-ref=podinfo \
--select-semver=">5.0.1"
```
## ImageRepository cloud providers authentication
If relying on a cloud provider image repository, you might need to do some extra
work in order to configure the ImageRepository resource credentials. Here are
some common examples for the most popular cloud provider docker registries.
!!! warning "Workarounds"
The examples below are intended as workaround solutions until native
authentication mechanisms are implemented in Flux itself to support this in
a more straightforward manner.
### AWS Elastic Container Registry
The registry authentication credentials for ECR expire every 12 hours.
Considering this limitation, one needs to ensure the credentials are being
refreshed before expiration so that the controller can rely on them for
authentication.
The solution proposed is to create a cronjob that runs every 6 hours which would
re-create the `docker-registry` secret using a new token.
Edit and save the following snippet to a file
`./clusters/my-cluster/ecr-sync.yaml`, commit and push it to git.
```yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ecr-credentials-sync
namespace: flux-system
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- delete
- create
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ecr-credentials-sync
namespace: flux-system
subjects:
- kind: ServiceAccount
name: ecr-credentials-sync
roleRef:
kind: Role
name: ecr-credentials-sync
apiGroup: ""
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ecr-credentials-sync
namespace: flux-system
# Uncomment and edit if using IRSA
# annotations:
# eks.amazonaws.com/role-arn: <role arn>
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: ecr-credentials-sync
namespace: flux-system
spec:
suspend: false
schedule: 0 */6 * * *
failedJobsHistoryLimit: 1
successfulJobsHistoryLimit: 1
jobTemplate:
spec:
template:
spec:
serviceAccountName: ecr-credentials-sync
restartPolicy: Never
volumes:
- name: token
emptyDir:
medium: Memory
initContainers:
- image: amazon/aws-cli
name: get-token
imagePullPolicy: IfNotPresent
# You will need to set the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables if not using
# IRSA. It is recommended to store the values in a Secret and load them in the container using envFrom.
# envFrom:
# - secretRef:
# name: aws-credentials
env:
- name: REGION
value: us-east-1 # change this if ECR repo is in a different region
volumeMounts:
- mountPath: /token
name: token
command:
- /bin/sh
- -ce
- aws ecr get-login-password --region ${REGION} > /token/ecr-token
containers:
- image: bitnami/kubectl
name: create-secret
imagePullPolicy: IfNotPresent
env:
- name: SECRET_NAME
value: ecr-credentials
- name: ECR_REGISTRY
value: <account id>.dkr.ecr.<region>.amazonaws.com # fill in the account id and region
volumeMounts:
- mountPath: /token
name: token
command:
- /bin/bash
- -ce
- |-
kubectl delete secret --ignore-not-found $SECRET_NAME
kubectl create secret docker-registry $SECRET_NAME \
--docker-server="$ECR_REGISTRY" \
--docker-username=AWS \
--docker-password="$(</token/ecr-token)"
```
!!! hint "Using IAM Roles for Service Accounts (IRSA)"
If using IRSA, make sure the role attached to the service account has
readonly access to ECR. The AWS managed policy
`arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` can be attached
to the role.
Since the cronjob will not create a job right away, after applying the manifest,
you can manually create an init job using the following command:
```console
$ kubectl create job --from=cronjob/ecr-credentials-sync -n flux-system ecr-credentials-sync-init
```
After the job runs, a secret named `ecr-credentials` should be created. Use this
name in your ECR ImageRepository resource manifest as the value for
`.spec.secretRef.name`.
```yaml
spec:
secretRef:
name: ecr-credentials
```
### GCP Container Registry
#### Using access token [short-lived]
!!! note "Workload Identity"
Please ensure that you enable workload identity for your cluster, create a GCP service account that has
access to the container registry and create an IAM policy binding between the GCP service account and
the Kubernetes service account so that the pods created by the cronjob can access GCP APIs and get the token.
Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
The access token for GCR expires hourly.
Considering this limitation, one needs to ensure the credentials are being
refreshed before expiration so that the controller can rely on them for
authentication.
The solution proposed is to create a cronjob that runs every 45 minutes which would
re-create the `docker-registry` secret using a new token.
Edit and save the following snippet to a file
`./clusters/my-cluster/gcr-sync.yaml`, commit and push it to git.
```yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: gcr-credentials-sync
namespace: flux-system
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- delete
- create
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: gcr-credentials-sync
namespace: flux-system
subjects:
- kind: ServiceAccount
name: gcr-credentials-sync
roleRef:
kind: Role
name: gcr-credentials-sync
apiGroup: ""
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
iam.gke.io/gcp-service-account: <name-of-service-account>@<project-id>.iam.gserviceaccount.com
name: gcr-credentials-sync
namespace: flux-system
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: gcr-credentials-sync
namespace: flux-system
spec:
suspend: false
schedule: "*/45 * * * *"
failedJobsHistoryLimit: 1
successfulJobsHistoryLimit: 1
jobTemplate:
spec:
template:
spec:
serviceAccountName: gcr-credentials-sync
restartPolicy: Never
containers:
- image: google/cloud-sdk
name: create-secret
imagePullPolicy: IfNotPresent
env:
- name: SECRET_NAME
value: gcr-credentials
- name: GCR_REGISTRY
value: <REGISTRY_NAME> # fill in the registry name e.g gcr.io, eu.gcr.io
command:
- /bin/bash
- -ce
- |-
kubectl delete secret --ignore-not-found $SECRET_NAME
kubectl create secret docker-registry $SECRET_NAME \
--docker-server="$GCR_REGISTRY" \
--docker-username=oauth2accesstoken \
--docker-password="$(gcloud auth print-access-token)"
```
Since the cronjob will not create a job right away, after applying the manifest,
you can manually create an init job using the following command:
```console
$ kubectl create job --from=cronjob/gcr-credentials-sync -n flux-system gcr-credentials-sync-init
```
After the job runs, a secret named `gcr-credentials` should be created. Use this
name in your GCR ImageRepository resource manifest as the value for
`.spec.secretRef.name`.
```yaml
spec:
secretRef:
name: gcr-credentials
```
#### Using a JSON key [long-lived]
!!! warning "Less secure option"
From [Google documentation on authenticating container registry](https://cloud.google.com/container-registry/docs/advanced-authentication#json-key)
> A user-managed key-pair that you can use as a credential for a service account.
> Because the credential is long-lived, it is the least secure option of all the available authentication methods.
> When possible, use an access token or another available authentication method to reduce the risk of
> unauthorized access to your artifacts. If you must use a service account key,
> ensure that you follow best practices for managing credentials.
A JSON key doesn't expire, so we don't need a cronjob;
we just need to create the secret and reference it in the `ImageRepository`.
First, create a JSON key file by following this
[documentation](https://cloud.google.com/container-registry/docs/advanced-authentication).
Grant the service account the role of `Container Registry Service Agent`
so that it can access GCR, and download the JSON file.
Then create a secret, encrypt it using [Mozilla SOPS](mozilla-sops.md)
or [Sealed Secrets](sealed-secrets.md), and commit and push the encrypted file to Git.
```sh
# Replace <GCR-REGISTRY> with your registry host, e.g. gcr.io
kubectl create secret docker-registry <secret-name> \
  --docker-server=<GCR-REGISTRY> \
  --docker-username=_json_key \
  --docker-password="$(cat <downloaded-json-file>)"
```
### Azure Container Registry
AKS clusters are not able to pull and run images from ACR by default.
Read [Integrating AKS with ACR](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration) as a potential prerequisite
before integrating Flux `ImageRepositories` with ACR.
Note that the resulting ImagePullSecret for Flux could also be specified by Pods within the same Namespace to pull and run ACR images as well.
#### Generating Tokens for Managed Identities [short-lived]
As a pre-requisite, your AKS cluster will need [AAD Pod Identity](../use-cases/azure.md#aad-pod-identity) installed.
Once we have AAD Pod Identity installed, we can create a Deployment that frequently refreshes an image pull secret into
our desired Namespace.
Create a directory in your control repository and save this `kustomization.yaml`:
```yaml
# kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://github.com/fluxcd/flux2/manifests/integrations/registry-credentials-sync/azure?ref=main
patchesStrategicMerge:
- config-patches.yaml
```
Save and configure the following patch -- note the instructional comments for configuring matching Azure resources:
```yaml
# config-patches.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: credentials-sync
data:
ACR_NAME: my-registry
KUBE_SECRET: my-registry # does not yet exist -- will be created in the same Namespace
SYNC_PERIOD: "3600" # ACR tokens expire every 3 hours; refresh faster than that
# Create an identity in Azure and assign it a role to pull from ACR (note: the identity's resourceGroup should match the desired ACR):
# az identity create -n acr-sync
# az role assignment create --role AcrPull --assignee-object-id "$(az identity show -n acr-sync -o tsv --query principalId)"
# Fetch the clientID and resourceID to configure the AzureIdentity spec below:
# az identity show -n acr-sync -otsv --query clientId
# az identity show -n acr-sync -otsv --query resourceId
---
apiVersion: aadpodidentity.k8s.io/v1
kind: AzureIdentity
metadata:
name: credentials-sync # name must match the stub-resource in az-identity.yaml
namespace: flux-system
spec:
clientID: 4ceaa448-d7b9-4a80-8f32-497eaf3d3287
resourceID: /subscriptions/8c69185e-55f9-4d00-8e71-a1b1bb1386a1/resourcegroups/stealthybox/providers/Microsoft.ManagedIdentity/userAssignedIdentities/acr-sync
type: 0 # user-managed identity
```
Verify that `kustomize build .` works, then commit the directory to your control repo.
Flux will apply the Deployment and it will use the AAD managed identity for that Pod to regularly fetch ACR tokens into your configured `KUBE_SECRET` name.
Reference the `KUBE_SECRET` value from any `ImageRepository` objects for that ACR registry.
This example uses the `fluxcd/flux2` github archive as a remote base, but you may copy the [./manifests/integrations/registry-credentials-sync/azure](https://github.com/fluxcd/flux2/tree/main/manifests/integrations/registry-credentials-sync/azure)
folder into your own repository or use a git submodule to vendor it if preferred.
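Referencing the synced secret from an `ImageRepository` might look like this (a sketch; the registry host and image name are placeholders, and the `secretRef` name must match the `KUBE_SECRET` configured above):
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImageRepository
metadata:
  name: my-app
  namespace: flux-system
spec:
  image: my-registry.azurecr.io/my-app
  interval: 1m
  secretRef:
    name: my-registry
```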
#### Using Static Credentials [long-lived]
!!! info
Using a static credential requires a Secrets management solution compatible with your GitOps workflow.
Follow the official Azure documentation for [Creating an Image Pull Secret for ACR](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-auth-kubernetes).
Instead of creating the Secret directly in your Kubernetes cluster, encrypt it using [Mozilla SOPS](mozilla-sops.md)
or [Sealed Secrets](sealed-secrets.md), then commit and push the encrypted file to Git.
This Secret should be in the same Namespace as your flux `ImageRepository` object.
Update the `ImageRepository.spec.secretRef` to point to it.
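For example (the secret name is a placeholder):
```yaml
spec:
  secretRef:
    name: acr-pull-secret
```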
It is also possible to create [Repository Scoped Tokens](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-repository-scoped-permissions).
!!! warning
Repository Scoped Tokens are in preview and do have limitations.

@ -1,570 +0,0 @@
# Installation
This guide walks you through setting up Flux v2 (hereafter: "Flux") to
manage one or more Kubernetes clusters.
## Prerequisites
You will need a Kubernetes cluster version **1.16** or newer
and kubectl version **1.18** or newer.
## Install the Flux CLI
With Homebrew:
```sh
brew install fluxcd/tap/flux
```
With Bash:
```sh
curl -s https://fluxcd.io/install.sh | sudo bash
# enable completions in ~/.bash_profile
. <(flux completion bash)
```
Command-line completion for `zsh`, `fish`, and `powershell`
is also supported, each with its own sub-command.
Binaries for macOS, Windows and Linux AMD64/ARM are available for download on the
[release page](https://github.com/fluxcd/flux2/releases).
A container image with `kubectl` and `flux` is available on DockerHub and GitHub:
* `docker.io/fluxcd/flux-cli:<version>`
* `ghcr.io/fluxcd/flux-cli:<version>`
Verify that your cluster satisfies the prerequisites with:
```sh
flux check --pre
```
## Bootstrap
Using the `flux bootstrap` command you can install Flux on a
Kubernetes cluster and configure it to manage itself from a Git
repository.
If the Flux components are present on the cluster, the bootstrap
command will perform an upgrade if needed. The bootstrap is
idempotent; it's safe to run the command as many times as you want.
The Flux component images are published to DockerHub and GitHub Container Registry
as [multi-arch container images](https://docs.docker.com/docker-for-mac/multi-arch/)
with support for Linux `amd64`, `arm64` and `armv7` (e.g. 32bit Raspberry Pi)
architectures.
If your Git provider is **GitHub**, **GitLab** or **Azure DevOps** please follow the specific bootstrap procedure:
* [GitHub.com and GitHub Enterprise](#github-and-github-enterprise)
* [GitLab.com and GitLab Enterprise](#gitlab-and-gitlab-enterprise)
* [Azure DevOps](../use-cases/azure.md#flux-installation-for-azure-devops)
### Generic Git Server
The `bootstrap git` command takes an existing Git repository, clones it and
commits the Flux components manifests to the specified branch. Then it
configures the target cluster to synchronize with that repository.
Run bootstrap for a Git repository and authenticate with your SSH agent:
```sh
flux bootstrap git \
--url=ssh://git@<host>/<org>/<repository> \
--branch=<my-branch> \
--path=clusters/my-cluster
```
The above command will generate an SSH key (defaults to RSA 2048 but can be changed with `--ssh-key-algorithm`),
and it will prompt you to add the SSH public key as a deploy key to your repository.
If you want to use your own SSH key, you can provide a **passwordless** private key using
`--private-key-file=<path/to/private.key>`.
This option can also be used if no SSH agent is available on your machine.
!!! hint "Bootstrap options"
There are many options available when bootstrapping Flux, such as installing a subset of Flux components,
setting the Kubernetes context, changing the Git author name and email, enabling Git submodules, and more.
To list all the available options run `flux bootstrap git --help`.
If your Git server doesn't support SSH, you can run bootstrap for Git over HTTPS:
```sh
flux bootstrap git \
--url=https://<host>/<org>/<repository> \
--username=<my-username> \
--password=<my-password> \
--token-auth=true \
--path=clusters/my-cluster
```
If your Git server uses a self-signed TLS certificate, you can specify the CA file with
`--ca-file=<path/to/ca.crt>`.
With `--path` you can configure the directory which will be used to reconcile the target cluster.
To control multiple clusters from the same Git repository, you have to set a unique path per
cluster e.g. `clusters/staging` and `clusters/production`:
```sh
./clusters/
├── staging # <- path=clusters/staging
│   └── flux-system # <- namespace dir generated by bootstrap
│   ├── gotk-components.yaml
│   ├── gotk-sync.yaml
│   └── kustomization.yaml
└── production # <- path=clusters/production
└── flux-system
```
After running bootstrap you can place Kubernetes manifests inside a directory under the configured path,
e.g. `clusters/staging/my-app`, and Flux will reconcile them on your cluster.
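For example, a hypothetical app added under the staging path could be laid out like this:
```sh
./clusters/staging/
├── flux-system/
└── my-app/
    ├── deployment.yaml
    ├── service.yaml
    └── kustomization.yaml
```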
For examples on how you can structure your Git repository see:
* [flux2-kustomize-helm-example](https://github.com/fluxcd/flux2-kustomize-helm-example)
* [flux2-multi-tenancy](https://github.com/fluxcd/flux2-multi-tenancy)
### GitHub and GitHub Enterprise
The `bootstrap github` command creates a GitHub repository if one doesn't exist and
commits the Flux components manifests to the specified branch. Then it
configures the target cluster to synchronize with that repository by
setting up an SSH deploy key or by using token-based authentication.
Generate a [personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line)
that can create repositories by checking all permissions under `repo`.
Export your GitHub personal access token as an environment variable:
```sh
export GITHUB_TOKEN=<your-token>
```
Run the bootstrap for a repository on your personal GitHub account:
```sh
flux bootstrap github \
--owner=my-github-username \
--repository=my-repository \
--path=clusters/my-cluster \
--personal
```
!!! hint "Deploy key"
The bootstrap command creates an SSH key which it stores as a secret in the
Kubernetes cluster. The key is also used to create a deploy key in the GitHub
repository. The new deploy key will be linked to the personal access token used
to authenticate. **Removing the personal access token will also remove the deploy key.**
Run the bootstrap for a repository owned by a GitHub organization:
```sh
flux bootstrap github \
--owner=my-github-organization \
--repository=my-repository \
--team=team1-slug \
--team=team2-slug \
--path=clusters/my-cluster
```
When you specify a list of teams, those teams will be granted maintainer access to the repository.
To run the bootstrap for a repository hosted on GitHub Enterprise, you have to specify your GitHub hostname:
```sh
flux bootstrap github \
--hostname=my-github-enterprise.com \
--ssh-hostname=my-github-enterprise.com \
--owner=my-github-organization \
--repository=my-repository \
--branch=main \
--path=clusters/my-cluster
```
If your GitHub Enterprise has SSH access disabled, you can use HTTPS and token authentication with:
```sh
flux bootstrap github \
--token-auth \
--hostname=my-github-enterprise.com \
--owner=my-github-organization \
--repository=my-repository \
--branch=main \
--path=clusters/my-cluster
```
### GitLab and GitLab Enterprise
The `bootstrap gitlab` command creates a GitLab repository if one doesn't exist and
commits the Flux components manifests to the specified branch. Then it
configures the target cluster to synchronize with that repository by
setting up an SSH deploy key or by using token-based authentication.
Generate a [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)
that grants complete read/write access to the GitLab API.
Export your GitLab personal access token as an environment variable:
```sh
export GITLAB_TOKEN=<your-token>
```
Run the bootstrap for a repository on your personal GitLab account:
```sh
flux bootstrap gitlab \
--owner=my-gitlab-username \
--repository=my-repository \
--branch=master \
--path=clusters/my-cluster \
--token-auth \
--personal
```
To run the bootstrap for a repository using deploy keys for authentication, you have to specify the SSH hostname:
```sh
flux bootstrap gitlab \
--ssh-hostname=gitlab.com \
--owner=my-gitlab-username \
--repository=my-repository \
--branch=master \
--path=clusters/my-cluster
```
!!! hint "Authentication"
When providing the `--ssh-hostname`, a read-only (SSH) deploy key will be added
to your repository, otherwise your GitLab personal token will be used to
authenticate against the HTTPS endpoint instead.
Run the bootstrap for a repository owned by a GitLab (sub)group:
```sh
flux bootstrap gitlab \
--owner=my-gitlab-group/my-gitlab-subgroup \
--repository=my-repository \
--branch=master \
--path=clusters/my-cluster
```
To run the bootstrap for a repository hosted on GitLab on-prem or enterprise, you have to specify your GitLab hostname:
```sh
flux bootstrap gitlab \
--hostname=my-gitlab.com \
--token-auth \
--owner=my-gitlab-group \
--repository=my-repository \
--branch=master \
--path=clusters/my-cluster
```
### Air-gapped Environments
To bootstrap Flux on air-gapped environments without access to github.com and ghcr.io, you'll first need
to download the `flux` binary and the container images from a computer with internet access.
List all container images:
```console
$ flux install --export | grep ghcr.io
image: ghcr.io/fluxcd/helm-controller:v0.8.0
image: ghcr.io/fluxcd/kustomize-controller:v0.9.0
image: ghcr.io/fluxcd/notification-controller:v0.9.0
image: ghcr.io/fluxcd/source-controller:v0.9.0
```
Pull the images locally and push them to your container registry:
```sh
docker pull ghcr.io/fluxcd/source-controller:v0.9.0
docker tag ghcr.io/fluxcd/source-controller:v0.9.0 registry.internal/fluxcd/source-controller:v0.9.0
docker push registry.internal/fluxcd/source-controller:v0.9.0
```
Copy the `flux` binary to a computer with access to your air-gapped cluster,
and create the pull secret in the `flux-system` namespace:
```sh
kubectl create ns flux-system
kubectl -n flux-system create secret generic regcred \
--from-file=.dockerconfigjson=$HOME/.docker/config.json \
--type=kubernetes.io/dockerconfigjson
```
Finally, bootstrap Flux using the images from your private registry:
```sh
flux bootstrap <GIT-PROVIDER> \
--registry=registry.internal/fluxcd \
--image-pull-secret=regcred \
--hostname=my-git-server.internal
```
Note that when running `flux bootstrap` without specifying a `--version`,
the CLI will use the manifests embedded in its binary instead of downloading
them from GitHub. You can determine which version you'll be installing
with `flux --version`.
## Bootstrap with Terraform
The bootstrap procedure can be implemented with Terraform using the Flux provider published on
[registry.terraform.io](https://registry.terraform.io/providers/fluxcd/flux).
The provider consists of two data sources (`flux_install` and `flux_sync`) for generating the
Kubernetes manifests that can be used to install or upgrade Flux:
```hcl
data "flux_install" "main" {
target_path = "clusters/my-cluster"
network_policy = false
version = "latest"
}
data "flux_sync" "main" {
target_path = "clusters/my-cluster"
url = "https://github.com/${var.github_owner}/${var.repository_name}"
branch = "main"
}
```
For more details on how to use the Terraform provider
please see [fluxcd/terraform-provider-flux](https://github.com/fluxcd/terraform-provider-flux).
## Customize Flux manifests
You can customize the Flux components before or after running bootstrap.
Assuming you want to customise the Flux controllers before they get deployed on the cluster,
first you'll need to create a Git repository and clone it locally.
Create the file structure required by bootstrap with:
```sh
mkdir -p clusters/my-cluster/flux-system
touch clusters/my-cluster/flux-system/gotk-components.yaml \
clusters/my-cluster/flux-system/gotk-patches.yaml \
clusters/my-cluster/flux-system/gotk-sync.yaml \
clusters/my-cluster/flux-system/kustomization.yaml
```
Assuming you want to add custom annotations and labels to the Flux controllers,
edit `clusters/my-cluster/flux-system/gotk-patches.yaml` and set the metadata for the source-controller and kustomize-controller pods:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: source-controller
namespace: flux-system
spec:
template:
metadata:
annotations:
custom: annotation
labels:
custom: label
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kustomize-controller
namespace: flux-system
spec:
template:
metadata:
annotations:
custom: annotation
labels:
custom: label
```
Edit `clusters/my-cluster/flux-system/kustomization.yaml` and set the resources and patches:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- gotk-components.yaml
- gotk-sync.yaml
patchesStrategicMerge:
- gotk-patches.yaml
```
Push the changes to the main branch:
```sh
git add -A && git commit -m "add flux customisations" && git push
```
Now run the bootstrap for `clusters/my-cluster`:
```sh
flux bootstrap git \
--url=ssh://git@<host>/<org>/<repository> \
--branch=main \
--path=clusters/my-cluster
```
When the controllers are deployed for the first time on your cluster, they will contain all
the customisations from `gotk-patches.yaml`.
You can make changes to the patches after bootstrap and Flux will apply them in-cluster on its own.
## Dev install
For testing purposes you can install Flux without storing its manifests in a Git repository:
```sh
flux install
```
Or using kubectl:
```sh
kubectl apply -f https://github.com/fluxcd/flux2/releases/latest/download/install.yaml
```
Then you can register Git repositories and reconcile them on your cluster:
```sh
flux create source git podinfo \
--url=https://github.com/stefanprodan/podinfo \
--tag-semver=">=4.0.0" \
--interval=1m
flux create kustomization podinfo-default \
--source=podinfo \
--path="./kustomize" \
--prune=true \
--validation=client \
--interval=10m \
--health-check="Deployment/podinfo.default" \
--health-check-timeout=2m
```
You can register Helm repositories and create Helm releases:
```sh
flux create source helm bitnami \
--interval=1h \
--url=https://charts.bitnami.com/bitnami
flux create helmrelease nginx \
--interval=1h \
--release-name=nginx-ingress-controller \
--target-namespace=kube-system \
--source=HelmRepository/bitnami \
--chart=nginx-ingress-controller \
--chart-version="5.x.x"
```
## Upgrade
!!! note "Patch versions"
It is safe and advised to use the latest PATCH version when upgrading to a
new MINOR version.
Update Flux CLI to the latest release with `brew upgrade fluxcd/tap/flux` or by
downloading the binary from [GitHub](https://github.com/fluxcd/flux2/releases).
Verify that you are running the latest version with:
```sh
flux --version
```
### Bootstrap upgrade
If you've used the [bootstrap](#bootstrap) procedure to deploy Flux,
then rerun the bootstrap command for each cluster using the same arguments as before:
```sh
flux bootstrap github \
--owner=my-github-username \
--repository=my-repository \
--branch=main \
--path=clusters/my-cluster \
--personal
```
The above command will clone the repository, update the components manifest in
`<path>/flux-system/gotk-components.yaml` and push the changes to the remote branch.
Tell Flux to pull the manifests from Git and upgrade itself with:
```sh
flux reconcile source git flux-system
```
Verify that the controllers have been upgraded with:
```sh
flux check
```
!!! hint "Automated upgrades"
You can automate the components manifest update with GitHub Actions
and open a PR when there is a new Flux version available.
For more details please see [Flux GitHub Action docs](https://github.com/fluxcd/flux2/tree/main/action).
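A minimal workflow sketch (assuming your manifests live at `clusters/my-cluster/flux-system` and that you combine the `fluxcd/flux2` GitHub Action with a pull-request action) could look like this:
```yaml
name: update-flux
on:
  workflow_dispatch:
  schedule:
    - cron: "0 * * * *"
jobs:
  components:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup Flux CLI
        uses: fluxcd/flux2/action@main
      - name: Update component manifests
        run: >-
          flux install --export
          > ./clusters/my-cluster/flux-system/gotk-components.yaml
      - name: Open pull request
        uses: peter-evans/create-pull-request@v3
        with:
          commit-message: Update Flux components
          title: Update Flux components
          branch: update-flux
```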
### Terraform upgrade
Update the Flux provider to the [latest release](https://github.com/fluxcd/terraform-provider-flux/releases)
and run `terraform apply`.
Tell Flux to upgrade itself in-cluster or wait for it to pull the latest commit from Git:
```sh
kubectl annotate --overwrite gitrepository/flux-system reconcile.fluxcd.io/requestedAt="$(date +%s)"
```
### In-cluster upgrade
If you've installed Flux directly on the cluster, then rerun the install command:
```sh
flux install
```
The above command will apply the new manifests on your cluster.
You can verify that the controllers have been upgraded to the latest version with `flux check`.
If you've installed Flux directly on the cluster with kubectl,
then rerun the command using the latest manifests from the `main` branch:
```sh
kustomize build https://github.com/fluxcd/flux2/manifests/install?ref=main | kubectl apply -f-
```
## Uninstall
You can uninstall Flux with:
```sh
flux uninstall --namespace=flux-system
```
The above command performs the following operations:
- deletes Flux components (deployments and services)
- deletes Flux network policies
- deletes Flux RBAC (service accounts, cluster roles and cluster role bindings)
- removes the Kubernetes finalizers from Flux custom resources
- deletes Flux custom resource definitions and custom resources
- deletes the namespace where Flux was installed
If you've installed Flux in a namespace that you wish to preserve, you
can skip the namespace deletion with:
```sh
flux uninstall --namespace=infra --keep-namespace
```
!!! hint
Note that the `uninstall` command will not remove any Kubernetes objects
or Helm releases that were reconciled on the cluster by Flux.

@ -1,112 +0,0 @@
# Monitoring
This guide walks you through configuring monitoring for the Flux control plane.
Flux comes with a monitoring stack composed of:
* **Prometheus** server - collects metrics from the toolkit controllers and stores them for 2h
* **Grafana** dashboards - displays the control plane resource usage and reconciliation stats
## Install the monitoring stack
To install the monitoring stack with `flux`, first register the toolkit Git repository on your cluster:
```sh
flux create source git monitoring \
--interval=30m \
--url=https://github.com/fluxcd/flux2 \
--branch=main
```
Then apply the [manifests/monitoring](https://github.com/fluxcd/flux2/tree/main/manifests/monitoring)
kustomization:
```sh
flux create kustomization monitoring \
--interval=1h \
--prune=true \
--source=monitoring \
--path="./manifests/monitoring" \
--health-check="Deployment/prometheus.flux-system" \
--health-check="Deployment/grafana.flux-system"
```
You can access Grafana using port forwarding:
```sh
kubectl -n flux-system port-forward svc/grafana 3000:3000
```
## Grafana dashboards
Control plane dashboard [http://localhost:3000/d/gitops-toolkit-control-plane](http://localhost:3000/d/gitops-toolkit-control-plane/gitops-toolkit-control-plane):
![](../_files/cp-dashboard-p1.png)
![](../_files/cp-dashboard-p2.png)
Cluster reconciliation dashboard [http://localhost:3000/d/gitops-toolkit-cluster](http://localhost:3000/d/gitops-toolkit-cluster/gitops-toolkit-cluster-stats):
![](../_files/cluster-dashboard.png)
If you wish to use your own Prometheus and Grafana instances, then you can import the dashboards from
[GitHub](https://github.com/fluxcd/flux2/tree/main/manifests/monitoring/grafana/dashboards).
!!! hint
Note that the toolkit controllers expose the `/metrics` endpoint on port `8080`.
When using Prometheus Operator you should create a `PodMonitor` object for each controller to configure scraping.
```yaml
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: source-controller
namespace: flux-system
spec:
namespaceSelector:
matchNames:
- flux-system
selector:
matchLabels:
app: source-controller
podMetricsEndpoints:
- port: http-prom
```
## Metrics
For each `toolkit.fluxcd.io` kind,
the controllers expose a gauge metric to track the Ready condition status,
and a histogram with the reconciliation duration in seconds.
Ready status metrics:
```sh
gotk_reconcile_condition{kind, name, namespace, type="Ready", status="True"}
gotk_reconcile_condition{kind, name, namespace, type="Ready", status="False"}
gotk_reconcile_condition{kind, name, namespace, type="Ready", status="Unknown"}
gotk_reconcile_condition{kind, name, namespace, type="Ready", status="Deleted"}
```
Time spent reconciling:
```
gotk_reconcile_duration_seconds_bucket{kind, name, namespace, le}
gotk_reconcile_duration_seconds_sum{kind, name, namespace}
gotk_reconcile_duration_seconds_count{kind, name, namespace}
```
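For example, the histogram can be used to chart the per-kind p99 reconciliation duration (a sketch of a Prometheus query):
```
histogram_quantile(0.99,
  sum(rate(gotk_reconcile_duration_seconds_bucket[5m])) by (le, kind)
)
```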
Alert manager example:
```yaml
groups:
- name: GitOpsToolkit
rules:
- alert: ReconciliationFailure
expr: max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) + on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) by (namespace, name, kind)) * 2 == 1
for: 10m
labels:
severity: page
annotations:
summary: '{{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation has been failing for more than ten minutes.'
```

@ -1,393 +0,0 @@
# Manage Kubernetes secrets with Mozilla SOPS
In order to store secrets safely in a public or private Git repository, you can use
Mozilla's [SOPS](https://github.com/mozilla/sops) CLI to encrypt
Kubernetes secrets with OpenPGP, AWS KMS, GCP KMS and Azure Key Vault.
## Prerequisites
To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).
Install [gnupg](https://www.gnupg.org/) and [SOPS](https://github.com/mozilla/sops):
```sh
brew install gnupg sops
```
## Generate a GPG key
Generate a GPG/OpenPGP key with no passphrase (`%no-protection`):
```sh
export KEY_NAME="cluster0.yourdomain.com"
export KEY_COMMENT="flux secrets"
gpg --batch --full-generate-key <<EOF
%no-protection
Key-Type: 1
Key-Length: 4096
Subkey-Type: 1
Subkey-Length: 4096
Expire-Date: 0
Name-Comment: ${KEY_COMMENT}
Name-Real: ${KEY_NAME}
EOF
```
The above configuration creates an rsa4096 key that does not expire.
For a full list of options to consider for your environment, see
[Unattended GPG key generation](https://www.gnupg.org/documentation/manuals/gnupg/Unattended-GPG-key-generation.html).
Retrieve the GPG key fingerprint (second row of the sec column):
```sh
gpg --list-secret-keys "${KEY_NAME}"
sec rsa4096 2020-09-06 [SC]
1F3D1CED2F865F5E59CA564553241F147E7C5FA4
```
Store the key fingerprint as an environment variable:
```sh
export KEY_FP=1F3D1CED2F865F5E59CA564553241F147E7C5FA4
```
Export the public and private keypair from your local GPG keyring and
create a Kubernetes secret named `sops-gpg` in the `flux-system` namespace:
```sh
gpg --export-secret-keys --armor "${KEY_FP}" |
kubectl create secret generic sops-gpg \
--namespace=flux-system \
--from-file=sops.asc=/dev/stdin
```
It's a good idea to back up this secret-key/K8s-Secret with a password manager or offline storage.
Also consider deleting the secret decryption key from your machine:
```sh
gpg --delete-secret-keys "${KEY_FP}"
```
## Configure in-cluster secrets decryption
Register the Git repository on your cluster:
```sh
flux create source git my-secrets \
--url=https://github.com/my-org/my-secrets
```
Create a kustomization for reconciling the secrets on the cluster:
```sh
flux create kustomization my-secrets \
--source=my-secrets \
--path=./clusters/cluster0 \
--prune=true \
--interval=10m \
--decryption-provider=sops \
--decryption-secret=sops-gpg
```
Note that the `sops-gpg` secret can contain more than one key; SOPS will try to decrypt the
secrets by iterating over all the private keys until it finds one that works.
## Optional: Export the public key into the Git directory
Commit the public key to the repository so that team members who clone the repo can encrypt new files:
```sh
gpg --export --armor "${KEY_FP}" > ./clusters/cluster0/.sops.pub.asc
```
Check the file contents to ensure it's the public key before adding it to the repo and committing.
```sh
git add ./clusters/cluster0/.sops.pub.asc
git commit -am 'Share GPG public key for secrets generation'
```
Team members can then import this key when they pull the Git repository:
```sh
gpg --import ./clusters/cluster0/.sops.pub.asc
```
!!! hint
The public key is sufficient for creating brand new files.
The secret key is required for decrypting and editing existing files because SOPS computes a MAC on all values.
When using solely the public key to add or remove a field, the whole file should be deleted and recreated.
## Configure the Git directory for encryption
Write a [SOPS config file](https://github.com/mozilla/sops#using-sops-yaml-conf-to-select-kms-pgp-for-new-files)
to the specific cluster or namespace directory used
to store encrypted objects with this particular GPG key's fingerprint.
```sh
cat <<EOF > ./clusters/cluster0/.sops.yaml
creation_rules:
- path_regex: .*.yaml
encrypted_regex: ^(data|stringData)$
pgp: ${KEY_FP}
EOF
```
This config applies recursively to all sub-directories.
Multiple directories can use separate SOPS configs.
Contributors using the `sops` CLI to create and encrypt files
won't have to worry about specifying the proper key for the target cluster or namespace.
`encrypted_regex` helps encrypt the `data` and `stringData` fields for Secrets.
You may wish to add other fields if you are encrypting other types of Objects.
!!! hint
Note that you should encrypt only the `data` or `stringData` section. Encrypting the Kubernetes
secret metadata, kind or apiVersion is not supported by kustomize-controller.
## Encrypt secrets
Generate a Kubernetes secret manifest with kubectl:
```sh
kubectl -n default create secret generic basic-auth \
--from-literal=user=admin \
--from-literal=password=change-me \
--dry-run=client \
-o yaml > basic-auth.yaml
```
Encrypt the secret with SOPS using your GPG key:
```sh
sops --encrypt --in-place basic-auth.yaml
```
You can now commit the encrypted secret to your Git repository.
!!! hint
Note that you shouldn't apply the encrypted secrets onto the cluster with kubectl.
SOPS encrypted secrets are designed to be consumed by kustomize-controller.
### Using various cloud providers
When using AWS/GCP KMS, you don't have to include the gpg `secretRef` under
`spec.decryption` (you can skip the `--decryption-secret` flag when running `flux create kustomization`).
Instead, you'll have to bind an IAM role with access to the KMS
keys to the `kustomize-controller` service account of the `flux-system` namespace, so that
kustomize-controller is able to fetch keys from KMS.
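With KMS-based decryption, the Kustomization only needs the provider set; a minimal sketch:
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: my-secrets
  namespace: flux-system
spec:
  interval: 10m
  sourceRef:
    kind: GitRepository
    name: my-secrets
  path: ./clusters/cluster0
  prune: true
  decryption:
    provider: sops
    # no secretRef needed; keys are fetched from KMS using the controller's IAM identity
```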
#### AWS
Enable the [IAM OIDC provider](https://eksctl.io/usage/iamserviceaccounts/) on your EKS cluster:
```sh
eksctl utils associate-iam-oidc-provider --cluster=<clusterName>
```
Create an IAM Role with access to AWS KMS e.g.:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey"
],
"Effect": "Allow",
"Resource": "arn:aws:kms:eu-west-1:XXXXX209540:key/4f581f5b-7f78-45e9-a543-83a7022e8105"
}
]
}
```
Bind the IAM role to the `kustomize-controller` service account:
```sh
eksctl create iamserviceaccount \
--override-existing-serviceaccounts \
--name=kustomize-controller \
--namespace=flux-system \
--attach-policy-arn=<policyARN> \
--cluster=<clusterName>
```
Restart kustomize-controller for the binding to take effect:
```sh
kubectl -n flux-system rollout restart deployment/kustomize-controller
```
#### Azure
When using Azure Key Vault you need to authenticate kustomize-controller either with [aad-pod-identity](../use-cases/azure.md#aad-pod-identity)
or by passing [Service Principal credentials as environment variables](https://github.com/mozilla/sops#encrypting-using-azure-key-vault).
Create the Azure Key Vault:
```sh
export VAULT_NAME="fluxcd-$(uuidgen | tr -d - | head -c 16)"
export KEY_NAME="sops-cluster0"
az keyvault create --name "${VAULT_NAME}"
az keyvault key create --name "${KEY_NAME}" \
--vault-name "${VAULT_NAME}" \
--protection software \
--ops encrypt decrypt
az keyvault key show --name "${KEY_NAME}" \
--vault-name "${VAULT_NAME}" \
--query key.kid
```
If using AAD Pod-Identity, create an identity within Azure to bind against, then create an `AzureIdentity` object to match:
```yaml
# Create an identity in Azure and assign it a role to access Key Vault (note: the identity's resourceGroup should match the desired Key Vault):
# az identity create -n sops-akv-decryptor
# az role assignment create --role "Key Vault Crypto User" --assignee-object-id "$(az identity show -n sops-akv-decryptor -o tsv --query principalId)"
# Fetch the clientID and resourceID to configure the AzureIdentity spec below:
# az identity show -n sops-akv-decryptor -otsv --query clientId
# az identity show -n sops-akv-decryptor -otsv --query resourceId
---
apiVersion: aadpodidentity.k8s.io/v1
kind: AzureIdentity
metadata:
name: sops-akv-decryptor # kustomize-controller label will match this name
namespace: flux-system
spec:
clientID: 58027844-6b86-424b-9888-b5ae2dc28b4f
resourceID: /subscriptions/8c69185e-55f9-4d00-8e71-a1b1bb1386a1/resourcegroups/stealthybox/providers/Microsoft.ManagedIdentity/userAssignedIdentities/sops-akv-decryptor
type: 0 # user-managed identity
```
[Customize your Flux Manifests](../guides/installation.md#customize-flux-manifests) so that kustomize-controller has the proper credentials.
Patch the kustomize-controller Pod template so that the label matches the `AzureIdentity` name.
Additionally, set the SOPS-specific environment variable `AZURE_AUTH_METHOD=msi` to activate the proper auth method within kustomize-controller.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kustomize-controller
namespace: flux-system
spec:
template:
metadata:
labels:
aadpodidbinding: sops-akv-decryptor # match the AzureIdentity name
spec:
containers:
- name: manager
env:
- name: AZURE_AUTH_METHOD
value: msi
```
Alternatively, if using a Service Principal stored in a K8s Secret, patch the Pod's envFrom
to reference the `AZURE_TENANT_ID`/`AZURE_CLIENT_ID`/`AZURE_CLIENT_SECRET`
fields from your Secret.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kustomize-controller
namespace: flux-system
spec:
template:
spec:
containers:
- name: manager
envFrom:
- secretRef:
name: sops-akv-decryptor-service-principal
```
At this point, kustomize-controller is now authorized to decrypt values in
SOPS encrypted files from your Sources via the related Key Vault.
See Mozilla's guide to
[Encrypting Using Azure Key Vault](https://github.com/mozilla/sops#encrypting-using-azure-key-vault)
to get started committing encrypted files to your Git Repository or other Sources.
#### Google Cloud
Please ensure that the GKE cluster has Workload Identity enabled.
1. Create a service account with the role `Cloud KMS CryptoKey Encrypter/Decrypter`.
2. Create an IAM policy binding between the GCP service account and the `kustomize-controller` service account of the `flux-system` namespace.
3. Annotate the `kustomize-controller` service account in the `flux-system` namespace with the GCP service account.
```sh
kubectl annotate serviceaccount kustomize-controller \
--namespace flux-system \
iam.gke.io/gcp-service-account=<name-of-serviceaccount>@project-id.iam.gserviceaccount.com
```
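To encrypt with GCP KMS, the SOPS config for the cluster directory would then reference the key resource ID instead of a PGP fingerprint (a sketch with placeholder project and key names):
```yaml
creation_rules:
  - path_regex: .*.yaml
    encrypted_regex: ^(data|stringData)$
    gcp_kms: projects/<project-id>/locations/global/keyRings/sops/cryptoKeys/cluster0
```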
## GitOps workflow
A cluster admin should create the Kubernetes secret with the PGP keys on each cluster and
add the GitRepository/Kustomization manifests to the fleet repository.
Git repository manifest:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: my-secrets
namespace: flux-system
spec:
interval: 1m
url: https://github.com/my-org/my-secrets
```
Kustomization manifest:
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: my-secrets
namespace: flux-system
spec:
interval: 10m0s
sourceRef:
kind: GitRepository
name: my-secrets
path: ./
prune: true
decryption:
provider: sops
secretRef:
name: sops-gpg
```
!!! hint
You can generate the above manifests using `flux create <kind> --export > manifest.yaml`.
Assuming a team member wants to deploy an application that needs to connect
to a database using a username and password, they'll be doing the following:
* create a Kubernetes Secret manifest locally with the db credentials e.g. `db-auth.yaml`
* encrypt the secret `data` field with sops
* create a Kubernetes Deployment manifest for the app e.g. `app-deployment.yaml`
* add the Secret to the Deployment manifest as a [volume mount or env var](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets)
* commit the manifests `db-auth.yaml` and `app-deployment.yaml` to a Git repository that's being synced by the GitOps toolkit controllers
Once the manifests have been pushed to the Git repository, the following happens:
* source-controller pulls the changes from Git
* kustomize-controller loads the GPG keys from the `sops-gpg` secret
* kustomize-controller decrypts the Kubernetes secrets with SOPS and applies them on the cluster
* kubelet creates the pods and mounts the secret as a volume or env variable inside the app container

@ -1,295 +0,0 @@
# Setup Notifications
When operating a cluster, different teams may wish to receive notifications about
the status of their GitOps pipelines.
For example, the on-call team would receive alerts about reconciliation
failures in the cluster, while the dev team may wish to be alerted when a new version
of an app was deployed and if the deployment is healthy.
## Prerequisites
To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).
The GitOps toolkit controllers emit Kubernetes events whenever a resource status changes.
You can use the [notification-controller](../components/notification/controller.md)
to forward these events to Slack, Microsoft Teams, Discord or Rocket.Chat.
The notification controller is part of the default toolkit installation.
## Define a provider
First create a secret with your Slack incoming webhook:
```sh
kubectl -n flux-system create secret generic slack-url \
--from-literal=address=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
```
Note that the secret must contain an `address` field;
it can be a Slack, Microsoft Teams, Discord or Rocket.Chat webhook URL.
Create a notification provider for Slack by referencing the above secret:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
name: slack
namespace: flux-system
spec:
type: slack
channel: general
secretRef:
name: slack-url
```
The provider type can be `slack`, `msteams`, `discord`, `rocket`, `googlechat`, `webex`, `sentry` or `generic`.
When type `generic` is specified, the notification controller will post the incoming
[event](../components/notification/event.md) in JSON format to the webhook address.
This way you can create custom handlers that can store the events in
Elasticsearch, CloudWatch, Stackdriver, etc.
## Define an alert
Create an alert definition for all repositories and kustomizations:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
name: on-call-webapp
namespace: flux-system
spec:
providerRef:
name: slack
eventSeverity: info
eventSources:
- kind: GitRepository
name: '*'
- kind: Kustomization
name: '*'
```
Apply the above files or commit them to the `fleet-infra` repository.
To verify that the alert has been acknowledged by the notification controller do:
```console
$ kubectl -n flux-system get alerts
NAME READY STATUS AGE
on-call-webapp True Initialized 1m
```
Multiple alerts can be used to send notifications to different channels or Slack organizations.
The event severity can be set to `info` or `error`.
When the severity is set to `error`, the kustomize controller will alert on any error
encountered during the reconciliation process.
This includes kustomize build and validation errors,
apply errors and health check failures.
![error alert](../_files/slack-error-alert.png)
When the severity is set to `info`, the controller will alert if:
* a Kubernetes object was created, updated or deleted
* health checks are passing
* a dependency is delaying the execution
* an error occurs
![info alert](../_files/slack-info-alert.png)
## Git commit status
The GitHub, GitLab, Bitbucket, and Azure DevOps providers are slightly different from the other providers. Instead of
a stateless stream of events, the Git notification providers will link the event with the Git commit which
triggered the event. The linking is done by updating the commit status of a specific commit.
- [GitHub](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-status-checks)
- [GitLab](https://docs.gitlab.com/ee/api/commits.html)
- [Bitbucket](https://developer.atlassian.com/server/bitbucket/how-tos/updating-build-status-for-commits/)
- [Azure DevOps](https://docs.microsoft.com/en-us/rest/api/azure/devops/git/statuses?view=azure-devops-rest-6.0)
In GitHub the commit status set by notification-controller will result in a green checkmark or red cross next to the commit hash.
Clicking the icon will show more detailed information about the status.
![commit status GitHub overview](../_files/commit-status-github-overview.png)
Receiving an event in the form of a commit status rather than a message in a chat conversation has the benefit
of closing the deployment loop, giving quick and visible feedback on whether a commit has reconciled and whether it succeeded.
This means that a deployment works in a manner similar to what people are used to with "traditional" push-based CD pipelines.
Additionally, the status can be fetched from the Git provider's API for a specific commit, allowing custom automation tools
to, for example, automatically promote a change or commit to a new directory after receiving a successful commit status. This can all be
done without requiring any access to the Kubernetes cluster.
As stated before, the provider works by referencing the same Git repository as the kustomize-controller does.
When a new commit is pushed to the repository, source-controller will sync the commit, triggering the kustomize-controller
to reconcile the new commit. After this is done the kustomize-controller sends an event to the notification-controller
with the result and the commit hash it reconciled. Then notification-controller can update the correct commit and repository
when receiving the event.
![commit status flow](../_files/commit-status-flow.png)
!!! hint "Limitations"
The Git notification providers require that a commit hash be present in the metadata
of the event. Therefore the providers will only work with `Kustomization` as an
event source, as it is the only resource which includes this data.
First follow the [get started guide](../../get-started) if you do not have a Kubernetes cluster with Flux installed in it.
You will need an authentication token to communicate with the API. The authentication method depends on
the Git provider used; refer to the [Provider CRD](../../components/notification/provider/#git-commit-status)
for details about how to get the correct token. The guide will use GitHub, but the other providers will work in a very similar manner.
The token will need to have write access to the repository it is going to update the commit status in.
Store the generated token in a Secret with the following data format in the cluster.
```yaml
apiVersion: v1
kind: Secret
metadata:
name: github
namespace: flux-system
data:
token: <token>
```
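Note that the `data` field expects base64-encoded values; it may be easier to create the Secret with kubectl, which handles the encoding (a sketch):
```sh
kubectl -n flux-system create secret generic github \
  --from-literal=token=<token>
```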
When sending notification events the kustomize-controller will include the commit hash related to the event.
Note that the commit hash in the event does not come from the Git repository the `Kustomization` resource
comes from, but rather from the Kustomization's source ref. This means that commit status notifications will not work
if the manifests come from a repository which the API token is not allowed to write to.
Copy the manifest content in the "[kustomize](https://github.com/stefanprodan/podinfo/tree/master/kustomize)" directory
into the directory "./clusters/my-cluster/podinfo" in your fleet-infra repository. Make sure that you also add the
namespace podinfo.
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: podinfo
```
Then create a Kustomization to deploy podinfo.
```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: podinfo
namespace: flux-system
spec:
interval: 5m
targetNamespace: podinfo
path: ./clusters/my-cluster/podinfo
prune: true
sourceRef:
kind: GitRepository
name: flux-system
healthChecks:
- apiVersion: apps/v1
kind: Deployment
name: podinfo
namespace: podinfo
timeout: 1m
```
Creating a Git provider is very similar to creating other types of providers.
The only caveat is that the provider address needs to point to the same
Git repository as the one the event source originates from.
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
name: flux-system
namespace: flux-system
spec:
type: github
address: https://github.com/<username>/fleet-infra
secretRef:
name: github
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
name: podinfo
namespace: flux-system
spec:
providerRef:
name: flux-system
eventSeverity: info
eventSources:
- kind: Kustomization
name: podinfo
namespace: flux-system
```
By now the fleet-infra repository should have a similar directory structure.
```
fleet-infra
└── clusters/
└── my-cluster/
├── flux-system/
│ ├── gotk-components.yaml
│ ├── gotk-sync.yaml
│ └── kustomization.yaml
├── podinfo/
│ ├── namespace.yaml
│ ├── deployment.yaml
│ ├── hpa.yaml
│ ├── service.yaml
│ └── kustomization.yaml
├── podinfo-kustomization.yaml
└── podinfo-notification.yaml
```
If podinfo is deployed and the health checks pass you should get a successful status in
your fleet-infra repository.
If everything is set up correctly there should now be a green check-mark next to the latest commit.
Clicking the check-mark should show a detailed view.
| GitHub | GitLab |
| ------------- | ------------- |
| ![commit status GitHub successful](../_files/commit-status-github-success.png) | ![commit status GitLab successful](../_files/commit-status-gitlab-success.png) |
### Generate error
A deployment failure can be forced by setting an invalid image tag in the podinfo deployment.
```yaml
apiVersion: apps/v1
kind: Deployment
spec:
template:
spec:
containers:
- name: podinfod
image: ghcr.io/stefanprodan/podinfo:fake
```
After the commit has been reconciled it should return a failed commit status.
This is where the health check in the Kustomization comes into play together
with the timeout. The health check is used to assess the health of the Kustomization.
A failed commit status will not be sent until the health check times out. Setting
a lower timeout will give feedback faster, but may sometimes not allow enough time
for a new application to deploy.
| GitHub | GitLab |
| ------------- | ------------- |
| ![commit status GitHub failure](../_files/commit-status-github-failure.png) | ![commit status GitLab failure](../_files/commit-status-gitlab-failure.png) |
### Status changes
The provider will continuously receive events as they happen, and multiple events may
be received for the same commit hash. The git providers are configured to only update
the status if the status has changed. This is to avoid spamming the commit status
history with the same status over and over again.
There is an aspect of statefulness that needs to be considered, compared to the other
notification providers, as the events are stored by the Git provider. This means that
the status of a commit can change over time. Initially a deployment may be healthy, resulting
in a successful status. Down the line the application, and the health check, may start failing
due to the amount of traffic it receives or external dependencies no longer being available.
The change in the health check would cause the status to go from successful to failed.
It is important to keep this in mind when building any automation tools that deal with the
status, and to consider the fact that receiving a successful status once does not mean it will
always be successful.

@ -1,179 +0,0 @@
# Sealed Secrets
In order to store secrets safely in a public or private Git repository, you can use
Bitnami's [sealed-secrets controller](https://github.com/bitnami-labs/sealed-secrets)
and encrypt your Kubernetes Secrets into SealedSecrets.
The sealed secrets can be decrypted only by the controller running in your cluster and
nobody else can obtain the original secret, even if they have access to the Git repository.
## Prerequisites
To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).
The sealed-secrets controller comes with a companion CLI tool called kubeseal.
With kubeseal you can create SealedSecret custom resources in YAML format
and store those in your Git repository.
Install the kubeseal CLI:
```sh
brew install kubeseal
```
For Linux or Windows you can download the kubeseal binary from
[GitHub](https://github.com/bitnami-labs/sealed-secrets/releases).
## Deploy sealed-secrets with a HelmRelease
You'll be using [helm-controller](../components/helm/controller.md) APIs to install
the sealed-secrets controller from its [Helm chart](https://hub.kubeapps.com/charts/stable/sealed-secrets).
First you have to register the Helm repository where the sealed-secrets chart is published:
```sh
flux create source helm sealed-secrets \
--interval=1h \
--url=https://bitnami-labs.github.io/sealed-secrets
```
With `interval` we configure [source-controller](../components/source/controller.md) to download
the Helm repository index every hour. If a newer version of sealed-secrets is published,
source-controller will signal helm-controller that a new chart is available.
Create a Helm release that installs the latest version of sealed-secrets controller:
```sh
flux create helmrelease sealed-secrets \
--interval=1h \
--release-name=sealed-secrets \
--target-namespace=flux-system \
--source=HelmRepository/sealed-secrets \
--chart=sealed-secrets \
--chart-version=">=1.15.0-0" \
--crds=CreateReplace
```
With chart version `>=1.15.0-0` we configure helm-controller to automatically upgrade the release
when a new chart version is fetched by source-controller.
At startup, the sealed-secrets controller generates a 4096-bit RSA key pair and
persists the private and public keys as Kubernetes secrets in the `flux-system` namespace.
You can retrieve the public key with:
```sh
kubeseal --fetch-cert \
--controller-name=sealed-secrets \
--controller-namespace=flux-system \
> pub-sealed-secrets.pem
```
The public key can be safely stored in Git, and can be used to encrypt secrets
without direct access to the Kubernetes cluster.
## Encrypt secrets
Generate a Kubernetes secret manifest with kubectl:
```sh
kubectl -n default create secret generic basic-auth \
--from-literal=user=admin \
--from-literal=password=change-me \
--dry-run=client \
-o yaml > basic-auth.yaml
```
Encrypt the secret with kubeseal:
```sh
kubeseal --format=yaml --cert=pub-sealed-secrets.pem \
< basic-auth.yaml > basic-auth-sealed.yaml
```
Delete the plain secret and apply the sealed one:
```sh
rm basic-auth.yaml
kubectl apply -f basic-auth-sealed.yaml
```
Verify that the sealed-secrets controller has created the `basic-auth` Kubernetes Secret:
```console
$ kubectl -n default get secrets basic-auth
NAME TYPE DATA AGE
basic-auth Opaque 2 1m43s
```
## GitOps workflow
A cluster admin should add the sealed-secrets `HelmRepository` manifest and the sealed-secrets `HelmRelease`
to the fleet repository.
Helm repository manifest:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: sealed-secrets
namespace: flux-system
spec:
interval: 1h0m0s
url: https://bitnami-labs.github.io/sealed-secrets
```
Helm release manifest:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: sealed-secrets
namespace: flux-system
spec:
chart:
spec:
chart: sealed-secrets
sourceRef:
kind: HelmRepository
name: sealed-secrets
version: ">=1.15.0-0"
interval: 1h0m0s
releaseName: sealed-secrets
targetNamespace: flux-system
install:
crds: Create
upgrade:
crds: CreateReplace
```
!!! hint
You can generate the above manifests using `flux create <kind> --export > manifest.yaml`.
Once the sealed-secrets controller is installed, the admin fetches the
public key and shares it with the teams that operate on the fleet clusters via Git.
When a team member wants to create a Kubernetes Secret on a cluster,
they use kubeseal and the public key corresponding to that cluster to generate a SealedSecret.
Assuming a team member wants to deploy an application that needs to connect
to a database using a username and password, they'll be doing the following:
* create a Kubernetes Secret manifest locally with the db credentials e.g. `db-auth.yaml`
* encrypt the secret with kubeseal as `db-auth-sealed.yaml`
* delete the original secret file `db-auth.yaml`
* create a Kubernetes Deployment manifest for the app e.g. `app-deployment.yaml`
* add the Secret to the Deployment manifest as a [volume mount or env var](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets) using the original name `db-auth`
* commit the manifests `db-auth-sealed.yaml` and `app-deployment.yaml` to a Git repository that's being synced by the GitOps toolkit controllers
Once the manifests have been pushed to the Git repository, the following happens:
* source-controller pulls the changes from Git
* kustomize-controller applies the SealedSecret and the Deployment manifests
* sealed-secrets controller decrypts the SealedSecret and creates a Kubernetes Secret
* kubelet creates the pods and mounts the secret as a volume or env variable inside the app container

@ -1,192 +0,0 @@
<!-- -*- fill-column: 100 -*- -->
# How to make sortable image tags to use with automation
Flux v2 does not support selecting the latest image by build time. Obtaining the build time requires fetching
the container config for each image, and that is subject to strict rate limiting by image
registries (e.g., by [DockerHub][dockerhub-rates]).
This guide explains how to construct image tags so that the most recent image has the tag that comes
last in alphabetical or numerical order. The technique suggested is to put a timestamp or serial
number in each image tag.
## Formats and alternatives
The important properties for sorting are that the parts of the timestamp go from most significant to
least (e.g., the year down to the second). For numbers it is best to use numerical order, since this
will work with values of different width (e.g., '12' sorts after '2').
Image tags are often shown in user interfaces, so readability matters. Here is an example of a
readable timestamp that will sort well:
```bash
$ # date and time (remember ':' is not allowed in a tag)
$ date +%F.%H%M%S
2021-01-28.133158
```
You can use a timestamp that sorts as a number, like [Unix
time](https://en.wikipedia.org/wiki/Unix_time):
```
$ # seconds since Jan 1 1970
$ date +%s
1611840548
```
Alternatively, you can use a serial number as part of the tag. Some CI platforms will provide a
build number in an environment variable, but that may not be reliable to use as a serial number --
check the platform documentation.
For example, GitHub makes available the variable `github.run_number`, which can be used as a reliable, ever-increasing serial number.
A commit count can be a reasonable stand-in for a serial number, if you build an image per commit
and you don't rewrite the branch in question:
```bash
$ # commits in branch
$ git rev-list --count HEAD
1504
```
Beware: this will not give a useful number if you have a shallow clone.
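In GitHub Actions, for example, the checkout action fetches a shallow clone by default; you can request the full history so the commit count is meaningful (a sketch):
```yaml
- uses: actions/checkout@v2
  with:
    fetch-depth: 0 # fetch all history so `git rev-list --count HEAD` is accurate
```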
### Other things to include in the image tag
It is also handy to quickly trace an image to the branch and commit of its source code. Including
the branch also means you can filter for images from a particular branch.
A useful tag format is `<branch>-<sha1>-<timestamp>`.
The branch and commit SHA will usually be made available in a CI platform as environment variables. See
- [CircleCI's built-in variables `CIRCLE_BRANCH` and `CIRCLE_SHA1`][circle-ci-env]
- [GitHub Actions' `GITHUB_REF` and `GITHUB_SHA`][github-actions-env]
- [Travis CI's `TRAVIS_BRANCH` and `TRAVIS_COMMIT`][travis-env].
## Example of a build process with timestamp tagging
Here is an example of a [GitHub Actions job][gha-syntax] that creates a "build ID" with the git
branch, SHA1, and a timestamp, and uses it as a tag when building an image:
```yaml
jobs:
build-push:
env:
IMAGE: org/my-app
runs-on: ubuntu-latest
steps:
- name: Generate build ID
id: prep
run: |
branch=${GITHUB_REF##*/}
sha=${GITHUB_SHA::8}
ts=$(date +%s)
echo "::set-output name=BUILD_ID::${branch}-${sha}-${ts}"
# These are prerequisites for the docker build step
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and publish container image with tag
uses: docker/build-push-action@v2
with:
push: true
context: .
file: ./Dockerfile
tags: |
${{ env.IMAGE }}:${{ steps.prep.outputs.BUILD_ID }}
```
### Alternative example utilizing github.run_number
Here is another example of a [GitHub Actions job][gha-syntax] which tags images using GitHub Actions' built-in `run_number`
and the Git SHA1:
```yaml
jobs:
build-push:
env:
IMAGE: org/my-app
runs-on: ubuntu-latest
steps:
# These are prerequisites for the docker build step
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and publish container image with tag
uses: docker/build-push-action@v2
with:
push: true
context: .
file: ./Dockerfile
tags: |
${{ env.IMAGE }}:${{ github.sha }}-${{ github.run_number }}
```
## Using in an `ImagePolicy` object
When creating an `ImagePolicy` object, you will need to extract just the timestamp part of the tag,
using the `filterTags` field. You can filter for a particular branch to restrict images to only those
built from that branch.
Here is an example that filters for only images built from `main` branch, and selects the most
recent according to a timestamp (created with `date +%s`) or according to the run number (`github.run_number` for example):
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
name: image-repo-policy
namespace: flux-system
spec:
imageRepositoryRef:
name: image-repo
filterTags:
## use "pattern: '(?P<ts>.*)-.+'" if you copied the workflow example using github.run_number
pattern: '^main-[a-f0-9]+-(?P<ts>[0-9]+)'
extract: '$ts'
policy:
numerical:
order: asc
```
If you don't care about the branch, that part can be a wildcard in the pattern:
```yaml
apiVersion: image.toolkit.fluxcd.io/v1alpha2
kind: ImagePolicy
metadata:
name: image-repo-policy
namespace: flux-system
spec:
imageRepositoryRef:
name: image-repo
filterTags:
pattern: '^.+-[a-f0-9]+-(?P<ts>[0-9]+)'
extract: '$ts'
policy:
numerical:
order: asc
```
[circle-ci-env]: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
[github-actions-env]: https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables
[travis-env]: https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
[dockerhub-rates]: https://docs.docker.com/docker-hub/billing/faq/#pull-rate-limiting-faqs
[gha-syntax]: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

@ -1,138 +0,0 @@
# Setup Webhook Receivers
The GitOps toolkit controllers are by design **pull-based**.
In order to notify the controllers about changes in Git or Helm repositories,
you can setup webhooks and trigger a cluster reconciliation
every time a source changes. Using webhook receivers, you can build **push-based**
GitOps pipelines that react to external events.
## Prerequisites
To follow this guide you'll need a Kubernetes cluster with the GitOps
toolkit controllers installed on it.
Please see the [get started guide](../get-started/index.md)
or the [installation guide](installation.md).
The [notification controller](../components/notification/controller.md)
can handle events coming from external systems
(GitHub, GitLab, Bitbucket, Harbor, Jenkins, etc)
and notify the GitOps toolkit controllers about source changes.
The notification controller is part of the default toolkit installation.
## Expose the webhook receiver
In order to receive Git push or Helm chart upload events, you'll have to
expose the webhook receiver endpoint outside of your Kubernetes cluster on
a public address.
The notification controller handles webhook requests on port `9292`.
This port can be used to create a Kubernetes LoadBalancer Service or Ingress.
Create a `LoadBalancer` service:
```yaml
apiVersion: v1
kind: Service
metadata:
name: receiver
namespace: flux-system
spec:
type: LoadBalancer
selector:
app: notification-controller
ports:
- name: http
port: 80
protocol: TCP
targetPort: 9292
```
Wait for Kubernetes to assign a public address with:
```sh
watch kubectl -n flux-system get svc/receiver
```
## Define a Git repository
Create a Git source pointing to a GitHub repository that you have control over:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: webapp
namespace: flux-system
spec:
interval: 60m
url: https://github.com/<GH-ORG>/<GH-REPO>
ref:
branch: master
```
!!! hint "Authentication"
SSH or token based authentication can be configured for private repositories.
See the [GitRepository CRD docs](../components/source/gitrepositories.md) for more details.
## Define a Git repository receiver
First generate a random string and create a secret with a `token` field:
```sh
TOKEN=$(head -c 12 /dev/urandom | shasum | cut -d ' ' -f1)
echo $TOKEN
kubectl -n flux-system create secret generic webhook-token \
--from-literal=token=$TOKEN
```
Create a receiver for GitHub and specify the `GitRepository` object:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Receiver
metadata:
name: webapp
namespace: flux-system
spec:
type: github
events:
- "ping"
- "push"
secretRef:
name: webhook-token
resources:
- kind: GitRepository
name: webapp
```
!!! hint "Note"
Besides GitHub, you can define receivers for **GitLab**, **Bitbucket**, **Harbor**
and any other system that supports webhooks e.g. Jenkins, CircleCI, etc.
See the [Receiver CRD docs](../components/notification/receiver.md) for more details.
The notification controller generates a unique URL using the provided token and the receiver name/namespace.
Find the URL with:
```console
$ kubectl -n flux-system get receiver/webapp
NAME READY STATUS
webapp True Receiver initialised with URL: /hook/bed6d00b5555b1603e1f59b94d7fdbca58089cb5663633fb83f2815dc626d92b
```
On GitHub, navigate to your repository and click on the "Add webhook" button under "Settings/Webhooks".
Fill the form with:
* **Payload URL**: compose the address using the receiver LB and the generated URL `http://<LoadBalancerAddress>/<ReceiverURL>`
* **Secret**: use the `token` string
With the above settings, when you push a commit to the repository, the following happens:
* GitHub sends the Git push event to the receiver address
* Notification controller validates the authenticity of the payload using HMAC
* Source controller is notified about the changes
* Source controller pulls the changes into the cluster and updates the `GitRepository` revision
* Kustomize controller is notified about the revision change
* Kustomize controller reconciles all the `Kustomizations` that reference the `GitRepository` object
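You can confirm this chain end-to-end from the command line. After pushing a commit, the source revision and the reconciled Kustomizations should update within seconds:
```sh
watch flux get sources git
flux get kustomizations
```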
@ -1,38 +0,0 @@
---
hide:
# The table data on this page is easier to read when wider
# The TOC right column is already blank anyway
- toc
---
# Migration and Support Timetable
!!! heart "Flux Migration Commitment"
This public timetable clarifies our commitment to end users.
Its purpose is to help improve your experience in deciding how and when to plan infra decisions related to Flux versions.
Please refer to the [Roadmap](../roadmap/index.md) for additional details.
<!-- Note: this div allows us to set fixed column widths in custom.css -->
<!-- See: https://github.com/squidfunk/mkdocs-material/issues/118 -->
<div markdown="1" class="timetable-explicit-col-widths">
<!-- Requires mkdocs markdown_extensions footnotes and pymdownx.caret -->
<!-- markdownlint-disable-file MD033 -->
| Date | Flux 1 | Flux 2 CLI | GOTK[^1] |
| -- | -- | -- | -- |
| Oct 6, 2020 | ^^[Maintenance Mode](https://github.com/fluxcd/website/pull/25)^^<br><ul><li>Flux 1 releases only include critical bug fixes (which don't require changing Flux 1 architecture), and security patches (for OS packages, Go runtime, and kubectl). No new features</li><li>Existing projects encouraged to test migration to Flux 2 pre-releases in non-production</li></ul> | ^^Development Mode^^<br><ul><li>Working to finish parity with Flux 1</li><li>New projects encouraged to test Flux 2 pre-releases in non-production</li></ul> | ^^All Alpha^^[^2] |
| Feb 18, 2021 | ^^Partial Migration Mode^^<br><ul><li>Existing projects encouraged to migrate to `v1beta1`/`v2beta1` if you only use those features (Flux 1 read-only mode, and Helm Operator)</li><li>Existing projects encouraged to test image automation Alpha in non-production</li></ul> | ^^Feature Parity^^ | ^^Image Automation Alpha. All others reached Feature Parity, Beta^^ |
| TBD | ^^Superseded^^<br><ul><li>All existing projects encouraged to [migrate to Flux 2](https://toolkit.fluxcd.io/guides/flux-v1-migration/), and [report any bugs](https://github.com/fluxcd/flux2/issues/new/choose)</li><li>Flux 1 Helm Operator archived; no further updates due to unsupported dependencies</li></ul> | ^^Needs further testing, may get breaking changes^^<br><ul><li>CLI needs further user testing during this migration period</li></ul> | ^^All Beta, Production Ready^^[^3]<br><ul><li>All Flux 1 features stable and supported in Flux 2</li><li>Promoting Alpha versions to Beta makes this Production Ready</li></ul> |
| TBD | ^^Migration and security support only^^<br><ul><li>Flux 1 releases only include security patches (no bug fixes)</li><li>Maintainers support users with migration to Flux 2 only, no longer with Flux 1 issues</li><li>Flux 1 archive date announced</li></ul> | ^^Public release (GA), Production Ready^^<br><ul><li>CLI commits to backwards compatibility moving forward</li><li>CLI follows kubectl style backwards compatibility support: +1 -1 MINOR version for server components (e.g., APIs, Controllers, validation webhooks)</li></ul> | ^^All Beta, Production Ready^^ |
| TBD | ^^Archived^^<br><ul><li>Flux 1 obsolete, no further releases or maintainer support</li><li>Flux 1 repo archived</li></ul> | ^^Continued active development^^ | ^^Continued active development^^ |
<!-- end .timetable-explicit-col-widths -->
</div>
[^1]: GOTK is shorthand for the [GitOps Toolkit](https://toolkit.fluxcd.io/components/) APIs and Controllers
[^2]: Versioning: Flux 2 is a multi-service architecture, so requires a more complex explanation than Flux 1:
- Flux 2 CLI follows [Semantic Versioning](https://semver.org/) scheme
- The GitOps Toolkit APIs follow the [Kubernetes API versioning](https://kubernetes.io/docs/reference/using-api/#api-versioning) pattern. See [Roadmap](https://toolkit.fluxcd.io/roadmap/) for component versions.
- These are coordinated for cross-compatibility: For each Flux 2 CLI tag, CLI and GOTK versions are end-to-end tested together, so you may safely upgrade from one MINOR/PATCH version to another.
[^3]: The GOTK Custom Resource Definitions which are at `v1beta1` and `v2beta1` and their controllers are considered stable and production ready. Going forward, breaking changes to the beta CRDs will be accompanied by a conversion mechanism.
@ -1,510 +0,0 @@
# go-git-providers
## Abstract
This proposal aims to create a library with the import path `github.com/fluxcd/go-git-providers`
(import name: `gitprovider`), which provides an abstraction layer for talking to Git providers
like GitHub, GitLab and Bitbucket.
This would become a new repository, specifically targeted at being a general-purpose Git provider
client for multiple providers and domains.
## Goals
- Support multiple Git provider backends (e.g. GitHub, GitLab, Bitbucket, etc.) using the same interface
- Support talking to multiple domains at once, including custom domains (e.g. talking to "gitlab.com" and "version.aalto.fi" from the same client)
- Support both no authentication (for public repos), basic auth, and OAuth2 for authentication
- Manipulating the following resources:
- **Organizations**: `GET`, `LIST` (both all accessible top-level orgs and sub-orgs)
- For a given **Organization**:
- **Teams**: `GET` and `LIST`
- **Repositories**: `GET`, `LIST` and `POST`
- **Team Access**: `LIST`, `POST` and `DELETE`
- **Credentials**: `LIST`, `POST` and `DELETE`
- Support sub-organizations (or "sub-groups" in GitLab) if possible
- Support reconciling an object for idempotent operations
- Pagination is automatically handled for `LIST` requests
- Can transparently manage teams (collections of users; sub-groups in GitLab) with varying access to repos
- Follow library best practices in order to be easy to vendor (e.g. use major `vX` versioning & go.mod)
## Non-goals
- Support for features not mentioned above
## Design decisions
- A `context.Context` should be passed to every request as the first argument
- There should be two interfaces per resource, if applicable:
- one collection-specific interface, with a plural name (e.g. `OrganizationsClient`), that has methods like `Get()` and `List()`
- one instance-specific interface, with a singular name (e.g. `OrganizationClient`), that operates on that instance, e.g. allowing access to child resources, e.g. `Teams()`
- Every `Create()` signature shall have a `{Resource}CreateOptions` struct as the last argument.
- `Delete()` and similar methods may use the same pattern if needed
- All `*Options` structs shall be passed by value (i.e. non-nillable) and contain only nillable, optional fields
- All optional fields in the type structs shall be nillable
- It should be possible to create a fake API client for testing, implementing the same interfaces
- All type structs shall have a `Validate()` method, and optionally a `Default()` one
- All type structs shall expose their internal representation (from the underlying library) through the `InternalGetter` interface with a method `GetInternal() interface{}`
- Typed errors shall be returned, wrapped using Go 1.14's new features
- Go-style enums are used when there are only a few supported values for a field
- Every field is documented using Godoc comment, including `+required` or `+optional` to clearly signify its importance
- Support serializing the types to JSON (if needed for e.g. debugging) by adding tags
## Implementation
### Provider package
The provider package, e.g. at `github.com/fluxcd/go-git-providers/github`, will have constructor methods so a client can be created, e.g. as follows:
```go
// Create a client for github.com without any authentication
c := github.NewClient()
// Create a client for an enterprise GitHub account, without any authentication
c = github.NewClient(github.WithBaseURL("enterprise.github.com"))
// Create a client for github.com using a personal oauth2 token
c = github.NewClient(github.WithOAuth2("<token-here>"))
```
### Client
The definition of a `Client` is as follows:
```go
// Client is an interface that allows talking to a Git provider
type Client interface {
// The Client allows accessing all known resources
ResourceClient
// SupportedDomain returns the supported domain
// This field is set at client creation time, and can't be changed
SupportedDomain() string
// ProviderID returns the provider ID (e.g. "github", "gitlab") for this client
// This field is set at client creation time, and can't be changed
ProviderID() ProviderID
// Raw returns the Go client used under the hood for accessing the Git provider
Raw() interface{}
}
```
As one can see, the `Client` is scoped for a single backing domain. `ProviderID` is a typed string, and every
implementation package defines their own constant, e.g. `const ProviderName = gitprovider.ProviderID("github")`.
The `ResourceClient` actually allows talking to resources of the API, both for single objects, and collections:
```go
// ResourceClient allows access to resource-specific clients
type ResourceClient interface {
// Organization gets the OrganizationClient for the specific top-level organization
// ErrNotTopLevelOrganization will be returned if the organization is not top-level
Organization(o OrganizationRef) OrganizationClient
// Organizations returns the OrganizationsClient handling sets of organizations
Organizations() OrganizationsClient
// Repository gets the RepositoryClient for the specified RepositoryRef
Repository(r RepositoryRef) RepositoryClient
// Repositories returns the RepositoriesClient handling sets of repositories
Repositories() RepositoriesClient
}
```
In order to reference organizations and repositories, there are the `OrganizationRef` and `RepositoryRef`
interfaces:
```go
// OrganizationRef references an organization in a Git provider
type OrganizationRef interface {
// String returns the HTTPS URL
fmt.Stringer
// GetDomain returns the URL-domain for the Git provider backend, e.g. gitlab.com or version.aalto.fi
GetDomain() string
// GetOrganization returns the top-level organization, i.e. "weaveworks" or "kubernetes-sigs"
GetOrganization() string
// GetSubOrganizations returns the names of sub-organizations (or sub-groups),
// e.g. ["engineering", "frontend"] would be returned for gitlab.com/weaveworks/engineering/frontend
GetSubOrganizations() []string
}
// RepositoryRef references a repository hosted by a Git provider
type RepositoryRef interface {
// RepositoryRef requires an OrganizationRef to fully-qualify a repo reference
OrganizationRef
// GetRepository returns the name of the repository
GetRepository() string
}
```
Along with these, there is `OrganizationInfo` and `RepositoryInfo` which implement the above mentioned interfaces in a straightforward way.
If you want to create an `OrganizationRef` or `RepositoryRef`, you can either use `NewOrganizationInfo()` or `NewRepositoryInfo()`, filling in all parts of the reference, or use the `ParseRepositoryURL(r string) (RepositoryRef, error)` or `ParseOrganizationURL(o string) (OrganizationRef, error)` methods.
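To make the intended consumer flow concrete, here is a rough sketch of how the proposed interfaces could be used together to resolve a URL and fetch a repository. This illustrates the proposal only: the `gitprovider` import alias follows the conventions stated above, whether the parse helpers are package-level or client-scoped is not pinned down by the proposal (they are shown as package-level here), and error handling is reduced to panics for brevity:
```go
package main

import (
	"context"
	"fmt"

	gitprovider "github.com/fluxcd/go-git-providers"
	"github.com/fluxcd/go-git-providers/github"
)

func main() {
	ctx := context.Background()

	// Client scoped to github.com, authenticated with a personal OAuth2 token
	c := github.NewClient(github.WithOAuth2("<token-here>"))

	// Parse a repository URL into a RepositoryRef
	ref, err := gitprovider.ParseRepositoryURL("https://github.com/fluxcd/flux2")
	if err != nil {
		panic(err)
	}

	// Fetch the repository through the collection-specific client
	repo, err := c.Repositories().Get(ctx, ref)
	if err != nil {
		panic(err)
	}

	if repo.Description != nil {
		fmt.Println(*repo.Description)
	}
}
```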
As mentioned above, only one target domain is supported by the `Client`. This means e.g. that if the `Client` is configured for GitHub, and you feed it a GitLab URL to parse, `ErrDomainUnsupported` will be returned.
This brings us to a higher-level client abstraction, `MultiClient`.
### MultiClient
In order to automatically support multiple domains and providers using the same interface, `MultiClient` is introduced.
The user would use the `MultiClient` as follows:
```go
// Create a client to github.com without authentication
gh := github.NewClient()
// Create a client to gitlab.com, authenticating with basic auth
gl := gitlab.NewClient(gitlab.WithBasicAuth("<username>", "<password>"))
// Create a client to the GitLab instance at version.aalto.fi, with a given OAuth2 token
aalto := gitlab.NewClient(gitlab.WithBaseURL("version.aalto.fi"), gitlab.WithOAuth2Token("<your-token>"))
// Create a MultiClient which supports talking to any of these backends
client := gitprovider.NewMultiClient(gh, gl, aalto)
```
The interface definition of `MultiClient` is similar to that of `Client`, both embedding `ResourceClient`, but it also allows access to the domain-specific underlying `Client`s:
```go
// MultiClient allows talking to multiple Git providers at once
type MultiClient interface {
// The MultiClient allows accessing all known resources, automatically choosing the right underlying
// Client based on the resource's domain
ResourceClient
// SupportedDomains returns a list of known domains
SupportedDomains() []string
// ClientForDomain returns the Client used for a specific domain
ClientForDomain(domain string) (Client, bool)
}
```
### OrganizationsClient
The `OrganizationsClient` provides access to a set of organizations, as follows:
```go
// OrganizationsClient operates on organizations the user has access to
type OrganizationsClient interface {
// Get a specific organization the user has access to
// This might also refer to a sub-organization
// ErrNotFound is returned if the resource does not exist
Get(ctx context.Context, o OrganizationRef) (*Organization, error)
// List all top-level organizations the specific user has access to
// List should return all available organizations, using multiple paginated requests if needed
List(ctx context.Context) ([]Organization, error)
// Children returns the immediate child-organizations for the specific OrganizationRef o.
// The OrganizationRef may point to any sub-organization that exists
// This is not supported in GitHub
// Children should return all available organizations, using multiple paginated requests if needed
Children(ctx context.Context, o OrganizationRef) ([]Organization, error)
// Possibly add Create/Update/Delete methods later
}
```
The `Organization` struct is fairly straightforward for now:
```go
// Organization represents a (top-level or sub-) organization
type Organization struct {
// OrganizationInfo provides the required fields
// (Domain, Organization and SubOrganizations) required for being an OrganizationRef
OrganizationInfo `json:",inline"`
// InternalHolder implements the InternalGetter interface
// +optional
InternalHolder `json:",inline"`
// Name is the human-friendly name of this organization, e.g. "Weaveworks" or "Kubernetes SIGs"
// +required
Name string `json:"name"`
// Description returns a description for the organization
// No default value at POST-time
// +optional
Description *string `json:"description"`
}
```
The `OrganizationInfo` struct is a straightforward struct just implementing the `OrganizationRef` interface
with basic fields & getters. `InternalHolder` implements the `InternalGetter` interface as follows, and is
embedded into all main structs:
```go
// InternalGetter allows access to the underlying object
type InternalGetter interface {
// GetInternal returns the underlying struct that's used
GetInternal() interface{}
}
// InternalHolder can be embedded into other structs to implement the InternalGetter interface
type InternalHolder struct {
// Internal contains the underlying object.
// +optional
Internal interface{} `json:"-"`
}
```
### OrganizationClient
`OrganizationClient` allows access to a specific organization's underlying resources as follows:
```go
// OrganizationClient operates on a given/specific organization
type OrganizationClient interface {
// Teams gives access to the TeamsClient for this specific organization
Teams() OrganizationTeamsClient
}
```
#### Organization Teams
Teams belonging to a certain organization can at this moment be fetched on an individual basis, or listed.
```go
// OrganizationTeamsClient handles teams organization-wide
type OrganizationTeamsClient interface {
// Get a team within the specific organization
// teamName may include slashes, to point to e.g. "sub-teams" i.e. subgroups in Gitlab
// teamName must not be an empty string
// ErrNotFound is returned if the resource does not exist
Get(ctx context.Context, teamName string) (*Team, error)
// List all teams (recursively, in terms of subgroups) within the specific organization
// List should return all available teams, using multiple paginated requests if needed
List(ctx context.Context) ([]Team, error)
// Possibly add Create/Update/Delete methods later
}
```
The `Team` struct is defined as follows:
```go
// Team is a representation for a team of users inside of an organization
type Team struct {
// Team embeds OrganizationInfo which makes it automatically comply with OrganizationRef
OrganizationInfo `json:",inline"`
// Team embeds InternalHolder for accessing the underlying object
// +optional
InternalHolder `json:",inline"`
// Name describes the name of the team. The team name may contain slashes
// +required
Name string `json:"name"`
// Members points to a set of user names (logins) of the members of this team
// +required
Members []string `json:"members"`
}
```
In GitLab, teams could be modelled as users in a sub-group. Those users can later be added as a single unit
to access a given repository.
### RepositoriesClient
`RepositoriesClient` provides access to a set of repositories for the user.
```go
// RepositoriesClient operates on repositories the user has access to
type RepositoriesClient interface {
// Get returns the repository at the given path
// ErrNotFound is returned if the resource does not exist
Get(ctx context.Context, r RepositoryRef) (*Repository, error)
// List all repositories in the given organization
// List should return all available repositories, using multiple paginated requests if needed
List(ctx context.Context, o OrganizationRef) ([]Repository, error)
// Create creates a repository at the given organization path, with the given URL-encoded name and options
// ErrAlreadyExists will be returned if the resource already exists
Create(ctx context.Context, r *Repository, opts RepositoryCreateOptions) (*Repository, error)
// Reconcile makes sure r is the actual state in the backing Git provider. If r doesn't exist
// under the hood, it is created. If r is already the actual state, this is a no-op. If r isn't
// the actual state, the resource will either be updated or deleted/recreated.
Reconcile(ctx context.Context, r *Repository) error
}
```
`RepositoryCreateOptions` has options like `AutoInit *bool`, `LicenseTemplate *string` and so forth to allow a
one-time initialization step.
The `Repository` struct is defined as follows:
```go
// Repository represents a Git repository provided by a Git provider
type Repository struct {
// RepositoryInfo provides the required fields
// (Domain, Organization, SubOrganizations and RepositoryName)
// required for being a RepositoryRef
RepositoryInfo `json:",inline"`
// InternalHolder implements the InternalGetter interface
// +optional
InternalHolder `json:",inline"`
// Description returns a description for the repository
// No default value at POST-time
// +optional
Description *string `json:"description"`
// Visibility returns the desired visibility for the repository
// Default value at POST-time: RepoVisibilityPrivate
// +optional
Visibility *RepoVisibility
}
// GetCloneURL gets the clone URL for the specified transport type
func (r *Repository) GetCloneURL(transport TransportType) string {
return GetCloneURL(r, transport)
}
```
As can be seen, there is also a `GetCloneURL` function for the repository, which allows
resolving the URL from which to clone the repo for a given transport method (`ssh` and `https`
are supported `TransportType`s).
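To round this off, a hypothetical caller could create a repository and then resolve its clone URL roughly as follows. This is a sketch against the proposed interfaces only; the exact signature of `NewRepositoryInfo` and the string passed as the `TransportType` are assumptions for illustration:
```go
// Describe the desired repository using the proposed types
info := gitprovider.NewRepositoryInfo("github.com", "my-org", "my-repo")
desc := "Example repository managed through go-git-providers"
repo := &gitprovider.Repository{
	RepositoryInfo: info,
	Description:    &desc,
}

// Create it; ErrAlreadyExists is returned if it is already present,
// in which case Reconcile(ctx, repo) could be used instead
created, err := c.Repositories().Create(ctx, repo, gitprovider.RepositoryCreateOptions{})
if err != nil {
	panic(err)
}

// Resolve the SSH clone URL for the new repository
fmt.Println(created.GetCloneURL("ssh"))
```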
### RepositoryClient
`RepositoryClient` allows access to a given repository's underlying resources, as follows:
```go
// RepositoryClient operates on a given/specific repository
type RepositoryClient interface {
// TeamAccess gives access to what teams have access to this specific repository
TeamAccess() RepositoryTeamAccessClient
// Credentials gives access to manipulating credentials for accessing this specific repository
Credentials() RepositoryCredentialsClient
}
```
#### Repository Teams
`RepositoryTeamAccessClient` allows adding and removing teams from a repository's access control list.
```go
// RepositoryTeamAccessClient operates on the teams list for a specific repository
type RepositoryTeamAccessClient interface {
// Create adds a given team to the repo's team access control list
// ErrAlreadyExists will be returned if the resource already exists
// The embedded RepositoryInfo of ta does not need to be populated, but if it is,
// it must be equal to the RepositoryRef given to the RepositoryClient.
Create(ctx context.Context, ta *TeamAccess, opts RepositoryAddTeamOptions) error
// Lists the team access control list for this repo
List(ctx context.Context) ([]TeamAccess, error)
// Reconcile makes sure ta is the actual state in the backing Git provider. If ta doesn't exist
// under the hood, it is created. If ta is already the actual state, this is a no-op. If ta isn't
// the actual state, the resource will either be updated or deleted/recreated.
// The embedded RepositoryInfo of ta does not need to be populated, but if it is,
// it must be equal to the RepositoryRef given to the RepositoryClient.
Reconcile(ctx context.Context, ta *TeamAccess) error
// Delete removes the given team from the repo's team access control list
// ErrNotFound is returned if the resource does not exist
Delete(ctx context.Context, teamName string) error
}
```
The `TeamAccess` struct looks as follows:
```go
// TeamAccess describes a binding between a repository and a team
type TeamAccess struct {
// TeamAccess embeds RepositoryInfo which makes it automatically comply with RepositoryRef
// +optional
RepositoryInfo `json:",inline"`
// TeamAccess embeds InternalHolder for accessing the underlying object
// +optional
InternalHolder `json:",inline"`
// Name describes the name of the team. The team name may contain slashes
// +required
Name string `json:"name"`
// Permission describes the permission level for which the team is allowed to operate
// Default: read
// Available options: See the TeamRepositoryPermission enum
// +optional
Permission *TeamRepositoryPermission
}
```
#### Repository Credentials
`RepositoryCredentialsClient` allows adding and removing credentials (e.g. deploy keys) for accessing a specific repository.
```go
// RepositoryCredentialsClient operates on the access credential list for a specific repository
type RepositoryCredentialsClient interface {
// Create a credential with the given human-readable name, the given bytes and optional options
// ErrAlreadyExists will be returned if the resource already exists
Create(ctx context.Context, c RepositoryCredential, opts CredentialCreateOptions) error
// Lists all credentials for the given credential type
List(ctx context.Context, t RepositoryCredentialType) ([]RepositoryCredential, error)
// Reconcile makes sure c is the actual state in the backing Git provider. If c doesn't exist
// under the hood, it is created. If c is already the actual state, this is a no-op. If c isn't
// the actual state, the resource will either be updated or deleted/recreated.
Reconcile(ctx context.Context, c RepositoryCredential) error
// Deletes a credential from the repo. name corresponds to GetName() of the credential
// ErrNotFound is returned if the resource does not exist
Delete(ctx context.Context, t RepositoryCredentialType, name string) error
}
```
In order to support multiple different types of credentials, `RepositoryCredential` is an interface:
```go
// RepositoryCredential is a credential that allows access (either read-only or read-write) to the repo
type RepositoryCredential interface {
// GetType returns the type of the credential
GetType() RepositoryCredentialType
// GetName returns a name (or title/description) of the credential
GetName() string
// GetData returns the key that will be authorized to access the repo, this can e.g. be a SSH public key
GetData() []byte
// IsReadOnly returns whether this credential is authorized to write to the repository or not
IsReadOnly() bool
}
```
The default implementation of `RepositoryCredential` is `DeployKey`:
```go
// DeployKey represents a short-lived credential (e.g. an SSH public key) used for accessing a repository
type DeployKey struct {
// DeployKey embeds InternalHolder for accessing the underlying object
// +optional
InternalHolder `json:",inline"`
// Title is the human-friendly interpretation of what the key is for (and does)
// +required
Title string `json:"title"`
// Key specifies the public part of the deploy (e.g. SSH) key
// +required
Key []byte `json:"key"`
// ReadOnly specifies whether this DeployKey can write to the repository or not
// Default value at POST-time: true
// +optional
ReadOnly *bool `json:"readOnly"`
}
```
@ -1,148 +0,0 @@
# Roadmap
!!! hint "Production readiness"
The Flux custom resource definitions which are at `v1beta1` and `v2beta1`
and their controllers are considered stable and production ready.
Going forward, breaking changes to the beta CRDs will be accompanied by a conversion mechanism.
Please see the [Migration and Support Timetable](../migration/timetable.md) for our commitment to end users.
The following components (included by default in [flux bootstrap](../guides/installation.md#bootstrap))
are considered production ready:
- [source-controller](../components/source)
- [kustomize-controller](../components/kustomize)
- [notification-controller](../components/notification)
- [helm-controller](../components/helm)
The following GitOps Toolkit APIs are considered production ready:
- `source.toolkit.fluxcd.io/v1beta1`
- `kustomize.toolkit.fluxcd.io/v1beta1`
- `notification.toolkit.fluxcd.io/v1beta1`
- `helm.toolkit.fluxcd.io/v2beta1`
## The road to Flux v2 GA
In our planning discussions we have identified these possible areas of work;
this list is subject to change while we gather feedback:
- Stabilize the image automation APIs
* Review the spec of `ImageRepository`, `ImagePolicy` and `ImageUpdateAutomation`
* Promote the image automation APIs to `v1beta1`
* Include the image automation controllers in the default components list
- Improve the documentation
* Gather feedback on the [migration guides](https://github.com/fluxcd/flux2/discussions/413) and address more use-cases
* Incident management and troubleshooting guides
* Cloud specific guides (AWS, Azure, Google Cloud, more?)
* Consolidate the docs under [fluxcd.io](https://fluxcd.io) website
## The road to Flux v1 feature parity
In our planning discussions we identified three areas of work:
- Feature parity with Flux v1 in read-only mode
- Feature parity with the image-update functionality in Flux v1
- Feature parity with Helm Operator v1
### Flux read-only feature parity
[= 100% "100%"]
Flux v2 read-only is ready to try. See the [Getting
Started](https://toolkit.fluxcd.io/get-started/) how-to, and the
[Migration
guide](https://toolkit.fluxcd.io/guides/flux-v1-migration/).
This would be the first stepping stone: we want Flux v2 to be on-par with today's Flux in
[read-only mode](https://github.com/fluxcd/flux/blob/master/docs/faq.md#can-i-run-flux-with-readonly-git-access)
and [FluxCloud](https://github.com/justinbarrick/fluxcloud) notifications.
Goals
- <span class="check-bullet">:material-check-bold:</span> [Offer a migration guide for those that are using Flux in read-only mode to synchronize plain manifests](https://toolkit.fluxcd.io/guides/flux-v1-migration/)
- <span class="check-bullet">:material-check-bold:</span> [Offer a migration guide for those that are using Flux in read-only mode to synchronize Kustomize overlays](https://toolkit.fluxcd.io/guides/flux-v1-migration/)
- <span class="check-bullet">:material-check-bold:</span> [Offer a dedicated component for forwarding events to external messaging platforms](https://toolkit.fluxcd.io/guides/notifications/)
Non-Goals
- Migrate users that are using Flux to run custom scripts with `flux.yaml`
- Automate the migration of `flux.yaml` kustomize users
Tasks
- [x] <span style="color:grey">Design the events API</span>
- [x] <span style="color:grey">Implement events in source and kustomize controllers</span>
- [x] <span style="color:grey">Make the kustomize-controller apply/gc events on-par with Flux v1 apply events</span>
- [x] <span style="color:grey">Design the notifications and events filtering API</span>
- [x] <span style="color:grey">Implement a notification controller for Slack, MS Teams, Discord, Rocket</span>
- [x] <span style="color:grey">Implement Prometheus metrics in source and kustomize controllers</span>
- [x] <span style="color:grey">Review the git source and kustomize APIs</span>
- [x] <span style="color:grey">Support [bash-style variable substitution](https://toolkit.fluxcd.io/components/kustomize/kustomization/#variable-substitution) as an alternative to `flux.yaml` envsubst/sed usage</span>
- [x] <span style="color:grey">Create a migration guide for `flux.yaml` kustomize users</span>
- [x] <span style="color:grey">Include support for SOPS</span>
### Flux image update feature parity
[= 100% "100%"]
Image automation is available as a prerelease. See [this
guide](https://toolkit.fluxcd.io/guides/image-update/) for how to
install and use it.
Goals
- Offer components that can replace Flux v1 image update feature
Non-Goals
- Maintain backwards compatibility with Flux v1 annotations
- [Order by timestamps found inside image layers](https://github.com/fluxcd/flux2/discussions/802)
Tasks
- [x] <span style="color:grey">[Design the image scanning and automation API](https://github.com/fluxcd/flux2/discussions/107)</span>
- [x] <span style="color:grey">Implement an image scanning controller</span>
- [x] <span style="color:grey">Public image repo support</span>
- [x] <span style="color:grey">Credentials from Secret [fluxcd/image-reflector-controller#35](https://github.com/fluxcd/image-reflector-controller/pull/35)</span>
- [x] <span style="color:grey">Design the automation component</span>
- [x] <span style="color:grey">Implement the image scan/patch/push workflow</span>
- [x] <span style="color:grey">Integrate the new components in the Flux CLI [fluxcd/flux2#538](https://github.com/fluxcd/flux2/pull/538)</span>
- [x] <span style="color:grey">Write a guide for how to use image automation ([guide here](https://toolkit.fluxcd.io/guides/image-update/))</span>
- [x] <span style="color:grey">ACR/ECR/GCR integration ([guide here](https://toolkit.fluxcd.io/guides/image-update/#imagerepository-cloud-providers-authentication))</span>
- [x] <span style="color:grey">Write a migration guide from Flux v1 annotations ([guide here](https://toolkit.fluxcd.io/guides/flux-v1-automation-migration/))</span>
### Helm v3 feature parity
[= 100% "100%"]
Helm support in Flux v2 is ready to try. See the [Helm controller
guide](https://toolkit.fluxcd.io/guides/helmreleases/), and the [Helm
controller migration
guide](https://toolkit.fluxcd.io/guides/helm-operator-migration/).
Goals
- Offer a migration guide for those that are using Helm Operator with Helm v3 and charts from
Helm and Git repositories
Non-Goals
- Migrate users that are using Helm v2
Tasks
- [x] <span style="color:grey">Implement a Helm controller for Helm v3 covering all the current release options</span>
- [x] <span style="color:grey">Discuss and design Helm releases based on source API:</span>
* [x] <span style="color:grey">Providing values from sources</span>
* [x] <span style="color:grey">Conditional remediation on failed Helm actions</span>
* [x] <span style="color:grey">Support for Helm charts from Git</span>
- [x] <span style="color:grey">Review the Helm release, chart and repository APIs</span>
- [x] <span style="color:grey">Implement events in Helm controller</span>
- [x] <span style="color:grey">Implement Prometheus metrics in Helm controller</span>
- [x] <span style="color:grey">Implement support for values from `Secret` and `ConfigMap` resources</span>
- [x] <span style="color:grey">Implement conditional remediation on (failed) Helm actions</span>
- [x] <span style="color:grey">Implement support for Helm charts from Git</span>
- [x] <span style="color:grey">Implement support for referring to an alternative chart values file</span>
- [x] <span style="color:grey">Stabilize API</span>
- [x] <span style="color:grey">[Create a migration guide for Helm Operator users](../guides/helm-operator-migration.md)</span>
@ -1,233 +0,0 @@
# Using Flux on Azure
## AKS Cluster Options
It's important to follow some guidelines when installing Flux on AKS.
### CNI and Network Policy
There has previously been an issue with Flux and Network Policy on AKS.
([Upstream Azure Issue](https://github.com/Azure/AKS/issues/2031)) ([Flux Issue](https://github.com/fluxcd/flux2/issues/703))
If you ensure your AKS cluster is upgraded, and your Nodes have been restarted with the most recent Node images,
this could resolve Flux reconciliation failures where source-controller is unreachable.
Using `--network-plugin=azure --network-policy=calico` has been tested to work properly.
This issue only affects you if you are using `--network-policy` on AKS, which is not a default option.
!!! warning
AKS `--network-policy` is currently in Preview
### AAD Pod-Identity
Depending on the features you are interested in using with Flux, you may want to install AAD Pod Identity.
With [AAD Pod-Identity](https://azure.github.io/aad-pod-identity/docs/), we can create Pods that have their own
cloud credentials for accessing Azure services like Azure Container Registry (ACR) and Azure Key Vault (AKV).
If you do not use AAD Pod-Identity, you'll need to manage and store Service Principal credentials
in K8s Secrets, to integrate Flux with other Azure Services.
As a prerequisite, your cluster must have `--enable-managed-identity` configured.
This software can be [installed via Helm](https://azure.github.io/aad-pod-identity/docs/getting-started/installation/)
(unmanaged by Azure).
Use Flux's `HelmRepository` and `HelmRelease` objects to manage the aad-pod-identity installation
from a bootstrap repository and keep it up to date.
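A minimal sketch of what this could look like is below. The chart repository URL is an assumption based on the upstream AAD Pod-Identity project; verify it against their installation docs before committing:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: aad-pod-identity
  namespace: flux-system
spec:
  interval: 1h
  # Assumed upstream chart repository; confirm in the AAD Pod-Identity docs
  url: https://raw.githubusercontent.com/Azure/aad-pod-identity/master/charts
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: aad-pod-identity
  namespace: kube-system
spec:
  interval: 1h
  chart:
    spec:
      chart: aad-pod-identity
      sourceRef:
        kind: HelmRepository
        name: aad-pod-identity
        namespace: flux-system
```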
!!! note
As an alternative to Helm, the `--enable-aad-pod-identity` flag for the `az aks create` command is currently in Preview.
Follow the Azure guide for [Creating an AKS cluster with AAD Pod Identity](https://docs.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity)
if you would like to enable this feature with the Azure CLI.
### Cluster Creation
The following creates an AKS cluster with some minimal configuration that will work well with Flux:
```sh
az aks create \
--network-plugin="azure" \
--network-policy="calico" \
--enable-managed-identity \
--enable-pod-identity \
--name="my-cluster"
```
!!! info
When working with the Azure CLI, it can help to set a default `location`, `group`, and `acr`.
See `az configure --help`, `az configure --list-defaults`, and `az configure --defaults key=value`.
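For example (all values are placeholders):
```sh
az configure --defaults location=westeurope group=my-resource-group acr=myregistry
```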
## Flux Installation for Azure DevOps
Ensure you can log in to [dev.azure.com](https://dev.azure.com) for your proper organization,
and create a new repository to hold your Flux install and other Kubernetes resources.
Clone the Git repository locally:
```sh
git clone ssh://git@ssh.dev.azure.com/v3/<org>/<project>/<my-repository>
cd <my-repository>
```
Create a directory inside the repository:
```sh
mkdir -p ./clusters/my-cluster/flux-system
```
Download the [Flux CLI](../guides/installation.md#install-the-flux-cli) and generate the manifests with:
```sh
flux install \
--export > ./clusters/my-cluster/flux-system/gotk-components.yaml
```
Commit and push the manifest to the master branch:
```sh
git add -A && git commit -m "add components" && git push
```
Apply the manifests on your cluster:
```sh
kubectl apply -f ./clusters/my-cluster/flux-system/gotk-components.yaml
```
Verify that the controllers have started:
```sh
flux check
```
Create a `GitRepository` object on your cluster by specifying the SSH address of your repo:
```sh
flux create source git flux-system \
--git-implementation=libgit2 \
--url=ssh://git@ssh.dev.azure.com/v3/<org>/<project>/<repository> \
--branch=<branch> \
--ssh-key-algorithm=rsa \
--ssh-rsa-bits=4096 \
--interval=1m
```
The above command will prompt you to add a deploy key to your repository, but Azure DevOps
[does not support repository or org-specific deploy keys](https://developercommunity.visualstudio.com/t/allow-the-creation-of-ssh-deploy-keys-for-vsts-hos/365747).
You may add the deploy key to a user's personal SSH keys, but take note that
revoking the user's access to the repository will also revoke Flux's access.
The better alternative is to create a machine-user whose sole purpose is
to store credentials for automation.
Using a machine-user also has the benefit of being able to be read-only or
restricted to specific repositories if this is needed.
!!! note
Unlike `git`, Flux does not support the
["shorter" scp-like syntax for the SSH protocol](https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#_the_ssh_protocol)
(e.g. `ssh.dev.azure.com:v3`).
Use the [RFC 3986 compatible syntax](https://tools.ietf.org/html/rfc3986#section-3) instead: `ssh.dev.azure.com/v3`.
If you wish to use Git over HTTPS, then generate a personal access token and supply it as the password:
```sh
flux create source git flux-system \
--git-implementation=libgit2 \
--url=https://dev.azure.com/<org>/<project>/_git/<repository> \
--branch=main \
--username=git \
--password=${AZ_PAT_TOKEN} \
--interval=1m
```
Please consult the [Azure DevOps documentation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=preview-page)
on how to generate personal access tokens for Git repositories.
Azure DevOps PATs always have an expiration date, so be sure to have some process for renewing or updating these tokens.
Similar to the lack of repo-specific deploy keys, a user needs to generate a user-specific PAT.
If you are using a machine-user, you can generate a PAT or simply use the machine-user's password which does not expire.
Create a `Kustomization` object on your cluster:
```sh
flux create kustomization flux-system \
--source=flux-system \
--path="./clusters/my-cluster" \
--prune=true \
--interval=10m
```
Export both objects, generate a `kustomization.yaml`, commit and push the manifests to Git:
```sh
flux export source git flux-system \
> ./clusters/my-cluster/flux-system/gotk-sync.yaml
flux export kustomization flux-system \
>> ./clusters/my-cluster/flux-system/gotk-sync.yaml
cd ./clusters/my-cluster/flux-system && kustomize create --autodetect
git add -A && git commit -m "add sync manifests" && git push
```
Wait for Flux to reconcile your previous commit with:
```sh
watch flux get kustomization flux-system
```
### Flux Upgrade
To upgrade the Flux components to a newer version, download the latest `flux` binary,
run the install command in your repository root, commit and push the changes:
```sh
flux install \
--export > ./clusters/my-cluster/flux-system/gotk-components.yaml
git add -A && git commit -m "Upgrade to $(flux -v)" && git push
```
The [source-controller](../components/source/controller.md) will pull the changes on the cluster,
then [kustomize-controller](../components/kustomize/controller.md)
will perform a rolling update of all Flux components including itself.
## Helm Repositories on Azure Container Registry
The Flux `HelmRepository` object currently supports
[Chart Repositories](https://helm.sh/docs/topics/chart_repository/)
as well as fetching `HelmCharts` from paths in `GitRepository` sources.
Azure Container Registry has a sub-command ([`az acr helm`](https://docs.microsoft.com/en-us/cli/azure/acr/helm))
for working with ACR-Hosted Chart Repositories, but it is deprecated.
If you are using these deprecated Azure Chart Repositories,
you can use Flux `HelmRepository` objects with them.
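For example, a `HelmRepository` for a classic ACR chart repository could look like the sketch below. The `/helm/v1/repo` path and the referenced secret (created for instance with `flux create secret helm` using ACR credentials) are assumptions to adapt to your registry:
```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: acr-charts
  namespace: flux-system
spec:
  interval: 10m
  # Classic (deprecated) ACR chart repository endpoint; verify the path for your registry
  url: https://<registry-name>.azurecr.io/helm/v1/repo
  secretRef:
    name: acr-credentials
```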
[Newer ACR Helm documentation](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-helm-repos)
suggests using ACR as an experimental [Helm OCI Registry](https://helm.sh/docs/topics/registries/).
This will not work with Flux, because using Charts from OCI Registries is not yet supported.
## Secrets Management with SOPS and Azure Key Vault
You will need to create an Azure Key Vault and bind a credential such as a Service Principal or Managed Identity to it.
If you want to use Managed Identities, install or enable [AAD Pod Identity](#aad-pod-identity).
Patch kustomize-controller with the proper Azure credentials, so that it may access your Azure Key Vault, and then begin
committing SOPS encrypted files to the Git repository with the proper Azure Key Vault configuration.
See the [Mozilla SOPS Azure Guide](../guides/mozilla-sops.md#azure) for further detail.
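As a rough sketch, a `.sops.yaml` creation rule bound to an Azure Key Vault key could look like this (vault, key name and key version are placeholders; the SOPS guide above remains the authoritative reference):
```yaml
creation_rules:
  - path_regex: .*\.yaml
    encrypted_regex: ^(data|stringData)$
    # Format: https://<vault-name>.vault.azure.net/keys/<key-name>/<key-version>
    azure_keyvault: https://my-vault.vault.azure.net/keys/sops-key/<key-version>
```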
## Image Updates with Azure Container Registry
You will need to create an ACR registry and bind a credential such as a Service Principal or Managed Identity to it.
If you want to use Managed Identities, install or enable [AAD Pod Identity](#aad-pod-identity).
You may need to update your Flux install to include additional components:
```sh
flux install \
--components-extra="image-reflector-controller,image-automation-controller" \
--export > ./clusters/my-cluster/flux-system/gotk-components.yaml
```
Follow the [Image Update Automation Guide](../guides/image-update.md) and see the
[ACR specific section](../guides/image-update.md#azure-container-registry) for more details.
Your AKS cluster's configuration can also be updated to
[allow the kubelets to pull images from ACR](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration)
without ImagePullSecrets as an optional, complementary step.
File diff suppressed because it is too large
@ -1,208 +0,0 @@
# Flux for Helm Users
Welcome Helm users!
We think Flux's Helm Controller is the best way to do Helm according to GitOps principles, and we're dedicated to doing what we can to help you feel the same way.
## What Does Flux add to Helm?
Helm 3 was designed with both a client and an SDK, but no running software agents.
This architecture intended anything outside of the client scope to be addressed by other tools in the ecosystem, which could then make use of Helm's SDK.
Built on Kubernetes controller-runtime, Flux's Helm Controller is an example of a mature software agent that uses Helm's SDK to full effect.
<!-- Flux is the only CD project that uses Helm as a library, not shelling out to the client, and does not fork the SDK to diverge from how Helm does things. -->
Flux's biggest addition to Helm is a structured declaration layer for your releases that automatically gets reconciled to your cluster based on your configured rules:
- While the Helm client commands let you imperatively do things
- Flux Helm Custom Resources let you declare what you want the Helm SDK to do automatically
Additional benefits Flux adds to Helm include:
- Managing / structuring multiple environments
- A control loop, with configurable retry logic
- Automated drift detection between the desired and actual state of your operations
- Automated responses to that drift, including reconciliation, notifications, and unified logging
## Getting Started
The simplest way to explain is by example.
Let's translate imperative Helm commands to Flux Helm Controller Custom Resources:
Helm client:
```console
helm repo add traefik https://helm.traefik.io/traefik
helm install my-traefik traefik/traefik \
--version 9.18.2 \
--namespace traefik
```
Flux client:
```console
flux create source helm traefik --url https://helm.traefik.io/traefik
flux create helmrelease my-traefik \
--chart traefik \
--source HelmRepository/traefik \
--chart-version 9.18.2 \
--namespace traefik
```
The main difference is that the Flux client will not imperatively create resources in the cluster.
Instead, these commands create Custom Resource *files*, which are committed to version control as instructions only (note: you may use the `--export` flag to manage any file edits with finer-grained control before pushing to version control).
Separately, the Flux Helm Controller software agent automatically reconciles these instructions with the running state of your cluster based on your configured rules.
Let's check out what the Custom Resource instruction files look like:
```yaml
# /flux/boot/traefik/helmrepo.yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: traefik
namespace: traefik
spec:
interval: 1m0s
url: https://helm.traefik.io/traefik
```
```yaml
# /flux/boot/traefik/helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: traefik
namespace: traefik
spec:
chart:
spec:
chart: traefik
sourceRef:
kind: HelmRepository
name: traefik
version: 9.18.2
interval: 1m0s
```
<!-- Using the Flux Kustomize Controller, these are automatically applied to your cluster, just as any other Kubernetes resources are. Note that while you may find value in combining Kustomize overlays with your Helm Controller manifests to further reduce file duplication, that is entirely optional and unrelated to the Helm Controller. -->
Once these are applied to your cluster, the Flux Helm Controller automatically uses the Helm SDK to do your bidding according to the rules you've set.
Why is this important?
If you or your team has ever collaborated with multiple engineers on one or more apps, and/or in more than one namespace or cluster, you probably have a good idea of how declarative, automatic reconciliation can help solve common problems.
If not, or either way, you may want to check out this [short introduction to GitOps](https://youtu.be/r-upyR-cfDY).
## Customizing Your Release
While Helm charts are usually installable using default configurations, users will often customize charts with their preferred configuration by [overriding the default values](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
The Helm client allows this by imperatively specifying override values with `--set` on the command line, and in additional `--values` files. For example:
```console
helm install my-traefik traefik/traefik --set service.type=ClusterIP
```
and
```console
helm install my-traefik traefik/traefik --values ci/kind-values.yaml
```
where `ci/kind-values.yaml` contains:
```yaml
service:
type: ClusterIP
```
Flux Helm Controller allows these same YAML values overrides on the `HelmRelease` CRD.
These can be declared directly in `spec.values`:
```yaml
spec:
values:
service:
type: ClusterIP
```
They can also be defined in `spec.valuesFrom` as a list of `ConfigMap` and `Secret` resources from which to draw values, allowing reusability and/or greater security.
See `HelmRelease` CRD [values overrides](https://toolkit.fluxcd.io/components/helm/helmreleases/#values-overrides) documentation for the latest spec.
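A short sketch combining both styles (the ConfigMap name is illustrative):
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: my-traefik
  namespace: traefik
spec:
  # ...omitted for brevity
  values:
    service:
      type: ClusterIP
  valuesFrom:
    - kind: ConfigMap
      name: traefik-values   # illustrative name
      valuesKey: values.yaml # key in the ConfigMap that holds the values document
```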
## Managing Secrets and ConfigMaps
You may manage these `ConfigMap` and `Secret` resources any way you wish, but there are several benefits to managing these with the Flux Kustomize Controller.
It is fairly straightforward to use Kustomize `configMapGenerator` to [trigger a Helm release upgrade every time the encoded values change](https://toolkit.fluxcd.io/guides/helmreleases/#refer-to-values-in-configmaps-generated-with-kustomize).
This common use case is currently solvable in Helm by [adding specially crafted annotations](https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments) to a chart.
The Flux Kustomize Controller method allows you to accomplish this on any chart without additional templated annotations.
You may also use Kustomize Controller built-in [Mozilla SOPS integration](https://toolkit.fluxcd.io/components/kustomize/kustomization/#secrets-decryption) to securely manage your encrypted secrets stored in git.
See the [Flux SOPS guide](https://toolkit.fluxcd.io/guides/mozilla-sops/) for step-by-step instructions through various use cases.
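For instance, a Kustomize overlay could generate the values ConfigMap like the sketch below; the guide linked above shows how to wire the generated (hash-suffixed) name into the `HelmRelease` so that every change triggers an upgrade (file names here are illustrative):
```yaml
# kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helmrelease.yaml
configMapGenerator:
  - name: traefik-values
    files:
      - values.yaml=my-values.yaml
```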
## Automatic Release Upgrades
If you want Helm Controller to automatically upgrade your releases when a new chart version is available in the release's referenced `HelmRepository`, you may specify a SemVer range (e.g. `>=4.0.0 <5.0.0`) instead of a fixed version.
This is useful if your release should use a fixed MAJOR chart version, but you want the latest MINOR or PATCH versions as they become available.
For full SemVer range syntax, see `Masterminds/semver` [Checking Version Constraints](https://github.com/Masterminds/semver/blob/master/README.md#checking-version-constraints) documentation.
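For example, in the `HelmRelease` chart spec (the range shown is illustrative):
```yaml
spec:
  chart:
    spec:
      chart: traefik
      version: ">=9.0.0 <10.0.0" # track the latest 9.x chart release
      sourceRef:
        kind: HelmRepository
        name: traefik
```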
## Automatic Uninstalls and Rollback
The Helm Controller offers an extensive set of configuration options to remediate when a Helm release fails, using [spec.install.remediate](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.InstallRemediation), [spec.upgrade.remediate](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.UpgradeRemediation), [spec.rollback](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.Rollback) and [spec.uninstall](https://toolkit.fluxcd.io/components/helm/api/#helm.toolkit.fluxcd.io/v2beta1.Uninstall).
Features include the option to remediate with an uninstall after an upgrade failure, and the option to keep a failed release for debugging purposes when it has run out of retries.
Here is an example for configuring automated uninstalls (for all available fields, consult the `InstallRemediation` and `Uninstall` API references linked above):
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
install:
# Remediation configuration for when the Helm install
# (or subsequent Helm test) action fails
remediation:
# Number of retries that should be attempted on failures before
# bailing, a negative integer equals to unlimited retries
retries: -1
# Configuration options for the Helm uninstall action
uninstall:
timeout: 5m
disableHooks: false
keepHistory: false
```
Here is an example of automated rollback configuration (for all available fields, consult the `UpgradeRemediation` and `Rollback` API references linked above):
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: my-release
namespace: default
spec:
# ...omitted for brevity
upgrade:
# Remediation configuration for when a Helm upgrade action fails
remediation:
# Amount of retries to attempt after a failure,
# setting this to 0 means no remediation will be
# attempted
retries: 5
# Configuration options for the Helm rollback action
rollback:
timeout: 5m
disableWait: false
disableHooks: false
recreate: false
force: false
cleanupOnFail: false
```
## Next Steps
- [Guides > Manage Helm Releases](/guides/helmreleases/)
- [Toolkit Components > Helm Controller](/components/helm/controller/)
- [Migration > Migrate to the Helm Controller](/guides/helm-operator-migration/)
@ -1,216 +0,0 @@
site_name: Flux | GitOps Toolkit
site_description: Open and extensible continuous delivery solution for Kubernetes
site_author: The Flux project
site_url: https://toolkit.fluxcd.io
repo_name: fluxcd/flux2
repo_url: https://github.com/fluxcd/flux2
edit_uri: ""
theme:
name: material
logo: _files/flux-icon@2x.png
language: en
palette:
primary: blue
accent: indigo
custom_dir: mkdocs/
docs_dir: docs
extra_css:
- _static/custom.css
plugins:
- search
markdown_extensions:
- admonition
- codehilite:
guess_lang: false
- footnotes
- meta
- pymdownx.caret
- pymdownx.emoji:
emoji_generator: !!python/name:materialx.emoji.to_svg
emoji_index: !!python/name:materialx.emoji.twemoji
- pymdownx.extra
- pymdownx.progressbar
- pymdownx.superfences:
highlight_code: true
- pymdownx.tabbed
- pymdownx.tasklist
- pymdownx.tilde
- toc:
permalink: true
nav:
- Introduction: index.md
- Core Concepts: core-concepts/index.md
- Get Started: get-started/index.md
- Migration:
- Migration and Support Timetable: migration/timetable.md
- Migrate from Flux v1: guides/flux-v1-migration.md
- Migrate from Flux v1 image update automation: guides/flux-v1-automation-migration.md
- Migrate from the Helm Operator: guides/helm-operator-migration.md
- FAQ: guides/faq-migration.md
- Guides:
- Installation: guides/installation.md
- Manage Helm Releases: guides/helmreleases.md
- Setup Notifications: guides/notifications.md
- Setup Webhook Receivers: guides/webhook-receivers.md
- Monitoring with Prometheus: guides/monitoring.md
- Sealed Secrets: guides/sealed-secrets.md
- Mozilla SOPS: guides/mozilla-sops.md
- Automate image updates to Git: guides/image-update.md
- Sortable image tags to use with automation: guides/sortable-image-tags.md
- Use Cases:
- Azure: use-cases/azure.md
- GitHub Actions Manifest Generation: use-cases/gh-actions-manifest-generation.md
- Helm: use-cases/helm.md
- Toolkit Components:
- Overview: components/index.md
- Source Controller:
- Overview: components/source/controller.md
- GitRepository CRD: components/source/gitrepositories.md
- HelmRepository CRD: components/source/helmrepositories.md
- HelmChart CRD: components/source/helmcharts.md
- Bucket CRD: components/source/buckets.md
- Source API Reference: components/source/api.md
- Kustomize Controller:
- Overview: components/kustomize/controller.md
- Kustomization CRD: components/kustomize/kustomization.md
- Kustomize API Reference: components/kustomize/api.md
- Helm Controller:
- Overview: components/helm/controller.md
- HelmRelease CRD: components/helm/helmreleases.md
- Helm API Reference: components/helm/api.md
- Notification Controller:
- Overview: components/notification/controller.md
- Event: components/notification/event.md
- Provider CRD: components/notification/provider.md
- Alert CRD: components/notification/alert.md
- Receiver CRD: components/notification/receiver.md
- Notification API Reference: components/notification/api.md
- Image Automation Controllers:
- Overview: components/image/controller.md
- ImageRepository CRD: components/image/imagerepositories.md
- ImagePolicy CRD: components/image/imagepolicies.md
- ImageUpdateAutomation CRD: components/image/imageupdateautomations.md
- Automation API Reference: components/image/automation-api.md
- Flux CLI:
- Overview: cmd/flux.md
- Bootstrap: cmd/flux_bootstrap.md
- Bootstrap github: cmd/flux_bootstrap_github.md
- Bootstrap gitlab: cmd/flux_bootstrap_gitlab.md
- Check: cmd/flux_check.md
- Create: cmd/flux_create.md
- Create kustomization: cmd/flux_create_kustomization.md
- Create helmrelease: cmd/flux_create_helmrelease.md
- Create source: cmd/flux_create_source.md
- Create source git: cmd/flux_create_source_git.md
- Create source helm: cmd/flux_create_source_helm.md
- Create source bucket: cmd/flux_create_source_bucket.md
- Create alert provider: cmd/flux_create_alert-provider.md
- Create alert: cmd/flux_create_alert.md
- Create receiver: cmd/flux_create_receiver.md
- Create image: cmd/flux_create_image.md
- Create image policy: cmd/flux_create_image_policy.md
- Create image repository: cmd/flux_create_image_repository.md
- Create image update: cmd/flux_create_image_update.md
- Create tenant: cmd/flux_create_tenant.md
- Create secret: cmd/flux_create_secret.md
- Create secret git: cmd/flux_create_secret_git.md
- Create secret helm: cmd/flux_create_secret_helm.md
- Create secret tls: cmd/flux_create_secret_tls.md
- Delete: cmd/flux_delete.md
- Delete kustomization: cmd/flux_delete_kustomization.md
- Delete helmrelease: cmd/flux_delete_helmrelease.md
- Delete source: cmd/flux_delete_source.md
- Delete source git: cmd/flux_delete_source_git.md
- Delete source helm: cmd/flux_delete_source_helm.md
- Delete source bucket: cmd/flux_delete_source_bucket.md
- Delete image: cmd/flux_delete_image.md
- Delete image policy: cmd/flux_delete_image_policy.md
- Delete image repository: cmd/flux_delete_image_repository.md
- Delete image update: cmd/flux_delete_image_update.md
- Export: cmd/flux_export.md
- Export kustomization: cmd/flux_export_kustomization.md
- Export helmrelease: cmd/flux_export_helmrelease.md
- Export source: cmd/flux_export_source.md
- Export source git: cmd/flux_export_source_git.md
- Export source helm: cmd/flux_export_source_helm.md
- Export source bucket: cmd/flux_export_source_bucket.md
- Export alert provider: cmd/flux_export_alert-provider.md
- Export alert: cmd/flux_export_alert.md
- Export receiver: cmd/flux_export_receiver.md
- Export image: cmd/flux_export_image.md
- Export image policy: cmd/flux_export_image_policy.md
- Export image repository: cmd/flux_export_image_repository.md
- Export image update: cmd/flux_export_image_update.md
- Get: cmd/flux_get.md
- Get all: cmd/flux_get_all.md
- Get kustomizations: cmd/flux_get_kustomizations.md
- Get helmreleases: cmd/flux_get_helmreleases.md
- Get sources: cmd/flux_get_sources.md
- Get sources all: cmd/flux_get_sources_all.md
- Get sources git: cmd/flux_get_sources_git.md
- Get sources helm: cmd/flux_get_sources_helm.md
- Get sources chart: cmd/flux_get_sources_chart.md
- Get sources bucket: cmd/flux_get_sources_bucket.md
- Get alert provider: cmd/flux_get_alert-provider.md
- Get alerts: cmd/flux_get_alerts.md
- Get alert providers: cmd/flux_get_alert-providers.md
- Get receivers: cmd/flux_get_receivers.md
- Get images: cmd/flux_get_images.md
- Get images all: cmd/flux_get_images_all.md
- Get images policy: cmd/flux_get_images_policy.md
- Get images repository: cmd/flux_get_images_repository.md
- Get images update: cmd/flux_get_images_update.md
- Install: cmd/flux_install.md
- Logs: cmd/flux_logs.md
- Resume: cmd/flux_resume.md
- Resume kustomization: cmd/flux_resume_kustomization.md
- Resume helmrelease: cmd/flux_resume_helmrelease.md
- Resume source: cmd/flux_resume_source.md
- Resume source git: cmd/flux_resume_source_git.md
- Resume source helm: cmd/flux_resume_source_helm.md
- Resume source chart: cmd/flux_resume_source_chart.md
- Resume source bucket: cmd/flux_resume_source_bucket.md
- Resume alert provider: cmd/flux_resume_alert-provider.md
- Resume alert: cmd/flux_resume_alert.md
- Resume receiver: cmd/flux_resume_receiver.md
- Resume image: cmd/flux_resume_image.md
- Resume image repository: cmd/flux_resume_image_repository.md
- Resume image update: cmd/flux_resume_image_update.md
- Suspend: cmd/flux_suspend.md
- Suspend kustomization: cmd/flux_suspend_kustomization.md
- Suspend helmrelease: cmd/flux_suspend_helmrelease.md
- Suspend source: cmd/flux_suspend_source.md
- Suspend source git: cmd/flux_suspend_source_git.md
- Suspend source helm: cmd/flux_suspend_source_helm.md
- Suspend source chart: cmd/flux_suspend_source_chart.md
- Suspend source bucket: cmd/flux_suspend_source_bucket.md
- Suspend alert provider: cmd/flux_suspend_alert-provider.md
- Suspend alert: cmd/flux_suspend_alert.md
- Suspend receiver: cmd/flux_suspend_receiver.md
- Suspend image: cmd/flux_suspend_image.md
- Suspend image repository: cmd/flux_suspend_image_repository.md
- Suspend image update: cmd/flux_suspend_image_update.md
- Reconcile: cmd/flux_reconcile.md
- Reconcile kustomization: cmd/flux_reconcile_kustomization.md
- Reconcile helmrelease: cmd/flux_reconcile_helmrelease.md
- Reconcile source: cmd/flux_reconcile_source.md
- Reconcile source git: cmd/flux_reconcile_source_git.md
- Reconcile source helm: cmd/flux_reconcile_source_helm.md
- Reconcile source bucket: cmd/flux_reconcile_source_bucket.md
- Reconcile image: cmd/flux_reconcile_image.md
- Reconcile image repository: cmd/flux_reconcile_image_repository.md
- Reconcile image update: cmd/flux_reconcile_image_update.md
- Uninstall: cmd/flux_uninstall.md
- Dev Guides:
- Watching for source changes: dev-guides/source-watcher.md
- Advanced debugging: dev-guides/debugging.md
- Roadmap: roadmap/index.md
- Contributing: contributing/index.md
- FAQ: faq/index.md

@ -1,32 +0,0 @@
{% extends "base.html" %}
{% block extrahead %}
<meta property="og:url" content="{{ page.canonical_url }}">
{% if page and page.meta and page.meta.title %}
<meta property="og:title" content="{{ page.meta.title }}">
{% elif page and page.title and not page.is_homepage %}
<meta property="og:title" content="{{ page.title }} - {{ config.site_name }}">
{% else %}
<meta property="og:title" content="{{ config.site_name }}">
{% endif %}
<meta property="og:description" content="{{ config.site_description }}">
<meta property="og:image" content="https://toolkit.fluxcd.io/_files/toolkit-icon.png">
<meta property="og:image:alt" content="GitOps Toolkit">
<meta property="og:image:type" content="image/png">
<meta name="twitter:card" content="summary">
<meta name="twitter:site" content="@stefanprodan">
<meta name="twitter:creator" content="@stefanprodan">
{% if page and page.meta and page.meta.title %}
<meta property="twitter:title" content="{{ page.meta.title }}">
{% elif page and page.title and not page.is_homepage %}
<meta property="twitter:title" content="{{ page.title }} - {{ config.site_name }}">
{% else %}
<meta property="twitter:title" content="{{ config.site_name }}">
{% endif %}
<meta name="twitter:description" content="{{ config.site_description }}">
<meta name="twitter:image" content="https://toolkit.fluxcd.io/_files/toolkit-icon.png">
<meta name="twitter:image:alt" content="GitOps Toolkit">
{% endblock %}
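(For context on the removed template above: MkDocs picks up overrides like this through the theme's custom_dir setting. A minimal sketch of the wiring, assuming an overrides/ directory and the mkdocs-material theme rather than this repository's actual layout:

# mkdocs.yml (sketch) -- the override file would live at overrides/main.html
theme:
  name: material
  custom_dir: overrides   # templates here are loaded before the built-in theme,
                          # so main.html can `{% extends "base.html" %}` and add
                          # extra <head> tags via the extrahead block

With that in place, the Open Graph and Twitter meta tags are injected into every generated page.)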