This repository has been archived by the owner on Nov 1, 2022. It is now read-only.

Commit

Merge pull request #1848 from 2opremio/generators-releasers
Add Manifest Factorization through .flux.yaml config files
squaremo authored May 30, 2019
2 parents cbb9ddf + b55d0ff commit e81b697
Showing 40 changed files with 2,297 additions and 346 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -46,5 +46,6 @@ testdata/sidecar/.sidecar
docker/fluxy-dumbconf.priv
test/profiles
test/bin/kubectl
test/bin/kustomize
test/bin/helm
test/bin/kind
21 changes: 15 additions & 6 deletions Makefile
@@ -6,6 +6,7 @@ SUDO := $(shell docker info > /dev/null 2> /dev/null || echo "sudo")
TEST_FLAGS?=

include docker/kubectl.version
include docker/kustomize.version
include docker/helm.version

# NB default target architecture is amd64. If you would like to try the
@@ -48,12 +49,12 @@ release-bins:
clean:
go clean
rm -rf ./build
rm -f test/bin/kubectl test/bin/helm test/bin/kind
rm -f test/bin/kubectl test/bin/helm test/bin/kind test/bin/kustomize

realclean: clean
rm -rf ./cache

test: test/bin/helm test/bin/kubectl
test: test/bin/helm test/bin/kubectl test/bin/kustomize
PATH="${PWD}/bin:${PWD}/test/bin:${PATH}" go test ${TEST_FLAGS} $(shell go list ./... | grep -v "^github.com/weaveworks/flux/vendor" | sort -u)

e2e: test/bin/helm test/bin/kubectl build/.flux.done build/.helm-operator.done
@@ -68,7 +69,7 @@ build/.%.done: docker/Dockerfile.%
-f build/docker/$*/Dockerfile.$* ./build/docker/$*
touch $@

build/.flux.done: build/fluxd build/kubectl docker/ssh_config docker/kubeconfig docker/known_hosts.sh
build/.flux.done: build/fluxd build/kubectl build/kustomize docker/ssh_config docker/kubeconfig docker/known_hosts.sh
build/.helm-operator.done: build/helm-operator build/kubectl build/helm docker/ssh_config docker/known_hosts.sh docker/helm-repositories.yaml

build/fluxd: $(FLUXD_DEPS)
@@ -83,22 +84,30 @@ build/kubectl: cache/linux-$(ARCH)/kubectl-$(KUBECTL_VERSION)
test/bin/kubectl: cache/$(CURRENT_OS_ARCH)/kubectl-$(KUBECTL_VERSION)
build/helm: cache/linux-$(ARCH)/helm-$(HELM_VERSION)
test/bin/helm: cache/$(CURRENT_OS_ARCH)/helm-$(HELM_VERSION)
build/kubectl test/bin/kubectl build/helm test/bin/helm:
build/kustomize: cache/linux-amd64/kustomize-$(KUSTOMIZE_VERSION)
test/bin/kustomize: cache/$(CURRENT_OS_ARCH)/kustomize-$(KUSTOMIZE_VERSION)

build/kubectl test/bin/kubectl build/kustomize test/bin/kustomize build/helm test/bin/helm:
mkdir -p build
cp $< $@
if [ `dirname $@` = "build" -a $(CURRENT_OS_ARCH) = "linux-$(ARCH)" ]; then strip $@; fi
chmod a+x $@

cache/%/kubectl-$(KUBECTL_VERSION): docker/kubectl.version
mkdir -p cache/$*
curl -L -o cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz "https://dl.k8s.io/$(KUBECTL_VERSION)/kubernetes-client-$*.tar.gz"
curl --fail -L -o cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz "https://dl.k8s.io/$(KUBECTL_VERSION)/kubernetes-client-$*.tar.gz"
[ $* != "linux-$(ARCH)" ] || echo "$(KUBECTL_CHECKSUM_$(ARCH)) cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz" | shasum -a 256 -c
tar -m --strip-components 3 -C ./cache/$* -xzf cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz kubernetes/client/bin/kubectl
mv ./cache/$*/kubectl $@

cache/%/kustomize-$(KUSTOMIZE_VERSION): docker/kustomize.version
mkdir -p cache/$*
curl --fail -L -o $@ "https://github.com/kubernetes-sigs/kustomize/releases/download/v$(KUSTOMIZE_VERSION)/kustomize_$(KUSTOMIZE_VERSION)_`echo $* | tr - _`"
[ $* != "linux-amd64" ] || echo "$(KUSTOMIZE_CHECKSUM) $@" | shasum -a 256 -c

cache/%/helm-$(HELM_VERSION): docker/helm.version
mkdir -p cache/$*
curl -L -o cache/$*/helm-$(HELM_VERSION).tar.gz "https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-$*.tar.gz"
curl --fail -L -o cache/$*/helm-$(HELM_VERSION).tar.gz "https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-$*.tar.gz"
[ $* != "linux-$(ARCH)" ] || echo "$(HELM_CHECKSUM_$(ARCH)) cache/$*/helm-$(HELM_VERSION).tar.gz" | shasum -a 256 -c
tar -m -C ./cache -xzf cache/$*/helm-$(HELM_VERSION).tar.gz $*/helm
mv cache/$*/helm $@
33 changes: 29 additions & 4 deletions cluster/kubernetes/manifests.go
@@ -107,16 +107,41 @@ func (m *manifests) setEffectiveNamespaces(manifests map[string]kresource.KubeMa
return result, nil
}

func (m *manifests) LoadManifests(base string, paths []string) (map[string]resource.Resource, error) {
manifests, err := kresource.Load(base, paths)
func (m *manifests) LoadManifests(baseDir string, paths []string) (map[string]resource.Resource, error) {
manifests, err := kresource.Load(baseDir, paths)
if err != nil {
return nil, err
}
return m.setEffectiveNamespaces(manifests)
}

func (m *manifests) UpdateImage(def []byte, id flux.ResourceID, container string, image image.Ref) ([]byte, error) {
func (m *manifests) ParseManifest(def []byte, source string) (map[string]resource.Resource, error) {
resources, err := kresource.ParseMultidoc(def, source)
if err != nil {
return nil, err
}
// Note: setEffectiveNamespaces() won't work for CRD instances whose CRD has not been created yet
// (because the CRD is not present in kresources).
// We could go out of our way to fix this (or give a better error), but:
// 1. With the exception of HelmReleases, CRD instances are not workloads anyway.
// 2. The problem is eventually fixed by the first successful sync.
result, err := m.setEffectiveNamespaces(resources)
if err != nil {
return nil, err
}
return result, nil
}

func (m *manifests) SetWorkloadContainerImage(def []byte, id flux.ResourceID, container string, image image.Ref) ([]byte, error) {
return updateWorkload(def, id, container, image)
}

// UpdatePolicies and ServicesWithPolicies in policies.go
func (m *manifests) CreateManifestPatch(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) {
return createManifestPatch(originalManifests, modifiedManifests, originalSource, modifiedSource)
}

func (m *manifests) ApplyManifestPatch(originalManifests, patchManifests []byte, originalSource, patchSource string) ([]byte, error) {
return applyManifestPatch(originalManifests, patchManifests, originalSource, patchSource)
}

// UpdateWorkloadPolicies in policies.go
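
The two new patch methods are intended to be used together: CreateManifestPatch derives a patch from two versions of the same multidoc, and ApplyManifestPatch re-applies that patch to the original. A minimal sketch of the round trip, assuming these methods are exposed on the cluster.Manifests interface that the type above implements; the helper name, the file "sources", and the canonical import path are illustrative only, not part of this change:

package example

import (
	"fmt"

	"github.com/weaveworks/flux/cluster"
)

// roundTrip derives a patch describing the difference between original and
// modified, then re-applies it to original; the result should match modified.
func roundTrip(m cluster.Manifests, original, modified []byte) ([]byte, error) {
	patch, err := m.CreateManifestPatch(original, modified, "original.yaml", "modified.yaml")
	if err != nil {
		return nil, fmt.Errorf("creating patch: %s", err)
	}
	return m.ApplyManifestPatch(original, patch, "original.yaml", "patch.yaml")
}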
235 changes: 235 additions & 0 deletions cluster/kubernetes/patch.go
@@ -0,0 +1,235 @@
package kubernetes

import (
"bytes"
"fmt"
"sort"

"github.com/evanphx/json-patch"
jsonyaml "github.com/ghodss/yaml"
"github.com/imdario/mergo"
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
k8sscheme "k8s.io/client-go/kubernetes/scheme"

"github.com/weaveworks/flux"
"github.com/weaveworks/flux/cluster"
"github.com/weaveworks/flux/cluster/kubernetes/resource"
)

func createManifestPatch(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) {
originalResources, err := resource.ParseMultidoc(originalManifests, originalSource)
if err != nil {
fmt.Errorf("cannot parse %s: %s", originalSource, err)
}

modifiedResources, err := resource.ParseMultidoc(modifiedManifests, modifiedSource)
if err != nil {
fmt.Errorf("cannot parse %s: %s", modifiedSource, err)
}
// Sort output by resource identifiers
var originalIDs []string
for id := range originalResources {
originalIDs = append(originalIDs, id)
}
sort.Strings(originalIDs)

buf := bytes.NewBuffer(nil)
scheme := getFullScheme()
for _, id := range originalIDs {
originalResource := originalResources[id]
modifiedResource, ok := modifiedResources[id]
if !ok {
// Only generate patches for resources present in both files
continue
}
patch, err := getPatch(originalResource, modifiedResource, scheme)
if err != nil {
return nil, fmt.Errorf("cannot obtain patch for resource %s: %s", id, err)
}
if bytes.Equal(patch, []byte("{}\n")) {
// Avoid outputting empty patches
continue
}
if err := cluster.AppendManifestToBuffer(patch, buf); err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}

func applyManifestPatch(originalManifests, patchManifests []byte, originalSource, patchSource string) ([]byte, error) {
originalResources, err := resource.ParseMultidoc(originalManifests, originalSource)
if err != nil {
return nil, fmt.Errorf("cannot parse %s: %s", originalSource, err)
}

patchResources, err := resource.ParseMultidoc(patchManifests, patchSource)
if err != nil {
return nil, fmt.Errorf("cannot parse %s: %s", patchSource, err)
}

// Make sure all patch resources have a matching resource
for id, patchResource := range patchResources {
if _, ok := originalResources[id]; !ok {
return nil, fmt.Errorf("missing resource (%s) for patch", resourceID(patchResource))
}
}

// Sort output by resource identifiers
var originalIDs []string
for id := range originalResources {
originalIDs = append(originalIDs, id)
}
sort.Strings(originalIDs)

buf := bytes.NewBuffer(nil)
scheme := getFullScheme()
for _, id := range originalIDs {
originalResource := originalResources[id]
resourceBytes := originalResource.Bytes()
if patchedResource, ok := patchResources[id]; ok {
// There was a patch, apply it
patched, err := applyPatch(originalResource, patchedResource, scheme)
if err != nil {
return nil, fmt.Errorf("cannot obtain patch for resource %s: %s", id, err)
}
resourceBytes = patched
}
if err := cluster.AppendManifestToBuffer(resourceBytes, buf); err != nil {
return nil, err
}
}

return buf.Bytes(), nil
}

func getFullScheme() *runtime.Scheme {
fullScheme := runtime.NewScheme()
utilruntime.Must(k8sscheme.AddToScheme(fullScheme))
// HelmRelease and FluxHelmRelease are intentionally not added to the scheme.
// This is done for two reasons:
// 1. The kubernetes strategic merge patcher chokes on the freeform
// values under `values:`.
// 2. External tools like kustomize cannot apply strategic merge patches
// to custom resources, so a plain JSON merge patch is used instead.
//
// utilruntime.Must(fluxscheme.AddToScheme(fullScheme))
return fullScheme
}

func getPatch(originalManifest resource.KubeManifest, modifiedManifest resource.KubeManifest, scheme *runtime.Scheme) ([]byte, error) {
groupVersion, err := schema.ParseGroupVersion(originalManifest.GroupVersion())
if err != nil {
return nil, fmt.Errorf("cannot parse groupVersion %q: %s", originalManifest.GroupVersion(), err)
}
manifest1JSON, err := jsonyaml.YAMLToJSON(originalManifest.Bytes())
if err != nil {
return nil, fmt.Errorf("cannot transform original resource (%s) to JSON: %s",
resourceID(originalManifest), err)
}
manifest2JSON, err := jsonyaml.YAMLToJSON(modifiedManifest.Bytes())
if err != nil {
return nil, fmt.Errorf("cannot transform modified resource (%s) to JSON: %s",
resourceID(modifiedManifest), err)
}
gvk := groupVersion.WithKind(originalManifest.GetKind())
obj, err := scheme.New(gvk)
var patchJSON []byte
switch {
case runtime.IsNotRegisteredError(err):
// try a normal JSON merge patch
patchJSON, err = jsonpatch.CreateMergePatch(manifest1JSON, manifest2JSON)
case err != nil:
err = fmt.Errorf("cannot obtain scheme for GroupVersionKind %q: %s", gvk, err)
default:
patchJSON, err = strategicpatch.CreateTwoWayMergePatch(manifest1JSON, manifest2JSON, obj)
}
if err != nil {
return nil, err
}
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}
err = yaml.Unmarshal(patchJSON, &jsonObj)
if err != nil {
return nil, fmt.Errorf("cannot parse patch (resource %s): %s",
resourceID(originalManifest), err)
}
// Make sure non-empty patches carry identifying metadata so that they can be matched in a multidoc YAML context
if m, ok := jsonObj.(map[interface{}]interface{}); ok && len(m) > 0 {
jsonObj, err = addIdentifyingData(originalManifest.GroupVersion(),
originalManifest.GetKind(), originalManifest.GetName(), originalManifest.GetNamespace(), m)
}
if err != nil {
return nil, fmt.Errorf("cannot add metadata to patch (resource %s): %s", resourceID(originalManifest), err)
}
patch, err := yaml.Marshal(jsonObj)
if err != nil {
return nil, fmt.Errorf("cannot transform updated patch (resource %s) to YAML: %s",
resourceID(originalManifest), err)
}
return patch, nil
}

func addIdentifyingData(apiVersion string, kind string, name string, namespace string,
obj map[interface{}]interface{}) (map[interface{}]interface{}, error) {

toMerge := map[interface{}]interface{}{}
toMerge["apiVersion"] = apiVersion
toMerge["kind"] = kind
metadata := map[string]string{
"name": name,
}
if len(namespace) > 0 {
metadata["namespace"] = namespace
}
toMerge["metadata"] = metadata
err := mergo.Merge(&obj, toMerge)
return obj, err
}

func applyPatch(originalManifest, patchManifest resource.KubeManifest, scheme *runtime.Scheme) ([]byte, error) {
groupVersion, err := schema.ParseGroupVersion(originalManifest.GroupVersion())
if err != nil {
return nil, fmt.Errorf("cannot parse groupVersion %q: %s", originalManifest.GroupVersion(), err)
}
originalJSON, err := jsonyaml.YAMLToJSON(originalManifest.Bytes())
if err != nil {
return nil, fmt.Errorf("cannot transform original resource (%s) to JSON: %s",
resourceID(originalManifest), err)
}
patchJSON, err := jsonyaml.YAMLToJSON(patchManifest.Bytes())
if err != nil {
return nil, fmt.Errorf("cannot transform patch resource (%s) to JSON: %s",
resourceID(patchManifest), err)
}
obj, err := scheme.New(groupVersion.WithKind(originalManifest.GetKind()))
var patchedJSON []byte
switch {
case runtime.IsNotRegisteredError(err):
// try a normal JSON merging
patchedJSON, err = jsonpatch.MergePatch(originalJSON, patchJSON)
case err != nil:
err = fmt.Errorf("cannot obtain scheme for kind %q: %s", originalManifest.GetKind(), err)
default:
patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patchJSON, obj)
}
if err != nil {
return nil, fmt.Errorf("cannot patch resource %s: %s", resourceID(originalManifest), err)
}
patched, err := jsonyaml.JSONToYAML(patchedJSON)
if err != nil {
return nil, fmt.Errorf("cannot transform patched resource (%s) to YAML: %s",
resourceID(originalManifest), err)
}
return patched, nil
}

// resourceID works like Resource.ResourceID() but avoids <cluster> namespaces,
// since they may be incorrect
func resourceID(manifest resource.KubeManifest) flux.ResourceID {
return flux.MakeResourceID(manifest.GetNamespace(), manifest.GetKind(), manifest.GetName())
}
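
The scheme-based branch in getPatch and applyPatch is the heart of this file: kinds registered in the scheme get a Kubernetes strategic merge patch, which understands merge keys such as a container's name, while unregistered kinds (custom resources in particular) fall back to a plain JSON merge patch that replaces list fields wholesale. A standalone sketch of that difference using the same two patch libraries imported above; the Deployment fragment, the names, and the canonical import paths are illustrative and not taken from this change:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"spec":{"template":{"spec":{"containers":[{"name":"app","image":"app:1.0"},{"name":"sidecar","image":"sidecar:1.0"}]}}}}`)
	modified := []byte(`{"spec":{"template":{"spec":{"containers":[{"name":"app","image":"app:1.1"},{"name":"sidecar","image":"sidecar:1.0"}]}}}}`)

	// Registered type: the strategic merge patch merges the containers list by
	// its "name" merge key, so the change is expressed per container rather
	// than as a replacement of the whole list.
	smp, err := strategicpatch.CreateTwoWayMergePatch(original, modified, &appsv1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("strategic merge patch: %s\n", smp)

	// Unregistered type (e.g. a custom resource): the JSON merge patch has no
	// merge keys, so the whole containers list is replaced.
	jmp, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		panic(err)
	}
	fmt.Printf("json merge patch: %s\n", jmp)
}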
