diff --git a/config/rbac/status-reporter-clusterrole.yaml b/config/rbac/status-reporter-clusterrole.yaml
index 3748b9de2..7de4c30a0 100644
--- a/config/rbac/status-reporter-clusterrole.yaml
+++ b/config/rbac/status-reporter-clusterrole.yaml
@@ -18,3 +18,11 @@ rules:
   - get
   - list
   - update
+- apiGroups:
+  - config.openshift.io
+  resources:
+  - clusterversions
+  verbs:
+  - get
+  - list
+  - watch
diff --git a/config/rbac/status-reporter-role.yaml b/config/rbac/status-reporter-role.yaml
new file mode 100644
index 000000000..4ba378bc7
--- /dev/null
+++ b/config/rbac/status-reporter-role.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  name: status-reporter
+rules:
+- apiGroups:
+  - operators.coreos.com
+  resources:
+  - clusterserviceversions
+  verbs:
+  - get
+  - list
+  - watch
diff --git a/config/rbac/status-reporter-role_binding.yaml b/config/rbac/status-reporter-role_binding.yaml
new file mode 100644
index 000000000..f3d013416
--- /dev/null
+++ b/config/rbac/status-reporter-role_binding.yaml
@@ -0,0 +1,12 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: status-reporter
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: status-reporter
+subjects:
+  - kind: ServiceAccount
+    name: status-reporter
+    namespace: system
diff --git a/go.mod b/go.mod
index 8b481d127..f9b851628 100644
--- a/go.mod
+++ b/go.mod
@@ -15,6 +15,7 @@ require (
 	github.com/onsi/ginkgo v1.16.5
 	github.com/onsi/gomega v1.27.9
 	github.com/openshift/api v0.0.0-20230816181854-a7ca92db022a
+	github.com/operator-framework/api v0.17.7-0.20230626210316-aa3e49803e7b
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.1
 	github.com/red-hat-storage/ocs-operator/v4 v4.0.0-20230915072501-d290a4270291
@@ -31,6 +32,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
@@ -61,6 +63,7 @@ require (
 	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
 	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.25.0 // indirect
diff --git a/go.sum b/go.sum
index 08514dcee..bac2a4a43 100644
--- a/go.sum
+++ b/go.sum
@@ -2,6 +2,8 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx
 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -106,6 +108,8 @@ github.com/onsi/gomega v1.27.9 h1:qIyVWbOsvQEye2QCqLsNSeH/5L1RS9vS382erEWfT3o=
 github.com/onsi/gomega v1.27.9/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
 github.com/openshift/api v0.0.0-20230816181854-a7ca92db022a h1:1bylAza0mFIchRCRPVY9qy62CxJE18fpjEAUSpIA5O4=
 github.com/openshift/api v0.0.0-20230816181854-a7ca92db022a/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs=
+github.com/operator-framework/api v0.17.7-0.20230626210316-aa3e49803e7b h1:prJEMyFQde4yxxaTuvqx1A/ukuCg/EZ2MbfdZiJwlls=
+github.com/operator-framework/api v0.17.7-0.20230626210316-aa3e49803e7b/go.mod h1:lnurXgadLnoZ7pufKMHkErr2BVOIZSpHtvEkHBcKvdk=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -123,6 +127,8 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO
 github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
 github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -186,6 +192,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/service/status-report/main.go b/service/status-report/main.go
index bf26ac76e..374eac054 100644
--- a/service/status-report/main.go
+++ b/service/status-report/main.go
@@ -19,13 +19,17 @@ package main
 import (
 	"context"
 	"os"
+	"strings"
 	"time"
 
 	"github.com/red-hat-storage/ocs-client-operator/api/v1alpha1"
 	"github.com/red-hat-storage/ocs-client-operator/pkg/csi"
 	"github.com/red-hat-storage/ocs-client-operator/pkg/utils"
 
+	configv1 "github.com/openshift/api/config/v1"
+	opv1a1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
 	providerclient "github.com/red-hat-storage/ocs-operator/v4/services/provider/client"
+	"github.com/red-hat-storage/ocs-operator/v4/services/provider/consumerstatus"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -34,6 +38,10 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/config"
 )
 
+const (
+	csvPrefix = "ocs-client-operator"
+)
+
 func main() {
 	scheme := runtime.NewScheme()
 	if err := v1alpha1.AddToScheme(scheme); err != nil {
@@ -77,6 +85,43 @@ func main() {
 		klog.Exitf("Failed to get storageClient %q/%q: %v", storageClient.Namespace, storageClient.Name, err)
 	}
 
+	csvList := opv1a1.ClusterServiceVersionList{}
+	if err = cl.List(ctx, &csvList, client.InNamespace(storageClientNamespace)); err != nil {
+		klog.Exitf("Failed to list csv resources: %v", err)
+	}
+
+	var opVersion string
+	for idx := range csvList.Items {
+		candidate := &csvList.Items[idx]
+		if strings.HasPrefix(candidate.Name, csvPrefix) {
+			opVersion = candidate.Spec.Version.String()
+			break
+		}
+	}
+
+	if opVersion == "" {
+		klog.Exitf("Unable to find csv with prefix %q", csvPrefix)
+	}
+
+	clusterVersion := &configv1.ClusterVersion{}
+	clusterVersion.Name = "version"
+	if err = cl.Get(ctx, types.NamespacedName{Name: clusterVersion.Name}, clusterVersion); err != nil {
+		klog.Exitf("Failed to get clusterVersion: %v", err)
+	}
+
+	var cVersion string
+	for idx := range clusterVersion.Status.History {
+		candidate := &clusterVersion.Status.History[idx]
+		if candidate.State == configv1.CompletedUpdate {
+			cVersion = candidate.Version
+			break
+		}
+	}
+
+	if cVersion == "" {
+		klog.Exitf("Unable to find ocp version with completed update")
+	}
+
 	providerClient, err := providerclient.NewProviderClient(
 		ctx,
 		storageClient.Spec.StorageProviderEndpoint,
@@ -87,8 +132,12 @@ func main() {
 	}
 	defer providerClient.Close()
 
-	if _, err = providerClient.ReportStatus(ctx, storageClient.Status.ConsumerID); err != nil {
-		klog.Exitf("Failed to update lastHeartbeat of storageClient %v: %v", storageClient.Status.ConsumerID, err)
+	status := &consumerstatus.Status{
+		ClusterVersion:  cVersion,
+		OperatorVersion: opVersion,
+	}
+	if _, err = providerClient.ReportStatus(ctx, storageClient.Status.ConsumerID, status); err != nil {
+		klog.Exitf("Failed to report status of storageClient %v: %v", storageClient.Status.ConsumerID, err)
 	}
 
 	var csiClusterConfigEntry = new(csi.ClusterConfigEntry)
diff --git a/vendor/github.com/blang/semver/v4/LICENSE b/vendor/github.com/blang/semver/v4/LICENSE
new file mode 100644
index 000000000..5ba5c86fc
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (c) 2014 Benedikt Lang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/vendor/github.com/blang/semver/v4/json.go b/vendor/github.com/blang/semver/v4/json.go
new file mode 100644
index 000000000..a74bf7c44
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+	"encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(data []byte) (err error) {
+	var versionString string
+
+	if err = json.Unmarshal(data, &versionString); err != nil {
+		return
+	}
+
+	*v, err = Parse(versionString)
+
+	return
+}
diff --git a/vendor/github.com/blang/semver/v4/range.go b/vendor/github.com/blang/semver/v4/range.go
new file mode 100644
index 000000000..95f7139b9
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/range.go
@@ -0,0 +1,416 @@
+package semver
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+type wildcardType int
+
+const (
+	noneWildcard  wildcardType = iota
+	majorWildcard wildcardType = 1
+	minorWildcard wildcardType = 2
+	patchWildcard wildcardType = 3
+)
+
+func wildcardTypefromInt(i int) wildcardType {
+	switch i {
+	case 1:
+		return majorWildcard
+	case 2:
+		return minorWildcard
+	case 3:
+		return patchWildcard
+	default:
+		return noneWildcard
+	}
+}
+
+type comparator func(Version, Version) bool
+
+var (
+	compEQ comparator = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) == 0
+	}
+	compNE = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) != 0
+	}
+	compGT = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) == 1
+	}
+	compGE = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) >= 0
+	}
+	compLT = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) == -1
+	}
+	compLE = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) <= 0
+	}
+)
+
+type versionRange struct {
+	v Version
+	c comparator
+}
+
+// rangeFunc creates a Range from the given versionRange.
+func (vr *versionRange) rangeFunc() Range {
+	return Range(func(v Version) bool {
+		return vr.c(v, vr.v)
+	})
+}
+
+// Range represents a range of versions.
+// A Range can be used to check if a Version satisfies it:
+//
+//	range, err := semver.ParseRange(">1.0.0 <2.0.0")
+//	range(semver.MustParse("1.1.1") // returns true
+type Range func(Version) bool
+
+// OR combines the existing Range with another Range using logical OR.
+func (rf Range) OR(f Range) Range {
+	return Range(func(v Version) bool {
+		return rf(v) || f(v)
+	})
+}
+
+// AND combines the existing Range with another Range using logical AND.
+func (rf Range) AND(f Range) Range {
+	return Range(func(v Version) bool {
+		return rf(v) && f(v)
+	})
+}
+
+// ParseRange parses a range and returns a Range.
+// If the range could not be parsed an error is returned.
+//
+// Valid ranges are:
+//   - "<1.0.0"
+//   - "<=1.0.0"
+//   - ">1.0.0"
+//   - ">=1.0.0"
+//   - "1.0.0", "=1.0.0", "==1.0.0"
+//   - "!1.0.0", "!=1.0.0"
+//
+// A Range can consist of multiple ranges separated by space:
+// Ranges can be linked by logical AND:
+//   - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
+//   - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
+//
+// Ranges can also be linked by logical OR:
+//   - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
+//
+// AND has a higher precedence than OR. It's not possible to use brackets.
+//
+// Ranges can be combined by both AND and OR
+//
+//   - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+func ParseRange(s string) (Range, error) {
+	parts := splitAndTrim(s)
+	orParts, err := splitORParts(parts)
+	if err != nil {
+		return nil, err
+	}
+	expandedParts, err := expandWildcardVersion(orParts)
+	if err != nil {
+		return nil, err
+	}
+	var orFn Range
+	for _, p := range expandedParts {
+		var andFn Range
+		for _, ap := range p {
+			opStr, vStr, err := splitComparatorVersion(ap)
+			if err != nil {
+				return nil, err
+			}
+			vr, err := buildVersionRange(opStr, vStr)
+			if err != nil {
+				return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
+			}
+			rf := vr.rangeFunc()
+
+			// Set function
+			if andFn == nil {
+				andFn = rf
+			} else { // Combine with existing function
+				andFn = andFn.AND(rf)
+			}
+		}
+		if orFn == nil {
+			orFn = andFn
+		} else {
+			orFn = orFn.OR(andFn)
+		}
+
+	}
+	return orFn, nil
+}
+
+// splitORParts splits the already cleaned parts by '||'.
+// Checks for invalid positions of the operator and returns an
+// error if found.
+func splitORParts(parts []string) ([][]string, error) {
+	var ORparts [][]string
+	last := 0
+	for i, p := range parts {
+		if p == "||" {
+			if i == 0 {
+				return nil, fmt.Errorf("First element in range is '||'")
+			}
+			ORparts = append(ORparts, parts[last:i])
+			last = i + 1
+		}
+	}
+	if last == len(parts) {
+		return nil, fmt.Errorf("Last element in range is '||'")
+	}
+	ORparts = append(ORparts, parts[last:])
+	return ORparts, nil
+}
+
+// buildVersionRange takes a slice of 2: operator and version
+// and builds a versionRange, otherwise an error.
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+	c := parseComparator(opStr)
+	if c == nil {
+		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+	}
+	v, err := Parse(vStr)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+	}
+
+	return &versionRange{
+		v: v,
+		c: c,
+	}, nil
+
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+	for _, el := range list {
+		if el == s {
+			return true
+		}
+	}
+	return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespaces
+func splitAndTrim(s string) (result []string) {
+	last := 0
+	var lastChar byte
+	excludeFromSplit := []byte{'>', '<', '='}
+	for i := 0; i < len(s); i++ {
+		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+			if last < i-1 {
+				result = append(result, s[last:i])
+			}
+			last = i + 1
+		} else if s[i] != ' ' {
+			lastChar = s[i]
+		}
+	}
+	if last < len(s)-1 {
+		result = append(result, s[last:])
+	}
+
+	for i, v := range result {
+		result[i] = strings.Replace(v, " ", "", -1)
+	}
+
+	// parts := strings.Split(s, " ")
+	// for _, x := range parts {
+	//	if s := strings.TrimSpace(x); len(s) != 0 {
+	//		result = append(result, s)
+	//	}
+	// }
+	return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+	i := strings.IndexFunc(s, unicode.IsDigit)
+	if i == -1 {
+		return "", "", fmt.Errorf("Could not get version from string: %q", s)
+	}
+	return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+	parts := strings.Split(vStr, ".")
+	nparts := len(parts)
+	wildcard := parts[nparts-1]
+
+	possibleWildcardType := wildcardTypefromInt(nparts)
+	if wildcard == "x" {
+		return possibleWildcardType
+	}
+
+	return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+	// handle 1.x.x
+	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+	parts := strings.Split(vStr2, ".")
+
+	// handle 1.x
+	if len(parts) == 2 {
+		return vStr2 + ".0"
+	}
+
+	return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", err
+	}
+	parts[0] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// incrementMajorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return "", err
+	}
+	parts[1] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x    will become    >= 1.2.0
+// <= 1.2.x    will become    < 1.3.0
+// > 1.2.x     will become    >= 1.3.0
+// < 1.2.x     will become    < 1.2.0
+// != 1.2.x    will become    < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x      will become    >= 1.0.0
+// <= 1.x      will become    < 2.0.0
+// > 1.x       will become    >= 2.0.0
+// < 1.0       will become    < 1.0.0
+// != 1.x      will become    < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x       will become    >= 1.2.0 < 1.3.0
+// 1.x         will become    >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+	var expandedParts [][]string
+	for _, p := range parts {
+		var newParts []string
+		for _, ap := range p {
+			if strings.Contains(ap, "x") {
+				opStr, vStr, err := splitComparatorVersion(ap)
+				if err != nil {
+					return nil, err
+				}
+
+				versionWildcardType := getWildcardType(vStr)
+				flatVersion := createVersionFromWildcard(vStr)
+
+				var resultOperator string
+				var shouldIncrementVersion bool
+				switch opStr {
+				case ">":
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				case ">=":
+					resultOperator = ">="
+				case "<":
+					resultOperator = "<"
+				case "<=":
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "", "=", "==":
+					newParts = append(newParts, ">="+flatVersion)
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "!=", "!":
+					newParts = append(newParts, "<"+flatVersion)
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				}
+
+				var resultVersion string
+				if shouldIncrementVersion {
+					switch versionWildcardType {
+					case patchWildcard:
+						resultVersion, _ = incrementMinorVersion(flatVersion)
+					case minorWildcard:
+						resultVersion, _ = incrementMajorVersion(flatVersion)
+					}
+				} else {
+					resultVersion = flatVersion
+				}
+
+				ap = resultOperator + resultVersion
+			}
+			newParts = append(newParts, ap)
+		}
+		expandedParts = append(expandedParts, newParts)
+	}
+
+	return expandedParts, nil
+}
+
+func parseComparator(s string) comparator {
+	switch s {
+	case "==":
+		fallthrough
+	case "":
+		fallthrough
+	case "=":
+		return compEQ
+	case ">":
+		return compGT
+	case ">=":
+		return compGE
+	case "<":
+		return compLT
+	case "<=":
+		return compLE
+	case "!":
+		fallthrough
+	case "!=":
+		return compNE
+	}
+
+	return nil
+}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+	r, err := ParseRange(s)
+	if err != nil {
+		panic(`semver: ParseRange(` + s + `): ` + err.Error())
+	}
+	return r
+}
diff --git a/vendor/github.com/blang/semver/v4/semver.go b/vendor/github.com/blang/semver/v4/semver.go
new file mode 100644
index 000000000..307de610f
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/semver.go
@@ -0,0 +1,476 @@
+package semver
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	numbers  string = "0123456789"
+	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+	alphanum        = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+	Major: 2,
+	Minor: 0,
+	Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+	Major uint64
+	Minor uint64
+	Patch uint64
+	Pre   []PRVersion
+	Build []string //No Precedence
+}
+
+// Version to string
+func (v Version) String() string {
+	b := make([]byte, 0, 5)
+	b = strconv.AppendUint(b, v.Major, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Minor, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Patch, 10)
+
+	if len(v.Pre) > 0 {
+		b = append(b, '-')
+		b = append(b, v.Pre[0].String()...)
+
+		for _, pre := range v.Pre[1:] {
+			b = append(b, '.')
+			b = append(b, pre.String()...)
+		}
+	}
+
+	if len(v.Build) > 0 {
+		b = append(b, '+')
+		b = append(b, v.Build[0]...)
+
+		for _, build := range v.Build[1:] {
+			b = append(b, '.')
+			b = append(b, build...)
+		}
+	}
+
+	return string(b)
+}
+
+// FinalizeVersion discards prerelease and build number and only returns
+// major, minor and patch number.
+func (v Version) FinalizeVersion() string {
+	b := make([]byte, 0, 5)
+	b = strconv.AppendUint(b, v.Major, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Minor, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Patch, 10)
+	return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+	return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+	return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+	return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+	if v.Major != o.Major {
+		if v.Major > o.Major {
+			return 1
+		}
+		return -1
+	}
+	if v.Minor != o.Minor {
+		if v.Minor > o.Minor {
+			return 1
+		}
+		return -1
+	}
+	if v.Patch != o.Patch {
+		if v.Patch > o.Patch {
+			return 1
+		}
+		return -1
+	}
+
+	// Quick comparison if a version has no prerelease versions
+	if len(v.Pre) == 0 && len(o.Pre) == 0 {
+		return 0
+	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+		return 1
+	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+		return -1
+	}
+
+	i := 0
+	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+			continue
+		} else if comp == 1 {
+			return 1
+		} else {
+			return -1
+		}
+	}
+
+	// If all pr versions are the equal but one has further prversion, this one greater
+	if i == len(v.Pre) && i == len(o.Pre) {
+		return 0
+	} else if i == len(v.Pre) && i < len(o.Pre) {
+		return -1
+	} else {
+		return 1
+	}
+
+}
+
+// IncrementPatch increments the patch version
+func (v *Version) IncrementPatch() error {
+	v.Patch++
+	return nil
+}
+
+// IncrementMinor increments the minor version
+func (v *Version) IncrementMinor() error {
+	v.Minor++
+	v.Patch = 0
+	return nil
+}
+
+// IncrementMajor increments the major version
+func (v *Version) IncrementMajor() error {
+	v.Major++
+	v.Minor = 0
+	v.Patch = 0
+	return nil
+}
+
+// Validate validates v and returns error in case
+func (v Version) Validate() error {
+	// Major, Minor, Patch already validated using uint64
+
+	for _, pre := range v.Pre {
+		if !pre.IsNum { //Numeric prerelease versions already uint64
+			if len(pre.VersionStr) == 0 {
+				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+			}
+			if !containsOnly(pre.VersionStr, alphanum) {
+				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+			}
+		}
+	}
+
+	for _, build := range v.Build {
+		if len(build) == 0 {
+			return fmt.Errorf("Build meta data can not be empty %q", build)
+		}
+		if !containsOnly(build, alphanum) {
+			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+		}
+	}
+
+	return nil
+}
+
+// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
+func New(s string) (*Version, error) {
+	v, err := Parse(s)
+	vp := &v
+	return vp, err
+}
+
+// Make is an alias for Parse, parses version string and returns a validated Version or error
+func Make(s string) (Version, error) {
+	return Parse(s)
+}
+
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions
+// with only major and minor components specified, and removes leading 0s.
+func ParseTolerant(s string) (Version, error) {
+	s = strings.TrimSpace(s)
+	s = strings.TrimPrefix(s, "v")
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	// Remove leading zeros.
+	for i, p := range parts {
+		if len(p) > 1 {
+			p = strings.TrimLeft(p, "0")
+			if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") {
+				p = "0" + p
+			}
+			parts[i] = p
+		}
+	}
+	// Fill up shortened versions.
+	if len(parts) < 3 {
+		if strings.ContainsAny(parts[len(parts)-1], "+-") {
+			return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+		}
+		for len(parts) < 3 {
+			parts = append(parts, "0")
+		}
+	}
+	s = strings.Join(parts, ".")
+
+	return Parse(s)
+}
+
+// Parse parses version string and returns a validated Version or error
+func Parse(s string) (Version, error) {
+	if len(s) == 0 {
+		return Version{}, errors.New("Version string empty")
+	}
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) != 3 {
+		return Version{}, errors.New("No Major.Minor.Patch elements found")
+	}
+
+	// Major
+	if !containsOnly(parts[0], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+	}
+	if hasLeadingZeroes(parts[0]) {
+		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+	}
+	major, err := strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	// Minor
+	if !containsOnly(parts[1], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+	}
+	if hasLeadingZeroes(parts[1]) {
+		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+	}
+	minor, err := strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v := Version{}
+	v.Major = major
+	v.Minor = minor
+
+	var build, prerelease []string
+	patchStr := parts[2]
+
+	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+		build = strings.Split(patchStr[buildIndex+1:], ".")
+		patchStr = patchStr[:buildIndex]
+	}
+
+	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+		prerelease = strings.Split(patchStr[preIndex+1:], ".")
+		patchStr = patchStr[:preIndex]
+	}
+
+	if !containsOnly(patchStr, numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+	}
+	if hasLeadingZeroes(patchStr) {
+		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+	}
+	patch, err := strconv.ParseUint(patchStr, 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v.Patch = patch
+
+	// Prerelease
+	for _, prstr := range prerelease {
+		parsedPR, err := NewPRVersion(prstr)
+		if err != nil {
+			return Version{}, err
+		}
+		v.Pre = append(v.Pre, parsedPR)
+	}
+
+	// Build meta data
+	for _, str := range build {
+		if len(str) == 0 {
+			return Version{}, errors.New("Build meta data is empty")
+		}
+		if !containsOnly(str, alphanum) {
+			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+		}
+		v.Build = append(v.Build, str)
+	}
+
+	return v, nil
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(s string) Version {
+	v, err := Parse(s)
+	if err != nil {
+		panic(`semver: Parse(` + s + `): ` + err.Error())
+	}
+	return v
+}
+
+// PRVersion represents a PreRelease Version
+type PRVersion struct {
+	VersionStr string
+	VersionNum uint64
+	IsNum      bool
+}
+
+// NewPRVersion creates a new valid prerelease version
+func NewPRVersion(s string) (PRVersion, error) {
+	if len(s) == 0 {
+		return PRVersion{}, errors.New("Prerelease is empty")
+	}
+	v := PRVersion{}
+	if containsOnly(s, numbers) {
+		if hasLeadingZeroes(s) {
+			return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
+		}
+		num, err := strconv.ParseUint(s, 10, 64)
+
+		// Might never be hit, but just in case
+		if err != nil {
+			return PRVersion{}, err
+		}
+		v.VersionNum = num
+		v.IsNum = true
+	} else if containsOnly(s, alphanum) {
+		v.VersionStr = s
+		v.IsNum = false
+	} else {
+		return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
+	}
+	return v, nil
+}
+
+// IsNumeric checks if prerelease-version is numeric
+func (v PRVersion) IsNumeric() bool {
+	return v.IsNum
+}
+
+// Compare compares two PreRelease Versions v and o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v PRVersion) Compare(o PRVersion) int {
+	if v.IsNum && !o.IsNum {
+		return -1
+	} else if !v.IsNum && o.IsNum {
+		return 1
+	} else if v.IsNum && o.IsNum {
+		if v.VersionNum == o.VersionNum {
+			return 0
+		} else if v.VersionNum > o.VersionNum {
+			return 1
+		} else {
+			return -1
+		}
+	} else { // both are Alphas
+		if v.VersionStr == o.VersionStr {
+			return 0
+		} else if v.VersionStr > o.VersionStr {
+			return 1
+		} else {
+			return -1
+		}
+	}
+}
+
+// PreRelease version to string
+func (v PRVersion) String() string {
+	if v.IsNum {
+		return strconv.FormatUint(v.VersionNum, 10)
+	}
+	return v.VersionStr
+}
+
+func containsOnly(s string, set string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(set, r)
+	}) == -1
+}
+
+func hasLeadingZeroes(s string) bool {
+	return len(s) > 1 && s[0] == '0'
+}
+
+// NewBuildVersion creates a new valid build version
+func NewBuildVersion(s string) (string, error) {
+	if len(s) == 0 {
+		return "", errors.New("Buildversion is empty")
+	}
+	if !containsOnly(s, alphanum) {
+		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
+	}
+	return s, nil
+}
+
+// FinalizeVersion returns the major, minor and patch number only and discards
+// prerelease and build number.
+func FinalizeVersion(s string) (string, error) {
+	v, err := Parse(s)
+	if err != nil {
+		return "", err
+	}
+	v.Pre = nil
+	v.Build = nil
+
+	finalVer := v.String()
+	return finalVer, nil
+}
diff --git a/vendor/github.com/blang/semver/v4/sort.go b/vendor/github.com/blang/semver/v4/sort.go
new file mode 100644
index 000000000..e18f88082
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/sort.go
@@ -0,0 +1,28 @@
+package semver
+
+import (
+	"sort"
+)
+
+// Versions represents multiple versions.
+type Versions []Version
+
+// Len returns length of version collection
+func (s Versions) Len() int {
+	return len(s)
+}
+
+// Swap swaps two versions inside the collection by its indices
+func (s Versions) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Less checks if version at index i is less than version at index j
+func (s Versions) Less(i, j int) bool {
+	return s[i].LT(s[j])
+}
+
+// Sort sorts a slice of versions
+func Sort(versions []Version) {
+	sort.Sort(Versions(versions))
+}
diff --git a/vendor/github.com/blang/semver/v4/sql.go b/vendor/github.com/blang/semver/v4/sql.go
new file mode 100644
index 000000000..db958134f
--- /dev/null
+++ b/vendor/github.com/blang/semver/v4/sql.go
@@ -0,0 +1,30 @@
+package semver
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements the database/sql.Scanner interface.
+func (v *Version) Scan(src interface{}) (err error) {
+	var str string
+	switch src := src.(type) {
+	case string:
+		str = src
+	case []byte:
+		str = string(src)
+	default:
+		return fmt.Errorf("version.Scan: cannot convert %T to string", src)
+	}
+
+	if t, err := Parse(str); err == nil {
+		*v = t
+	}
+
+	return
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
diff --git a/vendor/github.com/operator-framework/api/LICENSE b/vendor/github.com/operator-framework/api/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go
new file mode 100644
index 000000000..a0ffb9fcb
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go
@@ -0,0 +1,67 @@
+package version
+
+import (
+	"encoding/json"
+
+	semver "github.com/blang/semver/v4"
+)
+
+// +k8s:openapi-gen=true
+// OperatorVersion is a wrapper around semver.Version which supports correct
+// marshaling to YAML and JSON.
+// +kubebuilder:validation:Type=string
+type OperatorVersion struct {
+	semver.Version `json:"-"`
+}
+
+// DeepCopyInto creates a deep-copy of the Version value.
+func (v *OperatorVersion) DeepCopyInto(out *OperatorVersion) {
+	out.Major = v.Major
+	out.Minor = v.Minor
+	out.Patch = v.Patch
+
+	if v.Pre != nil {
+		pre := make([]semver.PRVersion, len(v.Pre))
+		copy(pre, v.Pre)
+		out.Pre = pre
+	}
+
+	if v.Build != nil {
+		build := make([]string, len(v.Build))
+		copy(build, v.Build)
+		out.Build = build
+	}
+}
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v OperatorVersion) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *OperatorVersion) UnmarshalJSON(data []byte) (err error) {
+	var versionString string
+
+	if err = json.Unmarshal(data, &versionString); err != nil {
+		return
+	}
+
+	version := semver.Version{}
+	version, err = semver.ParseTolerant(versionString)
+	if err != nil {
+		return err
+	}
+	v.Version = version
+	return
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ OperatorVersion) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+// "semver" is not a standard openapi format but tooling may use the value regardless
+func (_ OperatorVersion) OpenAPISchemaFormat() string { return "semver" }
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/doc.go
new file mode 100644
index 000000000..7eba79448
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/doc.go
@@ -0,0 +1,4 @@
+// +kubebuilder:skip
+
+// Package operators contains all resource types of the operators.coreos.com API group.
+package operators
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/register.go b/vendor/github.com/operator-framework/api/pkg/operators/register.go
new file mode 100644
index 000000000..e3c31d51a
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/register.go
@@ -0,0 +1,31 @@
+package operators
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	// GroupName is the group name used in this package.
+	GroupName = "operators.coreos.com"
+	// GroupVersion is the group version used in this package.
+	GroupVersion = runtime.APIVersionInternal
+
+	// LEGACY: Exported kind names, remove after major version bump
+
+	// ClusterServiceVersionKind is the kind name for ClusterServiceVersion resources.
+	ClusterServiceVersionKind = "ClusterServiceVersion"
+	// CatalogSourceKind is the kind name for CatalogSource resources.
+	CatalogSourceKind = "CatalogSource"
+	// InstallPlanKind is the kind name for InstallPlan resources.
+	InstallPlanKind = "InstallPlan"
+	// SubscriptionKind is the kind name for Subscription resources.
+	SubscriptionKind = "Subscription"
+	// OperatorKind is the kind name for Operator resources.
+	OperatorKind = "Operator"
+	// OperatorGroupKind is the kind name for OperatorGroup resources.
+	OperatorGroupKind = "OperatorGroup"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
new file mode 100644
index 000000000..ef8b2c8f2
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
@@ -0,0 +1,334 @@
+package v1alpha1
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+const (
+	CatalogSourceCRDAPIVersion  = GroupName + "/" + GroupVersion
+	CatalogSourceKind           = "CatalogSource"
+	DefaultRegistryPollDuration = 15 * time.Minute
+)
+
+// SourceType indicates the type of backing store for a CatalogSource
+type SourceType string
+
+const (
+	// SourceTypeInternal (deprecated) specifies a CatalogSource of type SourceTypeConfigmap
+	SourceTypeInternal SourceType = "internal"
+
+	// SourceTypeConfigmap specifies a CatalogSource that generates a configmap-server registry
+	SourceTypeConfigmap SourceType = "configmap"
+
+	// SourceTypeGrpc specifies a CatalogSource that can use an operator registry image to generate a
+	// registry-server or connect to a pre-existing registry at an address.
+	SourceTypeGrpc SourceType = "grpc"
+)
+
+const (
+	// CatalogSourceSpecInvalidError denotes when fields on the spec of the CatalogSource are not valid.
+	CatalogSourceSpecInvalidError ConditionReason = "SpecInvalidError"
+	// CatalogSourceConfigMapError denotes when there is an issue extracting manifests from the specified ConfigMap.
+	CatalogSourceConfigMapError ConditionReason = "ConfigMapError"
+	// CatalogSourceRegistryServerError denotes when there is an issue querying the specified registry server.
+	CatalogSourceRegistryServerError ConditionReason = "RegistryServerError"
+	// CatalogSourceIntervalInvalidError denotes if the registry polling interval is invalid.
+	CatalogSourceIntervalInvalidError ConditionReason = "InvalidIntervalError"
+)
+
+type CatalogSourceSpec struct {
+	// SourceType is the type of source
+	SourceType SourceType `json:"sourceType"`
+
+	// Priority field assigns a weight to the catalog source to prioritize them so that it can be consumed by the dependency resolver.
+	// Usage:
+	// Higher weight indicates that this catalog source is preferred over lower weighted catalog sources during dependency resolution.
+	// The range of the priority value can go from positive to negative in the range of int32.
+	// The default value to a catalog source with unassigned priority would be 0.
+	// The catalog source with the same priority values will be ranked lexicographically based on its name.
+	// +optional
+	Priority int `json:"priority,omitempty"`
+
+	// ConfigMap is the name of the ConfigMap to be used to back a configmap-server registry.
+	// Only used when SourceType = SourceTypeConfigmap or SourceTypeInternal.
+	// +optional
+	ConfigMap string `json:"configMap,omitempty"`
+
+	// Address is a host that OLM can use to connect to a pre-existing registry.
+	// Format: <registry>:<port>
+	// Only used when SourceType = SourceTypeGrpc.
+	// Ignored when the Image field is set.
+	// +optional
+	Address string `json:"address,omitempty"`
+
+	// Image is an operator-registry container image to instantiate a registry-server with.
+	// Only used when SourceType = SourceTypeGrpc.
+	// If present, the address field is ignored.
+	// +optional
+	Image string `json:"image,omitempty"`
+
+	// GrpcPodConfig exposes different overrides for the pod spec of the CatalogSource Pod.
+	// Only used when SourceType = SourceTypeGrpc and Image is set.
+	// +optional
+	GrpcPodConfig *GrpcPodConfig `json:"grpcPodConfig,omitempty"`
+
+	// UpdateStrategy defines how updated catalog source images can be discovered
+	// Consists of an interval that defines polling duration and an embedded strategy type
+	// +optional
+	UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
+
+	// Secrets represent set of secrets that can be used to access the contents of the catalog.
+	// It is best to keep this list small, since each will need to be tried for every catalog entry.
+	// +optional
+	Secrets []string `json:"secrets,omitempty"`
+
+	// Metadata
+	DisplayName string `json:"displayName,omitempty"`
+	Description string `json:"description,omitempty"`
+	Publisher   string `json:"publisher,omitempty"`
+	Icon        Icon   `json:"icon,omitempty"`
+}
+
+type SecurityConfig string
+
+const (
+	Legacy     SecurityConfig = "legacy"
+	Restricted SecurityConfig = "restricted"
+)
+
+// GrpcPodConfig contains configuration specified for a catalog source
+type GrpcPodConfig struct {
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations are the catalog source's pod's tolerations.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// Affinity is the catalog source's pod's affinity.
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
+	// If specified, indicates the pod's priority.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName *string `json:"priorityClassName,omitempty"`
+
+	// SecurityContextConfig can be one of `legacy` or `restricted`. The CatalogSource's pod is either injected with the
+	// right pod.spec.securityContext and pod.spec.container[*].securityContext values to allow the pod to run in Pod
+	// Security Admission (PSA) `restricted` mode, or doesn't set these values at all, in which case the pod can only be
+	// run in PSA `baseline` or `privileged` namespaces. Currently if the SecurityContextConfig is unspecified, the default
+	// value of `legacy` is used. Specifying a value other than `legacy` or `restricted` result in a validation error.
+	// When using older catalog images, which could not be run in `restricted` mode, the SecurityContextConfig should be
+	// set to `legacy`.
+	//
+	// In a future version will the default will be set to `restricted`, catalog maintainers should rebuild their catalogs
+	// with a version of opm that supports running catalogSource pods in `restricted` mode to prepare for these changes.
+	//
+	// More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/'
+	// +optional
+	// +kubebuilder:validation:Enum=legacy;restricted
+	// +kubebuilder:default:=legacy
+	SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"`
+}
+
+// UpdateStrategy holds all the different types of catalog source update strategies
+// Currently only registry polling strategy is implemented
+type UpdateStrategy struct {
+	*RegistryPoll `json:"registryPoll,omitempty"`
+}
+
+type RegistryPoll struct {
+	// Interval is used to determine the time interval between checks of the latest catalog source version.
+	// The catalog operator polls to see if a new version of the catalog source is available.
+	// If available, the latest image is pulled and gRPC traffic is directed to the latest catalog source.
+	RawInterval  string           `json:"interval,omitempty"`
+	Interval     *metav1.Duration `json:"-"`
+	ParsingError string           `json:"-"`
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (u *UpdateStrategy) UnmarshalJSON(data []byte) (err error) {
+	type alias struct {
+		*RegistryPoll `json:"registryPoll,omitempty"`
+	}
+	us := alias{}
+	if err = json.Unmarshal(data, &us); err != nil {
+		return err
+	}
+	registryPoll := &RegistryPoll{
+		RawInterval: us.RegistryPoll.RawInterval,
+	}
+	duration, err := time.ParseDuration(registryPoll.RawInterval)
+	if err != nil {
+		registryPoll.ParsingError = fmt.Sprintf("error parsing spec.updateStrategy.registryPoll.interval. Using the default value of %s instead. Error: %s", DefaultRegistryPollDuration, err)
+		registryPoll.Interval = &metav1.Duration{Duration: DefaultRegistryPollDuration}
+	} else {
+		registryPoll.Interval = &metav1.Duration{Duration: duration}
+	}
+	u.RegistryPoll = registryPoll
+	return nil
+}
+
+type RegistryServiceStatus struct {
+	Protocol         string      `json:"protocol,omitempty"`
+	ServiceName      string      `json:"serviceName,omitempty"`
+	ServiceNamespace string      `json:"serviceNamespace,omitempty"`
+	Port             string      `json:"port,omitempty"`
+	CreatedAt        metav1.Time `json:"createdAt,omitempty"`
+}
+
+func (s *RegistryServiceStatus) Address() string {
+	return fmt.Sprintf("%s.%s.svc:%s", s.ServiceName, s.ServiceNamespace, s.Port)
+}
+
+type GRPCConnectionState struct {
+	Address           string      `json:"address,omitempty"`
+	LastObservedState string      `json:"lastObservedState"`
+	LastConnectTime   metav1.Time `json:"lastConnect,omitempty"`
+}
+
+type CatalogSourceStatus struct {
+	// A human readable message indicating details about why the CatalogSource is in this condition.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// Reason is the reason the CatalogSource was transitioned to its current state.
+	// +optional
+	Reason ConditionReason `json:"reason,omitempty"`
+
+	// The last time the CatalogSource image registry has been polled to ensure the image is up-to-date
+	LatestImageRegistryPoll *metav1.Time `json:"latestImageRegistryPoll,omitempty"`
+
+	ConfigMapResource     *ConfigMapResourceReference `json:"configMapReference,omitempty"`
+	RegistryServiceStatus *RegistryServiceStatus      `json:"registryService,omitempty"`
+	GRPCConnectionState   *GRPCConnectionState        `json:"connectionState,omitempty"`
+
+	// Represents the state of a CatalogSource. Note that Message and Reason represent the original
+	// status information, which may be migrated to be conditions based in the future. Any new features
+	// introduced will use conditions.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+type ConfigMapResourceReference struct {
+	Name            string      `json:"name"`
+	Namespace       string      `json:"namespace"`
+	UID             types.UID   `json:"uid,omitempty"`
+	ResourceVersion string      `json:"resourceVersion,omitempty"`
+	LastUpdateTime  metav1.Time `json:"lastUpdateTime,omitempty"`
+}
+
+func (r *ConfigMapResourceReference) IsAMatch(object *metav1.ObjectMeta) bool {
+	return r.UID == object.GetUID() && r.ResourceVersion == object.GetResourceVersion()
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName=catsrc,categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The pretty name of the catalog"
+// +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.sourceType`,description="The type of the catalog"
+// +kubebuilder:printcolumn:name="Publisher",type=string,JSONPath=`.spec.publisher`,description="The publisher of the catalog"
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+
+// CatalogSource is a repository of CSVs, CRDs, and operator packages.
+type CatalogSource struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec CatalogSourceSpec `json:"spec"`
+	// +optional
+	Status CatalogSourceStatus `json:"status"`
+}
+
+func (c *CatalogSource) Address() string {
+	if c.Spec.Address != "" {
+		return c.Spec.Address
+	}
+	return c.Status.RegistryServiceStatus.Address()
+}
+
+func (c *CatalogSource) SetError(reason ConditionReason, err error) {
+	c.Status.Reason = reason
+	c.Status.Message = ""
+	if err != nil {
+		c.Status.Message = err.Error()
+	}
+}
+
+func (c *CatalogSource) SetLastUpdateTime() {
+	now := metav1.Now()
+	c.Status.LatestImageRegistryPoll = &now
+}
+
+// Check if it is time to update based on polling setting
+func (c *CatalogSource) Update() bool {
+	if !c.Poll() {
+		return false
+	}
+	interval := c.Spec.UpdateStrategy.Interval.Duration
+	latest := c.Status.LatestImageRegistryPoll
+	if latest == nil {
+		logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", latest)
+	} else {
+		logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", *c.Status.LatestImageRegistryPoll)
+	}
+
+	if c.Status.LatestImageRegistryPoll.IsZero() {
+		logrus.WithField("CatalogSource", c.Name).Debugf("creation timestamp plus interval before now %t", c.CreationTimestamp.Add(interval).Before(time.Now()))
+		if c.CreationTimestamp.Add(interval).Before(time.Now()) {
+			return true
+		}
+	} else {
+		logrus.WithField("CatalogSource", c.Name).Debugf("latest poll plus interval before now %t", c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()))
+		if c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Poll determines whether the polling feature is enabled on the particular catalog source
+func (c *CatalogSource) Poll() bool {
+	if c.Spec.UpdateStrategy == nil {
+		return false
+	}
+	// if polling interval is zero polling will not be done
+	if c.Spec.UpdateStrategy.RegistryPoll == nil {
+		return false
+	}
+	// if catalog source is not backed by an image polling will not be done
+	if c.Spec.Image == "" {
+		return false
+	}
+	// if image
is not type gRPC polling will not be done + if c.Spec.SourceType != SourceTypeGrpc { + return false + } + return true +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CatalogSourceList is a repository of CSVs, CRDs, and operator packages. +type CatalogSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []CatalogSource `json:"items"` +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go new file mode 100644 index 000000000..ffc357b12 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go @@ -0,0 +1,208 @@ +package v1alpha1 + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" +) + +const ( + CopiedLabelKey = "olm.copiedFrom" + + // ConditionsLengthLimit is the maximum length of Status.Conditions of a + // given ClusterServiceVersion object. The oldest condition(s) are removed + // from the list as it grows over time to keep it at limit. + ConditionsLengthLimit = 20 +) + +// obsoleteReasons are the set of reasons that mean a CSV should no longer be processed as active +var obsoleteReasons = map[ConditionReason]struct{}{ + CSVReasonReplaced: {}, + CSVReasonBeingReplaced: {}, +} + +// uncopiableReasons are the set of reasons that should prevent a CSV from being copied to target namespaces +var uncopiableReasons = map[ConditionReason]struct{}{ + CSVReasonCopied: {}, + CSVReasonInvalidInstallModes: {}, + CSVReasonNoTargetNamespaces: {}, + CSVReasonUnsupportedOperatorGroup: {}, + CSVReasonNoOperatorGroup: {}, + CSVReasonTooManyOperatorGroups: {}, + CSVReasonInterOperatorGroupOwnerConflict: {}, + CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {}, +} + +// safeToAnnotateOperatorGroupReasons are the set of reasons that it's safe to attempt to update the operatorgroup +// annotations +var safeToAnnotateOperatorGroupReasons = map[ConditionReason]struct{}{ + CSVReasonOwnerConflict: {}, + CSVReasonInstallSuccessful: {}, + CSVReasonInvalidInstallModes: {}, + CSVReasonNoTargetNamespaces: {}, + CSVReasonUnsupportedOperatorGroup: {}, + CSVReasonNoOperatorGroup: {}, + CSVReasonTooManyOperatorGroups: {}, + CSVReasonInterOperatorGroupOwnerConflict: {}, + CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {}, +} + +// SetPhaseWithEventIfChanged emits a Kubernetes event with details of a phase change and sets the current phase if phase, reason, or message would changed +func (c *ClusterServiceVersion) SetPhaseWithEventIfChanged(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) { + if c.Status.Phase == phase && c.Status.Reason == reason && c.Status.Message == message { + return + } + + c.SetPhaseWithEvent(phase, reason, message, now, recorder) +} + +// SetPhaseWithEvent generates a Kubernetes event with details about the phase change and sets the current phase +func (c *ClusterServiceVersion) SetPhaseWithEvent(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) { + var eventtype string + if phase == CSVPhaseFailed { + eventtype = v1.EventTypeWarning + } else { + eventtype = v1.EventTypeNormal + } + go recorder.Event(c, eventtype, string(reason), message) + c.SetPhase(phase, reason, message, 
now) +} + +// SetPhase sets the current phase and adds a condition if necessary +func (c *ClusterServiceVersion) SetPhase(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time) { + newCondition := func() ClusterServiceVersionCondition { + return ClusterServiceVersionCondition{ + Phase: c.Status.Phase, + LastTransitionTime: c.Status.LastTransitionTime, + LastUpdateTime: c.Status.LastUpdateTime, + Message: message, + Reason: reason, + } + } + + defer c.TrimConditionsIfLimitExceeded() + + c.Status.LastUpdateTime = now + if c.Status.Phase != phase { + c.Status.Phase = phase + c.Status.LastTransitionTime = now + } + c.Status.Message = message + c.Status.Reason = reason + if len(c.Status.Conditions) == 0 { + c.Status.Conditions = append(c.Status.Conditions, newCondition()) + return + } + + previousCondition := c.Status.Conditions[len(c.Status.Conditions)-1] + if previousCondition.Phase != c.Status.Phase || previousCondition.Reason != c.Status.Reason { + c.Status.Conditions = append(c.Status.Conditions, newCondition()) + } +} + +// SetRequirementStatus adds the status of all requirements to the CSV status +func (c *ClusterServiceVersion) SetRequirementStatus(statuses []RequirementStatus) { + c.Status.RequirementStatus = statuses +} + +// IsObsolete returns if this CSV is being replaced or is marked for deletion +func (c *ClusterServiceVersion) IsObsolete() bool { + for _, condition := range c.Status.Conditions { + _, ok := obsoleteReasons[condition.Reason] + if ok { + return true + } + } + return false +} + +// IsCopied returns true if the CSV has been copied and false otherwise. +func (c *ClusterServiceVersion) IsCopied() bool { + operatorNamespace, ok := c.GetAnnotations()[OperatorGroupNamespaceAnnotationKey] + if c.Status.Reason == CSVReasonCopied || ok && c.GetNamespace() != operatorNamespace { + return true + } + + if labels := c.GetLabels(); labels != nil { + if _, ok := labels[CopiedLabelKey]; ok { + return true + } + } + return false +} + +func (c *ClusterServiceVersion) IsUncopiable() bool { + if c.Status.Phase == CSVPhaseNone { + return true + } + _, ok := uncopiableReasons[c.Status.Reason] + return ok +} + +func (c *ClusterServiceVersion) IsSafeToUpdateOperatorGroupAnnotations() bool { + _, ok := safeToAnnotateOperatorGroupReasons[c.Status.Reason] + return ok +} + +// NewInstallModeSet returns an InstallModeSet instantiated from the given list of InstallModes. +// If the given list is not a set, an error is returned. +func NewInstallModeSet(modes []InstallMode) (InstallModeSet, error) { + set := InstallModeSet{} + for _, mode := range modes { + if _, exists := set[mode.Type]; exists { + return nil, fmt.Errorf("InstallMode list contains duplicates, cannot make set: %v", modes) + } + set[mode.Type] = mode.Supported + } + + return set, nil +} + +// Supports returns an error if the InstallModeSet does not support configuration for +// the given operatorNamespace and list of target namespaces. 
+func (set InstallModeSet) Supports(operatorNamespace string, namespaces []string) error { + numNamespaces := len(namespaces) + switch { + case numNamespaces == 0: + return fmt.Errorf("operatorgroup has invalid selected namespaces, cannot configure to watch zero namespaces") + case numNamespaces == 1: + switch namespaces[0] { + case operatorNamespace: + if !set[InstallModeTypeOwnNamespace] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace) + } + case v1.NamespaceAll: + if !set[InstallModeTypeAllNamespaces] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch all namespaces", InstallModeTypeAllNamespaces) + } + default: + if !set[InstallModeTypeSingleNamespace] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch one namespace", InstallModeTypeSingleNamespace) + } + } + case numNamespaces > 1 && !set[InstallModeTypeMultiNamespace]: + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch %d namespaces", InstallModeTypeMultiNamespace, numNamespaces) + case numNamespaces > 1: + for _, namespace := range namespaces { + if namespace == operatorNamespace && !set[InstallModeTypeOwnNamespace] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace) + } + if namespace == v1.NamespaceAll { + return fmt.Errorf("operatorgroup has invalid selected namespaces, NamespaceAll found when |selected namespaces| > 1") + } + } + } + + return nil +} + +func (c *ClusterServiceVersion) TrimConditionsIfLimitExceeded() { + if len(c.Status.Conditions) <= ConditionsLengthLimit { + return + } + + firstIndex := len(c.Status.Conditions) - ConditionsLengthLimit + c.Status.Conditions = c.Status.Conditions[firstIndex:len(c.Status.Conditions)] +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go new file mode 100644 index 000000000..3e6d32480 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -0,0 +1,737 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + rbac "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/operator-framework/api/pkg/lib/version" +) + +const ( + ClusterServiceVersionAPIVersion = GroupName + "/" + GroupVersion + ClusterServiceVersionKind = "ClusterServiceVersion" + OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace" + InstallStrategyNameDeployment = "deployment" + SkipRangeAnnotationKey = "olm.skipRange" +) + +// InstallModeType is a supported type of install mode for CSV installation +type InstallModeType string + +const ( + // InstallModeTypeOwnNamespace indicates that the operator can be a member of an `OperatorGroup` that selects its own namespace. + InstallModeTypeOwnNamespace InstallModeType = "OwnNamespace" + // InstallModeTypeSingleNamespace indicates that the operator can be a member of an `OperatorGroup` that selects one namespace. 
+ InstallModeTypeSingleNamespace InstallModeType = "SingleNamespace" + // InstallModeTypeMultiNamespace indicates that the operator can be a member of an `OperatorGroup` that selects more than one namespace. + InstallModeTypeMultiNamespace InstallModeType = "MultiNamespace" + // InstallModeTypeAllNamespaces indicates that the operator can be a member of an `OperatorGroup` that selects all namespaces (target namespace set is the empty string ""). + InstallModeTypeAllNamespaces InstallModeType = "AllNamespaces" +) + +// InstallMode associates an InstallModeType with a flag representing if the CSV supports it +// +k8s:openapi-gen=true +type InstallMode struct { + Type InstallModeType `json:"type"` + Supported bool `json:"supported"` +} + +// InstallModeSet is a mapping of unique InstallModeTypes to whether they are supported. +type InstallModeSet map[InstallModeType]bool + +// NamedInstallStrategy represents the block of an ClusterServiceVersion resource +// where the install strategy is specified. +// +k8s:openapi-gen=true +type NamedInstallStrategy struct { + StrategyName string `json:"strategy"` + StrategySpec StrategyDetailsDeployment `json:"spec,omitempty"` +} + +// StrategyDeploymentPermissions describe the rbac rules and service account needed by the install strategy +// +k8s:openapi-gen=true +type StrategyDeploymentPermissions struct { + ServiceAccountName string `json:"serviceAccountName"` + Rules []rbac.PolicyRule `json:"rules"` +} + +// StrategyDeploymentSpec contains the name, spec and labels for the deployment ALM should create +// +k8s:openapi-gen=true +type StrategyDeploymentSpec struct { + Name string `json:"name"` + Spec appsv1.DeploymentSpec `json:"spec"` + Label labels.Set `json:"label,omitempty"` +} + +// StrategyDetailsDeployment represents the parsed details of a Deployment +// InstallStrategy. 
+// +k8s:openapi-gen=true +type StrategyDetailsDeployment struct { + DeploymentSpecs []StrategyDeploymentSpec `json:"deployments"` + Permissions []StrategyDeploymentPermissions `json:"permissions,omitempty"` + ClusterPermissions []StrategyDeploymentPermissions `json:"clusterPermissions,omitempty"` +} + +func (d *StrategyDetailsDeployment) GetStrategyName() string { + return InstallStrategyNameDeployment +} + +// StatusDescriptor describes a field in a status block of a CRD so that OLM can consume it +// +k8s:openapi-gen=true +type StatusDescriptor struct { + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it +// +k8s:openapi-gen=true +type SpecDescriptor struct { + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// ActionDescriptor describes a declarative action that can be performed on a custom resource instance +// +k8s:openapi-gen=true +type ActionDescriptor struct { + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// CRDDescription provides details to OLM about the CRDs +// +k8s:openapi-gen=true +type CRDDescription struct { + Name string `json:"name"` + Version string `json:"version"` + Kind string `json:"kind"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + Resources []APIResourceReference `json:"resources,omitempty"` + StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"` + SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"` + ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"` +} + +// APIServiceDescription provides details to OLM about apis provided via aggregation +// +k8s:openapi-gen=true +type APIServiceDescription struct { + Name string `json:"name"` + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + DeploymentName string `json:"deploymentName,omitempty"` + ContainerPort int32 `json:"containerPort,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + Resources []APIResourceReference `json:"resources,omitempty"` + StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"` + SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"` + ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"` +} + +// APIResourceReference is a reference to a Kubernetes resource type that the referrer utilizes. +// +k8s:openapi-gen=true +type APIResourceReference struct { + // Plural name of the referenced resource type (CustomResourceDefinition.Spec.Names[].Plural). Empty string if the referenced resource type is not a custom resource. + Name string `json:"name"` + // Kind of the referenced resource type. + Kind string `json:"kind"` + // API Version of the referenced resource type. 
+ Version string `json:"version"` +} + +// GetName returns the name of an APIService as derived from its group and version. +func (d APIServiceDescription) GetName() string { + return fmt.Sprintf("%s.%s", d.Version, d.Group) +} + +// WebhookAdmissionType is the type of admission webhooks supported by OLM +type WebhookAdmissionType string + +const ( + // ValidatingAdmissionWebhook is for validating admission webhooks + ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook" + // MutatingAdmissionWebhook is for mutating admission webhooks + MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook" + // ConversionWebhook is for conversion webhooks + ConversionWebhook WebhookAdmissionType = "ConversionWebhook" +) + +// WebhookDescription provides details to OLM about required webhooks +// +k8s:openapi-gen=true +type WebhookDescription struct { + GenerateName string `json:"generateName"` + // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook;ConversionWebhook + Type WebhookAdmissionType `json:"type"` + DeploymentName string `json:"deploymentName,omitempty"` + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=443 + ContainerPort int32 `json:"containerPort,omitempty"` + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + Rules []admissionregistrationv1.RuleWithOperations `json:"rules,omitempty"` + FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"` + MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty"` + SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects"` + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` + AdmissionReviewVersions []string `json:"admissionReviewVersions"` + ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + WebhookPath *string `json:"webhookPath,omitempty"` + ConversionCRDs []string `json:"conversionCRDs,omitempty"` +} + +// GetValidatingWebhook returns a ValidatingWebhook generated from the WebhookDescription +func (w *WebhookDescription) GetValidatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.ValidatingWebhook { + return admissionregistrationv1.ValidatingWebhook{ + Name: w.GenerateName, + Rules: w.Rules, + FailurePolicy: w.FailurePolicy, + MatchPolicy: w.MatchPolicy, + NamespaceSelector: namespaceSelector, + ObjectSelector: w.ObjectSelector, + SideEffects: w.SideEffects, + TimeoutSeconds: w.TimeoutSeconds, + AdmissionReviewVersions: w.AdmissionReviewVersions, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: w.DomainName() + "-service", + Namespace: namespace, + Path: w.WebhookPath, + Port: &w.ContainerPort, + }, + CABundle: caBundle, + }, + } +} + +// GetMutatingWebhook returns a MutatingWebhook generated from the WebhookDescription +func (w *WebhookDescription) GetMutatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.MutatingWebhook { + return admissionregistrationv1.MutatingWebhook{ + Name: w.GenerateName, + Rules: w.Rules, + FailurePolicy: w.FailurePolicy, + MatchPolicy: w.MatchPolicy, + NamespaceSelector: namespaceSelector, + ObjectSelector: w.ObjectSelector, + SideEffects: w.SideEffects, + TimeoutSeconds: 
w.TimeoutSeconds, + AdmissionReviewVersions: w.AdmissionReviewVersions, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: w.DomainName() + "-service", + Namespace: namespace, + Path: w.WebhookPath, + Port: &w.ContainerPort, + }, + CABundle: caBundle, + }, + ReinvocationPolicy: w.ReinvocationPolicy, + } +} + +// DomainName returns the result of replacing all periods in the given Webhook name with hyphens +func (w *WebhookDescription) DomainName() string { + // Replace all '.'s with "-"s to convert to a DNS-1035 label + return strings.Replace(w.DeploymentName, ".", "-", -1) +} + +// CustomResourceDefinitions declares all of the CRDs managed or required by +// an operator being ran by ClusterServiceVersion. +// +// If the CRD is present in the Owned list, it is implicitly required. +// +k8s:openapi-gen=true +type CustomResourceDefinitions struct { + Owned []CRDDescription `json:"owned,omitempty"` + Required []CRDDescription `json:"required,omitempty"` +} + +// APIServiceDefinitions declares all of the extension apis managed or required by +// an operator being ran by ClusterServiceVersion. +// +k8s:openapi-gen=true +type APIServiceDefinitions struct { + Owned []APIServiceDescription `json:"owned,omitempty"` + Required []APIServiceDescription `json:"required,omitempty"` +} + +// ClusterServiceVersionSpec declarations tell OLM how to install an operator +// that can manage apps for a given version. +// +k8s:openapi-gen=true +type ClusterServiceVersionSpec struct { + InstallStrategy NamedInstallStrategy `json:"install"` + Version version.OperatorVersion `json:"version,omitempty"` + Maturity string `json:"maturity,omitempty"` + CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"` + APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"` + WebhookDefinitions []WebhookDescription `json:"webhookdefinitions,omitempty"` + NativeAPIs []metav1.GroupVersionKind `json:"nativeAPIs,omitempty"` + MinKubeVersion string `json:"minKubeVersion,omitempty"` + + // The name of the operator in display format. + DisplayName string `json:"displayName"` + + // Description of the operator. Can include the features, limitations or use-cases of the + // operator. + // +optional + Description string `json:"description,omitempty"` + + // A list of keywords describing the operator. + // +optional + Keywords []string `json:"keywords,omitempty"` + + // A list of organizational entities maintaining the operator. + // +optional + Maintainers []Maintainer `json:"maintainers,omitempty"` + + // The publishing entity behind the operator. + // +optional + Provider AppLink `json:"provider,omitempty"` + + // A list of links related to the operator. + // +optional + Links []AppLink `json:"links,omitempty"` + + // The icon for this operator. + // +optional + Icon []Icon `json:"icon,omitempty"` + + // InstallModes specify supported installation types + // +optional + InstallModes []InstallMode `json:"installModes,omitempty"` + + // The name of a CSV this one replaces. Should match the `metadata.Name` field of the old CSV. + // +optional + Replaces string `json:"replaces,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. 
+ // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // Label selector for related resources. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Cleanup specifies the cleanup behaviour when the CSV gets deleted + // +optional + Cleanup CleanupSpec `json:"cleanup,omitempty"` + + // The name(s) of one or more CSV(s) that should be skipped in the upgrade graph. + // Should match the `metadata.Name` field of the CSV that should be skipped. + // This field is only used during catalog creation and plays no part in cluster runtime. + // +optional + Skips []string `json:"skips,omitempty"` + + // List any related images, or other container images that your Operator might require to perform their functions. + // This list should also include operand images as well. All image references should be specified by + // digest (SHA) and not by tag. This field is only used during catalog creation and plays no part in cluster runtime. + // +optional + RelatedImages []RelatedImage `json:"relatedImages,omitempty"` +} + +// +k8s:openapi-gen=true +type CleanupSpec struct { + Enabled bool `json:"enabled"` +} + +// +k8s:openapi-gen=true +type Maintainer struct { + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` +} + +// +k8s:openapi-gen=true +type AppLink struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// +k8s:openapi-gen=true +type Icon struct { + Data string `json:"base64data"` + MediaType string `json:"mediatype"` +} + +// +k8s:openapi-gen=true +type RelatedImage struct { + Name string `json:"name"` + Image string `json:"image"` +} + +// ClusterServiceVersionPhase is a label for the condition of a ClusterServiceVersion at the current time. +type ClusterServiceVersionPhase string + +// These are the valid phases of ClusterServiceVersion +const ( + CSVPhaseNone = "" + // CSVPhasePending means the csv has been accepted by the system, but the install strategy has not been attempted. + // This is likely because there are unmet requirements. + CSVPhasePending ClusterServiceVersionPhase = "Pending" + // CSVPhaseInstallReady means that the requirements are met but the install strategy has not been run. + CSVPhaseInstallReady ClusterServiceVersionPhase = "InstallReady" + // CSVPhaseInstalling means that the install strategy has been initiated but not completed. + CSVPhaseInstalling ClusterServiceVersionPhase = "Installing" + // CSVPhaseSucceeded means that the resources in the CSV were created successfully. + CSVPhaseSucceeded ClusterServiceVersionPhase = "Succeeded" + // CSVPhaseFailed means that the install strategy could not be successfully completed. + CSVPhaseFailed ClusterServiceVersionPhase = "Failed" + // CSVPhaseUnknown means that for some reason the state of the csv could not be obtained. + CSVPhaseUnknown ClusterServiceVersionPhase = "Unknown" + // CSVPhaseReplacing means that a newer CSV has been created and the csv's resources will be transitioned to a new owner. 
+ CSVPhaseReplacing ClusterServiceVersionPhase = "Replacing" + // CSVPhaseDeleting means that a CSV has been replaced by a new one and will be checked for safety before being deleted + CSVPhaseDeleting ClusterServiceVersionPhase = "Deleting" + // CSVPhaseAny matches all other phases in CSV queries + CSVPhaseAny ClusterServiceVersionPhase = "" +) + +// ConditionReason is a camelcased reason for the state transition +type ConditionReason string + +const ( + CSVReasonRequirementsUnknown ConditionReason = "RequirementsUnknown" + CSVReasonRequirementsNotMet ConditionReason = "RequirementsNotMet" + CSVReasonRequirementsMet ConditionReason = "AllRequirementsMet" + CSVReasonOwnerConflict ConditionReason = "OwnerConflict" + CSVReasonComponentFailed ConditionReason = "InstallComponentFailed" + CSVReasonComponentFailedNoRetry ConditionReason = "InstallComponentFailedNoRetry" + CSVReasonInvalidStrategy ConditionReason = "InvalidInstallStrategy" + CSVReasonWaiting ConditionReason = "InstallWaiting" + CSVReasonInstallSuccessful ConditionReason = "InstallSucceeded" + CSVReasonInstallCheckFailed ConditionReason = "InstallCheckFailed" + CSVReasonComponentUnhealthy ConditionReason = "ComponentUnhealthy" + CSVReasonBeingReplaced ConditionReason = "BeingReplaced" + CSVReasonReplaced ConditionReason = "Replaced" + CSVReasonNeedsReinstall ConditionReason = "NeedsReinstall" + CSVReasonNeedsCertRotation ConditionReason = "NeedsCertRotation" + CSVReasonAPIServiceResourceIssue ConditionReason = "APIServiceResourceIssue" + CSVReasonAPIServiceResourcesNeedReinstall ConditionReason = "APIServiceResourcesNeedReinstall" + CSVReasonAPIServiceInstallFailed ConditionReason = "APIServiceInstallFailed" + CSVReasonCopied ConditionReason = "Copied" + CSVReasonInvalidInstallModes ConditionReason = "InvalidInstallModes" + CSVReasonNoTargetNamespaces ConditionReason = "NoTargetNamespaces" + CSVReasonUnsupportedOperatorGroup ConditionReason = "UnsupportedOperatorGroup" + CSVReasonNoOperatorGroup ConditionReason = "NoOperatorGroup" + CSVReasonTooManyOperatorGroups ConditionReason = "TooManyOperatorGroups" + CSVReasonInterOperatorGroupOwnerConflict ConditionReason = "InterOperatorGroupOwnerConflict" + CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs" + CSVReasonDetectedClusterChange ConditionReason = "DetectedClusterChange" + CSVReasonInvalidWebhookDescription ConditionReason = "InvalidWebhookDescription" + CSVReasonOperatorConditionNotUpgradeable ConditionReason = "OperatorConditionNotUpgradeable" + CSVReasonWaitingForCleanupToComplete ConditionReason = "WaitingOnCleanup" +) + +// HasCaResources returns true if the CSV has owned APIServices or Webhooks. +func (c *ClusterServiceVersion) HasCAResources() bool { + // Return early if there are no owned APIServices + if len(c.Spec.APIServiceDefinitions.Owned)+len(c.Spec.WebhookDefinitions) == 0 { + return false + } + return true +} + +// Conditions appear in the status as a record of state transitions on the ClusterServiceVersion +// +k8s:openapi-gen=true +type ClusterServiceVersionCondition struct { + // Condition of the ClusterServiceVersion + Phase ClusterServiceVersionPhase `json:"phase,omitempty"` + // A human readable message indicating details about why the ClusterServiceVersion is in this condition. + // +optional + Message string `json:"message,omitempty"` + // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state. + // e.g. 
'RequirementsNotMet' + // +optional + Reason ConditionReason `json:"reason,omitempty"` + // Last time we updated the status + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the status transitioned from one status to another. + // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// OwnsCRD determines whether the current CSV owns a particular CRD. +func (csv ClusterServiceVersion) OwnsCRD(name string) bool { + for _, desc := range csv.Spec.CustomResourceDefinitions.Owned { + if desc.Name == name { + return true + } + } + + return false +} + +// OwnsAPIService determines whether the current CSV owns a particular APIService. +func (csv ClusterServiceVersion) OwnsAPIService(name string) bool { + for _, desc := range csv.Spec.APIServiceDefinitions.Owned { + apiServiceName := fmt.Sprintf("%s.%s", desc.Version, desc.Group) + if apiServiceName == name { + return true + } + } + + return false +} + +// StatusReason is a camelcased reason for the status of a RequirementStatus or DependentStatus +type StatusReason string + +const ( + RequirementStatusReasonPresent StatusReason = "Present" + RequirementStatusReasonNotPresent StatusReason = "NotPresent" + RequirementStatusReasonPresentNotSatisfied StatusReason = "PresentNotSatisfied" + // The CRD is present but the Established condition is False (not available) + RequirementStatusReasonNotAvailable StatusReason = "PresentNotAvailable" + DependentStatusReasonSatisfied StatusReason = "Satisfied" + DependentStatusReasonNotSatisfied StatusReason = "NotSatisfied" +) + +// DependentStatus is the status for a dependent requirement (to prevent infinite nesting) +// +k8s:openapi-gen=true +type DependentStatus struct { + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Status StatusReason `json:"status"` + UUID string `json:"uuid,omitempty"` + Message string `json:"message,omitempty"` +} + +// +k8s:openapi-gen=true +type RequirementStatus struct { + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Name string `json:"name"` + Status StatusReason `json:"status"` + Message string `json:"message"` + UUID string `json:"uuid,omitempty"` + Dependents []DependentStatus `json:"dependents,omitempty"` +} + +// ClusterServiceVersionStatus represents information about the status of a CSV. Status may trail the actual +// state of a system. +// +k8s:openapi-gen=true +type ClusterServiceVersionStatus struct { + // Current condition of the ClusterServiceVersion + Phase ClusterServiceVersionPhase `json:"phase,omitempty"` + // A human readable message indicating details about why the ClusterServiceVersion is in this condition. + // +optional + Message string `json:"message,omitempty"` + // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state. + // e.g. 'RequirementsNotMet' + // +optional + Reason ConditionReason `json:"reason,omitempty"` + // Last time we updated the status + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the status transitioned from one status to another. 
+ // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + // List of conditions, a history of state transitions + Conditions []ClusterServiceVersionCondition `json:"conditions,omitempty"` + // The status of each requirement for this CSV + RequirementStatus []RequirementStatus `json:"requirementStatus,omitempty"` + // Last time the owned APIService certs were updated + // +optional + CertsLastUpdated *metav1.Time `json:"certsLastUpdated,omitempty"` + // Time the owned APIService certs will rotate next + // +optional + CertsRotateAt *metav1.Time `json:"certsRotateAt,omitempty"` + // CleanupStatus represents information about the status of cleanup while a CSV is pending deletion + // +optional + Cleanup CleanupStatus `json:"cleanup,omitempty"` +} + +// CleanupStatus represents information about the status of cleanup while a CSV is pending deletion +// +k8s:openapi-gen=true +type CleanupStatus struct { + // PendingDeletion is the list of custom resource objects that are pending deletion and blocked on finalizers. + // This indicates the progress of cleanup that is blocking CSV deletion or operator uninstall. + // +optional + PendingDeletion []ResourceList `json:"pendingDeletion,omitempty"` +} + +// ResourceList represents a list of resources which are of the same Group/Kind +// +k8s:openapi-gen=true +type ResourceList struct { + Group string `json:"group"` + Kind string `json:"kind"` + Instances []ResourceInstance `json:"instances"` +} + +// +k8s:openapi-gen=true +type ResourceInstance struct { + Name string `json:"name"` + // Namespace can be empty for cluster-scoped resources + Namespace string `json:"namespace,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName={csv, csvs},categories=olm +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The name of the CSV" +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,description="The version of the CSV" +// +kubebuilder:printcolumn:name="Replaces",type=string,JSONPath=`.spec.replaces`,description="The name of a CSV that this one replaces" +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` + +// ClusterServiceVersion is a Custom Resource of type `ClusterServiceVersionSpec`. +type ClusterServiceVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ClusterServiceVersionSpec `json:"spec"` + // +optional + Status ClusterServiceVersionStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterServiceVersionList represents a list of ClusterServiceVersions. +type ClusterServiceVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ClusterServiceVersion `json:"items"` +} + +// GetAllCRDDescriptions returns a deduplicated set of CRDDescriptions that is +// the union of the owned and required CRDDescriptions. +// +// Descriptions with the same name prefer the value in Owned. +// Descriptions are returned in alphabetical order. 
+func (csv ClusterServiceVersion) GetAllCRDDescriptions() []CRDDescription { + set := make(map[string]CRDDescription) + for _, required := range csv.Spec.CustomResourceDefinitions.Required { + set[required.Name] = required + } + + for _, owned := range csv.Spec.CustomResourceDefinitions.Owned { + set[owned.Name] = owned + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]CRDDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} + +// GetAllAPIServiceDescriptions returns a deduplicated set of APIServiceDescriptions that is +// the union of the owned and required APIServiceDescriptions. +// +// Descriptions with the same name prefer the value in Owned. +// Descriptions are returned in alphabetical order. +func (csv ClusterServiceVersion) GetAllAPIServiceDescriptions() []APIServiceDescription { + set := make(map[string]APIServiceDescription) + for _, required := range csv.Spec.APIServiceDefinitions.Required { + name := fmt.Sprintf("%s.%s", required.Version, required.Group) + set[name] = required + } + + for _, owned := range csv.Spec.APIServiceDefinitions.Owned { + name := fmt.Sprintf("%s.%s", owned.Version, owned.Group) + set[name] = owned + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]APIServiceDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} + +// GetRequiredAPIServiceDescriptions returns a deduplicated set of required APIServiceDescriptions +// with the intersection of required and owned removed +// Equivalent to the set subtraction required - owned +// +// Descriptions are returned in alphabetical order. +func (csv ClusterServiceVersion) GetRequiredAPIServiceDescriptions() []APIServiceDescription { + set := make(map[string]APIServiceDescription) + for _, required := range csv.Spec.APIServiceDefinitions.Required { + name := fmt.Sprintf("%s.%s", required.Version, required.Group) + set[name] = required + } + + // Remove any shared owned from the set + for _, owned := range csv.Spec.APIServiceDefinitions.Owned { + name := fmt.Sprintf("%s.%s", owned.Version, owned.Group) + if _, ok := set[name]; ok { + delete(set, name) + } + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]APIServiceDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} + +// GetOwnedAPIServiceDescriptions returns a deduplicated set of owned APIServiceDescriptions +// +// Descriptions are returned in alphabetical order. 
+func (csv ClusterServiceVersion) GetOwnedAPIServiceDescriptions() []APIServiceDescription { + set := make(map[string]APIServiceDescription) + for _, owned := range csv.Spec.APIServiceDefinitions.Owned { + name := owned.GetName() + set[name] = owned + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]APIServiceDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go new file mode 100644 index 000000000..74bc9b819 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +groupName=operators.coreos.com +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators + +// Package v1alpha1 contains resources types for version v1alpha1 of the operators.coreos.com API group. +package v1alpha1 diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go new file mode 100644 index 000000000..09deba525 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go @@ -0,0 +1,389 @@ +package v1alpha1 + +import ( + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + InstallPlanKind = "InstallPlan" + InstallPlanAPIVersion = GroupName + "/" + GroupVersion +) + +// Approval is the user approval policy for an InstallPlan. +// It must be one of "Automatic" or "Manual". +type Approval string + +const ( + ApprovalAutomatic Approval = "Automatic" + ApprovalManual Approval = "Manual" +) + +// InstallPlanSpec defines a set of Application resources to be installed +type InstallPlanSpec struct { + CatalogSource string `json:"source,omitempty"` + CatalogSourceNamespace string `json:"sourceNamespace,omitempty"` + ClusterServiceVersionNames []string `json:"clusterServiceVersionNames"` + Approval Approval `json:"approval"` + Approved bool `json:"approved"` + Generation int `json:"generation,omitempty"` +} + +// InstallPlanPhase is the current status of a InstallPlan as a whole. +type InstallPlanPhase string + +const ( + InstallPlanPhaseNone InstallPlanPhase = "" + InstallPlanPhasePlanning InstallPlanPhase = "Planning" + InstallPlanPhaseRequiresApproval InstallPlanPhase = "RequiresApproval" + InstallPlanPhaseInstalling InstallPlanPhase = "Installing" + InstallPlanPhaseComplete InstallPlanPhase = "Complete" + InstallPlanPhaseFailed InstallPlanPhase = "Failed" +) + +// InstallPlanConditionType describes the state of an InstallPlan at a certain point as a whole. +type InstallPlanConditionType string + +const ( + InstallPlanResolved InstallPlanConditionType = "Resolved" + InstallPlanInstalled InstallPlanConditionType = "Installed" +) + +// ConditionReason is a camelcased reason for the state transition. 
+type InstallPlanConditionReason string + +const ( + InstallPlanReasonPlanUnknown InstallPlanConditionReason = "PlanUnknown" + InstallPlanReasonInstallCheckFailed InstallPlanConditionReason = "InstallCheckFailed" + InstallPlanReasonDependencyConflict InstallPlanConditionReason = "DependenciesConflict" + InstallPlanReasonComponentFailed InstallPlanConditionReason = "InstallComponentFailed" +) + +// StepStatus is the current status of a particular resource an in +// InstallPlan +type StepStatus string + +const ( + StepStatusUnknown StepStatus = "Unknown" + StepStatusNotPresent StepStatus = "NotPresent" + StepStatusPresent StepStatus = "Present" + StepStatusCreated StepStatus = "Created" + StepStatusNotCreated StepStatus = "NotCreated" + StepStatusWaitingForAPI StepStatus = "WaitingForApi" + StepStatusUnsupportedResource StepStatus = "UnsupportedResource" +) + +// ErrInvalidInstallPlan is the error returned by functions that operate on +// InstallPlans when the InstallPlan does not contain totally valid data. +var ErrInvalidInstallPlan = errors.New("the InstallPlan contains invalid data") + +// InstallPlanStatus represents the information about the status of +// steps required to complete installation. +// +// Status may trail the actual state of a system. +type InstallPlanStatus struct { + Phase InstallPlanPhase `json:"phase"` + Conditions []InstallPlanCondition `json:"conditions,omitempty"` + CatalogSources []string `json:"catalogSources"` + Plan []*Step `json:"plan,omitempty"` + // BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster. + // +optional + BundleLookups []BundleLookup `json:"bundleLookups,omitempty"` + // AttenuatedServiceAccountRef references the service account that is used + // to do scoped operator install. + AttenuatedServiceAccountRef *corev1.ObjectReference `json:"attenuatedServiceAccountRef,omitempty"` + + // StartTime is the time when the controller began applying + // the resources listed in the plan to the cluster. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty"` + + // Message is a human-readable message containing detailed + // information that may be important to understanding why the + // plan has its current status. + // +optional + Message string `json:"message,omitempty"` +} + +// InstallPlanCondition represents the overall status of the execution of +// an InstallPlan. +type InstallPlanCondition struct { + Type InstallPlanConditionType `json:"type,omitempty"` + Status corev1.ConditionStatus `json:"status,omitempty"` // True, False, or Unknown + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason InstallPlanConditionReason `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +// allow overwriting `now` function for deterministic tests +var now = metav1.Now + +// GetCondition returns the InstallPlanCondition of the given type if it exists in the InstallPlanStatus' Conditions. +// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. +func (s InstallPlanStatus) GetCondition(conditionType InstallPlanConditionType) InstallPlanCondition { + for _, cond := range s.Conditions { + if cond.Type == conditionType { + return cond + } + } + + return InstallPlanCondition{ + Type: conditionType, + Status: corev1.ConditionUnknown, + } +} + +// SetCondition adds or updates a condition, using `Type` as merge key. 
+func (s *InstallPlanStatus) SetCondition(cond InstallPlanCondition) InstallPlanCondition { + for i, existing := range s.Conditions { + if existing.Type != cond.Type { + continue + } + if existing.Status == cond.Status { + cond.LastTransitionTime = existing.LastTransitionTime + } + s.Conditions[i] = cond + return cond + } + s.Conditions = append(s.Conditions, cond) + return cond +} + +func OrderSteps(steps []*Step) []*Step { + // CSVs must be applied first + csvList := []*Step{} + + // CRDs must be applied second + crdList := []*Step{} + + // Other resources may be applied in any order + remainingResources := []*Step{} + for _, step := range steps { + switch step.Resource.Kind { + case crdKind: + crdList = append(crdList, step) + case ClusterServiceVersionKind: + csvList = append(csvList, step) + default: + remainingResources = append(remainingResources, step) + } + } + + result := make([]*Step, len(steps)) + i := 0 + + for j := range csvList { + result[i] = csvList[j] + i++ + } + + for j := range crdList { + result[i] = crdList[j] + i++ + } + + for j := range remainingResources { + result[i] = remainingResources[j] + i++ + } + + return result +} + +func (s InstallPlanStatus) NeedsRequeue() bool { + for _, step := range s.Plan { + switch step.Status { + case StepStatusWaitingForAPI: + return true + } + } + + return false +} +func ConditionFailed(cond InstallPlanConditionType, reason InstallPlanConditionReason, message string, now *metav1.Time) InstallPlanCondition { + return InstallPlanCondition{ + Type: cond, + Status: corev1.ConditionFalse, + Reason: reason, + Message: message, + LastUpdateTime: now, + LastTransitionTime: now, + } +} + +func ConditionMet(cond InstallPlanConditionType, now *metav1.Time) InstallPlanCondition { + return InstallPlanCondition{ + Type: cond, + Status: corev1.ConditionTrue, + LastUpdateTime: now, + LastTransitionTime: now, + } +} + +// Step represents the status of an individual step in an InstallPlan. +type Step struct { + Resolving string `json:"resolving"` + Resource StepResource `json:"resource"` + Optional bool `json:"optional,omitempty"` + Status StepStatus `json:"status"` +} + +// BundleLookupConditionType is a category of the overall state of a BundleLookup. +type BundleLookupConditionType string + +const ( + // BundleLookupPending describes BundleLookups that are not complete. + BundleLookupPending BundleLookupConditionType = "BundleLookupPending" + + // BundleLookupFailed describes conditions types for when BundleLookups fail + BundleLookupFailed BundleLookupConditionType = "BundleLookupFailed" + + crdKind = "CustomResourceDefinition" +) + +type BundleLookupCondition struct { + // Type of condition. + Type BundleLookupConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty"` + // Last time the condition was probed. + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// BundleLookup is a request to pull and unpackage the content of a bundle to the cluster. +type BundleLookup struct { + // Path refers to the location of a bundle to pull. 
+ // It's typically an image reference. + Path string `json:"path"` + // Identifier is the catalog-unique name of the operator (the name of the CSV for bundles that contain CSVs) + Identifier string `json:"identifier"` + // Replaces is the name of the bundle to replace with the one found at Path. + Replaces string `json:"replaces"` + // CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from. + CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"` + // Conditions represents the overall state of a BundleLookup. + // +optional + Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // The effective properties of the unpacked bundle. + // +optional + Properties string `json:"properties,omitempty"` +} + +// GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions. +// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. +func (b BundleLookup) GetCondition(conditionType BundleLookupConditionType) BundleLookupCondition { + for _, cond := range b.Conditions { + if cond.Type == conditionType { + return cond + } + } + + return BundleLookupCondition{ + Type: conditionType, + Status: corev1.ConditionUnknown, + } +} + +// RemoveCondition removes the BundleLookupCondition of the given type from the BundleLookup's Conditions if it exists. +func (b *BundleLookup) RemoveCondition(conditionType BundleLookupConditionType) { + for i, cond := range b.Conditions { + if cond.Type == conditionType { + b.Conditions = append(b.Conditions[:i], b.Conditions[i+1:]...) + if len(b.Conditions) == 0 { + b.Conditions = nil + } + return + } + } +} + +// SetCondition replaces the existing BundleLookupCondition of the same type, or adds it if it was not found. +func (b *BundleLookup) SetCondition(cond BundleLookupCondition) BundleLookupCondition { + for i, existing := range b.Conditions { + if existing.Type != cond.Type { + continue + } + if existing.Status == cond.Status { + cond.LastTransitionTime = existing.LastTransitionTime + } + b.Conditions[i] = cond + return cond + } + b.Conditions = append(b.Conditions, cond) + + return cond +} + +func (s *Step) String() string { + return fmt.Sprintf("%s: %s (%s)", s.Resolving, s.Resource, s.Status) +} + +// StepResource represents the status of a resource to be tracked by an +// InstallPlan. +type StepResource struct { + CatalogSource string `json:"sourceName"` + CatalogSourceNamespace string `json:"sourceNamespace"` + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Name string `json:"name"` + Manifest string `json:"manifest,omitempty"` +} + +func (r StepResource) String() string { + return fmt.Sprintf("%s[%s/%s/%s (%s/%s)]", r.Name, r.Group, r.Version, r.Kind, r.CatalogSource, r.CatalogSourceNamespace) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +kubebuilder:resource:shortName=ip,categories=olm +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="CSV",type=string,JSONPath=`.spec.clusterServiceVersionNames[0]`,description="The first CSV in the list of clusterServiceVersionNames" +// +kubebuilder:printcolumn:name="Approval",type=string,JSONPath=`.spec.approval`,description="The approval mode" +// +kubebuilder:printcolumn:name="Approved",type=boolean,JSONPath=`.spec.approved` + +// InstallPlan defines the installation of a set of operators. 
+type InstallPlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec InstallPlanSpec `json:"spec"` + // +optional + Status InstallPlanStatus `json:"status"` +} + +// EnsureCatalogSource ensures that a CatalogSource is present in the Status +// block of an InstallPlan. +func (p *InstallPlan) EnsureCatalogSource(sourceName string) { + for _, srcName := range p.Status.CatalogSources { + if srcName == sourceName { + return + } + } + + p.Status.CatalogSources = append(p.Status.CatalogSources, sourceName) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InstallPlanList is a list of InstallPlan resources. +type InstallPlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []InstallPlan `json:"items"` +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go new file mode 100644 index 000000000..f1cd86f1a --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go @@ -0,0 +1,55 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/operator-framework/api/pkg/operators" +) + +const ( + // GroupName is the group name used in this package. + GroupName = operators.GroupName + // GroupVersion is the group version used in this package. + GroupVersion = "v1alpha1" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder initializes a scheme builder + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme + + // localSchemeBuilder is expected by generated conversion functions + localSchemeBuilder = &SchemeBuilder +) + +// addKnownTypes adds the list of known types to Scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CatalogSource{}, + &CatalogSourceList{}, + &InstallPlan{}, + &InstallPlanList{}, + &Subscription{}, + &SubscriptionList{}, + &ClusterServiceVersion{}, + &ClusterServiceVersionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go new file mode 100644 index 000000000..2452f9a1c --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go @@ -0,0 +1,340 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + SubscriptionKind = "Subscription" + SubscriptionCRDAPIVersion = GroupName + "/" + 
GroupVersion
+)
+
+// SubscriptionState tracks whether an update is available, being installed, or whether the installed service is up to date.
+type SubscriptionState string
+
+const (
+	SubscriptionStateNone             = ""
+	SubscriptionStateFailed           = "UpgradeFailed"
+	SubscriptionStateUpgradeAvailable = "UpgradeAvailable"
+	SubscriptionStateUpgradePending   = "UpgradePending"
+	SubscriptionStateAtLatest         = "AtLatestKnown"
+)
+
+const (
+	SubscriptionReasonInvalidCatalog   ConditionReason = "InvalidCatalog"
+	SubscriptionReasonUpgradeSucceeded ConditionReason = "UpgradeSucceeded"
+)
+
+// SubscriptionSpec defines an application that can be installed.
+type SubscriptionSpec struct {
+	CatalogSource          string              `json:"source"`
+	CatalogSourceNamespace string              `json:"sourceNamespace"`
+	Package                string              `json:"name"`
+	Channel                string              `json:"channel,omitempty"`
+	StartingCSV            string              `json:"startingCSV,omitempty"`
+	InstallPlanApproval    Approval            `json:"installPlanApproval,omitempty"`
+	Config                 *SubscriptionConfig `json:"config,omitempty"`
+}
+
+// SubscriptionConfig contains configuration specified for a subscription.
+type SubscriptionConfig struct {
+	// Selector is the label selector for pods to be configured.
+	// Existing ReplicaSets whose pods are selected by this will be the ones
+	// affected by this deployment. It must match the pod template's labels.
+	Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations are the pod's tolerations.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// Resources represents compute resources required by this container.
+	// Immutable.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+	// +optional
+	Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+
+	// EnvFrom is a list of sources to populate environment variables in the container.
+	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Immutable.
+	// +optional
+	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
+	// Env is a list of environment variables to set in the container.
+	// Cannot be updated.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +optional
+	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge"`
+
+	// Volumes is a list of volumes to set in the podSpec.
+	// +optional
+	Volumes []corev1.Volume `json:"volumes,omitempty"`
+
+	// VolumeMounts is a list of volume mounts to set in the container.
+	// +optional
+	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
+
+	// If specified, overrides the pod's scheduling constraints.
+	// nil sub-attributes will *not* override the original values in the pod.spec for those sub-attributes.
+	// Use empty object ({}) to erase original sub-attribute values.
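+	// For example (values are illustrative), clearing any nodeAffinity from the
+	// original pod spec while leaving the other affinity sub-attributes untouched:
+	//
+	//   spec:
+	//     config:
+	//       affinity:
+	//         nodeAffinity: {}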
+ // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` +} + +// SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true" +// polarity form (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties). +type SubscriptionConditionType string + +const ( + // SubscriptionCatalogSourcesUnhealthy indicates that some or all of the CatalogSources to be used in resolution are unhealthy. + SubscriptionCatalogSourcesUnhealthy SubscriptionConditionType = "CatalogSourcesUnhealthy" + + // SubscriptionInstallPlanMissing indicates that a Subscription's InstallPlan is missing. + SubscriptionInstallPlanMissing SubscriptionConditionType = "InstallPlanMissing" + + // SubscriptionInstallPlanPending indicates that a Subscription's InstallPlan is pending installation. + SubscriptionInstallPlanPending SubscriptionConditionType = "InstallPlanPending" + + // SubscriptionInstallPlanFailed indicates that the installation of a Subscription's InstallPlan has failed. + SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed" + + // SubscriptionResolutionFailed indicates that the dependency resolution in the namespace in which the subscription is created has failed + SubscriptionResolutionFailed SubscriptionConditionType = "ResolutionFailed" + + // SubscriptionBundleUnpacking indicates that the unpack job is currently running + SubscriptionBundleUnpacking SubscriptionConditionType = "BundleUnpacking" + + // SubscriptionBundleUnpackFailed indicates that the unpack job failed + SubscriptionBundleUnpackFailed SubscriptionConditionType = "BundleUnpackFailed" +) + +const ( + // NoCatalogSourcesFound is a reason string for Subscriptions with unhealthy CatalogSources due to none being available. + NoCatalogSourcesFound = "NoCatalogSourcesFound" + + // AllCatalogSourcesHealthy is a reason string for Subscriptions that transitioned due to all CatalogSources being healthy. + AllCatalogSourcesHealthy = "AllCatalogSourcesHealthy" + + // CatalogSourcesAdded is a reason string for Subscriptions that transitioned due to CatalogSources being added. + CatalogSourcesAdded = "CatalogSourcesAdded" + + // CatalogSourcesUpdated is a reason string for Subscriptions that transitioned due to CatalogSource being updated. + CatalogSourcesUpdated = "CatalogSourcesUpdated" + + // CatalogSourcesDeleted is a reason string for Subscriptions that transitioned due to CatalogSources being removed. + CatalogSourcesDeleted = "CatalogSourcesDeleted" + + // UnhealthyCatalogSourceFound is a reason string for Subscriptions that transitioned because an unhealthy CatalogSource was found. + UnhealthyCatalogSourceFound = "UnhealthyCatalogSourceFound" + + // ReferencedInstallPlanNotFound is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being found. + ReferencedInstallPlanNotFound = "ReferencedInstallPlanNotFound" + + // InstallPlanNotYetReconciled is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being reconciled yet. + InstallPlanNotYetReconciled = "InstallPlanNotYetReconciled" + + // InstallPlanFailed is a reason string for Subscriptions that transitioned due to a referenced InstallPlan failing without setting an explicit failure condition. 
+	InstallPlanFailed = "InstallPlanFailed"
+)
+
+// SubscriptionCondition represents the latest available observations of a Subscription's state.
+type SubscriptionCondition struct {
+	// Type is the type of Subscription condition.
+	Type SubscriptionConditionType `json:"type" description:"type of Subscription condition"`
+
+	// Status is the status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
+
+	// Reason is a one-word CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
+
+	// Message is a human-readable message indicating details about the last transition.
+	// +optional
+	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
+
+	// LastHeartbeatTime is the last time we got an update on a given condition.
+	// +optional
+	LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"`
+
+	// LastTransitionTime is the last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transitioned from one status to another" hash:"ignore"`
+}
+
+// Equals returns true if a SubscriptionCondition equals the one given, false otherwise.
+// Equality is determined by the equality of the type, status, reason, and message fields ONLY.
+func (s SubscriptionCondition) Equals(condition SubscriptionCondition) bool {
+	return s.Type == condition.Type && s.Status == condition.Status && s.Reason == condition.Reason && s.Message == condition.Message
+}
+
+// SubscriptionStatus describes the observed state of a Subscription.
+type SubscriptionStatus struct {
+	// CurrentCSV is the CSV the Subscription is progressing to.
+	// +optional
+	CurrentCSV string `json:"currentCSV,omitempty"`
+
+	// InstalledCSV is the CSV currently installed by the Subscription.
+	// +optional
+	InstalledCSV string `json:"installedCSV,omitempty"`
+
+	// Install is a reference to the latest InstallPlan generated for the Subscription.
+	// DEPRECATED: use InstallPlanRef instead.
+	// +optional
+	Install *InstallPlanReference `json:"installplan,omitempty"`
+
+	// State represents the current state of the Subscription.
+	// +optional
+	State SubscriptionState `json:"state,omitempty"`
+
+	// Reason is the reason the Subscription was transitioned to its current state.
+	// +optional
+	Reason ConditionReason `json:"reason,omitempty"`
+
+	// InstallPlanGeneration is the current generation of the InstallPlan.
+	// +optional
+	InstallPlanGeneration int `json:"installPlanGeneration,omitempty"`
+
+	// InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV.
+	// +optional
+	InstallPlanRef *corev1.ObjectReference `json:"installPlanRef,omitempty"`
+
+	// CatalogHealth contains the Subscription's view of its relevant CatalogSources' status.
+	// It is used to determine SubscriptionStatusConditions related to CatalogSources.
+	// +optional
+	CatalogHealth []SubscriptionCatalogHealth `json:"catalogHealth,omitempty"`
+
+	// Conditions is a list of the latest available observations about a Subscription's current state.
+	// +optional
+	Conditions []SubscriptionCondition `json:"conditions,omitempty" hash:"set"`
+
+	// LastUpdated represents the last time that the Subscription status was updated.
+	LastUpdated metav1.Time `json:"lastUpdated"`
+}
+
+// GetCondition returns the SubscriptionCondition of the given type if it exists in the SubscriptionStatus' Conditions.
+// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
+func (s SubscriptionStatus) GetCondition(conditionType SubscriptionConditionType) SubscriptionCondition {
+	for _, cond := range s.Conditions {
+		if cond.Type == conditionType {
+			return cond
+		}
+	}
+
+	return SubscriptionCondition{
+		Type:   conditionType,
+		Status: corev1.ConditionUnknown,
+	}
+}
+
+// SetCondition sets the given SubscriptionCondition in the SubscriptionStatus' Conditions.
+func (s *SubscriptionStatus) SetCondition(condition SubscriptionCondition) {
+	for i, cond := range s.Conditions {
+		if cond.Type == condition.Type {
+			s.Conditions[i] = condition
+			return
+		}
+	}
+
+	s.Conditions = append(s.Conditions, condition)
+}
+
+// RemoveConditions removes any conditions of the given types from the SubscriptionStatus' Conditions.
+func (s *SubscriptionStatus) RemoveConditions(remove ...SubscriptionConditionType) {
+	exclusions := map[SubscriptionConditionType]struct{}{}
+	for _, r := range remove {
+		exclusions[r] = struct{}{}
+	}
+
+	var filtered []SubscriptionCondition
+	for _, cond := range s.Conditions {
+		if _, ok := exclusions[cond.Type]; ok {
+			// Skip excluded condition types.
+			continue
+		}
+		filtered = append(filtered, cond)
+	}
+
+	s.Conditions = filtered
+}
+
+// InstallPlanReference references an InstallPlan by API version, kind, name, and UID.
+type InstallPlanReference struct {
+	APIVersion string `json:"apiVersion"`
+	Kind       string `json:"kind"`
+	Name       string `json:"name"`
+	// Note: the UID is serialized under the JSON key "uuid".
+	UID types.UID `json:"uuid"`
+}
+
+// SubscriptionCatalogHealth describes the health of a CatalogSource the Subscription knows about.
+type SubscriptionCatalogHealth struct {
+	// CatalogSourceRef is a reference to a CatalogSource.
+	CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
+
+	// LastUpdated represents the last time that the CatalogSourceHealth changed.
+	LastUpdated *metav1.Time `json:"lastUpdated"`
+
+	// Healthy is true if the CatalogSource is healthy; false otherwise.
+	Healthy bool `json:"healthy"`
+}
+
+// Equals returns true if a SubscriptionCatalogHealth equals the one given, false otherwise.
+// Equality is based SOLELY on health and UID.
+func (s SubscriptionCatalogHealth) Equals(health SubscriptionCatalogHealth) bool {
+	return s.Healthy == health.Healthy && s.CatalogSourceRef.UID == health.CatalogSourceRef.UID
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName={sub, subs},categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Package",type=string,JSONPath=`.spec.name`,description="The package subscribed to"
+// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.source`,description="The catalog source for the specified package"
+// +kubebuilder:printcolumn:name="Channel",type=string,JSONPath=`.spec.channel`,description="The channel of updates to subscribe to"
+
+// Subscription keeps operators up to date by tracking changes to Catalogs.
+type Subscription struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec *SubscriptionSpec `json:"spec"`
+	// +optional
+	Status SubscriptionStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubscriptionList is a list of Subscription resources.
+type SubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Subscription `json:"items"` +} + +// GetInstallPlanApproval gets the configured install plan approval or the default +func (s *Subscription) GetInstallPlanApproval() Approval { + if s.Spec.InstallPlanApproval == ApprovalManual { + return ApprovalManual + } + return ApprovalAutomatic +} + +// NewInstallPlanReference returns an InstallPlanReference for the given ObjectReference. +func NewInstallPlanReference(ref *corev1.ObjectReference) *InstallPlanReference { + return &InstallPlanReference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Name: ref.Name, + UID: ref.UID, + } +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..cf01001f5 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1601 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "encoding/json" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference. +func (in *APIResourceReference) DeepCopy() *APIResourceReference { + if in == nil { + return nil + } + out := new(APIResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) { + *out = *in + if in.Owned != nil { + in, out := &in.Owned, &out.Owned + *out = make([]APIServiceDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]APIServiceDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions. +func (in *APIServiceDefinitions) DeepCopy() *APIServiceDefinitions { + if in == nil { + return nil + } + out := new(APIServiceDefinitions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceReference, len(*in)) + copy(*out, *in) + } + if in.StatusDescriptors != nil { + in, out := &in.StatusDescriptors, &out.StatusDescriptors + *out = make([]StatusDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpecDescriptors != nil { + in, out := &in.SpecDescriptors, &out.SpecDescriptors + *out = make([]SpecDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ActionDescriptor != nil { + in, out := &in.ActionDescriptor, &out.ActionDescriptor + *out = make([]ActionDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription. +func (in *APIServiceDescription) DeepCopy() *APIServiceDescription { + if in == nil { + return nil + } + out := new(APIServiceDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) { + *out = *in + if in.XDescriptors != nil { + in, out := &in.XDescriptors, &out.XDescriptors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor. +func (in *ActionDescriptor) DeepCopy() *ActionDescriptor { + if in == nil { + return nil + } + out := new(ActionDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppLink) DeepCopyInto(out *AppLink) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink. +func (in *AppLink) DeepCopy() *AppLink { + if in == nil { + return nil + } + out := new(AppLink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BundleLookup) DeepCopyInto(out *BundleLookup) { + *out = *in + if in.CatalogSourceRef != nil { + in, out := &in.CatalogSourceRef, &out.CatalogSourceRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]BundleLookupCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup. +func (in *BundleLookup) DeepCopy() *BundleLookup { + if in == nil { + return nil + } + out := new(BundleLookup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition. +func (in *BundleLookupCondition) DeepCopy() *BundleLookupCondition { + if in == nil { + return nil + } + out := new(BundleLookupCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRDDescription) DeepCopyInto(out *CRDDescription) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceReference, len(*in)) + copy(*out, *in) + } + if in.StatusDescriptors != nil { + in, out := &in.StatusDescriptors, &out.StatusDescriptors + *out = make([]StatusDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpecDescriptors != nil { + in, out := &in.SpecDescriptors, &out.SpecDescriptors + *out = make([]SpecDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ActionDescriptor != nil { + in, out := &in.ActionDescriptor, &out.ActionDescriptor + *out = make([]ActionDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription. +func (in *CRDDescription) DeepCopy() *CRDDescription { + if in == nil { + return nil + } + out := new(CRDDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource. +func (in *CatalogSource) DeepCopy() *CatalogSource { + if in == nil { + return nil + } + out := new(CatalogSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CatalogSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CatalogSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList. +func (in *CatalogSourceList) DeepCopy() *CatalogSourceList { + if in == nil { + return nil + } + out := new(CatalogSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CatalogSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { + *out = *in + if in.GrpcPodConfig != nil { + in, out := &in.GrpcPodConfig, &out.GrpcPodConfig + *out = new(GrpcPodConfig) + (*in).DeepCopyInto(*out) + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(UpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Icon = in.Icon +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec. +func (in *CatalogSourceSpec) DeepCopy() *CatalogSourceSpec { + if in == nil { + return nil + } + out := new(CatalogSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { + *out = *in + if in.LatestImageRegistryPoll != nil { + in, out := &in.LatestImageRegistryPoll, &out.LatestImageRegistryPoll + *out = (*in).DeepCopy() + } + if in.ConfigMapResource != nil { + in, out := &in.ConfigMapResource, &out.ConfigMapResource + *out = new(ConfigMapResourceReference) + (*in).DeepCopyInto(*out) + } + if in.RegistryServiceStatus != nil { + in, out := &in.RegistryServiceStatus, &out.RegistryServiceStatus + *out = new(RegistryServiceStatus) + (*in).DeepCopyInto(*out) + } + if in.GRPCConnectionState != nil { + in, out := &in.GRPCConnectionState, &out.GRPCConnectionState + *out = new(GRPCConnectionState) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. +func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus { + if in == nil { + return nil + } + out := new(CatalogSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CleanupSpec) DeepCopyInto(out *CleanupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupSpec. +func (in *CleanupSpec) DeepCopy() *CleanupSpec { + if in == nil { + return nil + } + out := new(CleanupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CleanupStatus) DeepCopyInto(out *CleanupStatus) { + *out = *in + if in.PendingDeletion != nil { + in, out := &in.PendingDeletion, &out.PendingDeletion + *out = make([]ResourceList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupStatus. +func (in *CleanupStatus) DeepCopy() *CleanupStatus { + if in == nil { + return nil + } + out := new(CleanupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion. +func (in *ClusterServiceVersion) DeepCopy() *ClusterServiceVersion { + if in == nil { + return nil + } + out := new(ClusterServiceVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterServiceVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersionCondition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition. +func (in *ClusterServiceVersionCondition) DeepCopy() *ClusterServiceVersionCondition { + if in == nil { + return nil + } + out := new(ClusterServiceVersionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterServiceVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList. +func (in *ClusterServiceVersionList) DeepCopy() *ClusterServiceVersionList { + if in == nil { + return nil + } + out := new(ClusterServiceVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterServiceVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec) { + *out = *in + in.InstallStrategy.DeepCopyInto(&out.InstallStrategy) + in.Version.DeepCopyInto(&out.Version) + in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions) + in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions) + if in.WebhookDefinitions != nil { + in, out := &in.WebhookDefinitions, &out.WebhookDefinitions + *out = make([]WebhookDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NativeAPIs != nil { + in, out := &in.NativeAPIs, &out.NativeAPIs + *out = make([]metav1.GroupVersionKind, len(*in)) + copy(*out, *in) + } + if in.Keywords != nil { + in, out := &in.Keywords, &out.Keywords + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Maintainers != nil { + in, out := &in.Maintainers, &out.Maintainers + *out = make([]Maintainer, len(*in)) + copy(*out, *in) + } + out.Provider = in.Provider + if in.Links != nil { + in, out := &in.Links, &out.Links + *out = make([]AppLink, len(*in)) + copy(*out, *in) + } + if in.Icon != nil { + in, out := &in.Icon, &out.Icon + *out = make([]Icon, len(*in)) + copy(*out, *in) + } + if in.InstallModes != nil { + in, out := &in.InstallModes, &out.InstallModes + *out = make([]InstallMode, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + out.Cleanup = in.Cleanup + if in.Skips != nil { + in, out := &in.Skips, &out.Skips + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RelatedImages != nil { + in, out := &in.RelatedImages, &out.RelatedImages + *out = make([]RelatedImage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec. +func (in *ClusterServiceVersionSpec) DeepCopy() *ClusterServiceVersionSpec { + if in == nil { + return nil + } + out := new(ClusterServiceVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionStatus) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterServiceVersionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequirementStatus != nil { + in, out := &in.RequirementStatus, &out.RequirementStatus + *out = make([]RequirementStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CertsLastUpdated != nil { + in, out := &in.CertsLastUpdated, &out.CertsLastUpdated + *out = (*in).DeepCopy() + } + if in.CertsRotateAt != nil { + in, out := &in.CertsRotateAt, &out.CertsRotateAt + *out = (*in).DeepCopy() + } + in.Cleanup.DeepCopyInto(&out.Cleanup) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus. +func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus { + if in == nil { + return nil + } + out := new(ClusterServiceVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference. +func (in *ConfigMapResourceReference) DeepCopy() *ConfigMapResourceReference { + if in == nil { + return nil + } + out := new(ConfigMapResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions) { + *out = *in + if in.Owned != nil { + in, out := &in.Owned, &out.Owned + *out = make([]CRDDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]CRDDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions. +func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions { + if in == nil { + return nil + } + out := new(CustomResourceDefinitions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DependentStatus) DeepCopyInto(out *DependentStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus. +func (in *DependentStatus) DeepCopy() *DependentStatus { + if in == nil { + return nil + } + out := new(DependentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) { + *out = *in + in.LastConnectTime.DeepCopyInto(&out.LastConnectTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState. +func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState { + if in == nil { + return nil + } + out := new(GRPCConnectionState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig. +func (in *GrpcPodConfig) DeepCopy() *GrpcPodConfig { + if in == nil { + return nil + } + out := new(GrpcPodConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Icon) DeepCopyInto(out *Icon) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon. +func (in *Icon) DeepCopy() *Icon { + if in == nil { + return nil + } + out := new(Icon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallMode) DeepCopyInto(out *InstallMode) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode. +func (in *InstallMode) DeepCopy() *InstallMode { + if in == nil { + return nil + } + out := new(InstallMode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) { + { + in := &in + *out = make(InstallModeSet, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallModeSet. +func (in InstallModeSet) DeepCopy() InstallModeSet { + if in == nil { + return nil + } + out := new(InstallModeSet) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlan) DeepCopyInto(out *InstallPlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan. 
+func (in *InstallPlan) DeepCopy() *InstallPlan { + if in == nil { + return nil + } + out := new(InstallPlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstallPlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition. +func (in *InstallPlanCondition) DeepCopy() *InstallPlanCondition { + if in == nil { + return nil + } + out := new(InstallPlanCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InstallPlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList. +func (in *InstallPlanList) DeepCopy() *InstallPlanList { + if in == nil { + return nil + } + out := new(InstallPlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstallPlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference. +func (in *InstallPlanReference) DeepCopy() *InstallPlanReference { + if in == nil { + return nil + } + out := new(InstallPlanReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) { + *out = *in + if in.ClusterServiceVersionNames != nil { + in, out := &in.ClusterServiceVersionNames, &out.ClusterServiceVersionNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec. +func (in *InstallPlanSpec) DeepCopy() *InstallPlanSpec { + if in == nil { + return nil + } + out := new(InstallPlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]InstallPlanCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CatalogSources != nil { + in, out := &in.CatalogSources, &out.CatalogSources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = make([]*Step, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Step) + **out = **in + } + } + } + if in.BundleLookups != nil { + in, out := &in.BundleLookups, &out.BundleLookups + *out = make([]BundleLookup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AttenuatedServiceAccountRef != nil { + in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus. +func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus { + if in == nil { + return nil + } + out := new(InstallPlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Maintainer) DeepCopyInto(out *Maintainer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer. +func (in *Maintainer) DeepCopy() *Maintainer { + if in == nil { + return nil + } + out := new(Maintainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) { + *out = *in + in.StrategySpec.DeepCopyInto(&out.StrategySpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy. +func (in *NamedInstallStrategy) DeepCopy() *NamedInstallStrategy { + if in == nil { + return nil + } + out := new(NamedInstallStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll. +func (in *RegistryPoll) DeepCopy() *RegistryPoll { + if in == nil { + return nil + } + out := new(RegistryPoll) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) { + *out = *in + in.CreatedAt.DeepCopyInto(&out.CreatedAt) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus. 
+func (in *RegistryServiceStatus) DeepCopy() *RegistryServiceStatus { + if in == nil { + return nil + } + out := new(RegistryServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelatedImage) DeepCopyInto(out *RelatedImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedImage. +func (in *RelatedImage) DeepCopy() *RelatedImage { + if in == nil { + return nil + } + out := new(RelatedImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) { + *out = *in + if in.Dependents != nil { + in, out := &in.Dependents, &out.Dependents + *out = make([]DependentStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus. +func (in *RequirementStatus) DeepCopy() *RequirementStatus { + if in == nil { + return nil + } + out := new(RequirementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceInstance) DeepCopyInto(out *ResourceInstance) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInstance. +func (in *ResourceInstance) DeepCopy() *ResourceInstance { + if in == nil { + return nil + } + out := new(ResourceInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceList) DeepCopyInto(out *ResourceList) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]ResourceInstance, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in *ResourceList) DeepCopy() *ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) { + *out = *in + if in.XDescriptors != nil { + in, out := &in.XDescriptors, &out.XDescriptors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor. +func (in *SpecDescriptor) DeepCopy() *SpecDescriptor { + if in == nil { + return nil + } + out := new(SpecDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) { + *out = *in + if in.XDescriptors != nil { + in, out := &in.XDescriptors, &out.XDescriptors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor. +func (in *StatusDescriptor) DeepCopy() *StatusDescriptor { + if in == nil { + return nil + } + out := new(StatusDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Step) DeepCopyInto(out *Step) { + *out = *in + out.Resource = in.Resource +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. +func (in *Step) DeepCopy() *Step { + if in == nil { + return nil + } + out := new(Step) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepResource) DeepCopyInto(out *StepResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource. +func (in *StepResource) DeepCopy() *StepResource { + if in == nil { + return nil + } + out := new(StepResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPermissions) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]rbacv1.PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions. +func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissions { + if in == nil { + return nil + } + out := new(StrategyDeploymentPermissions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = make(labels.Set, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec. +func (in *StrategyDeploymentSpec) DeepCopy() *StrategyDeploymentSpec { + if in == nil { + return nil + } + out := new(StrategyDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment) { + *out = *in + if in.DeploymentSpecs != nil { + in, out := &in.DeploymentSpecs, &out.DeploymentSpecs + *out = make([]StrategyDeploymentSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]StrategyDeploymentPermissions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterPermissions != nil { + in, out := &in.ClusterPermissions, &out.ClusterPermissions + *out = make([]StrategyDeploymentPermissions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment. +func (in *StrategyDetailsDeployment) DeepCopy() *StrategyDetailsDeployment { + if in == nil { + return nil + } + out := new(StrategyDetailsDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subscription) DeepCopyInto(out *Subscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(SubscriptionSpec) + (*in).DeepCopyInto(*out) + } + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. +func (in *Subscription) DeepCopy() *Subscription { + if in == nil { + return nil + } + out := new(Subscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Subscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth) { + *out = *in + if in.CatalogSourceRef != nil { + in, out := &in.CatalogSourceRef, &out.CatalogSourceRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth. +func (in *SubscriptionCatalogHealth) DeepCopy() *SubscriptionCatalogHealth { + if in == nil { + return nil + } + out := new(SubscriptionCatalogHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) { + *out = *in + if in.LastHeartbeatTime != nil { + in, out := &in.LastHeartbeatTime, &out.LastHeartbeatTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition. 
+func (in *SubscriptionCondition) DeepCopy() *SubscriptionCondition { + if in == nil { + return nil + } + out := new(SubscriptionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. +func (in *SubscriptionConfig) DeepCopy() *SubscriptionConfig { + if in == nil { + return nil + } + out := new(SubscriptionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Subscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. +func (in *SubscriptionList) DeepCopy() *SubscriptionList { + if in == nil { + return nil + } + out := new(SubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(SubscriptionConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. 
+func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec { + if in == nil { + return nil + } + out := new(SubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { + *out = *in + if in.Install != nil { + in, out := &in.Install, &out.Install + *out = new(InstallPlanReference) + **out = **in + } + if in.InstallPlanRef != nil { + in, out := &in.InstallPlanRef, &out.InstallPlanRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.CatalogHealth != nil { + in, out := &in.CatalogHealth, &out.CatalogHealth + *out = make([]SubscriptionCatalogHealth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SubscriptionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastUpdated.DeepCopyInto(&out.LastUpdated) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. +func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { + if in == nil { + return nil + } + out := new(SubscriptionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { + *out = *in + if in.RegistryPoll != nil { + in, out := &in.RegistryPoll, &out.RegistryPoll + *out = new(RegistryPoll) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. +func (in *UpdateStrategy) DeepCopy() *UpdateStrategy { + if in == nil { + return nil + } + out := new(UpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) { + *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(admissionregistrationv1.FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(admissionregistrationv1.MatchPolicyType) + **out = **in + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(admissionregistrationv1.SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(admissionregistrationv1.ReinvocationPolicyType) + **out = **in + } + if in.WebhookPath != nil { + in, out := &in.WebhookPath, &out.WebhookPath + *out = new(string) + **out = **in + } + if in.ConversionCRDs != nil { + in, out := &in.ConversionCRDs, &out.ConversionCRDs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookDescription. +func (in *WebhookDescription) DeepCopy() *WebhookDescription { + if in == nil { + return nil + } + out := new(WebhookDescription) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore new file mode 100644 index 000000000..1fb13abeb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -0,0 +1,4 @@ +logrus +vendor + +.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 000000000..65dc28503 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. 
+ check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..c1dbd5a3a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: 1.15.x +os: linux +install: + - ./travis/install.sh +script: + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..7567f6128 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,259 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long-standing race condition with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix which leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add a hooks/writer sub-package whose goal is to split output on different streams depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when it is empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. + * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter + +# 1.0.6 + +This new release introduces: + * a new API, WithTime, which makes it easy to force the time of the log entry; + this is mostly useful for logger wrappers + * a fix reverting the immutability of the entry given as parameter to the hooks + * a new configuration field of the json formatter in order to put all the fields + in a nested dictionary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing
io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 000000000..d1d4a85fd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,515 @@ +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitivity problems?** In the past it was possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+ +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```text +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. 
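+ +If a module-based build still pulls in a dependency that imports the old upper-case path, one way to reconcile it is a `replace` directive in `go.mod` (a sketch only; the version shown is illustrative): + +``` +replace github.com/Sirupsen/logrus => github.com/sirupsen/logrus v1.9.3 +```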
+ +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely API-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus, using any of the `printf`-family functions should be +seen as a hint that you should add a field; however, you can still use the +`printf`-family functions with Logrus.
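+ +As a quick illustration (a sketch; the `event`, `topic`, and `key` values are hypothetical), the same message can be logged both ways: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + // Hypothetical values, purely for illustration. + event, topic, key := "signup", "user-events", 42 + + // printf-style: readable, but the values are baked into an opaque string. + log.Warnf("Failed to send event %s to topic %s with key %d", event, topic, key) + + // field-style: the same information, but parseable by log aggregators. + log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, + }).Warn("Failed to send event") +} +```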
+ +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") // will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example, you can send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: the syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks). + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`; it will then only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + +#### Entries + +Besides the fields added with `WithField` or `WithFields`, some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `WithFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment.
+ +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment, you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY, set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation, set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, log line number and latest function name when printing logs; saves logs to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus's message JSON formatter with a human-readable caption added. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`.
`entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +func init() { + // Register the custom formatter. + log.SetFormatter(new(MyJSONFormatter)) +} + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON: %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import ( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.Error("Hello error") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Hello error", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`.
This behavior may be helpful if callers need +to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted. + +``` +... +handler := func() { + // gracefully shut down something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situations when locking is not needed include: + +* You have no hooks registered, or hook calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 000000000..8fd189e1c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers; +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to shut down gracefully. An example use case could be +// closing database connections, or sending an alert that the application is +// closing.
+func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers; +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to shut down gracefully. An example use case could be +// closing database connections, or sending an alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 000000000..df9d65c3a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 000000000..c7787f77c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,43 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +// SetBufferPool allows replacing the default logrus buffer pool +// to better meet the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 000000000..da67aba06 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 000000000..71cdbbc35 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,442 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. 
+func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamically get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() + newEntry.Logger.mu.Unlock() + + if
reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + buffer = bufPool.Get() + defer func() { + newEntry.Buffer = nil + buffer.Reset() + bufPool.Put(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not Panic or Exit, respectively. +// For this behaviour, Entry.Panic or Entry.Fatal should be used instead. +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...)
+} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of +// fmt.Sprintln, where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 000000000..017c30ce6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`.
+// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. 
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
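The helpers above complete the package-level API: every function simply delegates to the shared `std` logger, so callers can log without constructing a Logger of their own. A minimal usage sketch (the import alias and field names are illustrative, not taken from this PR):

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Configure the shared package-level logger once, at startup.
	log.SetLevel(log.DebugLevel)

	// WithFields returns an *Entry; nothing is written until a level
	// method such as Info is called on it.
	log.WithFields(log.Fields{
		"component": "status-report", // illustrative field, not from this PR
		"attempt":   1,
	}).Info("reporting consumer status")
}

diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 000000000..408883773
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return a slice of bytes which is then
+// logged to `logger.Out`.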
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping them. If this code weren't there, doing:
+//
+//	logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//	{"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
+	}
+
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
+	}
+
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 000000000..3f151cdc3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
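A Hook receives every entry logged at one of the levels it declares, which is how logrus integrates with error trackers and metrics systems. A sketch of a custom hook (the counter type is illustrative; nothing in this PR defines it):

package main

import (
	"sync/atomic"

	log "github.com/sirupsen/logrus"
)

// errorCounterHook counts entries logged at error level or above.
// The counter is atomic because hooks may fire from multiple goroutines.
type errorCounterHook struct {
	count int64
}

// Levels tells the logger which levels this hook should fire on.
func (h *errorCounterHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire is invoked once per matching entry; a non-nil error is reported
// by the logger rather than silently dropped.
func (h *errorCounterHook) Fire(*log.Entry) error {
	atomic.AddInt64(&h.count, 1)
	return nil
}

func main() {
	hook := &errorCounterHook{}
	logger := log.New()
	logger.AddHook(hook)
	logger.Error("boom") // increments hook.count
}

diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 000000000..c96dc5636
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,128 @@
+package logrus
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"runtime"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.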
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable json
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	// The format to use is the same as for time.Format or time.Parse from the
+	// standard library.
+	// The standard library already provides a set of predefined formats.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// DisableHTMLEscape allows disabling html escaping in output
+	DisableHTMLEscape bool
+
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//	FieldMap: FieldMap{
+	//		 FieldKeyTime:  "@timestamp",
+	//		 FieldKeyLevel: "@level",
+	//		 FieldKeyMsg:   "@message",
+	//		 FieldKeyFunc:  "@caller",
+	//	},
+	// }
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the json data when ReportCaller is
+	// activated. If any of the returned values is the empty string the
+	// corresponding key will be removed from the json fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+4)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		funcVal := entry.Caller.Function
+		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		}
+		if funcVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+		}
+		if fileVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+		}
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	encoder.SetEscapeHTML(!f.DisableHTMLEscape)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
+		return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
+	}
+
+	return b.Bytes(), nil
+}
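Putting the JSON formatter together: a sketch of configuring it on a fresh logger (the key renames and field values are illustrative, not from this PR):

package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetFormatter(&log.JSONFormatter{
		TimestampFormat: time.RFC3339Nano,
		FieldMap: log.FieldMap{
			log.FieldKeyTime: "@timestamp", // illustrative renames
			log.FieldKeyMsg:  "@message",
		},
	})

	// Emits one JSON object per entry on stderr.
	logger.WithField("storageClient", "example").Info("connected")
}

diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 000000000..5ff0aef6d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,417 @@
+package logrus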
+
+import (
+	"context"
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// LogFunction is used for big messages: it can be more efficient to pass a
+// function and only call it if the log level is actually enabled, rather than
+// generating the log message and then checking if the level is enabled.
+type LogFunction func() []interface{}
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter`, of which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but not when writing to a file. You can easily implement
+	// your own that implements the `Formatter` interface, see the `README` or
+	// included formatters for examples.
+	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
+	// The buffer pool used to format the log. If it is nil, the default global
+	// buffer pool will be used.
+	BufferPool BufferPool
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// New creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//	var log = &logrus.Logger{
+//		Out:       os.Stderr,
+//		Formatter: new(logrus.TextFormatter),
+//		Hooks:     make(logrus.LevelHooks),
+//		Level:     logrus.DebugLevel,
+//	}
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:          os.Stderr,
+		Formatter:    new(TextFormatter),
+		Hooks:        make(LevelHooks),
+		Level:        InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
+	logger.entryPool.Put(entry)
+}
+
+// WithField allocates a new entry and adds a field to it.
+// Debug, Print, Info, Warn, Error, Fatal or Panic must then be applied to
+// this new returned entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logf(level, format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	logger.Logf(FatalLevel, format, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	logger.Logf(PanicLevel, format, args...)
+}
+
+// Log will log a message at the level given as parameter.
+// Warning: using Log at Panic or Fatal level will not panic or exit, respectively.
+// For that behaviour, Logger.Panic or Logger.Fatal should be used instead.
+func (logger *Logger) Log(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Log(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) LogFn(level Level, fn LogFunction) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Log(level, fn()...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+	logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Print(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) TraceFn(fn LogFunction) {
+	logger.LogFn(TraceLevel, fn)
+}
+
+func (logger *Logger) DebugFn(fn LogFunction) {
+	logger.LogFn(DebugLevel, fn)
+}
+
+func (logger *Logger) InfoFn(fn LogFunction) {
+	logger.LogFn(InfoLevel, fn)
+}
+
+func (logger *Logger) PrintFn(fn LogFunction) {
+	entry := logger.newEntry()
+	entry.Print(fn()...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) WarnFn(fn LogFunction) {
+	logger.LogFn(WarnLevel, fn)
+}
+
+func (logger *Logger) WarningFn(fn LogFunction) {
+	logger.WarnFn(fn)
+}
+
+func (logger *Logger) ErrorFn(fn LogFunction) {
+	logger.LogFn(ErrorLevel, fn)
+}
+
+func (logger *Logger) FatalFn(fn LogFunction) {
+	logger.LogFn(FatalLevel, fn)
+	logger.Exit(1)
+}
+
+func (logger *Logger) PanicFn(fn LogFunction) {
+	logger.LogFn(PanicLevel, fn)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logln(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
+// When a file is opened in append mode, it is safe to write to it
+// concurrently (for messages within 4k on Linux). In these cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
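Because the level field is read and written atomically, it can safely be changed at runtime. A common pattern is parsing it from the environment via ParseLevel (defined in logrus.go below); a sketch, where the LOG_LEVEL variable name is an assumption and not something this PR defines:

package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Default to info; override from the environment when set.
	level := log.InfoLevel
	if raw, ok := os.LookupEnv("LOG_LEVEL"); ok { // LOG_LEVEL is illustrative
		parsed, err := log.ParseLevel(raw)
		if err != nil {
			log.WithError(err).Warn("invalid LOG_LEVEL, keeping info")
		} else {
			level = parsed
		}
	}
	log.SetLevel(level)
	log.Debug("only printed when LOG_LEVEL is debug or trace")
}

+
+// SetOutput sets the logger output.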
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
+
+// SetBufferPool sets the logger buffer pool.
+func (logger *Logger) SetBufferPool(pool BufferPool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.BufferPool = pool
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 000000000..2f16224cb
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	if b, err := level.MarshalText(); err == nil {
+		return string(b)
+	} else {
+		return "unknown"
+	}
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = l
+
+	return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+	switch level {
+	case TraceLevel:
+		return []byte("trace"), nil
+	case DebugLevel:
+		return []byte("debug"), nil
+	case InfoLevel:
+		return []byte("info"), nil
+	case WarnLevel:
+		return []byte("warning"), nil
+	case ErrorLevel:
+		return []byte("error"), nil
+	case FatalLevel:
+		return []byte("fatal"), nil
+	case PanicLevel:
+		return []byte("panic"), nil
+	}
+
+	return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
+// AllLevels exposes all logging levels.
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+	TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
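Accepting the FieldLogger interface keeps library code agnostic about whether it is handed a whole logger or a pre-scoped entry, since both satisfy it. A sketch (the reportStatus function is illustrative, not from this PR):

package main

import (
	log "github.com/sirupsen/logrus"
)

// reportStatus is illustrative: by accepting the FieldLogger interface it
// works with a *log.Logger and a pre-scoped *log.Entry alike.
func reportStatus(l log.FieldLogger, ok bool) {
	if ok {
		l.Info("status reported")
		return
	}
	l.Warn("status report failed")
}

func main() {
	logger := log.New()
	reportStatus(logger, true)                          // *Logger satisfies FieldLogger
	reportStatus(logger.WithField("attempt", 2), false) // so does *Entry
}

+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistency. Do not use. Use Logger or Entry instead.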
+type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 000000000..2403de981 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 000000000..499789984 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..ebdae3ec6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 000000000..97af92c68 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 000000000..3293fb3ca --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 000000000..f6710b3bd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+	return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 000000000..04748b851
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix zos
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 000000000..2879eb50e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,27 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/windows"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		handle := windows.Handle(v.Fd())
+		var mode uint32
+		if err := windows.GetConsoleMode(handle, &mode); err != nil {
+			return false
+		}
+		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+		if err := windows.SetConsoleMode(handle, mode); err != nil {
+			return false
+		}
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 000000000..be2c6efe5
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,339 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+const (
+	red    = 31
+	yellow = 33
+	blue   = 36
+	gray   = 37
+)
+
+var baseTimestamp time.Time
+
+func init() {
+	baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Force quoting of all values
+	ForceQuote bool
+
+	// DisableQuote disables quoting for all values.
+	// DisableQuote has a lower priority than ForceQuote;
+	// if both are set to true, quoting is forced on all values.
+	DisableQuote bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed.
+	// The format to use is the same as for time.Format or time.Parse from the
+	// standard library.
+	// The standard library already provides a set of predefined formats.
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+
+	// The keys sorting function; when uninitialized, sort.Strings is used.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool
+
+	// PadLevelText adds padding to the level text so that all levels output at
+	// the same length. PadLevelText is a superset of the DisableLevelTruncation
+	// option.
+	PadLevelText bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's out is to a terminal
+	isTerminal bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//	FieldMap: FieldMap{
+	//		FieldKeyTime:  "@timestamp",
+	//		FieldKeyLevel: "@level",
+	//		FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the data when ReportCaller is
+	// activated. If any of the returned values is the empty string the
+	// corresponding key will be removed from fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	terminalInitOnce sync.Once
+
+	// The max length of the level text, generated dynamically on init
+	levelTextMaxLength int
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+	}
+	// Get the max length of the level text
+	for _, level := range AllLevels {
+		levelTextLength := utf8.RuneCount([]byte(level.String()))
+		if levelTextLength > f.levelTextMaxLength {
+			f.levelTextMaxLength = levelTextLength
+		}
+	}
+}
+
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+	if f.EnvironmentOverrideColors {
+		switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
+		case ok && force != "0":
+			isColored = true
+		case ok && force == "0", os.Getenv("CLICOLOR") == "0":
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields)
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+	keys := make([]string, 0, len(data))
+	for k := range data {
+		keys = append(keys, k)
+	}
+
+	var funcVal, fileVal string
+
+	fixedKeys := make([]string, 0, 4+len(data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		} else {
+			funcVal = entry.Caller.Function
+			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		}
+
+		if funcVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+		}
+		if fileVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+		}
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
+}
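For completeness, a sketch of configuring the text formatter with the options defined above (the particular combination of options is illustrative):

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetFormatter(&log.TextFormatter{
		FullTimestamp:    true, // print the full timestamp, not seconds since start
		PadLevelText:     true, // align the level column across entries
		QuoteEmptyFields: true,
	})

	logger.WithField("mode", "text").Info("formatted")
}

diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 000000000..074fd4b8b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,102 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+	"strings"
+)
+
+// Writer returns an io.Writer at INFO level. See WriterLevel for details.
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.Writer that can be used to write arbitrary text to
+// the logger at the given log level. Each line written to the writer will be
+// printed in the usual way using formatters and hooks. The writer is part of an
+// io.Pipe and it is the caller's responsibility to close the writer when done.
+// This can be used to override the standard library logger easily.
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+// Writer returns an io.Writer that writes to the logger at the info log level
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.Writer that writes to the logger at the given log level
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+
+	// Determine which log function to use based on the specified log level
+	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
+	case DebugLevel:
+		printFunc = entry.Debug
+	case InfoLevel:
+		printFunc = entry.Info
+	case WarnLevel:
+		printFunc = entry.Warn
+	case ErrorLevel:
+		printFunc = entry.Error
+	case FatalLevel:
+		printFunc = entry.Fatal
+	case PanicLevel:
+		printFunc = entry.Panic
+	default:
+		printFunc = entry.Print
+	}
+
+	// Start a new goroutine to scan the input and write it to the logger using the specified print function.
+	// It splits the input into chunks of up to 64KB to avoid buffer overflows.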
+	go entry.writerScanner(reader, printFunc)
+
+	// Set a finalizer function to close the writer when it is garbage collected
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+// writerScanner scans the input from the reader and writes it to the logger
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+
+	// Set the buffer size to the maximum token size to avoid buffer overflows
+	scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
+
+	// Define a split function to split the input into chunks of up to 64KB
+	chunkSize := bufio.MaxScanTokenSize // 64KB
+	splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
+		if len(data) >= chunkSize {
+			return chunkSize, data[:chunkSize], nil
+		}
+
+		return bufio.ScanLines(data, atEOF)
+	}
+
+	// Use the custom split function to split the input
+	scanner.Split(splitFunc)
+
+	// Scan the input and write it to the logger using the specified print function
+	for scanner.Scan() {
+		printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
+	}
+
+	// If there was an error while scanning the input, log an error
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+
+	// Close the reader when we are done
+	reader.Close()
+}
+
+// writerFinalizer is a finalizer function that closes the given writer when it
+// is garbage collected
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
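A common use of WriterLevel is routing the standard library's log output through logrus; a sketch (the aliases and message are illustrative):

package main

import (
	stdlog "log"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Route everything the stdlib logger prints through logrus at WARN level.
	w := logger.WriterLevel(log.WarnLevel)
	defer w.Close() // the caller owns the pipe writer

	stdlog.SetOutput(w)
	stdlog.SetFlags(0) // logrus adds its own timestamp
	stdlog.Println("stdlib message, emitted as a logrus warning")
}

diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9ff8bc384..f30871b04 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,6 +1,9 @@
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
+# github.com/blang/semver/v4 v4.0.0
+## explicit; go 1.14
+github.com/blang/semver/v4
 # github.com/cespare/xxhash/v2 v2.2.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
@@ -144,6 +147,11 @@ github.com/onsi/gomega/types
 github.com/openshift/api/config/v1
 github.com/openshift/api/console/v1alpha1
 github.com/openshift/api/security/v1
+# github.com/operator-framework/api v0.17.7-0.20230626210316-aa3e49803e7b
+## explicit; go 1.19
+github.com/operator-framework/api/pkg/lib/version
+github.com/operator-framework/api/pkg/operators
+github.com/operator-framework/api/pkg/operators/v1alpha1
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
@@ -178,6 +186,9 @@ github.com/prometheus/procfs/internal/util
 github.com/red-hat-storage/ocs-operator/v4/services/provider/client
 github.com/red-hat-storage/ocs-operator/v4/services/provider/consumerstatus
 github.com/red-hat-storage/ocs-operator/v4/services/provider/pb
+# github.com/sirupsen/logrus v1.9.3
+## explicit; go 1.13
+github.com/sirupsen/logrus
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag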